Diffstat:

-rw-r--r--  deps/v8/.git-blame-ignore-revs | 22
-rw-r--r--  deps/v8/.gitignore | 5
-rw-r--r--  deps/v8/AUTHORS | 5
-rw-r--r--  deps/v8/BUILD.gn | 100
-rw-r--r--  deps/v8/ChangeLog | 1595
-rw-r--r--  deps/v8/DEPS | 252
-rw-r--r--  deps/v8/PRESUBMIT.py | 72
-rw-r--r--  deps/v8/base/trace_event/common/trace_event_common.h | 13
-rw-r--r--  deps/v8/gni/isolate.gni | 2
-rw-r--r--  deps/v8/gypfiles/features.gypi | 4
-rw-r--r--  deps/v8/gypfiles/gyp_environment.py | 3
-rwxr-xr-x  deps/v8/gypfiles/gyp_v8 | 12
-rw-r--r--  deps/v8/gypfiles/vs_toolchain.py | 371
-rw-r--r--  deps/v8/gypfiles/win/msvs_dependencies.isolate | 8
-rw-r--r--  deps/v8/include/v8-platform.h | 72
-rw-r--r--  deps/v8/include/v8-version.h | 4
-rw-r--r--  deps/v8/include/v8.h | 61
-rw-r--r--  deps/v8/infra/mb/mb_config.pyl | 2
-rw-r--r--  deps/v8/samples/hello-world.cc | 42
-rw-r--r--  deps/v8/samples/process.cc | 26
-rw-r--r--  deps/v8/src/OWNERS | 2
-rw-r--r--  deps/v8/src/accessors.cc | 152
-rw-r--r--  deps/v8/src/allocation.cc | 205
-rw-r--r--  deps/v8/src/allocation.h | 94
-rw-r--r--  deps/v8/src/api-arguments.h | 22
-rw-r--r--  deps/v8/src/api-natives.cc | 7
-rw-r--r--  deps/v8/src/api.cc | 324
-rw-r--r--  deps/v8/src/api.h | 76
-rw-r--r--  deps/v8/src/arguments.h | 2
-rw-r--r--  deps/v8/src/arm/assembler-arm-inl.h | 23
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc | 277
-rw-r--r--  deps/v8/src/arm/assembler-arm.h | 440
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc | 120
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.h | 51
-rw-r--r--  deps/v8/src/arm/deoptimizer-arm.cc | 13
-rw-r--r--  deps/v8/src/arm/eh-frame-arm.cc | 8
-rw-r--r--  deps/v8/src/arm/frame-constants-arm.cc | 4
-rw-r--r--  deps/v8/src/arm/interface-descriptors-arm.cc | 32
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc | 584
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h | 189
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc | 34
-rw-r--r--  deps/v8/src/arm64/assembler-arm64-inl.h | 163
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.cc | 4
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.h | 326
-rw-r--r--  deps/v8/src/arm64/code-stubs-arm64.cc | 298
-rw-r--r--  deps/v8/src/arm64/code-stubs-arm64.h | 68
-rw-r--r--  deps/v8/src/arm64/deoptimizer-arm64.cc | 5
-rw-r--r--  deps/v8/src/arm64/eh-frame-arm64.cc | 10
-rw-r--r--  deps/v8/src/arm64/frame-constants-arm64.cc | 5
-rw-r--r--  deps/v8/src/arm64/frame-constants-arm64.h | 5
-rw-r--r--  deps/v8/src/arm64/instructions-arm64-constants.cc | 46
-rw-r--r--  deps/v8/src/arm64/instructions-arm64.cc | 3
-rw-r--r--  deps/v8/src/arm64/instructions-arm64.h | 44
-rw-r--r--  deps/v8/src/arm64/interface-descriptors-arm64.cc | 43
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64-inl.h | 17
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.cc | 572
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.h | 239
-rw-r--r--  deps/v8/src/arm64/simulator-arm64.cc | 45
-rw-r--r--  deps/v8/src/arm64/utils-arm64.cc | 1
-rw-r--r--  deps/v8/src/asmjs/asm-js.cc | 26
-rw-r--r--  deps/v8/src/asmjs/asm-parser.cc | 71
-rw-r--r--  deps/v8/src/asmjs/asm-parser.h | 1
-rw-r--r--  deps/v8/src/asmjs/asm-scanner.cc | 1
-rw-r--r--  deps/v8/src/asmjs/switch-logic.cc | 2
-rw-r--r--  deps/v8/src/assembler.cc | 72
-rw-r--r--  deps/v8/src/assembler.h | 95
-rw-r--r--  deps/v8/src/ast/ast-expression-rewriter.cc | 20
-rw-r--r--  deps/v8/src/ast/ast-numbering.cc | 33
-rw-r--r--  deps/v8/src/ast/ast-source-ranges.h | 4
-rw-r--r--  deps/v8/src/ast/ast-traversal-visitor.h | 11
-rw-r--r--  deps/v8/src/ast/ast-value-factory.cc | 49
-rw-r--r--  deps/v8/src/ast/ast-value-factory.h | 58
-rw-r--r--  deps/v8/src/ast/ast.cc | 131
-rw-r--r--  deps/v8/src/ast/ast.h | 391
-rw-r--r--  deps/v8/src/ast/context-slot-cache.cc | 6
-rw-r--r--  deps/v8/src/ast/prettyprinter.cc | 41
-rw-r--r--  deps/v8/src/ast/scopes.cc | 42
-rw-r--r--  deps/v8/src/ast/scopes.h | 10
-rw-r--r--  deps/v8/src/background-parsing-task.cc | 1
-rw-r--r--  deps/v8/src/bailout-reason.h | 5
-rw-r--r--  deps/v8/src/base/bits.h | 1
-rw-r--r--  deps/v8/src/base/build_config.h | 4
-rw-r--r--  deps/v8/src/base/logging.cc | 16
-rw-r--r--  deps/v8/src/base/logging.h | 101
-rw-r--r--  deps/v8/src/base/platform/platform-aix.cc | 191
-rw-r--r--  deps/v8/src/base/platform/platform-cygwin.cc | 215
-rw-r--r--  deps/v8/src/base/platform/platform-freebsd.cc | 212
-rw-r--r--  deps/v8/src/base/platform/platform-fuchsia.cc | 142
-rw-r--r--  deps/v8/src/base/platform/platform-linux.cc | 226
-rw-r--r--  deps/v8/src/base/platform/platform-macos.cc | 156
-rw-r--r--  deps/v8/src/base/platform/platform-openbsd.cc | 227
-rw-r--r--  deps/v8/src/base/platform/platform-posix.cc | 91
-rw-r--r--  deps/v8/src/base/platform/platform-qnx.cc | 219
-rw-r--r--  deps/v8/src/base/platform/platform-solaris.cc | 133
-rw-r--r--  deps/v8/src/base/platform/platform-win32.cc | 229
-rw-r--r--  deps/v8/src/base/platform/platform.h | 165
-rw-r--r--  deps/v8/src/base/platform/time.cc | 2
-rw-r--r--  deps/v8/src/base/template-utils.h | 15
-rw-r--r--  deps/v8/src/base/tsan.h | 47
-rw-r--r--  deps/v8/src/bootstrapper.cc | 488
-rw-r--r--  deps/v8/src/bootstrapper.h | 34
-rw-r--r--  deps/v8/src/builtins/arm/builtins-arm.cc | 434
-rw-r--r--  deps/v8/src/builtins/arm64/builtins-arm64.cc | 571
-rw-r--r--  deps/v8/src/builtins/builtins-arguments-gen.cc | 6
-rw-r--r--  deps/v8/src/builtins/builtins-array-gen.cc | 65
-rw-r--r--  deps/v8/src/builtins/builtins-async-function-gen.cc | 8
-rw-r--r--  deps/v8/src/builtins/builtins-async-gen.cc | 5
-rw-r--r--  deps/v8/src/builtins/builtins-async-generator-gen.cc | 2
-rw-r--r--  deps/v8/src/builtins/builtins-bigint.cc | 164
-rw-r--r--  deps/v8/src/builtins/builtins-call-gen.cc | 31
-rw-r--r--  deps/v8/src/builtins/builtins-callsite.cc | 1
-rw-r--r--  deps/v8/src/builtins/builtins-collections-gen.cc | 109
-rw-r--r--  deps/v8/src/builtins/builtins-collections.cc | 4
-rw-r--r--  deps/v8/src/builtins/builtins-console.cc | 4
-rw-r--r--  deps/v8/src/builtins/builtins-constructor-gen.cc | 325
-rw-r--r--  deps/v8/src/builtins/builtins-constructor-gen.h | 21
-rw-r--r--  deps/v8/src/builtins/builtins-constructor.h | 2
-rw-r--r--  deps/v8/src/builtins/builtins-conversion-gen.cc | 10
-rw-r--r--  deps/v8/src/builtins/builtins-definitions.h | 197
-rw-r--r--  deps/v8/src/builtins/builtins-descriptors.h | 3
-rw-r--r--  deps/v8/src/builtins/builtins-forin-gen.cc | 209
-rw-r--r--  deps/v8/src/builtins/builtins-forin-gen.h | 38
-rw-r--r--  deps/v8/src/builtins/builtins-function-gen.cc | 198
-rw-r--r--  deps/v8/src/builtins/builtins-handler-gen.cc | 21
-rw-r--r--  deps/v8/src/builtins/builtins-ic-gen.cc | 12
-rw-r--r--  deps/v8/src/builtins/builtins-internal-gen.cc | 217
-rw-r--r--  deps/v8/src/builtins/builtins-intl-gen.cc | 1
-rw-r--r--  deps/v8/src/builtins/builtins-intl.cc | 2
-rw-r--r--  deps/v8/src/builtins/builtins-iterator-gen.cc | 2
-rw-r--r--  deps/v8/src/builtins/builtins-math-gen.cc | 1
-rw-r--r--  deps/v8/src/builtins/builtins-number-gen.cc | 71
-rw-r--r--  deps/v8/src/builtins/builtins-object-gen.cc | 222
-rw-r--r--  deps/v8/src/builtins/builtins-object.cc | 32
-rw-r--r--  deps/v8/src/builtins/builtins-promise-gen.cc | 312
-rw-r--r--  deps/v8/src/builtins/builtins-promise-gen.h | 3
-rw-r--r--  deps/v8/src/builtins/builtins-proxy-gen.cc | 264
-rw-r--r--  deps/v8/src/builtins/builtins-proxy-gen.h | 20
-rw-r--r--  deps/v8/src/builtins/builtins-proxy-helpers-gen.cc | 160
-rw-r--r--  deps/v8/src/builtins/builtins-proxy-helpers-gen.h | 38
-rw-r--r--  deps/v8/src/builtins/builtins-regexp-gen.cc | 17
-rw-r--r--  deps/v8/src/builtins/builtins-string-gen.cc | 433
-rw-r--r--  deps/v8/src/builtins/builtins-string-gen.h | 23
-rw-r--r--  deps/v8/src/builtins/builtins-string.cc | 21
-rw-r--r--  deps/v8/src/builtins/builtins-typedarray-gen.cc | 404
-rw-r--r--  deps/v8/src/builtins/builtins.cc | 84
-rw-r--r--  deps/v8/src/builtins/builtins.h | 29
-rw-r--r--  deps/v8/src/builtins/ia32/builtins-ia32.cc | 455
-rw-r--r--  deps/v8/src/builtins/mips/builtins-mips.cc | 438
-rw-r--r--  deps/v8/src/builtins/mips64/builtins-mips64.cc | 445
-rw-r--r--  deps/v8/src/builtins/ppc/builtins-ppc.cc | 450
-rw-r--r--  deps/v8/src/builtins/s390/builtins-s390.cc | 445
-rw-r--r--  deps/v8/src/builtins/setup-builtins-internal.cc | 87
-rw-r--r--  deps/v8/src/builtins/x64/builtins-x64.cc | 448
-rw-r--r--  deps/v8/src/char-predicates.h | 2
-rw-r--r--  deps/v8/src/code-events.h | 31
-rw-r--r--  deps/v8/src/code-factory.cc | 76
-rw-r--r--  deps/v8/src/code-factory.h | 12
-rw-r--r--  deps/v8/src/code-stub-assembler.cc | 759
-rw-r--r--  deps/v8/src/code-stub-assembler.h | 54
-rw-r--r--  deps/v8/src/code-stubs.cc | 88
-rw-r--r--  deps/v8/src/code-stubs.h | 92
-rw-r--r--  deps/v8/src/codegen.cc | 56
-rw-r--r--  deps/v8/src/codegen.h | 8
-rw-r--r--  deps/v8/src/collector.h | 23
-rw-r--r--  deps/v8/src/compilation-cache.cc | 11
-rw-r--r--  deps/v8/src/compilation-cache.h | 8
-rw-r--r--  deps/v8/src/compilation-info.cc | 43
-rw-r--r--  deps/v8/src/compilation-info.h | 38
-rw-r--r--  deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc | 6
-rw-r--r--  deps/v8/src/compiler.cc | 102
-rw-r--r--  deps/v8/src/compiler.h | 11
-rw-r--r--  deps/v8/src/compiler/access-builder.cc | 56
-rw-r--r--  deps/v8/src/compiler/access-builder.h | 20
-rw-r--r--  deps/v8/src/compiler/access-info.cc | 36
-rw-r--r--  deps/v8/src/compiler/access-info.h | 7
-rw-r--r--  deps/v8/src/compiler/arm/code-generator-arm.cc | 268
-rw-r--r--  deps/v8/src/compiler/arm/instruction-selector-arm.cc | 24
-rw-r--r--  deps/v8/src/compiler/arm64/code-generator-arm64.cc | 169
-rw-r--r--  deps/v8/src/compiler/arm64/instruction-selector-arm64.cc | 2
-rw-r--r--  deps/v8/src/compiler/branch-elimination.cc | 2
-rw-r--r--  deps/v8/src/compiler/bytecode-analysis.cc | 17
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.cc | 647
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.h | 72
-rw-r--r--  deps/v8/src/compiler/c-linkage.cc | 2
-rw-r--r--  deps/v8/src/compiler/code-assembler.cc | 396
-rw-r--r--  deps/v8/src/compiler/code-assembler.h | 97
-rw-r--r--  deps/v8/src/compiler/code-generator.cc | 27
-rw-r--r--  deps/v8/src/compiler/code-generator.h | 29
-rw-r--r--  deps/v8/src/compiler/common-operator.cc | 32
-rw-r--r--  deps/v8/src/compiler/common-operator.h | 30
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.cc | 687
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.h | 19
-rw-r--r--  deps/v8/src/compiler/escape-analysis-reducer.cc | 549
-rw-r--r--  deps/v8/src/compiler/escape-analysis-reducer.h | 107
-rw-r--r--  deps/v8/src/compiler/escape-analysis.cc | 2313
-rw-r--r--  deps/v8/src/compiler/escape-analysis.h | 239
-rw-r--r--  deps/v8/src/compiler/frame.h | 8
-rw-r--r--  deps/v8/src/compiler/graph-assembler.cc | 1
-rw-r--r--  deps/v8/src/compiler/graph-assembler.h | 133
-rw-r--r--  deps/v8/src/compiler/graph-reducer.cc | 4
-rw-r--r--  deps/v8/src/compiler/graph-visualizer.cc | 98
-rw-r--r--  deps/v8/src/compiler/graph-visualizer.h | 5
-rw-r--r--  deps/v8/src/compiler/ia32/code-generator-ia32.cc | 154
-rw-r--r--  deps/v8/src/compiler/ia32/instruction-selector-ia32.cc | 2
-rw-r--r--  deps/v8/src/compiler/instruction-scheduler.cc | 8
-rw-r--r--  deps/v8/src/compiler/instruction-scheduler.h | 2
-rw-r--r--  deps/v8/src/compiler/instruction-selector.cc | 29
-rw-r--r--  deps/v8/src/compiler/instruction.cc | 4
-rw-r--r--  deps/v8/src/compiler/instruction.h | 52
-rw-r--r--  deps/v8/src/compiler/int64-lowering.cc | 64
-rw-r--r--  deps/v8/src/compiler/js-builtin-reducer.cc | 283
-rw-r--r--  deps/v8/src/compiler/js-builtin-reducer.h | 5
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.cc | 418
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.h | 1
-rw-r--r--  deps/v8/src/compiler/js-context-specialization.cc | 3
-rw-r--r--  deps/v8/src/compiler/js-create-lowering.cc | 518
-rw-r--r--  deps/v8/src/compiler/js-create-lowering.h | 17
-rw-r--r--  deps/v8/src/compiler/js-generic-lowering.cc | 62
-rw-r--r--  deps/v8/src/compiler/js-graph.cc | 2
-rw-r--r--  deps/v8/src/compiler/js-inlining-heuristic.cc | 424
-rw-r--r--  deps/v8/src/compiler/js-inlining-heuristic.h | 13
-rw-r--r--  deps/v8/src/compiler/js-inlining.cc | 7
-rw-r--r--  deps/v8/src/compiler/js-inlining.h | 1
-rw-r--r--  deps/v8/src/compiler/js-intrinsic-lowering.cc | 8
-rw-r--r--  deps/v8/src/compiler/js-intrinsic-lowering.h | 1
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.cc | 383
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.h | 35
-rw-r--r--  deps/v8/src/compiler/js-operator.cc | 131
-rw-r--r--  deps/v8/src/compiler/js-operator.h | 47
-rw-r--r--  deps/v8/src/compiler/js-type-hint-lowering.cc | 132
-rw-r--r--  deps/v8/src/compiler/js-type-hint-lowering.h | 129
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.cc | 308
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.h | 1
-rw-r--r--  deps/v8/src/compiler/jump-threading.cc | 3
-rw-r--r--  deps/v8/src/compiler/linkage.cc | 1
-rw-r--r--  deps/v8/src/compiler/linkage.h | 44
-rw-r--r--  deps/v8/src/compiler/load-elimination.cc | 170
-rw-r--r--  deps/v8/src/compiler/load-elimination.h | 17
-rw-r--r--  deps/v8/src/compiler/loop-variable-optimizer.cc | 5
-rw-r--r--  deps/v8/src/compiler/machine-graph-verifier.cc | 6
-rw-r--r--  deps/v8/src/compiler/machine-operator-reducer.cc | 2
-rw-r--r--  deps/v8/src/compiler/machine-operator.cc | 4
-rw-r--r--  deps/v8/src/compiler/mips/code-generator-mips.cc | 289
-rw-r--r--  deps/v8/src/compiler/mips/instruction-selector-mips.cc | 86
-rw-r--r--  deps/v8/src/compiler/mips64/code-generator-mips64.cc | 261
-rw-r--r--  deps/v8/src/compiler/mips64/instruction-selector-mips64.cc | 91
-rw-r--r--  deps/v8/src/compiler/move-optimizer.cc | 2
-rw-r--r--  deps/v8/src/compiler/new-escape-analysis-reducer.cc | 411
-rw-r--r--  deps/v8/src/compiler/new-escape-analysis-reducer.h | 122
-rw-r--r--  deps/v8/src/compiler/new-escape-analysis.cc | 739
-rw-r--r--  deps/v8/src/compiler/new-escape-analysis.h | 181
-rw-r--r--  deps/v8/src/compiler/node-properties.cc | 15
-rw-r--r--  deps/v8/src/compiler/node-properties.h | 5
-rw-r--r--  deps/v8/src/compiler/node.cc | 24
-rw-r--r--  deps/v8/src/compiler/node.h | 2
-rw-r--r--  deps/v8/src/compiler/opcodes.h | 120
-rw-r--r--  deps/v8/src/compiler/operation-typer.cc | 38
-rw-r--r--  deps/v8/src/compiler/operator-properties.cc | 2
-rw-r--r--  deps/v8/src/compiler/osr.cc | 3
-rw-r--r--  deps/v8/src/compiler/pipeline.cc | 107
-rw-r--r--  deps/v8/src/compiler/pipeline.h | 2
-rw-r--r--  deps/v8/src/compiler/ppc/code-generator-ppc.cc | 301
-rw-r--r--  deps/v8/src/compiler/property-access-builder.cc | 43
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.cc | 62
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.h | 36
-rw-r--r--  deps/v8/src/compiler/redundancy-elimination.cc | 6
-rw-r--r--  deps/v8/src/compiler/register-allocator-verifier.cc | 6
-rw-r--r--  deps/v8/src/compiler/register-allocator.cc | 26
-rw-r--r--  deps/v8/src/compiler/register-allocator.h | 8
-rw-r--r--  deps/v8/src/compiler/representation-change.cc | 12
-rw-r--r--  deps/v8/src/compiler/representation-change.h | 2
-rw-r--r--  deps/v8/src/compiler/s390/code-generator-s390.cc | 203
-rw-r--r--  deps/v8/src/compiler/s390/instruction-selector-s390.cc | 2
-rw-r--r--  deps/v8/src/compiler/schedule.cc | 6
-rw-r--r--  deps/v8/src/compiler/scheduler.cc | 156
-rw-r--r--  deps/v8/src/compiler/scheduler.h | 9
-rw-r--r--  deps/v8/src/compiler/simd-scalar-lowering.cc | 34
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.cc | 206
-rw-r--r--  deps/v8/src/compiler/simplified-operator.cc | 166
-rw-r--r--  deps/v8/src/compiler/simplified-operator.h | 43
-rw-r--r--  deps/v8/src/compiler/state-values-utils.cc | 22
-rw-r--r--  deps/v8/src/compiler/store-store-elimination.cc | 6
-rw-r--r--  deps/v8/src/compiler/typer.cc | 78
-rw-r--r--  deps/v8/src/compiler/types.cc | 9
-rw-r--r--  deps/v8/src/compiler/value-numbering-reducer.cc | 4
-rw-r--r--  deps/v8/src/compiler/verifier.cc | 77
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.cc | 1142
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.h | 144
-rw-r--r--  deps/v8/src/compiler/wasm-linkage.cc | 28
-rw-r--r--  deps/v8/src/compiler/x64/code-generator-x64.cc | 297
-rw-r--r--  deps/v8/src/compiler/x64/instruction-scheduler-x64.cc | 4
-rw-r--r--  deps/v8/src/compiler/x64/instruction-selector-x64.cc | 10
-rw-r--r--  deps/v8/src/compiler/x64/unwinding-info-writer-x64.cc | 6
-rw-r--r--  deps/v8/src/compiler/zone-stats.cc | 2
-rw-r--r--  deps/v8/src/contexts-inl.h | 1
-rw-r--r--  deps/v8/src/contexts.cc | 71
-rw-r--r--  deps/v8/src/contexts.h | 19
-rw-r--r--  deps/v8/src/conversions-inl.h | 605
-rw-r--r--  deps/v8/src/conversions.cc | 808
-rw-r--r--  deps/v8/src/conversions.h | 33
-rw-r--r--  deps/v8/src/counters.cc | 24
-rw-r--r--  deps/v8/src/counters.h | 250
-rw-r--r--  deps/v8/src/d8-console.cc | 17
-rw-r--r--  deps/v8/src/d8.cc | 133
-rw-r--r--  deps/v8/src/d8.h | 2
-rw-r--r--  deps/v8/src/d8.js | 6
-rw-r--r--  deps/v8/src/date.cc | 4
-rw-r--r--  deps/v8/src/debug/debug-coverage.cc | 31
-rw-r--r--  deps/v8/src/debug/debug-coverage.h | 8
-rw-r--r--  deps/v8/src/debug/debug-evaluate.cc | 86
-rw-r--r--  deps/v8/src/debug/debug-evaluate.h | 9
-rw-r--r--  deps/v8/src/debug/debug-frames.cc | 37
-rw-r--r--  deps/v8/src/debug/debug-frames.h | 6
-rw-r--r--  deps/v8/src/debug/debug-interface.h | 97
-rw-r--r--  deps/v8/src/debug/debug-scope-iterator.cc | 22
-rw-r--r--  deps/v8/src/debug/debug-scopes.cc | 25
-rw-r--r--  deps/v8/src/debug/debug-scopes.h | 4
-rw-r--r--  deps/v8/src/debug/debug-stack-trace-iterator.cc | 1
-rw-r--r--  deps/v8/src/debug/debug-type-profile.cc | 102
-rw-r--r--  deps/v8/src/debug/debug-type-profile.h | 45
-rw-r--r--  deps/v8/src/debug/debug.cc | 99
-rw-r--r--  deps/v8/src/debug/debug.h | 7
-rw-r--r--  deps/v8/src/debug/liveedit.cc | 52
-rw-r--r--  deps/v8/src/deoptimize-reason.h | 11
-rw-r--r--  deps/v8/src/deoptimizer.cc | 599
-rw-r--r--  deps/v8/src/deoptimizer.h | 79
-rw-r--r--  deps/v8/src/detachable-vector.h | 73
-rw-r--r--  deps/v8/src/disassembler.cc | 28
-rw-r--r--  deps/v8/src/dtoa.cc | 1
-rw-r--r--  deps/v8/src/elements.cc | 192
-rw-r--r--  deps/v8/src/elements.h | 23
-rw-r--r--  deps/v8/src/execution.cc | 42
-rw-r--r--  deps/v8/src/external-reference-table.cc | 24
-rw-r--r--  deps/v8/src/external-reference-table.h | 12
-rw-r--r--  deps/v8/src/factory-inl.h | 137
-rw-r--r--  deps/v8/src/factory.cc | 129
-rw-r--r--  deps/v8/src/factory.h | 156
-rw-r--r--  deps/v8/src/feedback-vector-inl.h | 94
-rw-r--r--  deps/v8/src/feedback-vector.cc | 175
-rw-r--r--  deps/v8/src/feedback-vector.h | 72
-rw-r--r--  deps/v8/src/ffi/OWNERS | 4
-rw-r--r--  deps/v8/src/ffi/ffi-compiler.cc | 125
-rw-r--r--  deps/v8/src/ffi/ffi-compiler.h | 37
-rw-r--r--  deps/v8/src/find-and-replace-pattern.h | 37
-rw-r--r--  deps/v8/src/fixed-dtoa.cc | 7
-rw-r--r--  deps/v8/src/flag-definitions.h | 103
-rw-r--r--  deps/v8/src/flags.cc | 13
-rw-r--r--  deps/v8/src/flags.h | 4
-rw-r--r--  deps/v8/src/frame-constants.h | 12
-rw-r--r--  deps/v8/src/frames-inl.h | 28
-rw-r--r--  deps/v8/src/frames.cc | 117
-rw-r--r--  deps/v8/src/frames.h | 73
-rw-r--r--  deps/v8/src/futex-emulation.cc | 1
-rw-r--r--  deps/v8/src/gdb-jit.cc | 38
-rw-r--r--  deps/v8/src/global-handles.cc | 148
-rw-r--r--  deps/v8/src/global-handles.h | 25
-rw-r--r--  deps/v8/src/globals.h | 29
-rw-r--r--  deps/v8/src/handles-inl.h | 7
-rw-r--r--  deps/v8/src/handles.cc | 20
-rw-r--r--  deps/v8/src/handles.h | 2
-rw-r--r--  deps/v8/src/heap-symbols.h | 6
-rw-r--r--  deps/v8/src/heap/barrier.h | 77
-rw-r--r--  deps/v8/src/heap/concurrent-marking.cc | 171
-rw-r--r--  deps/v8/src/heap/concurrent-marking.h | 19
-rw-r--r--  deps/v8/src/heap/gc-idle-time-handler.cc | 2
-rw-r--r--  deps/v8/src/heap/gc-tracer.cc | 8
-rw-r--r--  deps/v8/src/heap/heap-inl.h | 117
-rw-r--r--  deps/v8/src/heap/heap.cc | 903
-rw-r--r--  deps/v8/src/heap/heap.h | 64
-rw-r--r--  deps/v8/src/heap/incremental-marking.cc | 103
-rw-r--r--  deps/v8/src/heap/incremental-marking.h | 117
-rw-r--r--  deps/v8/src/heap/mark-compact-inl.h | 19
-rw-r--r--  deps/v8/src/heap/mark-compact.cc | 271
-rw-r--r--  deps/v8/src/heap/mark-compact.h | 85
-rw-r--r--  deps/v8/src/heap/marking.h | 7
-rw-r--r--  deps/v8/src/heap/memory-reducer.cc | 2
-rw-r--r--  deps/v8/src/heap/object-stats.cc | 23
-rw-r--r--  deps/v8/src/heap/objects-visiting-inl.h | 24
-rw-r--r--  deps/v8/src/heap/objects-visiting.cc | 28
-rw-r--r--  deps/v8/src/heap/objects-visiting.h | 3
-rw-r--r--  deps/v8/src/heap/scavenger-inl.h | 6
-rw-r--r--  deps/v8/src/heap/scavenger.cc | 23
-rw-r--r--  deps/v8/src/heap/scavenger.h | 56
-rw-r--r--  deps/v8/src/heap/sequential-marking-deque.cc | 100
-rw-r--r--  deps/v8/src/heap/sequential-marking-deque.h | 156
-rw-r--r--  deps/v8/src/heap/setup-heap-internal.cc | 629
-rw-r--r--  deps/v8/src/heap/spaces-inl.h | 122
-rw-r--r--  deps/v8/src/heap/spaces.cc | 298
-rw-r--r--  deps/v8/src/heap/spaces.h | 150
-rw-r--r--  deps/v8/src/heap/store-buffer.cc | 4
-rw-r--r--  deps/v8/src/heap/store-buffer.h | 2
-rw-r--r--  deps/v8/src/heap/worklist.h | 28
-rw-r--r--  deps/v8/src/ia32/assembler-ia32-inl.h | 79
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.cc | 48
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.h | 186
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.cc | 122
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.h | 93
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc | 1
-rw-r--r--  deps/v8/src/ia32/deoptimizer-ia32.cc | 12
-rw-r--r--  deps/v8/src/ia32/frame-constants-ia32.cc | 4
-rw-r--r--  deps/v8/src/ia32/interface-descriptors-ia32.cc | 33
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc | 534
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h | 176
-rw-r--r--  deps/v8/src/ic/access-compiler-data.h | 6
-rw-r--r--  deps/v8/src/ic/access-compiler.cc | 30
-rw-r--r--  deps/v8/src/ic/access-compiler.h | 29
-rw-r--r--  deps/v8/src/ic/accessor-assembler.cc | 457
-rw-r--r--  deps/v8/src/ic/accessor-assembler.h | 22
-rw-r--r--  deps/v8/src/ic/arm/handler-compiler-arm.cc | 29
-rw-r--r--  deps/v8/src/ic/arm64/handler-compiler-arm64.cc | 41
-rw-r--r--  deps/v8/src/ic/binary-op-assembler.cc | 77
-rw-r--r--  deps/v8/src/ic/binary-op-assembler.h | 17
-rw-r--r--  deps/v8/src/ic/handler-compiler.cc | 46
-rw-r--r--  deps/v8/src/ic/handler-compiler.h | 17
-rw-r--r--  deps/v8/src/ic/handler-configuration-inl.h | 38
-rw-r--r--  deps/v8/src/ic/handler-configuration.cc | 466
-rw-r--r--  deps/v8/src/ic/handler-configuration.h | 114
-rw-r--r--  deps/v8/src/ic/ia32/handler-compiler-ia32.cc | 28
-rw-r--r--  deps/v8/src/ic/ic-inl.h | 2
-rw-r--r--  deps/v8/src/ic/ic.cc | 517
-rw-r--r--  deps/v8/src/ic/ic.h | 42
-rw-r--r--  deps/v8/src/ic/keyed-store-generic.cc | 59
-rw-r--r--  deps/v8/src/ic/keyed-store-generic.h | 6
-rw-r--r--  deps/v8/src/ic/mips/handler-compiler-mips.cc | 29
-rw-r--r--  deps/v8/src/ic/mips64/handler-compiler-mips64.cc | 29
-rw-r--r--  deps/v8/src/ic/ppc/handler-compiler-ppc.cc | 29
-rw-r--r--  deps/v8/src/ic/s390/handler-compiler-s390.cc | 29
-rw-r--r--  deps/v8/src/ic/stub-cache.cc | 42
-rw-r--r--  deps/v8/src/ic/stub-cache.h | 30
-rw-r--r--  deps/v8/src/ic/x64/handler-compiler-x64.cc | 27
-rw-r--r--  deps/v8/src/inspector/BUILD.gn | 1
-rw-r--r--  deps/v8/src/inspector/injected-script-source.js | 37
-rw-r--r--  deps/v8/src/inspector/injected-script.cc | 4
-rw-r--r--  deps/v8/src/inspector/inspector.gypi | 1
-rw-r--r--  deps/v8/src/inspector/js_protocol.json | 56
-rw-r--r--  deps/v8/src/inspector/script-breakpoint.h | 55
-rw-r--r--  deps/v8/src/inspector/search-util.cc | 2
-rw-r--r--  deps/v8/src/inspector/string-util.cc | 6
-rw-r--r--  deps/v8/src/inspector/v8-console-agent-impl.cc | 2
-rw-r--r--  deps/v8/src/inspector/v8-console-message.cc | 2
-rw-r--r--  deps/v8/src/inspector/v8-console.cc | 2
-rw-r--r--  deps/v8/src/inspector/v8-debugger-agent-impl.cc | 548
-rw-r--r--  deps/v8/src/inspector/v8-debugger-agent-impl.h | 21
-rw-r--r--  deps/v8/src/inspector/v8-debugger-script.cc | 23
-rw-r--r--  deps/v8/src/inspector/v8-debugger-script.h | 4
-rw-r--r--  deps/v8/src/inspector/v8-debugger.cc | 12
-rw-r--r--  deps/v8/src/inspector/v8-debugger.h | 1
-rw-r--r--  deps/v8/src/inspector/v8-injected-script-host.cc | 2
-rw-r--r--  deps/v8/src/inspector/v8-profiler-agent-impl.cc | 78
-rw-r--r--  deps/v8/src/inspector/v8-profiler-agent-impl.h | 6
-rw-r--r--  deps/v8/src/inspector/v8-runtime-agent-impl.cc | 221
-rw-r--r--  deps/v8/src/inspector/v8-runtime-agent-impl.h | 5
-rw-r--r--  deps/v8/src/inspector/v8-stack-trace-impl.cc | 4
-rw-r--r--  deps/v8/src/inspector/wasm-translation.cc | 42
-rw-r--r--  deps/v8/src/interface-descriptors.cc | 32
-rw-r--r--  deps/v8/src/interface-descriptors.h | 32
-rw-r--r--  deps/v8/src/interpreter/block-coverage-builder.h | 4
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-accessor.cc | 8
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-accessor.h | 1
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-builder.cc | 88
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-builder.h | 23
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-writer.cc | 1
-rw-r--r--  deps/v8/src/interpreter/bytecode-decoder.cc | 20
-rw-r--r--  deps/v8/src/interpreter/bytecode-generator.cc | 123
-rw-r--r--  deps/v8/src/interpreter/bytecode-generator.h | 7
-rw-r--r--  deps/v8/src/interpreter/bytecode-label.h | 2
-rw-r--r--  deps/v8/src/interpreter/bytecode-operands.h | 3
-rw-r--r--  deps/v8/src/interpreter/bytecodes.h | 29
-rw-r--r--  deps/v8/src/interpreter/interpreter-assembler.cc | 44
-rw-r--r--  deps/v8/src/interpreter/interpreter-assembler.h | 3
-rw-r--r--  deps/v8/src/interpreter/interpreter-generator.cc | 452
-rw-r--r--  deps/v8/src/interpreter/interpreter.cc | 48
-rw-r--r--  deps/v8/src/interpreter/interpreter.h | 7
-rw-r--r--  deps/v8/src/isolate-inl.h | 30
-rw-r--r--  deps/v8/src/isolate.cc | 497
-rw-r--r--  deps/v8/src/isolate.h | 43
-rw-r--r--  deps/v8/src/js/array.js | 28
-rw-r--r--  deps/v8/src/js/intl.js | 5
-rw-r--r--  deps/v8/src/js/max-min.js | 30
-rw-r--r--  deps/v8/src/js/string.js | 111
-rw-r--r--  deps/v8/src/js/templates.js | 84
-rw-r--r--  deps/v8/src/js/typedarray.js | 78
-rw-r--r--  deps/v8/src/js/v8natives.js | 17
-rw-r--r--  deps/v8/src/keys.cc | 241
-rw-r--r--  deps/v8/src/libplatform/default-platform.cc | 4
-rw-r--r--  deps/v8/src/libplatform/default-platform.h | 1
-rw-r--r--  deps/v8/src/list-inl.h | 251
-rw-r--r--  deps/v8/src/list.h | 217
-rw-r--r--  deps/v8/src/log.cc | 125
-rw-r--r--  deps/v8/src/log.h | 1
-rw-r--r--  deps/v8/src/lookup.cc | 15
-rw-r--r--  deps/v8/src/lookup.h | 2
-rw-r--r--  deps/v8/src/macro-assembler.h | 27
-rw-r--r--  deps/v8/src/managed.h | 11
-rw-r--r--  deps/v8/src/map-updater.cc | 33
-rw-r--r--  deps/v8/src/map-updater.h | 4
-rw-r--r--  deps/v8/src/messages.cc | 26
-rw-r--r--  deps/v8/src/messages.h | 12
-rw-r--r--  deps/v8/src/mips/assembler-mips-inl.h | 47
-rw-r--r--  deps/v8/src/mips/assembler-mips.cc | 135
-rw-r--r--  deps/v8/src/mips/assembler-mips.h | 273
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.cc | 138
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.h | 55
-rw-r--r--  deps/v8/src/mips/constants-mips.h | 16
-rw-r--r--  deps/v8/src/mips/deoptimizer-mips.cc | 11
-rw-r--r--  deps/v8/src/mips/disasm-mips.cc | 3
-rw-r--r--  deps/v8/src/mips/frame-constants-mips.cc | 4
-rw-r--r--  deps/v8/src/mips/interface-descriptors-mips.cc | 32
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc | 902
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.h | 203
-rw-r--r--  deps/v8/src/mips/simulator-mips.cc | 1040
-rw-r--r--  deps/v8/src/mips/simulator-mips.h | 8
-rw-r--r--  deps/v8/src/mips64/assembler-mips64-inl.h | 38
-rw-r--r--  deps/v8/src/mips64/assembler-mips64.cc | 137
-rw-r--r--  deps/v8/src/mips64/assembler-mips64.h | 265
-rw-r--r--  deps/v8/src/mips64/code-stubs-mips64.cc | 138
-rw-r--r--  deps/v8/src/mips64/code-stubs-mips64.h | 55
-rw-r--r--  deps/v8/src/mips64/constants-mips64.h | 16
-rw-r--r--  deps/v8/src/mips64/deoptimizer-mips64.cc | 12
-rw-r--r--  deps/v8/src/mips64/disasm-mips64.cc | 3
-rw-r--r--  deps/v8/src/mips64/frame-constants-mips64.cc | 4
-rw-r--r--  deps/v8/src/mips64/interface-descriptors-mips64.cc | 32
-rw-r--r--  deps/v8/src/mips64/macro-assembler-mips64.cc | 861
-rw-r--r--  deps/v8/src/mips64/macro-assembler-mips64.h | 216
-rw-r--r--  deps/v8/src/mips64/simulator-mips64.cc | 832
-rw-r--r--  deps/v8/src/mips64/simulator-mips64.h | 9
-rw-r--r--  deps/v8/src/objects-body-descriptors-inl.h | 30
-rw-r--r--  deps/v8/src/objects-debug.cc | 122
-rw-r--r--  deps/v8/src/objects-inl.h | 346
-rw-r--r--  deps/v8/src/objects-printer.cc | 53
-rw-r--r--  deps/v8/src/objects.cc | 885
-rw-r--r--  deps/v8/src/objects.h | 338
-rw-r--r--  deps/v8/src/objects/bigint-inl.h | 56
-rw-r--r--  deps/v8/src/objects/bigint.cc | 1346
-rw-r--r--  deps/v8/src/objects/bigint.h | 187
-rw-r--r--  deps/v8/src/objects/code-cache-inl.h | 28
-rw-r--r--  deps/v8/src/objects/code-cache.h | 110
-rw-r--r--  deps/v8/src/objects/debug-objects.cc | 8
-rw-r--r--  deps/v8/src/objects/descriptor-array.h | 55
-rw-r--r--  deps/v8/src/objects/dictionary.h | 12
-rw-r--r--  deps/v8/src/objects/frame-array-inl.h | 1
-rw-r--r--  deps/v8/src/objects/map-inl.h | 33
-rw-r--r--  deps/v8/src/objects/map.h | 121
-rw-r--r--  deps/v8/src/objects/module.cc | 20
-rw-r--r--  deps/v8/src/objects/name-inl.h | 2
-rw-r--r--  deps/v8/src/objects/property-descriptor-object-inl.h | 23
-rw-r--r--  deps/v8/src/objects/property-descriptor-object.h | 64
-rw-r--r--  deps/v8/src/objects/scope-info.cc | 2
-rw-r--r--  deps/v8/src/objects/scope-info.h | 19
-rw-r--r--  deps/v8/src/objects/script.h | 4
-rw-r--r--  deps/v8/src/objects/shared-function-info-inl.h | 35
-rw-r--r--  deps/v8/src/objects/shared-function-info.h | 26
-rw-r--r--  deps/v8/src/objects/template-objects.cc | 129
-rw-r--r--  deps/v8/src/objects/template-objects.h | 79
-rw-r--r--  deps/v8/src/parsing/OWNERS | 2
-rw-r--r--  deps/v8/src/parsing/func-name-inferrer.cc | 1
-rw-r--r--  deps/v8/src/parsing/parse-info.cc | 16
-rw-r--r--  deps/v8/src/parsing/parse-info.h | 21
-rw-r--r--  deps/v8/src/parsing/parser-base.h | 139
-rw-r--r--  deps/v8/src/parsing/parser.cc | 157
-rw-r--r--  deps/v8/src/parsing/parser.h | 20
-rw-r--r--  deps/v8/src/parsing/pattern-rewriter.cc | 2
-rw-r--r--  deps/v8/src/parsing/preparsed-scope-data.cc | 336
-rw-r--r--  deps/v8/src/parsing/preparsed-scope-data.h | 102
-rw-r--r--  deps/v8/src/parsing/preparser.cc | 8
-rw-r--r--  deps/v8/src/parsing/preparser.h | 41
-rw-r--r--  deps/v8/src/parsing/scanner-character-streams.cc | 4
-rw-r--r--  deps/v8/src/parsing/scanner.cc | 50
-rw-r--r--  deps/v8/src/parsing/scanner.h | 6
-rw-r--r--  deps/v8/src/parsing/token.h | 1
-rw-r--r--  deps/v8/src/perf-jit.cc | 7
-rw-r--r--  deps/v8/src/ppc/assembler-ppc-inl.h | 64
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.cc | 79
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.h | 281
-rw-r--r--  deps/v8/src/ppc/code-stubs-ppc.cc | 93
-rw-r--r--  deps/v8/src/ppc/code-stubs-ppc.h | 53
-rw-r--r--  deps/v8/src/ppc/deoptimizer-ppc.cc | 6
-rw-r--r--  deps/v8/src/ppc/frame-constants-ppc.cc | 4
-rw-r--r--  deps/v8/src/ppc/interface-descriptors-ppc.cc | 32
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.cc | 561
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.h | 175
-rw-r--r--  deps/v8/src/ppc/simulator-ppc.cc | 70
-rw-r--r--  deps/v8/src/profiler/allocation-tracker.cc | 55
-rw-r--r--  deps/v8/src/profiler/allocation-tracker.h | 16
-rw-r--r--  deps/v8/src/profiler/cpu-profiler.cc | 4
-rw-r--r--  deps/v8/src/profiler/heap-profiler.cc | 25
-rw-r--r--  deps/v8/src/profiler/heap-profiler.h | 6
-rw-r--r--  deps/v8/src/profiler/heap-snapshot-generator.cc | 149
-rw-r--r--  deps/v8/src/profiler/heap-snapshot-generator.h | 10
-rw-r--r--  deps/v8/src/profiler/profile-generator.cc | 82
-rw-r--r--  deps/v8/src/profiler/profile-generator.h | 16
-rw-r--r--  deps/v8/src/property-descriptor.cc | 29
-rw-r--r--  deps/v8/src/property-descriptor.h | 3
-rw-r--r--  deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc | 22
-rw-r--r--  deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc | 68
-rw-r--r--  deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h | 2
-rw-r--r--  deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc | 22
-rw-r--r--  deps/v8/src/regexp/interpreter-irregexp.cc | 8
-rw-r--r--  deps/v8/src/regexp/jsregexp.cc | 69
-rw-r--r--  deps/v8/src/regexp/jsregexp.h | 4
-rw-r--r--  deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc | 22
-rw-r--r--  deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc | 18
-rw-r--r--  deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc | 23
-rw-r--r--  deps/v8/src/regexp/regexp-ast.cc | 4
-rw-r--r--  deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc | 64
-rw-r--r--  deps/v8/src/regexp/regexp-macro-assembler.cc | 11
-rw-r--r--  deps/v8/src/regexp/regexp-parser.cc | 26
-rw-r--r--  deps/v8/src/regexp/regexp-stack.h | 2
-rw-r--r--  deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc | 23
-rw-r--r--  deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc | 27
-rw-r--r--  deps/v8/src/register-configuration.cc | 14
-rw-r--r--  deps/v8/src/runtime-profiler.cc | 101
-rw-r--r--  deps/v8/src/runtime/runtime-array.cc | 47
-rw-r--r--  deps/v8/src/runtime/runtime-bigint.cc | 70
-rw-r--r--  deps/v8/src/runtime/runtime-compiler.cc | 116
-rw-r--r--  deps/v8/src/runtime/runtime-debug.cc | 73
-rw-r--r--  deps/v8/src/runtime/runtime-forin.cc | 38
-rw-r--r--  deps/v8/src/runtime/runtime-function.cc | 18
-rw-r--r--  deps/v8/src/runtime/runtime-generator.cc | 1
-rw-r--r--  deps/v8/src/runtime/runtime-internal.cc | 47
-rw-r--r--  deps/v8/src/runtime/runtime-literals.cc | 16
-rw-r--r--  deps/v8/src/runtime/runtime-numbers.cc | 15
-rw-r--r--  deps/v8/src/runtime/runtime-object.cc | 58
-rw-r--r--  deps/v8/src/runtime/runtime-promise.cc | 5
-rw-r--r--  deps/v8/src/runtime/runtime-proxy.cc | 46
-rw-r--r--  deps/v8/src/runtime/runtime-regexp.cc | 98
-rw-r--r--  deps/v8/src/runtime/runtime-scopes.cc | 14
-rw-r--r--  deps/v8/src/runtime/runtime-strings.cc | 22
-rw-r--r--  deps/v8/src/runtime/runtime-test.cc | 94
-rw-r--r--  deps/v8/src/runtime/runtime-typedarray.cc | 349
-rw-r--r--  deps/v8/src/runtime/runtime-utils.h | 16
-rw-r--r--  deps/v8/src/runtime/runtime-wasm.cc | 149
-rw-r--r--  deps/v8/src/runtime/runtime.cc | 7
-rw-r--r--  deps/v8/src/runtime/runtime.h | 74
-rw-r--r--  deps/v8/src/s390/assembler-s390-inl.h | 89
-rw-r--r--  deps/v8/src/s390/assembler-s390.cc | 46
-rw-r--r--  deps/v8/src/s390/assembler-s390.h | 271
-rw-r--r--  deps/v8/src/s390/code-stubs-s390.cc | 111
-rw-r--r--  deps/v8/src/s390/code-stubs-s390.h | 68
-rw-r--r--  deps/v8/src/s390/deoptimizer-s390.cc | 7
-rw-r--r--  deps/v8/src/s390/frame-constants-s390.cc | 4
-rw-r--r--  deps/v8/src/s390/interface-descriptors-s390.cc | 29
-rw-r--r--  deps/v8/src/s390/macro-assembler-s390.cc | 641
-rw-r--r--  deps/v8/src/s390/macro-assembler-s390.h | 188
-rw-r--r--  deps/v8/src/s390/simulator-s390.cc | 74
-rw-r--r--  deps/v8/src/safepoint-table.h | 2
-rw-r--r--  deps/v8/src/setup-isolate-deserialize.cc | 13
-rw-r--r--  deps/v8/src/setup-isolate-full.cc | 19
-rw-r--r--  deps/v8/src/setup-isolate.h | 15
-rw-r--r--  deps/v8/src/small-pointer-list.h | 176
-rw-r--r--  deps/v8/src/snapshot/builtin-deserializer.cc | 243
-rw-r--r--  deps/v8/src/snapshot/builtin-deserializer.h | 89
-rw-r--r--  deps/v8/src/snapshot/builtin-serializer.cc | 90
-rw-r--r--  deps/v8/src/snapshot/builtin-serializer.h | 47
-rw-r--r--  deps/v8/src/snapshot/code-serializer.cc | 68
-rw-r--r--  deps/v8/src/snapshot/code-serializer.h | 5
-rw-r--r--  deps/v8/src/snapshot/default-serializer-allocator.cc | 153
-rw-r--r--  deps/v8/src/snapshot/default-serializer-allocator.h | 74
-rw-r--r--  deps/v8/src/snapshot/deserializer.cc | 149
-rw-r--r--  deps/v8/src/snapshot/deserializer.h | 27
-rw-r--r--  deps/v8/src/snapshot/mksnapshot.cc | 16
-rw-r--r--  deps/v8/src/snapshot/partial-deserializer.cc | 1
-rw-r--r--  deps/v8/src/snapshot/partial-serializer.cc | 31
-rw-r--r--  deps/v8/src/snapshot/partial-serializer.h | 2
-rw-r--r--  deps/v8/src/snapshot/serializer-common.cc | 2
-rw-r--r--  deps/v8/src/snapshot/serializer-common.h | 22
-rw-r--r--  deps/v8/src/snapshot/serializer.cc | 459
-rw-r--r--  deps/v8/src/snapshot/serializer.h | 131
-rw-r--r--  deps/v8/src/snapshot/snapshot-common.cc | 155
-rw-r--r--  deps/v8/src/snapshot/snapshot-source-sink.h | 3
-rw-r--r--  deps/v8/src/snapshot/snapshot.h | 79
-rw-r--r--  deps/v8/src/snapshot/startup-deserializer.cc | 21
-rw-r--r--  deps/v8/src/snapshot/startup-deserializer.h | 7
-rw-r--r--  deps/v8/src/snapshot/startup-serializer.cc | 52
-rw-r--r--  deps/v8/src/snapshot/startup-serializer.h | 8
-rw-r--r--  deps/v8/src/splay-tree-inl.h | 14
-rw-r--r--  deps/v8/src/transitions-inl.h | 30
-rw-r--r--  deps/v8/src/transitions.cc | 101
-rw-r--r--  deps/v8/src/transitions.h | 3
-rw-r--r--  deps/v8/src/trap-handler/handler-inside.cc | 78
-rw-r--r--  deps/v8/src/trap-handler/handler-outside.cc | 109
-rw-r--r--  deps/v8/src/trap-handler/handler-shared.cc | 1
-rw-r--r--  deps/v8/src/trap-handler/trap-handler-internal.h | 8
-rw-r--r--  deps/v8/src/trap-handler/trap-handler.h | 9
-rw-r--r--  deps/v8/src/type-hints.cc | 14
-rw-r--r--  deps/v8/src/type-hints.h | 10
-rw-r--r--  deps/v8/src/unicode-cache-inl.h | 8
-rw-r--r--  deps/v8/src/unicode-cache.h | 1
-rw-r--r--  deps/v8/src/unicode.cc | 157
-rw-r--r--  deps/v8/src/unicode.h | 14
-rw-r--r--  deps/v8/src/uri.cc | 75
-rw-r--r--  deps/v8/src/utils.h | 48
-rw-r--r--  deps/v8/src/v8.cc | 2
-rw-r--r--  deps/v8/src/v8.gyp | 61
-rw-r--r--  deps/v8/src/value-serializer.cc | 7
-rw-r--r--  deps/v8/src/version.cc | 15
-rw-r--r--  deps/v8/src/wasm/compilation-manager.cc | 29
-rw-r--r--  deps/v8/src/wasm/compilation-manager.h | 12
-rw-r--r--  deps/v8/src/wasm/decoder.h | 27
-rw-r--r--  deps/v8/src/wasm/function-body-decoder-impl.h | 707
-rw-r--r--  deps/v8/src/wasm/function-body-decoder.cc | 400
-rw-r--r--  deps/v8/src/wasm/function-body-decoder.h | 12
-rw-r--r--  deps/v8/src/wasm/local-decl-encoder.cc | 20
-rw-r--r--  deps/v8/src/wasm/memory-tracing.cc | 49
-rw-r--r--  deps/v8/src/wasm/memory-tracing.h | 28
-rw-r--r--  deps/v8/src/wasm/module-compiler.cc | 1441
-rw-r--r--  deps/v8/src/wasm/module-compiler.h | 429
-rw-r--r--  deps/v8/src/wasm/module-decoder.cc | 158
-rw-r--r--  deps/v8/src/wasm/module-decoder.h | 39
-rw-r--r--  deps/v8/src/wasm/streaming-decoder.cc | 234
-rw-r--r--  deps/v8/src/wasm/streaming-decoder.h | 135
-rw-r--r--  deps/v8/src/wasm/wasm-code-specialization.cc | 118
-rw-r--r--  deps/v8/src/wasm/wasm-code-specialization.h | 10
-rw-r--r--  deps/v8/src/wasm/wasm-debug.cc | 102
-rw-r--r--  deps/v8/src/wasm/wasm-external-refs.cc | 5
-rw-r--r--  deps/v8/src/wasm/wasm-external-refs.h | 3
-rw-r--r--  deps/v8/src/wasm/wasm-heap.cc | 101
-rw-r--r--  deps/v8/src/wasm/wasm-heap.h | 66
-rw-r--r--  deps/v8/src/wasm/wasm-interpreter.cc | 156
-rw-r--r--  deps/v8/src/wasm/wasm-js.cc | 140
-rw-r--r--  deps/v8/src/wasm/wasm-js.h | 3
-rw-r--r--  deps/v8/src/wasm/wasm-memory.cc | 134
-rw-r--r--  deps/v8/src/wasm/wasm-memory.h | 32
-rw-r--r--  deps/v8/src/wasm/wasm-module-builder.cc | 14
-rw-r--r--  deps/v8/src/wasm/wasm-module-builder.h | 3
-rw-r--r--  deps/v8/src/wasm/wasm-module.cc | 865
-rw-r--r--  deps/v8/src/wasm/wasm-module.h | 195
-rw-r--r--  deps/v8/src/wasm/wasm-objects-inl.h | 210
-rw-r--r--  deps/v8/src/wasm/wasm-objects.cc | 382
-rw-r--r--  deps/v8/src/wasm/wasm-objects.h | 253
-rw-r--r--  deps/v8/src/wasm/wasm-opcodes.cc | 38
-rw-r--r--  deps/v8/src/wasm/wasm-opcodes.h | 40
-rw-r--r--  deps/v8/src/wasm/wasm-result.h | 9
-rw-r--r--  deps/v8/src/wasm/wasm-text.cc | 46
-rw-r--r--  deps/v8/src/x64/assembler-x64-inl.h | 18
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc | 44
-rw-r--r--  deps/v8/src/x64/assembler-x64.h | 266
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc | 161
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.h | 99
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc | 6
-rw-r--r--  deps/v8/src/x64/deoptimizer-x64.cc | 14
-rw-r--r--  deps/v8/src/x64/eh-frame-x64.cc | 6
-rw-r--r--  deps/v8/src/x64/frame-constants-x64.cc | 4
-rw-r--r--  deps/v8/src/x64/interface-descriptors-x64.cc | 21
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc | 692
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h | 235
-rw-r--r--  deps/v8/src/zone/zone-list-inl.h | 164
-rw-r--r--  deps/v8/src/zone/zone.h | 154
-rw-r--r--  deps/v8/test/cctest/BUILD.gn | 67
-rw-r--r--  deps/v8/test/cctest/cctest.gyp | 7
-rw-r--r--  deps/v8/test/cctest/cctest.h | 7
-rw-r--r--  deps/v8/test/cctest/cctest.status | 14
-rw-r--r--  deps/v8/test/cctest/compiler/code-assembler-tester.h | 15
-rw-r--r--  deps/v8/test/cctest/compiler/codegen-tester.h | 6
-rw-r--r--  deps/v8/test/cctest/compiler/function-tester.cc | 15
-rw-r--r--  deps/v8/test/cctest/compiler/graph-builder-tester.h | 2
-rw-r--r--  deps/v8/test/cctest/compiler/test-code-assembler.cc | 27
-rw-r--r--  deps/v8/test/cctest/compiler/test-code-generator.cc | 452
-rw-r--r--  deps/v8/test/cctest/compiler/test-instruction.cc | 6
-rw-r--r--  deps/v8/test/cctest/compiler/test-js-constant-cache.cc | 9
-rw-r--r--  deps/v8/test/cctest/compiler/test-js-typed-lowering.cc | 8
-rw-r--r--  deps/v8/test/cctest/compiler/test-linkage.cc | 30
-rw-r--r--  deps/v8/test/cctest/compiler/test-multiple-return.cc | 2
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc | 21
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-intrinsics.cc | 10
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-jsobjects.cc | 7
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-machops.cc | 11
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-native-calls.cc | 9
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-stubs.cc | 9
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-wasm-machops.cc | 72
-rw-r--r--  deps/v8/test/cctest/compiler/value-helper.h | 23
-rw-r--r--  deps/v8/test/cctest/ffi/OWNERS | 4
-rw-r--r--  deps/v8/test/cctest/ffi/test-ffi.cc | 222
-rw-r--r--  deps/v8/test/cctest/heap/heap-utils.cc | 3
-rw-r--r--  deps/v8/test/cctest/heap/test-alloc.cc | 6
-rw-r--r--  deps/v8/test/cctest/heap/test-array-buffer-tracker.cc | 1
-rw-r--r--  deps/v8/test/cctest/heap/test-compaction.cc | 6
-rw-r--r--  deps/v8/test/cctest/heap/test-concurrent-marking.cc | 40
-rw-r--r--  deps/v8/test/cctest/heap/test-heap.cc | 189
-rw-r--r--  deps/v8/test/cctest/heap/test-invalidated-slots.cc | 2
-rw-r--r--  deps/v8/test/cctest/heap/test-lab.cc | 7
-rw-r--r--  deps/v8/test/cctest/heap/test-mark-compact.cc | 25
-rw-r--r--  deps/v8/test/cctest/heap/test-page-promotion.cc | 2
-rw-r--r--  deps/v8/test/cctest/heap/test-spaces.cc | 7
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc | 13
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden | 12
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden | 6
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden | 114
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden | 32
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden | 10
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden | 4
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden | 4
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden | 46
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden | 274
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden | 40
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden | 10
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden | 166
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden | 74
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden | 12
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden | 122
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden | 30
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden | 4
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden | 1046
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden | 130
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden | 27
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden | 99
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden | 12
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden | 9
-rw-r--r--  deps/v8/test/cctest/interpreter/interpreter-tester.cc | 2
-rw-r--r--  deps/v8/test/cctest/interpreter/interpreter-tester.h | 14
-rw-r--r--  deps/v8/test/cctest/interpreter/test-bytecode-generator.cc | 23
-rw-r--r--  deps/v8/test/cctest/interpreter/test-interpreter.cc | 12
-rw-r--r--  deps/v8/test/cctest/interpreter/test-source-positions.cc | 1
-rw-r--r--  deps/v8/test/cctest/parsing/test-preparser.cc | 46
-rw-r--r--  deps/v8/test/cctest/parsing/test-scanner-streams.cc | 83
-rw-r--r--  deps/v8/test/cctest/parsing/test-scanner.cc | 8
-rw-r--r--  deps/v8/test/cctest/setup-isolate-for-tests.cc | 16
-rw-r--r--  deps/v8/test/cctest/setup-isolate-for-tests.h | 10
-rw-r--r--  deps/v8/test/cctest/test-accessor-assembler.cc | 10
-rw-r--r--  deps/v8/test/cctest/test-allocation.cc | 6
-rw-r--r--  deps/v8/test/cctest/test-api-interceptors.cc | 291
-rw-r--r--  deps/v8/test/cctest/test-api.cc | 155
-rw-r--r--  deps/v8/test/cctest/test-array-list.cc | 17
-rw-r--r--  deps/v8/test/cctest/test-assembler-arm.cc | 174
-rw-r--r--  deps/v8/test/cctest/test-assembler-arm64.cc | 126
-rw-r--r--  deps/v8/test/cctest/test-assembler-ia32.cc | 92
-rw-r--r--  deps/v8/test/cctest/test-assembler-mips.cc | 2248
-rw-r--r--  deps/v8/test/cctest/test-assembler-mips64.cc | 2272
-rw-r--r--  deps/v8/test/cctest/test-assembler-ppc.cc | 39
-rw-r--r--  deps/v8/test/cctest/test-assembler-s390.cc | 38
-rw-r--r--  deps/v8/test/cctest/test-assembler-x64.cc | 68
-rw-r--r--  deps/v8/test/cctest/test-atomicops.cc | 8
-rw-r--r--  deps/v8/test/cctest/test-bignum-dtoa.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-bignum.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-code-cache.cc | 83
-rw-r--r--  deps/v8/test/cctest/test-code-layout.cc | 11
-rw-r--r--  deps/v8/test/cctest/test-code-stub-assembler.cc | 272
-rw-r--r--  deps/v8/test/cctest/test-code-stubs-arm.cc | 17
-rw-r--r--  deps/v8/test/cctest/test-code-stubs-arm64.cc | 6
-rw-r--r--  deps/v8/test/cctest/test-code-stubs-ia32.cc | 16
-rw-r--r--  deps/v8/test/cctest/test-code-stubs-mips.cc | 13
-rw-r--r--  deps/v8/test/cctest/test-code-stubs-mips64.cc | 13
-rw-r--r--  deps/v8/test/cctest/test-code-stubs-x64.cc | 18
-rw-r--r--  deps/v8/test/cctest/test-code-stubs.cc | 7
-rw-r--r--  deps/v8/test/cctest/test-code-stubs.h | 6
-rw-r--r--  deps/v8/test/cctest/test-compiler.cc | 69
-rw-r--r--  deps/v8/test/cctest/test-conversions.cc | 9
-rw-r--r--  deps/v8/test/cctest/test-cpu-profiler.cc | 75
-rw-r--r--  deps/v8/test/cctest/test-debug.cc | 111
-rw-r--r--  deps/v8/test/cctest/test-disasm-arm.cc | 6
-rw-r--r--  deps/v8/test/cctest/test-disasm-arm64.cc | 6
-rw-r--r--  deps/v8/test/cctest/test-disasm-ia32.cc | 11
-rw-r--r--  deps/v8/test/cctest/test-disasm-mips.cc | 6
-rw-r--r--  deps/v8/test/cctest/test-disasm-mips64.cc | 6
-rw-r--r--  deps/v8/test/cctest/test-disasm-ppc.cc | 7
-rw-r--r--  deps/v8/test/cctest/test-disasm-s390.cc | 6
-rw-r--r--  deps/v8/test/cctest/test-disasm-x64.cc | 4
-rw-r--r--  deps/v8/test/cctest/test-dtoa.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-elements-kind.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-fast-dtoa.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-feedback-vector.cc | 49
-rw-r--r--  deps/v8/test/cctest/test-field-type-tracking.cc | 40
-rw-r--r--  deps/v8/test/cctest/test-fuzz-arm64.cc | 6
-rw-r--r--  deps/v8/test/cctest/test-global-handles.cc | 37
-rw-r--r--  deps/v8/test/cctest/test-hashmap.cc | 10
-rw-r--r--  deps/v8/test/cctest/test-heap-profiler.cc | 82
-rw-r--r--  deps/v8/test/cctest/test-identity-map.cc | 11
-rw-r--r--  deps/v8/test/cctest/test-inobject-slack-tracking.cc | 65
-rw-r--r--  deps/v8/test/cctest/test-javascript-arm64.cc | 41
-rw-r--r--  deps/v8/test/cctest/test-js-arm64-variables.cc | 29
-rw-r--r--  deps/v8/test/cctest/test-list.cc | 171
-rw-r--r--  deps/v8/test/cctest/test-lockers.cc | 270
-rw-r--r--  deps/v8/test/cctest/test-macro-assembler-arm.cc | 20
-rw-r--r--  deps/v8/test/cctest/test-macro-assembler-mips.cc | 74
-rw-r--r--  deps/v8/test/cctest/test-macro-assembler-mips64.cc | 92
-rw-r--r--  deps/v8/test/cctest/test-macro-assembler-x64.cc | 222
-rw-r--r--  deps/v8/test/cctest/test-managed.cc | 4
-rw-r--r--  deps/v8/test/cctest/test-mementos.cc | 6
-rw-r--r--  deps/v8/test/cctest/test-object.cc | 179
-rw-r--r--  deps/v8/test/cctest/test-orderedhashtable.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-parsing.cc | 207
-rw-r--r--  deps/v8/test/cctest/test-platform-linux.cc | 30
-rw-r--r--  deps/v8/test/cctest/test-platform-win32.cc | 34
-rw-r--r--  deps/v8/test/cctest/test-profile-generator.cc | 18
-rw-r--r--  deps/v8/test/cctest/test-regexp.cc | 5
-rw-r--r--  deps/v8/test/cctest/test-run-wasm-relocation-arm.cc | 84
-rw-r--r--  deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc | 86
-rw-r--r--  deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc | 93
-rw-r--r--  deps/v8/test/cctest/test-run-wasm-relocation-x64.cc | 74
-rw-r--r--  deps/v8/test/cctest/test-serialize.cc | 317
-rw-r--r--  deps/v8/test/cctest/test-simulator-arm.cc | 56
-rw-r--r--  deps/v8/test/cctest/test-simulator-arm64.cc | 55
-rw-r--r--  deps/v8/test/cctest/test-strings.cc | 25
-rw-r--r--  deps/v8/test/cctest/test-strtod.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-symbols.cc | 9
-rw-r--r--  deps/v8/test/cctest/test-thread-termination.cc | 26
-rw-r--r--  deps/v8/test/cctest/test-transitions.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-types.cc | 10
-rw-r--r--  deps/v8/test/cctest/test-unboxed-doubles.cc | 9
-rw-r--r--  deps/v8/test/cctest/test-usecounters.cc | 9
-rw-r--r--  deps/v8/test/cctest/test-utils-arm64.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-utils-arm64.h | 6
-rw-r--r--  deps/v8/test/cctest/test-version.cc | 23
-rw-r--r--  deps/v8/test/cctest/test-weakmaps.cc | 12
-rw-r--r--  deps/v8/test/cctest/test-weaksets.cc | 10
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-64.cc | 90
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc | 112
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc | 187
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc | 7
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-js.cc | 282
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-module.cc | 132
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc | 18
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-simd.cc | 935
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm.cc | 82
-rw-r--r--  deps/v8/test/cctest/wasm/test-streaming-compilation.cc | 820
-rw-r--r--  deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc | 12
-rw-r--r--  deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc | 10
-rw-r--r--  deps/v8/test/cctest/wasm/test-wasm-stack.cc | 127
-rw-r--r--  deps/v8/test/cctest/wasm/test-wasm-trap-position.cc | 26
-rw-r--r--  deps/v8/test/cctest/wasm/wasm-run-utils.cc | 522
-rw-r--r--  deps/v8/test/cctest/wasm/wasm-run-utils.h | 593
-rw-r--r--  deps/v8/test/common/wasm/flag-utils.h | 6
-rw-r--r--  deps/v8/test/common/wasm/wasm-macro-gen.h | 17
-rw-r--r--  deps/v8/test/common/wasm/wasm-module-runner.cc | 78
-rw-r--r--  deps/v8/test/common/wasm/wasm-module-runner.h | 15
-rw-r--r--  deps/v8/test/debugger/debug/debug-break-microtask.js | 52
-rw-r--r--  deps/v8/test/debugger/debug/debug-compile-optimized.js | 15
-rw-r--r--  deps/v8/test/debugger/debug/debug-evaluate-no-side-effect-ops.js | 2
-rw-r--r--  deps/v8/test/debugger/debug/debug-liveedit-inline.js | 30
-rw-r--r--  deps/v8/test/debugger/debug/debug-step-microtask.js | 52
-rw-r--r--  deps/v8/test/debugger/debug/es8/promise-finally.js | 46
-rw-r--r--  deps/v8/test/debugger/debug/regress/regress-crbug-222893.js | 26
-rw-r--r--  deps/v8/test/debugger/debugger.status | 6
-rw-r--r--  deps/v8/test/fuzzer/README.md | 2
-rw-r--r--  deps/v8/test/fuzzer/fuzzer-support.cc | 5
-rw-r--r--  deps/v8/test/fuzzer/fuzzer-support.h | 2
-rw-r--r--  deps/v8/test/fuzzer/fuzzer.cc | 4
-rw-r--r--  deps/v8/test/fuzzer/fuzzer.gyp | 4
-rw-r--r--  deps/v8/test/fuzzer/parser.cc | 6
-rw-r--r--  deps/v8/test/fuzzer/wasm-async.cc | 93
-rw-r--r--  deps/v8/test/fuzzer/wasm-call.cc | 49
-rw-r--r--  deps/v8/test/fuzzer/wasm-code.cc | 25
-rw-r--r--  deps/v8/test/fuzzer/wasm-compile.cc | 36
-rw-r--r--  deps/v8/test/fuzzer/wasm-fuzzer-common.cc | 72
-rw-r--r--  deps/v8/test/fuzzer/wasm-fuzzer-common.h | 10
-rw-r--r--  deps/v8/test/fuzzer/wasm.cc | 27
-rw-r--r--  deps/v8/test/fuzzer/wasm_async/regression-761784.wasm | bin 0 -> 103 bytes
-rw-r--r--  deps/v8/test/inspector/cpu-profiler/coverage.js | 2
-rw-r--r--  deps/v8/test/inspector/debugger/breakpoints-expected.txt | 66
-rw-r--r--  deps/v8/test/inspector/debugger/breakpoints.js | 117
-rw-r--r--  deps/v8/test/inspector/debugger/call-frame-url-expected.txt | 15
-rw-r--r--  deps/v8/test/inspector/debugger/call-frame-url.js | 20
-rw-r--r--  deps/v8/test/inspector/debugger/caught-uncaught-exceptions-expected.txt | 2
-rw-r--r--  deps/v8/test/inspector/debugger/caught-uncaught-exceptions.js | 6
-rw-r--r--  deps/v8/test/inspector/debugger/provisional-breakpoint-for-anonymous-script-expected.txt | 11
-rw-r--r--  deps/v8/test/inspector/debugger/provisional-breakpoint-for-anonymous-script.js | 69
-rw-r--r--  deps/v8/test/inspector/debugger/restore-breakpoint.js | 24
-rw-r--r--  deps/v8/test/inspector/debugger/suspended-generator-scopes-expected.txt | 106
-rw-r--r--  deps/v8/test/inspector/debugger/suspended-generator-scopes.js | 116
-rw-r--r--  deps/v8/test/inspector/inspector.isolate | 1
-rw-r--r--  deps/v8/test/inspector/isolate-data.cc | 2
-rw-r--r--  deps/v8/test/inspector/protocol-test.js | 23
-rw-r--r--  deps/v8/test/inspector/runtime/call-function-on-async-expected.txt | 39
-rw-r--r--  deps/v8/test/inspector/runtime/call-function-on-async.js | 38
-rw-r--r--  deps/v8/test/inspector/runtime/command-line-api-expected.txt | 4
-rw-r--r--  deps/v8/test/inspector/runtime/console-methods-expected.txt | 103
-rw-r--r--  deps/v8/test/inspector/runtime/console-methods.js | 2
-rw-r--r--  deps/v8/test/inspector/runtime/es6-module-expected.txt | 1
-rw-r--r--  deps/v8/test/inspector/runtime/function-scopes-expected.txt | 53
-rw-r--r--  deps/v8/test/inspector/runtime/function-scopes.js | 56
-rw-r--r--  deps/v8/test/inspector/runtime/internal-properties-expected.txt | 20
-rw-r--r--  deps/v8/test/inspector/sessions/debugger-stepping-and-breakpoints-expected.txt | 16
-rw-r--r--  deps/v8/test/inspector/task-runner.cc | 2
-rw-r--r--  deps/v8/test/inspector/type-profiler/type-profile-disable-expected.txt | 9
-rw-r--r--  deps/v8/test/inspector/type-profiler/type-profile-disable.js | 47
-rw-r--r--  deps/v8/test/inspector/type-profiler/type-profile-expected.txt | 8
-rw-r--r--  deps/v8/test/inspector/type-profiler/type-profile-start-stop-expected.txt | 49
-rw-r--r--  deps/v8/test/inspector/type-profiler/type-profile-start-stop.js | 170
-rw-r--r--  deps/v8/test/inspector/type-profiler/type-profile-with-classes-expected.txt | 15
-rw-r--r--  deps/v8/test/inspector/type-profiler/type-profile-with-classes.js | 43
-rw-r--r--  deps/v8/test/inspector/type-profiler/type-profile-with-to-string-tag-expected.txt | 16
-rw-r--r--  deps/v8/test/inspector/type-profiler/type-profile-with-to-string-tag.js | 46
-rw-r--r--  deps/v8/test/inspector/type-profiler/type-profile.js | 39
-rw-r--r--  deps/v8/test/js-perf-test/Array/filter.js | 2
-rw-r--r--  deps/v8/test/js-perf-test/Array/join.js | 2
-rw-r--r--  deps/v8/test/js-perf-test/Array/map.js | 2
-rw-r--r--  deps/v8/test/js-perf-test/Array/to-string.js | 2
-rw-r--r--  deps/v8/test/js-perf-test/JSTests.json | 50
-rw-r--r--  deps/v8/test/js-perf-test/Parsing/comments.js | 41
-rw-r--r--  deps/v8/test/js-perf-test/Parsing/run.js | 27
-rw-r--r--  deps/v8/test/js-perf-test/Proxies/proxies.js | 40
-rw-r--r--  deps/v8/test/memory/Memory.json | 4
-rw-r--r--  deps/v8/test/message/regress/regress-crbug-691194.out | 2
-rw-r--r--  deps/v8/test/message/settimeout.js | 26
-rw-r--r--  deps/v8/test/message/settimeout.out | 13
-rw-r--r--  deps/v8/test/message/typedarray.out | 6
-rw-r--r--  deps/v8/test/message/wasm-trace-memory-interpreted.js | 7
-rw-r--r--  deps/v8/test/message/wasm-trace-memory-interpreted.out | 9
-rw-r--r--  deps/v8/test/message/wasm-trace-memory.js | 37
-rw-r--r--  deps/v8/test/message/wasm-trace-memory.out | 9
-rw-r--r--  deps/v8/test/mjsunit/array-constructor-feedback.js | 7
-rw-r--r--  deps/v8/test/mjsunit/array-reduce.js | 20
-rw-r--r--  deps/v8/test/mjsunit/asm/math-max.js | 78
-rw-r--r--  deps/v8/test/mjsunit/asm/math-min.js | 78
-rw-r--r--  deps/v8/test/mjsunit/asm/noexpose-wasm.js | 37
-rw-r--r--  deps/v8/test/mjsunit/code-coverage-precise.js | 1
-rw-r--r--  deps/v8/test/mjsunit/compiler/array-push-1.js | 239
-rw-r--r--  deps/v8/test/mjsunit/compiler/array-push-2.js | 65
-rw-r--r--  deps/v8/test/mjsunit/compiler/array-push-3.js | 51
-rw-r--r--  deps/v8/test/mjsunit/compiler/array-subclass.js | 396
-rw-r--r--  deps/v8/test/mjsunit/compiler/constant-fold-cow-array.js | 35
-rw-r--r--  deps/v8/test/mjsunit/compiler/constant-fold-tostring.js | 26
-rw-r--r--  deps/v8/test/mjsunit/compiler/constructor-inlining.js | 5
-rw-r--r--  deps/v8/test/mjsunit/compiler/dead-string-char-code-at.js | 81
-rw-r--r--  deps/v8/test/mjsunit/compiler/dead-string-char-code-at2.js | 81
-rw-r--r--  deps/v8/test/mjsunit/compiler/deopt-closure.js | 47
-rw-r--r--  deps/v8/test/mjsunit/compiler/deopt-simple-try-catch.js | 28
-rw-r--r--  deps/v8/test/mjsunit/compiler/for-in-1.js | 20
-rw-r--r--  deps/v8/test/mjsunit/compiler/for-in-2.js | 20
-rw-r--r--  deps/v8/test/mjsunit/compiler/for-in-3.js | 20
-rw-r--r--  deps/v8/test/mjsunit/compiler/for-in-4.js | 20
-rw-r--r--  deps/v8/test/mjsunit/compiler/for-in-5.js | 19
-rw-r--r--  deps/v8/test/mjsunit/compiler/function-caller.js | 25
-rw-r--r--  deps/v8/test/mjsunit/compiler/object-constructor.js | 51
-rw-r--r--  deps/v8/test/mjsunit/compiler/object-is.js | 143
-rw-r--r--  deps/v8/test/mjsunit/compiler/polymorphic-symbols.js | 48
-rw-r--r--  deps/v8/test/mjsunit/compiler/regress-700883.js | 4
-rw-r--r--  deps/v8/test/mjsunit/compiler/regress-758096.js | 54
-rw-r--r--  deps/v8/test/mjsunit/compiler/regress-761892.js | 15
-rw-r--r--  deps/v8/test/mjsunit/compiler/regress-762057.js | 20
-rw-r--r--  deps/v8/test/mjsunit/compiler/regress-780658.js | 29
-rw-r--r--  deps/v8/test/mjsunit/compiler/stress-deopt-count-1.js | 20
-rw-r--r--  deps/v8/test/mjsunit/compiler/stress-deopt-count-2.js | 48
-rw-r--r--  deps/v8/test/mjsunit/compiler/typedarray-prototype-tostringtag.js | 84
-rw-r--r--  deps/v8/test/mjsunit/es6/new-target.js | 9
-rw-r--r--  deps/v8/test/mjsunit/es6/proxies-get.js | 11
-rw-r--r--  deps/v8/test/mjsunit/es6/proxies-set.js | 98
-rw-r--r--  deps/v8/test/mjsunit/es6/proxies.js | 13
-rw-r--r--  deps/v8/test/mjsunit/es6/regress/regress-777182.js | 9
-rw-r--r--  deps/v8/test/mjsunit/es6/string-html.js | 3
-rw-r--r--  deps/v8/test/mjsunit/es6/string-repeat.js | 5
-rw-r--r--  deps/v8/test/mjsunit/es6/typedarray-set-bytelength-not-smi.js | 21
-rw-r--r--  deps/v8/test/mjsunit/es6/typedarray.js | 108
-rw-r--r--  deps/v8/test/mjsunit/function-bind.js | 52
-rw-r--r--  deps/v8/test/mjsunit/harmony/array-sort-comparefn.js | 38
-rw-r--r--  deps/v8/test/mjsunit/harmony/bigint.js | 355
-rw-r--r--  deps/v8/test/mjsunit/harmony/modules-import-large.js | 1120
-rw-r--r--  deps/v8/test/mjsunit/harmony/modules-skip-large1.js | 2204
-rw-r--r--  deps/v8/test/mjsunit/harmony/modules-skip-large2.js | 1104
-rw-r--r--  deps/v8/test/mjsunit/harmony/promise-prototype-finally.js | 81
-rw-r--r--  deps/v8/test/mjsunit/harmony/regexp-named-captures.js | 37
-rw-r--r--  deps/v8/test/mjsunit/harmony/regress/regress-771470.js | 8
-rw-r--r--  deps/v8/test/mjsunit/harmony/regress/regress-772649.js | 11
-rw-r--r--  deps/v8/test/mjsunit/messages.js | 21
-rw-r--r--  deps/v8/test/mjsunit/mjsunit.js | 14
-rw-r--r--  deps/v8/test/mjsunit/mjsunit.status | 7
-rw-r--r--  deps/v8/test/mjsunit/optimized-foreach-polymorph.js | 111
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-2435.js | 25
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-6838-1.js | 33
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-6838-2.js | 101
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-6838-3.js | 39
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-6907.js | 21
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-719380.js | 2
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-760268.js | 11
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-760790.js | 11
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-761639.js | 10
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-772190.js | 10
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-774475.js | 938
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-781218.js | 43
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-537444.js | 4
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-593697-2.js | 4
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-598998.js | 4
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-608278.js | 3
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-647217.js | 3
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-714696.js | 2
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-722871.js | 113
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-764219.js | 35
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-768080.js | 64
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-768367.js | 14
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-769852.js | 14
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-770543.js | 31
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-770581.js | 22
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-771971.js | 12
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-772056.js | 17
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-772610.js | 18
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-772672.js | 11
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-772689.js | 23
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-772720.js | 15
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-772897.js | 20
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-774994.js | 34
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-783132.js | 15
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-v8-6940.js | 9
-rw-r--r--  deps/v8/test/mjsunit/regress/wasm/regress-775710.js | 20
-rw-r--r--  deps/v8/test/mjsunit/regress/wasm/regression-648079.js | 1
-rw-r--r--  deps/v8/test/mjsunit/regress/wasm/regression-702460.js | 1
-rw-r--r--  deps/v8/test/mjsunit/regress/wasm/regression-763697.js | 15
-rw-r--r--  deps/v8/test/mjsunit/regress/wasm/regression-769846.js | 15
-rw-r--r--  deps/v8/test/mjsunit/skipping-inner-functions-bailout.js | 6
-rw-r--r--  deps/v8/test/mjsunit/skipping-inner-functions.js | 70
-rw-r--r--  deps/v8/test/mjsunit/string-charcodeat.js | 3
-rw-r--r--  deps/v8/test/mjsunit/third_party/regexp-pcre/regexp-pcre.js | 2
-rw-r--r--  deps/v8/test/mjsunit/type-profile/collect-type-profile.js | 90
-rw-r--r--  deps/v8/test/mjsunit/unbox-double-arrays.js | 14
-rw-r--r--  deps/v8/test/mjsunit/value-of.js | 12
-rw-r--r--  deps/v8/test/mjsunit/wasm/asm-with-wasm-off.js | 6
-rw-r--r--  deps/v8/test/mjsunit/wasm/atomics.js | 218
-rw-r--r--  deps/v8/test/mjsunit/wasm/disable-trap-handler.js | 9
-rw-r--r--  deps/v8/test/mjsunit/wasm/disallow-codegen.js | 104
-rw-r--r--  deps/v8/test/mjsunit/wasm/exceptions.js | 334
-rw-r--r--  deps/v8/test/mjsunit/wasm/expose-wasm.js | 7
-rw-r--r--  deps/v8/test/mjsunit/wasm/js-api.js | 84
-rw-r--r--  deps/v8/test/mjsunit/wasm/memory-external-call.js | 149
-rw-r--r--  deps/v8/test/mjsunit/wasm/module-memory.js | 6
-rw-r--r--  deps/v8/test/mjsunit/wasm/shared-memory.js | 51
-rw-r--r--  deps/v8/test/mjsunit/wasm/stack.js | 29
-rw-r--r--  deps/v8/test/mjsunit/wasm/streaming-compile.js | 7
-rw-r--r--  deps/v8/test/mjsunit/wasm/streaming-error-position.js | 374
-rw-r--r--  deps/v8/test/mjsunit/wasm/streaming-trap-location.js | 7
-rw-r--r--  deps/v8/test/mjsunit/wasm/trap-location.js | 6
-rw-r--r--  deps/v8/test/mjsunit/wasm/unreachable-validation.js | 2
-rw-r--r--  deps/v8/test/mjsunit/wasm/wasm-constants.js | 28
-rw-r--r--  deps/v8/test/mjsunit/wasm/wasm-module-builder.js | 29
-rw-r--r--  deps/v8/test/mozilla/mozilla.status | 4
-rw-r--r--  deps/v8/test/test262/local-tests/test/intl402/NumberFormat/prototype/formatToParts/default-parameter.js | 30
-rw-r--r--  deps/v8/test/test262/test262.status | 103
-rw-r--r--  deps/v8/test/test262/testcfg.py | 9
-rw-r--r--  deps/v8/test/unittests/BUILD.gn | 46
-rw-r--r--  deps/v8/test/unittests/api/remote-object-unittest.cc | 2
-rw-r--r--  deps/v8/test/unittests/base/logging-unittest.cc | 150
-rw-r--r--  deps/v8/test/unittests/base/template-utils-unittest.cc | 22
-rw-r--r--  deps/v8/test/unittests/code-stub-assembler-unittest.cc | 72
-rw-r--r--  deps/v8/test/unittests/code-stub-assembler-unittest.h | 35
-rw-r--r--  deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc | 4
-rw-r--r--  deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc | 3
-rw-r--r--  deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc | 2
-rw-r--r--  deps/v8/test/unittests/compiler/code-assembler-unittest.cc | 254
-rw-r--r--  deps/v8/test/unittests/compiler/code-assembler-unittest.h | 37
-rw-r--r--  deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc | 2
-rw-r--r--  deps/v8/test/unittests/compiler/common-operator-unittest.cc | 3
-rw-r--r--  deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc | 2
-rw-r--r--deps/v8/test/unittests/compiler/diamond-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/escape-analysis-unittest.cc523
-rw-r--r--deps/v8/test/unittests/compiler/graph-reducer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc12
-rw-r--r--deps/v8/test/unittests/compiler/instruction-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/int64-lowering-unittest.cc22
-rw-r--r--deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc9
-rw-r--r--deps/v8/test/unittests/compiler/js-operator-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/node-cache-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/node-properties-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.cc313
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.h79
-rw-r--r--deps/v8/test/unittests/compiler/ppc/instruction-selector-ppc-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/s390/instruction-selector-s390-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/typed-optimization-unittest.cc2
-rw-r--r--deps/v8/test/unittests/detachable-vector-unittest.cc67
-rw-r--r--deps/v8/test/unittests/eh-frame-iterator-unittest.cc8
-rw-r--r--deps/v8/test/unittests/eh-frame-writer-unittest.cc8
-rw-r--r--deps/v8/test/unittests/heap/barrier-unittest.cc145
-rw-r--r--deps/v8/test/unittests/heap/heap-unittest.cc2
-rw-r--r--deps/v8/test/unittests/heap/marking-unittest.cc6
-rw-r--r--deps/v8/test/unittests/heap/worklist-unittest.cc26
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc31
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc227
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc24
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc4
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc10
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc386
-rw-r--r--deps/v8/test/unittests/libplatform/default-platform-unittest.cc6
-rw-r--r--deps/v8/test/unittests/libplatform/task-queue-unittest.cc2
-rw-r--r--deps/v8/test/unittests/libplatform/worker-thread-unittest.cc36
-rw-r--r--deps/v8/test/unittests/test-utils.cc5
-rw-r--r--deps/v8/test/unittests/test-utils.h5
-rw-r--r--deps/v8/test/unittests/unicode-unittest.cc403
-rw-r--r--deps/v8/test/unittests/unittests.gyp10
-rw-r--r--deps/v8/test/unittests/wasm/control-transfer-unittest.cc7
-rw-r--r--deps/v8/test/unittests/wasm/decoder-unittest.cc5
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc124
-rw-r--r--deps/v8/test/unittests/wasm/leb-helper-unittest.cc2
-rw-r--r--deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc6
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc54
-rw-r--r--deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc211
-rw-r--r--deps/v8/test/unittests/wasm/wasm-heap-unittest.cc157
-rw-r--r--deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc3
-rw-r--r--deps/v8/test/unittests/wasm/wasm-opcodes-unittest.cc1
-rw-r--r--deps/v8/test/wasm-spec-tests/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/wasm-spec-tests/wasm-spec-tests.status1
-rw-r--r--deps/v8/test/webkit/js-continue-break-restrictions-expected.txt6
-rw-r--r--deps/v8/test/webkit/webkit.status4
-rw-r--r--deps/v8/third_party/eu-strip/README.v824
-rwxr-xr-xdeps/v8/third_party/eu-strip/bin/eu-strip bin 0 -> 105120 bytes
-rwxr-xr-xdeps/v8/tools/check-inline-includes.sh21
-rwxr-xr-xdeps/v8/tools/dev/gm.py10
-rwxr-xr-xdeps/v8/tools/grokdump.py93
-rw-r--r--deps/v8/tools/luci-go/linux64/isolate.sha11
-rw-r--r--deps/v8/tools/luci-go/mac64/isolate.sha11
-rw-r--r--deps/v8/tools/luci-go/win64/isolate.exe.sha11
-rwxr-xr-xdeps/v8/tools/perf-to-html.py32
-rwxr-xr-xdeps/v8/tools/presubmit.py7
-rwxr-xr-xdeps/v8/tools/run-tests.py16
-rw-r--r--deps/v8/tools/testrunner/local/variants.py10
-rw-r--r--deps/v8/tools/v8heapconst.py515
-rwxr-xr-xdeps/v8/tools/wasm/update-wasm-spec-tests.sh2
-rw-r--r--deps/v8/tools/whitespace.txt4
1209 files changed, 68264 insertions, 45166 deletions
diff --git a/deps/v8/.git-blame-ignore-revs b/deps/v8/.git-blame-ignore-revs
new file mode 100644
index 0000000000..58d0039ab9
--- /dev/null
+++ b/deps/v8/.git-blame-ignore-revs
@@ -0,0 +1,22 @@
+# git hyper-blame master ignore list.
+#
+# This file contains a list of git hashes of revisions to be ignored by git
+# hyper-blame (in depot_tools). These revisions are considered "unimportant" in
+# that they are unlikely to be what you are interested in when blaming.
+#
+# Instructions:
+# - Only large (generally automated) reformatting or renaming CLs should be
+# added to this list. Do not put things here just because you feel they are
+# trivial or unimportant. If in doubt, do not put it on this list.
+# - Precede each revision with a comment containing the first line of its log.
+# For bulk work over many commits, place all commits in a block with a single
+# comment at the top describing the work done in those commits.
+# - Only put full 40-character hashes on this list (not short hashes or any
+# other revision reference).
+# - Append to the bottom of the file (revisions should be in chronological order
+# from oldest to newest).
+# - Because you must use a hash, you need to append to this list in a follow-up
+# CL to the actual reformatting CL that you are trying to ignore.
+
+# Update of quotations in DEPS file.
+e50b49a0e38b34e2b28e026f4d1c7e0da0c7bb1a
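For orientation, a brief sketch of how this ignore list is consumed (commands assume the v8 checkout root; the depot_tools behaviour is as documented there, not something this diff adds):

    # git hyper-blame (depot_tools) picks up .git-blame-ignore-revs from the
    # repository root automatically and reassigns blame past the listed commits:
    git hyper-blame DEPS

    # git 2.23+ can apply the same list to plain git blame explicitly:
    git blame --ignore-revs-file=.git-blame-ignore-revs DEPS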
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index cf45930889..6861c70994 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -68,6 +68,7 @@
!/testing/gtest/include/gtest/gtest_prod.h
/third_party/*
!/third_party/binutils
+!/third_party/eu-strip
!/third_party/inspector_protocol
/tools/clang
/tools/gcmole/gcmole-tools
@@ -75,9 +76,7 @@
/tools/gyp
/tools/jsfunfuzz/jsfunfuzz
/tools/jsfunfuzz/jsfunfuzz.tar.gz
-/tools/luci-go/linux64/isolate
-/tools/luci-go/mac64/isolate
-/tools/luci-go/win64/isolate.exe
+/tools/luci-go
/tools/oom_dump/oom_dump
/tools/oom_dump/oom_dump.o
/tools/swarming_client
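The pattern pair above relies on gitignore's re-include rule: /third_party/* ignores everything under third_party, and each !-prefixed entry (now including eu-strip) punches a hole for a checked-in subtree. A quick way to see which rule decides a given path, assuming a git checkout of the v8 tree:

    # -v prints the source file and the matching pattern; a match on a
    # !-pattern means the path is re-included rather than ignored.
    git check-ignore -v third_party/eu-strip/README.v8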
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index c54cad02b2..be50e6e499 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -30,6 +30,7 @@ Yandex LLC <*@yandex-team.ru>
StrongLoop, Inc. <*@strongloop.com>
Facebook, Inc. <*@fb.com>
Facebook, Inc. <*@oculus.com>
+Vewd Software AS <*@vewd.com>
Aaron Bieber <deftly@gmail.com>
Abdulla Kamar <abdulla.kamar@gmail.com>
@@ -42,7 +43,7 @@ Alexis Campailla <alexis@janeasystems.com>
Andreas Anyuru <andreas.anyuru@gmail.com>
Andrew Paprocki <andrew@ishiboo.com>
Andrei Kashcha <anvaka@gmail.com>
-Anna Henningsen <addaleax@gmail.com>
+Anna Henningsen <anna@addaleax.net>
Bangfu Tao <bangfu.tao@samsung.com>
Ben Noordhuis <info@bnoordhuis.nl>
Benjamin Tan <demoneaux@gmail.com>
@@ -135,4 +136,4 @@ Wiktor Garbacz <wiktor.garbacz@gmail.com>
Yu Yin <xwafish@gmail.com>
Zac Hansen <xaxxon@gmail.com>
Zhongping Wang <kewpie.w.zp@gmail.com>
-柳荣一 <admin@web-tinker.com>
\ No newline at end of file
+柳荣一 <admin@web-tinker.com>
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 4d3fcd9648..daed449c0a 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -87,7 +87,7 @@ declare_args() {
v8_enable_concurrent_marking = false
# Sets -dV8_CSA_WRITE_BARRIER
- v8_enable_csa_write_barrier = false
+ v8_enable_csa_write_barrier = true
# Build the snapshot with unwinding information for perf.
# Sets -dV8_USE_SNAPSHOT_WITH_UNWINDING_INFO.
@@ -110,6 +110,9 @@ declare_args() {
# Similar to the ARM hard float ABI but on MIPS.
v8_use_mips_abi_hardfloat = true
+ # Controls the threshold for on-heap/off-heap Typed Arrays.
+ v8_typed_array_max_size_in_heap = 64
+
# List of extra files to snapshot. They will be snapshotted in order so
# if files export symbols used by later files, they should go first.
#
@@ -205,8 +208,12 @@ config("libsampler_config") {
# This config should only be applied to code using V8 and not any V8 code
# itself.
config("external_config") {
+ defines = []
if (is_component_build) {
- defines = [ "USING_V8_SHARED" ]
+ defines += [ "USING_V8_SHARED" ]
+ }
+ if (v8_enable_v8_checks) {
+ defines += [ "V8_ENABLE_CHECKS" ] # Used in "include/v8.h".
}
include_dirs = [
"include",
@@ -237,6 +244,8 @@ config("features") {
defines +=
[ "V8_PROMISE_INTERNAL_FIELD_COUNT=${v8_promise_internal_field_count}" ]
}
+ defines +=
+ [ "V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP=${v8_typed_array_max_size_in_heap}" ]
if (v8_enable_future) {
defines += [ "V8_ENABLE_FUTURE" ]
}
@@ -557,14 +566,12 @@ action("js2c") {
"src/js/macros.py",
"src/messages.h",
"src/js/prologue.js",
- "src/js/max-min.js",
"src/js/v8natives.js",
"src/js/array.js",
"src/js/string.js",
"src/js/typedarray.js",
"src/js/weak-collection.js",
"src/js/messages.js",
- "src/js/templates.js",
"src/js/spread.js",
"src/js/proxy.js",
"src/debug/mirrors.js",
@@ -859,7 +866,7 @@ source_set("v8_maybe_snapshot") {
} else {
# Ignore v8_use_external_startup_data setting if no snapshot is used.
public_deps = [
- ":v8_builtins_setup",
+ ":v8_init",
":v8_nosnapshot",
]
}
@@ -959,7 +966,7 @@ if (v8_use_external_startup_data) {
}
}
-v8_source_set("v8_builtins_generators") {
+v8_source_set("v8_initializers") {
visibility = [
":*",
"test/cctest:*",
@@ -990,8 +997,6 @@ v8_source_set("v8_builtins_generators") {
"src/builtins/builtins-conversion-gen.cc",
"src/builtins/builtins-date-gen.cc",
"src/builtins/builtins-debug-gen.cc",
- "src/builtins/builtins-forin-gen.cc",
- "src/builtins/builtins-forin-gen.h",
"src/builtins/builtins-function-gen.cc",
"src/builtins/builtins-generator-gen.cc",
"src/builtins/builtins-global-gen.cc",
@@ -1009,8 +1014,6 @@ v8_source_set("v8_builtins_generators") {
"src/builtins/builtins-promise-gen.h",
"src/builtins/builtins-proxy-gen.cc",
"src/builtins/builtins-proxy-gen.h",
- "src/builtins/builtins-proxy-helpers-gen.cc",
- "src/builtins/builtins-proxy-helpers-gen.h",
"src/builtins/builtins-regexp-gen.cc",
"src/builtins/builtins-regexp-gen.h",
"src/builtins/builtins-sharedarraybuffer-gen.cc",
@@ -1021,6 +1024,7 @@ v8_source_set("v8_builtins_generators") {
"src/builtins/builtins-utils-gen.h",
"src/builtins/builtins-wasm-gen.cc",
"src/builtins/setup-builtins-internal.cc",
+ "src/heap/setup-heap-internal.cc",
"src/ic/accessor-assembler.cc",
"src/ic/accessor-assembler.h",
"src/ic/binary-op-assembler.cc",
@@ -1042,6 +1046,10 @@ v8_source_set("v8_builtins_generators") {
# TODO(mostynb@opera.com): don't exclude these http://crbug.com/752428
"src/builtins/builtins-async-iterator-gen.cc",
"src/builtins/builtins-async-generator-gen.cc",
+
+ # This source file takes an unusually large amount of time to
+ # compile. Build it separately to avoid bottlenecks.
+ "src/builtins/builtins-regexp-gen.cc",
]
}
@@ -1094,11 +1102,11 @@ v8_source_set("v8_builtins_generators") {
configs = [ ":internal_config" ]
}
-v8_source_set("v8_builtins_setup") {
+v8_source_set("v8_init") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [
- ":v8_builtins_generators",
+ ":v8_initializers",
]
sources = [
@@ -1229,6 +1237,7 @@ v8_source_set("v8_base") {
"src/builtins/builtins-api.cc",
"src/builtins/builtins-array.cc",
"src/builtins/builtins-arraybuffer.cc",
+ "src/builtins/builtins-bigint.cc",
"src/builtins/builtins-boolean.cc",
"src/builtins/builtins-call.cc",
"src/builtins/builtins-callsite.cc",
@@ -1423,10 +1432,6 @@ v8_source_set("v8_base") {
"src/compiler/memory-optimizer.h",
"src/compiler/move-optimizer.cc",
"src/compiler/move-optimizer.h",
- "src/compiler/new-escape-analysis-reducer.cc",
- "src/compiler/new-escape-analysis-reducer.h",
- "src/compiler/new-escape-analysis.cc",
- "src/compiler/new-escape-analysis.h",
"src/compiler/node-aux-data.h",
"src/compiler/node-cache.cc",
"src/compiler/node-cache.h",
@@ -1528,6 +1533,8 @@ v8_source_set("v8_base") {
"src/debug/debug-scopes.h",
"src/debug/debug-stack-trace-iterator.cc",
"src/debug/debug-stack-trace-iterator.h",
+ "src/debug/debug-type-profile.cc",
+ "src/debug/debug-type-profile.h",
"src/debug/debug.cc",
"src/debug/debug.h",
"src/debug/interface-types.h",
@@ -1537,6 +1544,7 @@ v8_source_set("v8_base") {
"src/deoptimize-reason.h",
"src/deoptimizer.cc",
"src/deoptimizer.h",
+ "src/detachable-vector.h",
"src/disasm.h",
"src/disassembler.cc",
"src/disassembler.h",
@@ -1567,6 +1575,7 @@ v8_source_set("v8_base") {
"src/extensions/trigger-failure-extension.h",
"src/external-reference-table.cc",
"src/external-reference-table.h",
+ "src/factory-inl.h",
"src/factory.cc",
"src/factory.h",
"src/fast-dtoa.cc",
@@ -1574,13 +1583,10 @@ v8_source_set("v8_base") {
"src/feedback-vector-inl.h",
"src/feedback-vector.cc",
"src/feedback-vector.h",
- "src/ffi/ffi-compiler.cc",
- "src/ffi/ffi-compiler.h",
"src/field-index-inl.h",
"src/field-index.h",
"src/field-type.cc",
"src/field-type.h",
- "src/find-and-replace-pattern.h",
"src/fixed-dtoa.cc",
"src/fixed-dtoa.h",
"src/flag-definitions.h",
@@ -1604,6 +1610,7 @@ v8_source_set("v8_base") {
"src/heap/array-buffer-tracker-inl.h",
"src/heap/array-buffer-tracker.cc",
"src/heap/array-buffer-tracker.h",
+ "src/heap/barrier.h",
"src/heap/code-stats.cc",
"src/heap/code-stats.h",
"src/heap/concurrent-marking.cc",
@@ -1645,8 +1652,6 @@ v8_source_set("v8_base") {
"src/heap/scavenger-inl.h",
"src/heap/scavenger.cc",
"src/heap/scavenger.h",
- "src/heap/sequential-marking-deque.cc",
- "src/heap/sequential-marking-deque.h",
"src/heap/slot-set.h",
"src/heap/spaces-inl.h",
"src/heap/spaces.cc",
@@ -1737,8 +1742,6 @@ v8_source_set("v8_base") {
"src/layout-descriptor-inl.h",
"src/layout-descriptor.cc",
"src/layout-descriptor.h",
- "src/list-inl.h",
- "src/list.h",
"src/locked-queue-inl.h",
"src/locked-queue.h",
"src/log-inl.h",
@@ -1770,8 +1773,9 @@ v8_source_set("v8_base") {
"src/objects.h",
"src/objects/arguments-inl.h",
"src/objects/arguments.h",
- "src/objects/code-cache-inl.h",
- "src/objects/code-cache.h",
+ "src/objects/bigint-inl.h",
+ "src/objects/bigint.cc",
+ "src/objects/bigint.h",
"src/objects/compilation-cache-inl.h",
"src/objects/compilation-cache.h",
"src/objects/debug-objects-inl.h",
@@ -1796,6 +1800,8 @@ v8_source_set("v8_base") {
"src/objects/name.h",
"src/objects/object-macros-undef.h",
"src/objects/object-macros.h",
+ "src/objects/property-descriptor-object-inl.h",
+ "src/objects/property-descriptor-object.h",
"src/objects/regexp-match-info.h",
"src/objects/scope-info.cc",
"src/objects/scope-info.h",
@@ -1806,6 +1812,8 @@ v8_source_set("v8_base") {
"src/objects/string-inl.h",
"src/objects/string-table.h",
"src/objects/string.h",
+ "src/objects/template-objects.cc",
+ "src/objects/template-objects.h",
"src/ostreams.cc",
"src/ostreams.h",
"src/parsing/duplicate-finder.h",
@@ -1902,6 +1910,7 @@ v8_source_set("v8_base") {
"src/runtime-profiler.h",
"src/runtime/runtime-array.cc",
"src/runtime/runtime-atomics.cc",
+ "src/runtime/runtime-bigint.cc",
"src/runtime/runtime-classes.cc",
"src/runtime/runtime-collections.cc",
"src/runtime/runtime-compiler.cc",
@@ -1939,9 +1948,14 @@ v8_source_set("v8_base") {
"src/setup-isolate.h",
"src/signature.h",
"src/simulator.h",
- "src/small-pointer-list.h",
+ "src/snapshot/builtin-deserializer.cc",
+ "src/snapshot/builtin-deserializer.h",
+ "src/snapshot/builtin-serializer.cc",
+ "src/snapshot/builtin-serializer.h",
"src/snapshot/code-serializer.cc",
"src/snapshot/code-serializer.h",
+ "src/snapshot/default-serializer-allocator.cc",
+ "src/snapshot/default-serializer-allocator.h",
"src/snapshot/deserializer.cc",
"src/snapshot/deserializer.h",
"src/snapshot/natives-common.cc",
@@ -2033,6 +2047,8 @@ v8_source_set("v8_base") {
"src/wasm/leb-helper.h",
"src/wasm/local-decl-encoder.cc",
"src/wasm/local-decl-encoder.h",
+ "src/wasm/memory-tracing.cc",
+ "src/wasm/memory-tracing.h",
"src/wasm/module-compiler.cc",
"src/wasm/module-compiler.h",
"src/wasm/module-decoder.cc",
@@ -2048,15 +2064,20 @@ v8_source_set("v8_base") {
"src/wasm/wasm-debug.cc",
"src/wasm/wasm-external-refs.cc",
"src/wasm/wasm-external-refs.h",
+ "src/wasm/wasm-heap.cc",
+ "src/wasm/wasm-heap.h",
"src/wasm/wasm-interpreter.cc",
"src/wasm/wasm-interpreter.h",
"src/wasm/wasm-js.cc",
"src/wasm/wasm-js.h",
"src/wasm/wasm-limits.h",
+ "src/wasm/wasm-memory.cc",
+ "src/wasm/wasm-memory.h",
"src/wasm/wasm-module-builder.cc",
"src/wasm/wasm-module-builder.h",
"src/wasm/wasm-module.cc",
"src/wasm/wasm-module.h",
+ "src/wasm/wasm-objects-inl.h",
"src/wasm/wasm-objects.cc",
"src/wasm/wasm-objects.h",
"src/wasm/wasm-opcodes.cc",
@@ -2073,6 +2094,7 @@ v8_source_set("v8_base") {
"src/zone/zone-chunk-list.h",
"src/zone/zone-containers.h",
"src/zone/zone-handle-set.h",
+ "src/zone/zone-list-inl.h",
"src/zone/zone-segment.cc",
"src/zone/zone-segment.h",
"src/zone/zone.cc",
@@ -2083,8 +2105,15 @@ v8_source_set("v8_base") {
jumbo_excluded_sources = [
# TODO(mostynb@opera.com): don't exclude these http://crbug.com/752428
"src/profiler/heap-snapshot-generator.cc", # Macro clash in mman-linux.h
- "src/compiler/escape-analysis.cc", # Symbol clashes with new-escape-analysis.cc
- "src/compiler/escape-analysis-reducer.cc", # Symbol clashes with new-escape-analysis-reducer.cc
+
+ # These source files take an unusually large amount of time to
+ # compile. Build them separately to avoid bottlenecks.
+ "src/api.cc",
+ "src/code-stub-assembler.cc",
+ "src/elements.cc",
+ "src/heap/heap.cc",
+ "src/objects.cc",
+ "src/parsing/parser.cc",
]
}
@@ -2210,6 +2239,7 @@ v8_source_set("v8_base") {
"src/arm64/eh-frame-arm64.cc",
"src/arm64/frame-constants-arm64.cc",
"src/arm64/frame-constants-arm64.h",
+ "src/arm64/instructions-arm64-constants.cc",
"src/arm64/instructions-arm64.cc",
"src/arm64/instructions-arm64.h",
"src/arm64/instrument-arm64.cc",
@@ -2236,6 +2266,13 @@ v8_source_set("v8_base") {
"src/regexp/arm64/regexp-macro-assembler-arm64.cc",
"src/regexp/arm64/regexp-macro-assembler-arm64.h",
]
+ if (use_jumbo_build) {
+ jumbo_excluded_sources += [
+ # TODO(mostynb@opera.com): fix this code so it doesn't need
+ # to be excluded, see the comments inside.
+ "src/arm64/instructions-arm64-constants.cc",
+ ]
+ }
} else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
sources += [ ### gcmole(arch:mipsel) ###
"src/compiler/mips/code-generator-mips.cc",
@@ -2456,6 +2493,7 @@ v8_component("v8_libbase") {
"src/base/sys-info.h",
"src/base/template-utils.h",
"src/base/timezone-cache.h",
+ "src/base/tsan.h",
"src/base/utils/random-number-generator.cc",
"src/base/utils/random-number-generator.h",
]
@@ -2638,7 +2676,7 @@ if (current_toolchain == v8_snapshot_toolchain) {
deps = [
":v8_base",
- ":v8_builtins_setup",
+ ":v8_init",
":v8_libbase",
":v8_libplatform",
":v8_nosnapshot",
@@ -2751,7 +2789,7 @@ if (is_component_build) {
]
if (v8_use_snapshot) {
- public_deps += [ ":v8_builtins_generators" ]
+ public_deps += [ ":v8_initializers" ]
}
configs = [ ":internal_config" ]
@@ -2777,7 +2815,7 @@ if (is_component_build) {
]
if (v8_use_snapshot) {
- public_deps += [ ":v8_builtins_generators" ]
+ public_deps += [ ":v8_initializers" ]
}
public_configs = [ ":external_config" ]
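Summing up the BUILD.gn changes: v8_enable_csa_write_barrier now defaults to true, a new v8_typed_array_max_size_in_heap argument feeds the V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP define, embedders get V8_ENABLE_CHECKS when v8_enable_v8_checks is set, and the slowest translation units are excluded from jumbo builds. A sketch of overriding these knobs from a v8 checkout (values illustrative, not recommendations):

    # Configure and build with explicit settings for the new/changed args.
    gn gen out/x64.release --args='
        v8_enable_csa_write_barrier = true
        v8_typed_array_max_size_in_heap = 64
        v8_enable_v8_checks = true'
    ninja -C out/x64.release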
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index ffd5fb388d..bed8ed9770 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,1598 @@
+2017-10-10: Version 6.3.292
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-10: Version 6.3.291
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-10: Version 6.3.290
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-10: Version 6.3.289
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-09: Version 6.3.288
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-09: Version 6.3.287
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-09: Version 6.3.286
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-09: Version 6.3.285
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-09: Version 6.3.284
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-09: Version 6.3.283
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-09: Version 6.3.282
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-09: Version 6.3.281
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-09: Version 6.3.280
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-09: Version 6.3.279
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-09: Version 6.3.278
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-09: Version 6.3.277
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-09: Version 6.3.276
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-08: Version 6.3.275
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-07: Version 6.3.274
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-06: Version 6.3.273
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-06: Version 6.3.272
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-06: Version 6.3.271
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-06: Version 6.3.270
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-06: Version 6.3.269
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-06: Version 6.3.268
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-06: Version 6.3.267
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-05: Version 6.3.266
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-05: Version 6.3.265
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-05: Version 6.3.264
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-05: Version 6.3.263
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-05: Version 6.3.262
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-05: Version 6.3.261
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-05: Version 6.3.260
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-05: Version 6.3.259
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-05: Version 6.3.258
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-04: Version 6.3.257
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-04: Version 6.3.256
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-04: Version 6.3.255
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-04: Version 6.3.254
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-04: Version 6.3.253
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-03: Version 6.3.252
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-03: Version 6.3.251
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-03: Version 6.3.250
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-02: Version 6.3.249
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-02: Version 6.3.248
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-02: Version 6.3.247
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-02: Version 6.3.246
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-02: Version 6.3.245
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-02: Version 6.3.244
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-02: Version 6.3.243
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-02: Version 6.3.242
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-02: Version 6.3.241
+
+ Performance and stability improvements on all platforms.
+
+
+2017-10-01: Version 6.3.240
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-29: Version 6.3.239
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-29: Version 6.3.238
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-29: Version 6.3.237
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-29: Version 6.3.236
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-29: Version 6.3.235
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-29: Version 6.3.234
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-29: Version 6.3.233
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-29: Version 6.3.232
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-29: Version 6.3.231
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-29: Version 6.3.230
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-29: Version 6.3.229
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-29: Version 6.3.228
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-29: Version 6.3.227
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-28: Version 6.3.226
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-28: Version 6.3.225
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-28: Version 6.3.224
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-28: Version 6.3.223
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-28: Version 6.3.222
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-27: Version 6.3.221
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-27: Version 6.3.220
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-27: Version 6.3.219
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-27: Version 6.3.218
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-27: Version 6.3.217
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-26: Version 6.3.216
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-26: Version 6.3.215
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-26: Version 6.3.214
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-26: Version 6.3.213
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-25: Version 6.3.212
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-25: Version 6.3.211
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-25: Version 6.3.210
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-25: Version 6.3.209
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-25: Version 6.3.208
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-25: Version 6.3.207
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-25: Version 6.3.206
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-25: Version 6.3.205
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-25: Version 6.3.204
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-25: Version 6.3.203
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-25: Version 6.3.202
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-25: Version 6.3.201
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-24: Version 6.3.200
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-24: Version 6.3.199
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-22: Version 6.3.198
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-22: Version 6.3.197
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-21: Version 6.3.196
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-21: Version 6.3.195
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-21: Version 6.3.194
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-21: Version 6.3.193
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-21: Version 6.3.192
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-21: Version 6.3.191
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-21: Version 6.3.190
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-21: Version 6.3.189
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-21: Version 6.3.188
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-21: Version 6.3.187
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-21: Version 6.3.186
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-21: Version 6.3.185
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-20: Version 6.3.184
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-20: Version 6.3.183
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-20: Version 6.3.182
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-19: Version 6.3.181
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-19: Version 6.3.180
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-18: Version 6.3.179
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-18: Version 6.3.178
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-18: Version 6.3.177
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-18: Version 6.3.176
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-18: Version 6.3.175
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-18: Version 6.3.174
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-18: Version 6.3.173
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-18: Version 6.3.172
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-18: Version 6.3.171
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-18: Version 6.3.170
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-18: Version 6.3.169
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-18: Version 6.3.168
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-18: Version 6.3.167
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-18: Version 6.3.166
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-18: Version 6.3.165
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-17: Version 6.3.164
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-16: Version 6.3.163
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-15: Version 6.3.162
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-15: Version 6.3.161
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-15: Version 6.3.160
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-15: Version 6.3.159
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-15: Version 6.3.158
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-15: Version 6.3.157
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-15: Version 6.3.156
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-15: Version 6.3.155
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-15: Version 6.3.154
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-15: Version 6.3.153
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-15: Version 6.3.152
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-15: Version 6.3.151
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-14: Version 6.3.150
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-14: Version 6.3.149
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-14: Version 6.3.148
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-14: Version 6.3.147
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-14: Version 6.3.146
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-14: Version 6.3.145
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-14: Version 6.3.144
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-14: Version 6.3.143
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-14: Version 6.3.142
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-14: Version 6.3.141
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-14: Version 6.3.140
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-14: Version 6.3.139
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-13: Version 6.3.138
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-13: Version 6.3.137
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-13: Version 6.3.136
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-13: Version 6.3.135
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-13: Version 6.3.134
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-13: Version 6.3.133
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-13: Version 6.3.132
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-13: Version 6.3.131
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-13: Version 6.3.130
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-13: Version 6.3.129
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-13: Version 6.3.128
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-13: Version 6.3.127
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-12: Version 6.3.126
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-12: Version 6.3.125
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-12: Version 6.3.124
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-12: Version 6.3.123
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-12: Version 6.3.122
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-12: Version 6.3.121
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-12: Version 6.3.120
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-12: Version 6.3.119
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-12: Version 6.3.118
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-12: Version 6.3.117
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-12: Version 6.3.116
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-12: Version 6.3.115
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-12: Version 6.3.114
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-12: Version 6.3.113
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-12: Version 6.3.112
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-12: Version 6.3.111
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-11: Version 6.3.110
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-11: Version 6.3.109
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-11: Version 6.3.108
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-11: Version 6.3.107
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-11: Version 6.3.106
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-11: Version 6.3.105
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-11: Version 6.3.104
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-11: Version 6.3.103
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-11: Version 6.3.102
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-11: Version 6.3.101
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-11: Version 6.3.100
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-11: Version 6.3.99
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-11: Version 6.3.98
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-11: Version 6.3.97
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-11: Version 6.3.96
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-11: Version 6.3.95
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-11: Version 6.3.94
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-11: Version 6.3.93
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-10: Version 6.3.92
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-09: Version 6.3.91
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-08: Version 6.3.90
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-08: Version 6.3.89
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-08: Version 6.3.88
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-08: Version 6.3.87
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-08: Version 6.3.86
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-08: Version 6.3.85
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-08: Version 6.3.84
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-08: Version 6.3.83
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-08: Version 6.3.82
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-08: Version 6.3.81
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-08: Version 6.3.80
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-08: Version 6.3.79
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-08: Version 6.3.78
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-08: Version 6.3.77
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-08: Version 6.3.76
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-08: Version 6.3.75
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-08: Version 6.3.74
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-07: Version 6.3.73
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-07: Version 6.3.72
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-07: Version 6.3.71
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-07: Version 6.3.70
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-07: Version 6.3.69
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-07: Version 6.3.68
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-07: Version 6.3.67
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-07: Version 6.3.66
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-07: Version 6.3.65
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-07: Version 6.3.64
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-07: Version 6.3.63
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-07: Version 6.3.62
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-07: Version 6.3.61
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-07: Version 6.3.60
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-07: Version 6.3.59
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-07: Version 6.3.58
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-07: Version 6.3.57
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-07: Version 6.3.56
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-07: Version 6.3.55
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-07: Version 6.3.54
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-06: Version 6.3.53
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-06: Version 6.3.52
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-06: Version 6.3.51
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-06: Version 6.3.50
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-06: Version 6.3.49
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-06: Version 6.3.48
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-06: Version 6.3.47
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-06: Version 6.3.46
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-06: Version 6.3.45
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-06: Version 6.3.44
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-06: Version 6.3.43
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-06: Version 6.3.42
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-05: Version 6.3.41
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-05: Version 6.3.40
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-05: Version 6.3.39
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-05: Version 6.3.38
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-05: Version 6.3.37
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-05: Version 6.3.36
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-05: Version 6.3.35
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-05: Version 6.3.34
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-05: Version 6.3.33
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-05: Version 6.3.32
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-05: Version 6.3.31
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-05: Version 6.3.30
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-05: Version 6.3.29
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-04: Version 6.3.28
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-04: Version 6.3.27
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-04: Version 6.3.26
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-04: Version 6.3.25
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-04: Version 6.3.24
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-04: Version 6.3.23
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-04: Version 6.3.22
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-04: Version 6.3.21
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-04: Version 6.3.20
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-04: Version 6.3.19
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-04: Version 6.3.18
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-04: Version 6.3.17
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-04: Version 6.3.16
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-04: Version 6.3.15
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-04: Version 6.3.14
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-04: Version 6.3.13
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-04: Version 6.3.12
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-04: Version 6.3.11
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-04: Version 6.3.10
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-04: Version 6.3.9
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-04: Version 6.3.8
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-04: Version 6.3.7
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-04: Version 6.3.6
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-04: Version 6.3.5
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-03: Version 6.3.4
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-03: Version 6.3.3
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-01: Version 6.3.2
+
+ Performance and stability improvements on all platforms.
+
+
+2017-09-01: Version 6.3.1
+
+ Performance and stability improvements on all platforms.
+
+
+2017-08-31: Version 6.2.441
+
+ Performance and stability improvements on all platforms.
+
+
+2017-08-31: Version 6.2.440
+
+ Performance and stability improvements on all platforms.
+
+
+2017-08-31: Version 6.2.439
+
+ Performance and stability improvements on all platforms.
+
+
+2017-08-30: Version 6.2.438
+
+ Performance and stability improvements on all platforms.
+
+
+2017-08-30: Version 6.2.437
+
+ Performance and stability improvements on all platforms.
+
+
+2017-08-30: Version 6.2.436
+
+ Performance and stability improvements on all platforms.
+
+
+2017-08-30: Version 6.2.435
+
+ Performance and stability improvements on all platforms.
+
+
+2017-08-30: Version 6.2.434
+
+ Performance and stability improvements on all platforms.
+
+
+2017-08-30: Version 6.2.433
+
+ Performance and stability improvements on all platforms.
+
+
+2017-08-30: Version 6.2.432
+
+ Performance and stability improvements on all platforms.
+
+
+2017-08-30: Version 6.2.431
+
+ Performance and stability improvements on all platforms.
+
+
+2017-08-30: Version 6.2.430
+
+ Performance and stability improvements on all platforms.
+
+
+2017-08-30: Version 6.2.429
+
+ Performance and stability improvements on all platforms.
+
+
+2017-08-30: Version 6.2.428
+
+ Performance and stability improvements on all platforms.
+
+
+2017-08-30: Version 6.2.427
+
+ Performance and stability improvements on all platforms.
+
+
+2017-08-30: Version 6.2.426
+
+ Performance and stability improvements on all platforms.
+
+
+2017-08-30: Version 6.2.425
+
+ Performance and stability improvements on all platforms.
+
+
+2017-08-30: Version 6.2.424
+
+ Performance and stability improvements on all platforms.
+
+
+2017-08-30: Version 6.2.423
+
+ Performance and stability improvements on all platforms.
+
+
+2017-08-30: Version 6.2.422
+
+ Performance and stability improvements on all platforms.
+
+
+2017-08-30: Version 6.2.421
+
+ Performance and stability improvements on all platforms.
+
+
+2017-08-30: Version 6.2.420
+
+ Performance and stability improvements on all platforms.
+
+
+2017-08-30: Version 6.2.419
+
+ Performance and stability improvements on all platforms.
+
+
+2017-08-30: Version 6.2.418
+
+ Performance and stability improvements on all platforms.
+
+
+2017-08-29: Version 6.2.417
+
+ Performance and stability improvements on all platforms.
+
+
+2017-08-29: Version 6.2.416
+
+ Performance and stability improvements on all platforms.
+
+
+2017-08-29: Version 6.2.415
+
+ Performance and stability improvements on all platforms.
+
+
2017-08-29: Version 6.2.414
Performance and stability improvements on all platforms.
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index 4b64895ced..b675dd830e 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -3,72 +3,73 @@
# all paths in here must match this assumption.
vars = {
- "chromium_url": "https://chromium.googlesource.com",
+ 'chromium_url': 'https://chromium.googlesource.com',
}
deps = {
- "v8/build":
- Var("chromium_url") + "/chromium/src/build.git" + "@" + "48a2b7b39debc7c77c868c9ddb0a360af1ebc367",
- "v8/tools/gyp":
- Var("chromium_url") + "/external/gyp.git" + "@" + "d61a9397e668fa9843c4aa7da9e79460fe590bfb",
- "v8/third_party/icu":
- Var("chromium_url") + "/chromium/deps/icu.git" + "@" + "21d33b1a09a77f033478ea4ffffb61e6970f83bd",
- "v8/third_party/instrumented_libraries":
- Var("chromium_url") + "/chromium/src/third_party/instrumented_libraries.git" + "@" + "644afd349826cb68204226a16c38bde13abe9c3c",
- "v8/buildtools":
- Var("chromium_url") + "/chromium/buildtools.git" + "@" + "5af0a3a8b89827a8634132080a39ab4b63dee489",
- "v8/base/trace_event/common":
- Var("chromium_url") + "/chromium/src/base/trace_event/common.git" + "@" + "65d1d42a5df6c0a563a6fdfa58a135679185e5d9",
- "v8/third_party/jinja2":
- Var("chromium_url") + "/chromium/src/third_party/jinja2.git" + "@" + "d34383206fa42d52faa10bb9931d6d538f3a57e0",
- "v8/third_party/markupsafe":
- Var("chromium_url") + "/chromium/src/third_party/markupsafe.git" + "@" + "8f45f5cfa0009d2a70589bcda0349b8cb2b72783",
- "v8/tools/swarming_client":
- Var('chromium_url') + '/external/swarming.client.git' + '@' + "42721e128da760b345ab60d7cf34e300269112d7",
- "v8/testing/gtest":
- Var("chromium_url") + "/external/github.com/google/googletest.git" + "@" + "6f8a66431cb592dad629028a50b3dd418a408c87",
- "v8/testing/gmock":
- Var("chromium_url") + "/external/googlemock.git" + "@" + "0421b6f358139f02e102c9c332ce19a33faf75be",
- "v8/test/benchmarks/data":
- Var("chromium_url") + "/v8/deps/third_party/benchmarks.git" + "@" + "05d7188267b4560491ff9155c5ee13e207ecd65f",
- "v8/test/mozilla/data":
- Var("chromium_url") + "/v8/deps/third_party/mozilla-tests.git" + "@" + "f6c578a10ea707b1a8ab0b88943fe5115ce2b9be",
- "v8/test/test262/data":
- Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "1b911a8f8abf4cb63882cfbe72dcd4c82bb8ad91",
- "v8/test/test262/harness":
- Var("chromium_url") + "/external/github.com/test262-utils/test262-harness-py.git" + "@" + "0f2acdd882c84cff43b9d60df7574a1901e2cdcd",
- "v8/tools/clang":
- Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "40f69660bf3cd407e72b8ae240fdd6c513dddbfe",
- "v8/test/wasm-js":
- Var("chromium_url") + "/external/github.com/WebAssembly/spec.git" + "@" + "17b4a4d98c80b1ec736649d5a73496a0e6d12d4c",
-}
-
-deps_os = {
- "android": {
- "v8/third_party/android_tools":
- Var("chromium_url") + "/android_tools.git" + "@" + "e9d4018e149d50172ed462a7c21137aa915940ec",
- "v8/third_party/catapult":
- Var('chromium_url') + "/external/github.com/catapult-project/catapult.git" + "@" + "7149cbfdfd26a5dd8e5d96cbb1da9356e2813a5d",
+ 'v8/build':
+ Var('chromium_url') + '/chromium/src/build.git' + '@' + 'adaf9e56105b814105e2d49bc4fa63e2cd4795f5',
+ 'v8/tools/gyp':
+ Var('chromium_url') + '/external/gyp.git' + '@' + 'd61a9397e668fa9843c4aa7da9e79460fe590bfb',
+ 'v8/third_party/icu':
+ Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '21d33b1a09a77f033478ea4ffffb61e6970f83bd',
+ 'v8/third_party/instrumented_libraries':
+ Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '644afd349826cb68204226a16c38bde13abe9c3c',
+ 'v8/buildtools':
+ Var('chromium_url') + '/chromium/buildtools.git' + '@' + 'f6d165d9d842ddd29056c127a5f3a3c5d8e0d2e3',
+ 'v8/base/trace_event/common':
+ Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + 'abcc4153b783b5e2c2dafcfbf658017ecb56989a',
+ 'v8/third_party/android_tools': {
+ 'url': Var('chromium_url') + '/android_tools.git' + '@' + 'ca9dc7245b888c75307f0619e4a39fb46a82de66',
+ 'condition': 'checkout_android',
+ },
+ 'v8/third_party/catapult': {
+ 'url': Var('chromium_url') + '/catapult.git' + '@' + 'a48a6afde0ff7eeb1c847744192977e412107d6a',
+ 'condition': 'checkout_android',
},
+ 'v8/third_party/jinja2':
+ Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + 'd34383206fa42d52faa10bb9931d6d538f3a57e0',
+ 'v8/third_party/markupsafe':
+ Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '8f45f5cfa0009d2a70589bcda0349b8cb2b72783',
+ 'v8/tools/swarming_client':
+ Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '5e8001d9a710121ce7a68efd0804430a34b4f9e4',
+ 'v8/testing/gtest':
+ Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '6f8a66431cb592dad629028a50b3dd418a408c87',
+ 'v8/testing/gmock':
+ Var('chromium_url') + '/external/googlemock.git' + '@' + '0421b6f358139f02e102c9c332ce19a33faf75be',
+ 'v8/test/benchmarks/data':
+ Var('chromium_url') + '/v8/deps/third_party/benchmarks.git' + '@' + '05d7188267b4560491ff9155c5ee13e207ecd65f',
+ 'v8/test/mozilla/data':
+ Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
+ 'v8/test/test262/data':
+ Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '290799bbeeba86245a355894b6ff2bb33d946d9e',
+ 'v8/test/test262/harness':
+ Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '0f2acdd882c84cff43b9d60df7574a1901e2cdcd',
+ 'v8/tools/clang':
+ Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'b3169f97cc1a9daa1a9fbae15752588079792098',
+ 'v8/tools/luci-go':
+ Var('chromium_url') + '/chromium/src/tools/luci-go.git' + '@' + '9f54aa9fe06499b6bac378ae1f045be2158cf2cc',
+ 'v8/test/wasm-js':
+ Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + '89573ee3eabc690637deeb1b8dadec13a963ec30',
}
recursedeps = [
- "v8/buildtools",
- "v8/third_party/android_tools",
+ 'v8/buildtools',
+ 'v8/third_party/android_tools',
]
include_rules = [
# Everybody can use some things.
- "+include",
- "+unicode",
- "+third_party/fdlibm",
+ '+include',
+ '+unicode',
+ '+third_party/fdlibm',
]
# checkdeps.py shouldn't check for includes in these directories:
skip_child_includes = [
- "build",
- "gypfiles",
- "third_party",
+ 'build',
+ 'gypfiles',
+ 'third_party',
]
hooks = [
@@ -85,41 +86,45 @@ hooks = [
},
# Pull clang-format binaries using checked-in hashes.
{
- "name": "clang_format_win",
- "pattern": ".",
- "action": [ "download_from_google_storage",
- "--no_resume",
- "--platform=win32",
- "--no_auth",
- "--bucket", "chromium-clang-format",
- "-s", "v8/buildtools/win/clang-format.exe.sha1",
+ 'name': 'clang_format_win',
+ 'pattern': '.',
+ 'condition': 'host_os == "win"',
+ 'action': [ 'download_from_google_storage',
+ '--no_resume',
+ '--platform=win32',
+ '--no_auth',
+ '--bucket', 'chromium-clang-format',
+ '-s', 'v8/buildtools/win/clang-format.exe.sha1',
],
},
{
- "name": "clang_format_mac",
- "pattern": ".",
- "action": [ "download_from_google_storage",
- "--no_resume",
- "--platform=darwin",
- "--no_auth",
- "--bucket", "chromium-clang-format",
- "-s", "v8/buildtools/mac/clang-format.sha1",
+ 'name': 'clang_format_mac',
+ 'pattern': '.',
+ 'condition': 'host_os == "mac"',
+ 'action': [ 'download_from_google_storage',
+ '--no_resume',
+ '--platform=darwin',
+ '--no_auth',
+ '--bucket', 'chromium-clang-format',
+ '-s', 'v8/buildtools/mac/clang-format.sha1',
],
},
{
- "name": "clang_format_linux",
- "pattern": ".",
- "action": [ "download_from_google_storage",
- "--no_resume",
- "--platform=linux*",
- "--no_auth",
- "--bucket", "chromium-clang-format",
- "-s", "v8/buildtools/linux64/clang-format.sha1",
+ 'name': 'clang_format_linux',
+ 'pattern': '.',
+ 'condition': 'host_os == "linux"',
+ 'action': [ 'download_from_google_storage',
+ '--no_resume',
+ '--platform=linux*',
+ '--no_auth',
+ '--bucket', 'chromium-clang-format',
+ '-s', 'v8/buildtools/linux64/clang-format.sha1',
],
},
{
'name': 'gcmole',
'pattern': '.',
+ # TODO(machenbach): Insert condition and remove GYP_DEFINES dependency.
'action': [
'python',
'v8/tools/gcmole/download_gcmole_tools.py',
@@ -128,6 +133,7 @@ hooks = [
{
'name': 'jsfunfuzz',
'pattern': '.',
+ # TODO(machenbach): Insert condition and remove GYP_DEFINES dependency.
'action': [
'python',
'v8/tools/jsfunfuzz/download_jsfunfuzz.py',
@@ -137,6 +143,7 @@ hooks = [
{
'name': 'luci-go_win',
'pattern': '.',
+ 'condition': 'host_os == "win"',
'action': [ 'download_from_google_storage',
'--no_resume',
'--platform=win32',
@@ -148,6 +155,7 @@ hooks = [
{
'name': 'luci-go_mac',
'pattern': '.',
+ 'condition': 'host_os == "mac"',
'action': [ 'download_from_google_storage',
'--no_resume',
'--platform=darwin',
@@ -159,6 +167,7 @@ hooks = [
{
'name': 'luci-go_linux',
'pattern': '.',
+ 'condition': 'host_os == "linux"',
'action': [ 'download_from_google_storage',
'--no_resume',
'--platform=linux*',
@@ -169,58 +178,61 @@ hooks = [
},
# Pull GN using checked-in hashes.
{
- "name": "gn_win",
- "pattern": ".",
- "action": [ "download_from_google_storage",
- "--no_resume",
- "--platform=win32",
- "--no_auth",
- "--bucket", "chromium-gn",
- "-s", "v8/buildtools/win/gn.exe.sha1",
+ 'name': 'gn_win',
+ 'pattern': '.',
+ 'condition': 'host_os == "win"',
+ 'action': [ 'download_from_google_storage',
+ '--no_resume',
+ '--platform=win32',
+ '--no_auth',
+ '--bucket', 'chromium-gn',
+ '-s', 'v8/buildtools/win/gn.exe.sha1',
],
},
{
- "name": "gn_mac",
- "pattern": ".",
- "action": [ "download_from_google_storage",
- "--no_resume",
- "--platform=darwin",
- "--no_auth",
- "--bucket", "chromium-gn",
- "-s", "v8/buildtools/mac/gn.sha1",
+ 'name': 'gn_mac',
+ 'pattern': '.',
+ 'condition': 'host_os == "mac"',
+ 'action': [ 'download_from_google_storage',
+ '--no_resume',
+ '--platform=darwin',
+ '--no_auth',
+ '--bucket', 'chromium-gn',
+ '-s', 'v8/buildtools/mac/gn.sha1',
],
},
{
- "name": "gn_linux",
- "pattern": ".",
- "action": [ "download_from_google_storage",
- "--no_resume",
- "--platform=linux*",
- "--no_auth",
- "--bucket", "chromium-gn",
- "-s", "v8/buildtools/linux64/gn.sha1",
+ 'name': 'gn_linux',
+ 'pattern': '.',
+ 'condition': 'host_os == "linux"',
+ 'action': [ 'download_from_google_storage',
+ '--no_resume',
+ '--platform=linux*',
+ '--no_auth',
+ '--bucket', 'chromium-gn',
+ '-s', 'v8/buildtools/linux64/gn.sha1',
],
},
{
- "name": "wasm_spec_tests",
- "pattern": ".",
- "action": [ "download_from_google_storage",
- "--no_resume",
- "--no_auth",
- "-u",
- "--bucket", "v8-wasm-spec-tests",
- "-s", "v8/test/wasm-spec-tests/tests.tar.gz.sha1",
+ 'name': 'wasm_spec_tests',
+ 'pattern': '.',
+ 'action': [ 'download_from_google_storage',
+ '--no_resume',
+ '--no_auth',
+ '-u',
+ '--bucket', 'v8-wasm-spec-tests',
+ '-s', 'v8/test/wasm-spec-tests/tests.tar.gz.sha1',
],
},
{
- "name": "closure_compiler",
- "pattern": ".",
- "action": [ "download_from_google_storage",
- "--no_resume",
- "--no_auth",
- "-u",
- "--bucket", "chromium-v8-closure-compiler",
- "-s", "v8/src/inspector/build/closure-compiler.tar.gz.sha1",
+ 'name': 'closure_compiler',
+ 'pattern': '.',
+ 'action': [ 'download_from_google_storage',
+ '--no_resume',
+ '--no_auth',
+ '-u',
+ '--bucket', 'chromium-v8-closure-compiler',
+ '-s', 'v8/src/inspector/build/closure-compiler.tar.gz.sha1',
],
},
{
@@ -240,6 +252,7 @@ hooks = [
# GYP_DEFINES.
'name': 'instrumented_libraries',
'pattern': '\\.sha1',
+ # TODO(machenbach): Insert condition and remove GYP_DEFINES dependency.
'action': [
'python',
'v8/third_party/instrumented_libraries/scripts/download_binaries.py',
@@ -249,6 +262,7 @@ hooks = [
# Update the Windows toolchain if necessary.
'name': 'win_toolchain',
'pattern': '.',
+ 'condition': 'checkout_win',
'action': ['python', 'v8/build/vs_toolchain.py', 'update'],
},
# Pull binutils for linux, enabled debug fission for faster linking /
@@ -257,21 +271,21 @@ hooks = [
{
'name': 'binutils',
'pattern': 'v8/third_party/binutils',
+ 'condition': 'host_os == "linux"',
'action': [
'python',
'v8/third_party/binutils/download.py',
],
},
{
- # Pull clang if needed or requested via GYP_DEFINES.
# Note: On Win, this should run after win_toolchain, as it may use it.
'name': 'clang',
'pattern': '.',
- 'action': ['python', 'v8/tools/clang/scripts/update.py', '--if-needed'],
+ 'action': ['python', 'v8/tools/clang/scripts/update.py'],
},
{
# A change to a .gyp, .gypi, or to GYP itself should run the generator.
- "pattern": ".",
- "action": ["python", "v8/gypfiles/gyp_v8", "--running-as-hook"],
+ 'pattern': '.',
+ 'action': ['python', 'v8/gypfiles/gyp_v8', '--running-as-hook'],
},
]
diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py
index 2d79ae682c..1ef291f6fa 100644
--- a/deps/v8/PRESUBMIT.py
+++ b/deps/v8/PRESUBMIT.py
@@ -153,16 +153,17 @@ def _CheckUnwantedDependencies(input_api, output_api):
return results
+# TODO(mstarzinger): Similar checking should be made available as part of
+# tools/presubmit.py (note that tools/check-inline-includes.sh exists).
def _CheckNoInlineHeaderIncludesInNormalHeaders(input_api, output_api):
"""Attempts to prevent inclusion of inline headers into normal header
files. This tries to establish a layering where inline headers can be
included by other inline headers or compilation units only."""
file_inclusion_pattern = r'(?!.+-inl\.h).+\.h'
include_directive_pattern = input_api.re.compile(r'#include ".+-inl.h"')
- include_warning = (
- 'You might be including an inline header (e.g. foo-inl.h) within a\n'
- 'normal header (e.g. bar.h) file. Can you avoid introducing the\n'
- '#include? The commit queue will not block on this warning.')
+ include_error = (
+ 'You are including an inline header (e.g. foo-inl.h) within a normal\n'
+ 'header file (e.g. bar.h). This violates the layering of dependencies.')
def FilterFile(affected_file):
black_list = (_EXCLUDED_PATHS +
@@ -181,7 +182,7 @@ def _CheckNoInlineHeaderIncludesInNormalHeaders(input_api, output_api):
'%s:%d\n %s' % (local_path, line_number, line.strip()))
if problems:
- return [output_api.PresubmitPromptOrNotify(include_warning, problems)]
+ return [output_api.PresubmitError(include_error, problems)]
else:
return []
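
The layering that _CheckNoInlineHeaderIncludesInNormalHeaders enforces is easiest to see with a small sketch; the file names below are invented for illustration:

    // bar.h -- a normal header; the presubmit now errors on this include:
    #include "src/foo-inl.h"  // not allowed: inline header in a normal header

    // bar.cc (or bar-inl.h) -- compilation units and other inline headers
    // may include it freely:
    #include "src/foo-inl.h"  // allowed
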
@@ -279,6 +280,7 @@ def _CommonChecks(input_api, output_api):
_CheckNoInlineHeaderIncludesInNormalHeaders(input_api, output_api))
results.extend(_CheckMissingFiles(input_api, output_api))
results.extend(_CheckJSONFiles(input_api, output_api))
+ results.extend(_CheckMacroUndefs(input_api, output_api))
return results
@@ -337,6 +339,66 @@ def _CheckJSONFiles(input_api, output_api):
return [output_api.PresubmitError(r) for r in results]
+def _CheckMacroUndefs(input_api, output_api):
+ """
+ Checks that each #define in a .cc file is eventually followed by an #undef.
+
+ TODO(clemensh): This check should eventually be enabled for all cc files via
+ tools/presubmit.py (https://crbug.com/v8/6811).
+ """
+ def FilterFile(affected_file):
+ # Skip header files, as they often define type lists which are used in
+ # other files.
+ white_list = (r'.+\.cc', r'.+\.cpp', r'.+\.c')
+ return input_api.FilterSourceFile(affected_file, white_list=white_list)
+
+ def TouchesMacros(f):
+ for line in f.GenerateScmDiff().splitlines():
+ if not line.startswith('+') and not line.startswith('-'):
+ continue
+ if define_pattern.match(line[1:]) or undef_pattern.match(line[1:]):
+ return True
+ return False
+
+ define_pattern = input_api.re.compile(r'#define (\w+)')
+ undef_pattern = input_api.re.compile(r'#undef (\w+)')
+ errors = []
+ for f in input_api.AffectedFiles(
+ file_filter=FilterFile, include_deletes=False):
+ if not TouchesMacros(f):
+ continue
+
+ defined_macros = dict()
+ with open(f.LocalPath()) as fh:
+ line_nr = 0
+ for line in fh:
+ line_nr += 1
+
+ define_match = define_pattern.match(line)
+ if define_match:
+ name = define_match.group(1)
+ defined_macros[name] = line_nr
+
+ undef_match = undef_pattern.match(line)
+ if undef_match:
+ name = undef_match.group(1)
+ if name not in defined_macros:
+ errors.append('{}:{}: Macro named \'{}\' was not defined before.'
+ .format(f.LocalPath(), line_nr, name))
+ else:
+ del defined_macros[name]
+ for name, line_nr in sorted(defined_macros.items(), key=lambda e: e[1]):
+ errors.append('{}:{}: Macro missing #undef: {}'
+ .format(f.LocalPath(), line_nr, name))
+
+ if errors:
+ return [output_api.PresubmitPromptOrNotify(
+ 'Detected mismatches in #define / #undef in the file(s) where you '
+ 'modified preprocessor macros.',
+ errors)]
+ return []
+
+
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
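
Concretely, the new _CheckMacroUndefs check expects file-local macros in .cc files to be paired with an #undef before the file ends. A minimal sketch of the accepted pattern (all names invented):

    // kinds.cc -- sketch of the #define/#undef pairing the presubmit expects.
    enum Kind { kSmi, kHeapNumber };

    #define CASE(type) case k##type: return #type;  // file-local helper macro

    const char* KindName(Kind kind) {
      switch (kind) {
        CASE(Smi)
        CASE(HeapNumber)
      }
      return "unknown";
    }

    #undef CASE  // leaving CASE defined to the end of the file is now flagged
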
diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h
index bdc450d568..132a4ea66f 100644
--- a/deps/v8/base/trace_event/common/trace_event_common.h
+++ b/deps/v8/base/trace_event/common/trace_event_common.h
@@ -783,13 +783,22 @@
INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
-
#define TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP0(category_group, name, \
id, timestamp) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
-
+#define TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP1( \
+ category_group, name, id, timestamp, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+ arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT_WITH_TIMESTAMP0( \
+ category_group, name, id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0( \
category_group, name, id, timestamp) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
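
The two additions round out the nestable-async family for events whose timestamps are captured elsewhere. A usage sketch, assuming the surrounding trace infrastructure is already set up (the category, event names, and id are invented):

    // Sketch: close an async event with an explicit end timestamp and one
    // argument, using the newly added macro.
    void ReportParseDone(const void* id, int64_t end_us, size_t bytes) {
      TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP1(
          "v8.compile", "Parse", id, end_us, "bytes", bytes);
    }

    // Sketch: record a single point-in-time marker inside an async span.
    void ReportMark(const void* id, int64_t when_us) {
      TRACE_EVENT_NESTABLE_ASYNC_INSTANT_WITH_TIMESTAMP0(
          "v8.compile", "FirstFunctionParsed", id, when_us);
    }
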
diff --git a/deps/v8/gni/isolate.gni b/deps/v8/gni/isolate.gni
index f5453e5606..4bdf0c0fad 100644
--- a/deps/v8/gni/isolate.gni
+++ b/deps/v8/gni/isolate.gni
@@ -176,7 +176,7 @@ template("v8_isolate_run") {
if (is_win) {
args += [
"--config-variable",
- "msvs_version=2015",
+ "msvs_version=2017",
]
} else {
args += [
diff --git a/deps/v8/gypfiles/features.gypi b/deps/v8/gypfiles/features.gypi
index 6b0b293db6..1d3f67daee 100644
--- a/deps/v8/gypfiles/features.gypi
+++ b/deps/v8/gypfiles/features.gypi
@@ -86,6 +86,9 @@
# Enable concurrent marking.
'v8_enable_concurrent_marking%': 0,
+
+ # Controls the threshold for on-heap/off-heap Typed Arrays.
+ 'v8_typed_array_max_size_in_heap%': 64,
},
'target_defaults': {
'conditions': [
@@ -166,6 +169,7 @@
}, # configurations
'defines': [
'V8_GYP_BUILD',
+ 'V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP=<(v8_typed_array_max_size_in_heap)',
], # defines
}, # target_defaults
}
diff --git a/deps/v8/gypfiles/gyp_environment.py b/deps/v8/gypfiles/gyp_environment.py
index 76ae841ffb..fe6b51f28a 100644
--- a/deps/v8/gypfiles/gyp_environment.py
+++ b/deps/v8/gypfiles/gyp_environment.py
@@ -10,7 +10,6 @@ make sure settings are consistent between them, all setup should happen here.
import os
import sys
-import vs_toolchain
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
V8_ROOT = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir))
@@ -56,5 +55,3 @@ def set_environment():
if not os.environ.get('GYP_GENERATORS'):
# Default to ninja on all platforms.
os.environ['GYP_GENERATORS'] = 'ninja'
-
- vs_toolchain.SetEnvironmentAndGetRuntimeDllDirs()
diff --git a/deps/v8/gypfiles/gyp_v8 b/deps/v8/gypfiles/gyp_v8
index b8b5f742b1..e419b5e89e 100755
--- a/deps/v8/gypfiles/gyp_v8
+++ b/deps/v8/gypfiles/gyp_v8
@@ -38,7 +38,6 @@ import platform
import shlex
import subprocess
import sys
-import vs_toolchain
script_dir = os.path.dirname(os.path.realpath(__file__))
v8_root = os.path.abspath(os.path.join(script_dir, os.pardir))
@@ -101,16 +100,7 @@ def additional_include_files(args=[]):
def run_gyp(args):
- rc = gyp.main(args)
-
- vs2013_runtime_dll_dirs = vs_toolchain.SetEnvironmentAndGetRuntimeDllDirs()
- if vs2013_runtime_dll_dirs:
- x64_runtime, x86_runtime = vs2013_runtime_dll_dirs
- vs_toolchain.CopyVsRuntimeDlls(
- os.path.join(v8_root, GetOutputDirectory()),
- (x86_runtime, x64_runtime))
-
- if rc != 0:
+ if gyp.main(args) != 0:
print 'Error running GYP'
- sys.exit(rc)
+ sys.exit(1)
diff --git a/deps/v8/gypfiles/vs_toolchain.py b/deps/v8/gypfiles/vs_toolchain.py
deleted file mode 100644
index d7676c8da0..0000000000
--- a/deps/v8/gypfiles/vs_toolchain.py
+++ /dev/null
@@ -1,371 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Copyright 2014 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import glob
-import json
-import os
-import pipes
-import shutil
-import subprocess
-import sys
-
-
-script_dir = os.path.dirname(os.path.realpath(__file__))
-chrome_src = os.path.abspath(os.path.join(script_dir, os.pardir))
-SRC_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-sys.path.insert(1, os.path.join(chrome_src, 'tools'))
-sys.path.insert(0, os.path.join(chrome_src, 'tools', 'gyp', 'pylib'))
-json_data_file = os.path.join(script_dir, 'win_toolchain.json')
-
-
-import gyp
-
-
-# Use MSVS2013 as the default toolchain.
-CURRENT_DEFAULT_TOOLCHAIN_VERSION = '2013'
-
-
-def SetEnvironmentAndGetRuntimeDllDirs():
- """Sets up os.environ to use the depot_tools VS toolchain with gyp, and
- returns the location of the VS runtime DLLs so they can be copied into
- the output directory after gyp generation.
- """
- vs_runtime_dll_dirs = None
- depot_tools_win_toolchain = \
- bool(int(os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN', '1')))
- # When running on a non-Windows host, only do this if the SDK has explicitly
- # been downloaded before (in which case json_data_file will exist).
- if ((sys.platform in ('win32', 'cygwin') or os.path.exists(json_data_file))
- and depot_tools_win_toolchain):
- if ShouldUpdateToolchain():
- Update()
- with open(json_data_file, 'r') as tempf:
- toolchain_data = json.load(tempf)
-
- toolchain = toolchain_data['path']
- version = toolchain_data['version']
- win_sdk = toolchain_data.get('win_sdk')
- if not win_sdk:
- win_sdk = toolchain_data['win8sdk']
- wdk = toolchain_data['wdk']
- # TODO(scottmg): The order unfortunately matters in these. They should be
- # split into separate keys for x86 and x64. (See CopyVsRuntimeDlls call
- # below). http://crbug.com/345992
- vs_runtime_dll_dirs = toolchain_data['runtime_dirs']
-
- os.environ['GYP_MSVS_OVERRIDE_PATH'] = toolchain
- os.environ['GYP_MSVS_VERSION'] = version
- # We need to make sure windows_sdk_path is set to the automated
- # toolchain values in GYP_DEFINES, but don't want to override any
- # other values there.
- gyp_defines_dict = gyp.NameValueListToDict(gyp.ShlexEnv('GYP_DEFINES'))
- gyp_defines_dict['windows_sdk_path'] = win_sdk
- os.environ['GYP_DEFINES'] = ' '.join('%s=%s' % (k, pipes.quote(str(v)))
- for k, v in gyp_defines_dict.iteritems())
- os.environ['WINDOWSSDKDIR'] = win_sdk
- os.environ['WDK_DIR'] = wdk
- # Include the VS runtime in the PATH in case it's not machine-installed.
- runtime_path = os.path.pathsep.join(vs_runtime_dll_dirs)
- os.environ['PATH'] = runtime_path + os.path.pathsep + os.environ['PATH']
- elif sys.platform == 'win32' and not depot_tools_win_toolchain:
- if not 'GYP_MSVS_OVERRIDE_PATH' in os.environ:
- os.environ['GYP_MSVS_OVERRIDE_PATH'] = DetectVisualStudioPath()
- if not 'GYP_MSVS_VERSION' in os.environ:
- os.environ['GYP_MSVS_VERSION'] = GetVisualStudioVersion()
-
- return vs_runtime_dll_dirs
-
-
-def _RegistryGetValueUsingWinReg(key, value):
- """Use the _winreg module to obtain the value of a registry key.
-
- Args:
- key: The registry key.
- value: The particular registry value to read.
- Return:
- contents of the registry key's value, or None on failure. Throws
- ImportError if _winreg is unavailable.
- """
- import _winreg
- try:
- root, subkey = key.split('\\', 1)
- assert root == 'HKLM' # Only need HKLM for now.
- with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey) as hkey:
- return _winreg.QueryValueEx(hkey, value)[0]
- except WindowsError:
- return None
-
-
-def _RegistryGetValue(key, value):
- try:
- return _RegistryGetValueUsingWinReg(key, value)
- except ImportError:
- raise Exception('The python library _winreg not found.')
-
-
-def GetVisualStudioVersion():
- """Return GYP_MSVS_VERSION of Visual Studio.
- """
- return os.environ.get('GYP_MSVS_VERSION', CURRENT_DEFAULT_TOOLCHAIN_VERSION)
-
-
-def DetectVisualStudioPath():
- """Return path to the GYP_MSVS_VERSION of Visual Studio.
- """
-
- # Note that this code is used from
- # build/toolchain/win/setup_toolchain.py as well.
- version_as_year = GetVisualStudioVersion()
- year_to_version = {
- '2013': '12.0',
- '2015': '14.0',
- }
- if version_as_year not in year_to_version:
- raise Exception(('Visual Studio version %s (from GYP_MSVS_VERSION)'
- ' not supported. Supported versions are: %s') % (
- version_as_year, ', '.join(year_to_version.keys())))
- version = year_to_version[version_as_year]
- keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
- r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version]
- for key in keys:
- path = _RegistryGetValue(key, 'InstallDir')
- if not path:
- continue
- path = os.path.normpath(os.path.join(path, '..', '..'))
- return path
-
- raise Exception(('Visual Studio Version %s (from GYP_MSVS_VERSION)'
- ' not found.') % (version_as_year))
-
-
-def _VersionNumber():
- """Gets the standard version number ('120', '140', etc.) based on
- GYP_MSVS_VERSION."""
- vs_version = GetVisualStudioVersion()
- if vs_version == '2013':
- return '120'
- elif vs_version == '2015':
- return '140'
- else:
- raise ValueError('Unexpected GYP_MSVS_VERSION')
-
-
-def _CopyRuntimeImpl(target, source, verbose=True):
- """Copy |source| to |target| if it doesn't already exist or if it
- needs to be updated.
- """
- if (os.path.isdir(os.path.dirname(target)) and
- (not os.path.isfile(target) or
- os.stat(target).st_mtime != os.stat(source).st_mtime)):
- if verbose:
- print 'Copying %s to %s...' % (source, target)
- if os.path.exists(target):
- os.unlink(target)
- shutil.copy2(source, target)
-
-
-def _CopyRuntime2013(target_dir, source_dir, dll_pattern):
- """Copy both the msvcr and msvcp runtime DLLs, only if the target doesn't
- exist, but the target directory does exist."""
- for file_part in ('p', 'r'):
- dll = dll_pattern % file_part
- target = os.path.join(target_dir, dll)
- source = os.path.join(source_dir, dll)
- _CopyRuntimeImpl(target, source)
-
-
-def _CopyRuntime2015(target_dir, source_dir, dll_pattern, suffix):
- """Copy both the msvcp and vccorlib runtime DLLs, only if the target doesn't
- exist, but the target directory does exist."""
- for file_part in ('msvcp', 'vccorlib', 'vcruntime'):
- dll = dll_pattern % file_part
- target = os.path.join(target_dir, dll)
- source = os.path.join(source_dir, dll)
- _CopyRuntimeImpl(target, source)
- ucrt_src_dir = os.path.join(source_dir, 'api-ms-win-*.dll')
- print 'Copying %s to %s...' % (ucrt_src_dir, target_dir)
- for ucrt_src_file in glob.glob(ucrt_src_dir):
- file_part = os.path.basename(ucrt_src_file)
- ucrt_dst_file = os.path.join(target_dir, file_part)
- _CopyRuntimeImpl(ucrt_dst_file, ucrt_src_file, False)
- _CopyRuntimeImpl(os.path.join(target_dir, 'ucrtbase' + suffix),
- os.path.join(source_dir, 'ucrtbase' + suffix))
-
-
-def _CopyRuntime(target_dir, source_dir, target_cpu, debug):
- """Copy the VS runtime DLLs, only if the target doesn't exist, but the target
- directory does exist. Handles VS 2013 and VS 2015."""
- suffix = "d.dll" if debug else ".dll"
- if GetVisualStudioVersion() == '2015':
- _CopyRuntime2015(target_dir, source_dir, '%s140' + suffix, suffix)
- else:
- _CopyRuntime2013(target_dir, source_dir, 'msvc%s120' + suffix)
-
- # Copy the PGO runtime library to the release directories.
- if not debug and os.environ.get('GYP_MSVS_OVERRIDE_PATH'):
- pgo_x86_runtime_dir = os.path.join(os.environ.get('GYP_MSVS_OVERRIDE_PATH'),
- 'VC', 'bin')
- pgo_x64_runtime_dir = os.path.join(pgo_x86_runtime_dir, 'amd64')
- pgo_runtime_dll = 'pgort' + _VersionNumber() + '.dll'
- if target_cpu == "x86":
- source_x86 = os.path.join(pgo_x86_runtime_dir, pgo_runtime_dll)
- if os.path.exists(source_x86):
- _CopyRuntimeImpl(os.path.join(target_dir, pgo_runtime_dll), source_x86)
- elif target_cpu == "x64":
- source_x64 = os.path.join(pgo_x64_runtime_dir, pgo_runtime_dll)
- if os.path.exists(source_x64):
- _CopyRuntimeImpl(os.path.join(target_dir, pgo_runtime_dll),
- source_x64)
- else:
- raise NotImplementedError("Unexpected target_cpu value:" + target_cpu)
-
-
-def CopyVsRuntimeDlls(output_dir, runtime_dirs):
- """Copies the VS runtime DLLs from the given |runtime_dirs| to the output
- directory so that even if not system-installed, built binaries are likely to
- be able to run.
-
- This needs to be run after gyp has been run so that the expected target
- output directories are already created.
-
- This is used for the GYP build and gclient runhooks.
- """
- x86, x64 = runtime_dirs
- out_debug = os.path.join(output_dir, 'Debug')
- out_release = os.path.join(output_dir, 'Release')
- out_debug_x64 = os.path.join(output_dir, 'Debug_x64')
- out_release_x64 = os.path.join(output_dir, 'Release_x64')
-
- _CopyRuntime(out_debug, x86, "x86", debug=True)
- _CopyRuntime(out_release, x86, "x86", debug=False)
- _CopyRuntime(out_debug_x64, x64, "x64", debug=True)
- _CopyRuntime(out_release_x64, x64, "x64", debug=False)
-
-
-def CopyDlls(target_dir, configuration, target_cpu):
- """Copy the VS runtime DLLs into the requested directory as needed.
-
- configuration is one of 'Debug' or 'Release'.
- target_cpu is one of 'x86' or 'x64'.
-
- The debug configuration gets both the debug and release DLLs; the
- release config only the latter.
-
- This is used for the GN build.
- """
- vs_runtime_dll_dirs = SetEnvironmentAndGetRuntimeDllDirs()
- if not vs_runtime_dll_dirs:
- return
-
- x64_runtime, x86_runtime = vs_runtime_dll_dirs
- runtime_dir = x64_runtime if target_cpu == 'x64' else x86_runtime
- _CopyRuntime(target_dir, runtime_dir, target_cpu, debug=False)
- if configuration == 'Debug':
- _CopyRuntime(target_dir, runtime_dir, target_cpu, debug=True)
-
-
-def _GetDesiredVsToolchainHashes():
- """Load a list of SHA1s corresponding to the toolchains that we want installed
- to build with."""
- if GetVisualStudioVersion() == '2015':
- # Update 2.
- return ['95ddda401ec5678f15eeed01d2bee08fcbc5ee97']
- else:
- return ['03a4e939cd325d6bc5216af41b92d02dda1366a6']
-
-
-def ShouldUpdateToolchain():
- """Check if the toolchain should be upgraded."""
- if not os.path.exists(json_data_file):
- return True
- with open(json_data_file, 'r') as tempf:
- toolchain_data = json.load(tempf)
- version = toolchain_data['version']
- env_version = GetVisualStudioVersion()
- # If there's a mismatch between the version set in the environment and the one
- # in the json file then the toolchain should be updated.
- return version != env_version
-
-
-def Update(force=False):
- """Requests an update of the toolchain to the specific hashes we have at
- this revision. The update outputs a .json of the various configuration
- information required to pass to gyp which we use in |GetToolchainDir()|.
- """
- if force != False and force != '--force':
- print >>sys.stderr, 'Unknown parameter "%s"' % force
- return 1
- if force == '--force' or os.path.exists(json_data_file):
- force = True
-
- depot_tools_win_toolchain = \
- bool(int(os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN', '1')))
- if ((sys.platform in ('win32', 'cygwin') or force) and
- depot_tools_win_toolchain):
- import find_depot_tools
- depot_tools_path = find_depot_tools.add_depot_tools_to_path()
- # Necessary so that get_toolchain_if_necessary.py will put the VS toolkit
- # in the correct directory.
- os.environ['GYP_MSVS_VERSION'] = GetVisualStudioVersion()
- get_toolchain_args = [
- sys.executable,
- os.path.join(depot_tools_path,
- 'win_toolchain',
- 'get_toolchain_if_necessary.py'),
- '--output-json', json_data_file,
- ] + _GetDesiredVsToolchainHashes()
- if force:
- get_toolchain_args.append('--force')
- subprocess.check_call(get_toolchain_args)
-
- return 0
-
-
-def NormalizePath(path):
- while path.endswith("\\"):
- path = path[:-1]
- return path
-
-
-def GetToolchainDir():
- """Gets location information about the current toolchain (must have been
- previously updated by 'update'). This is used for the GN build."""
- runtime_dll_dirs = SetEnvironmentAndGetRuntimeDllDirs()
-
- # If WINDOWSSDKDIR is not set, search the default SDK path and set it.
- if not 'WINDOWSSDKDIR' in os.environ:
- default_sdk_path = 'C:\\Program Files (x86)\\Windows Kits\\10'
- if os.path.isdir(default_sdk_path):
- os.environ['WINDOWSSDKDIR'] = default_sdk_path
-
- print '''vs_path = "%s"
-sdk_path = "%s"
-vs_version = "%s"
-wdk_dir = "%s"
-runtime_dirs = "%s"
-''' % (
- NormalizePath(os.environ['GYP_MSVS_OVERRIDE_PATH']),
- NormalizePath(os.environ['WINDOWSSDKDIR']),
- GetVisualStudioVersion(),
- NormalizePath(os.environ.get('WDK_DIR', '')),
- os.path.pathsep.join(runtime_dll_dirs or ['None']))
-
-
-def main():
- commands = {
- 'update': Update,
- 'get_toolchain_dir': GetToolchainDir,
- 'copy_dlls': CopyDlls,
- }
- if len(sys.argv) < 2 or sys.argv[1] not in commands:
- print >>sys.stderr, 'Expected one of: %s' % ', '.join(commands)
- return 1
- return commands[sys.argv[1]](*sys.argv[2:])
-
-
-if __name__ == '__main__':
- sys.exit(main())
diff --git a/deps/v8/gypfiles/win/msvs_dependencies.isolate b/deps/v8/gypfiles/win/msvs_dependencies.isolate
index 79ae11a1ae..2859126659 100644
--- a/deps/v8/gypfiles/win/msvs_dependencies.isolate
+++ b/deps/v8/gypfiles/win/msvs_dependencies.isolate
@@ -25,8 +25,8 @@
],
},
}],
- # VS2015 runtimes
- ['OS=="win" and msvs_version==2015 and component=="shared_library" and (CONFIGURATION_NAME=="Debug" or CONFIGURATION_NAME=="Debug_x64")', {
+ # VS2015/2017 runtimes
+ ['OS=="win" and (msvs_version==2015 or msvs_version==2017) and component=="shared_library" and (CONFIGURATION_NAME=="Debug" or CONFIGURATION_NAME=="Debug_x64")', {
'variables': {
'files': [
'<(PRODUCT_DIR)/msvcp140d.dll',
@@ -36,7 +36,7 @@
],
},
}],
- ['OS=="win" and msvs_version==2015 and component=="shared_library" and (CONFIGURATION_NAME=="Release" or CONFIGURATION_NAME=="Release_x64")', {
+ ['OS=="win" and (msvs_version==2015 or msvs_version==2017) and component=="shared_library" and (CONFIGURATION_NAME=="Release" or CONFIGURATION_NAME=="Release_x64")', {
'variables': {
'files': [
'<(PRODUCT_DIR)/msvcp140.dll',
@@ -46,7 +46,7 @@
],
},
}],
- ['OS=="win" and msvs_version==2015 and component=="shared_library"', {
+ ['OS=="win" and (msvs_version==2015 or msvs_version==2017) and component=="shared_library"', {
# Windows 10 Universal C Runtime binaries.
'variables': {
'files': [
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
index 6c3c4292c5..74630b6f19 100644
--- a/deps/v8/include/v8-platform.h
+++ b/deps/v8/include/v8-platform.h
@@ -37,51 +37,6 @@ class IdleTask {
};
/**
- * A TaskRunner allows scheduling of tasks. The TaskRunner may still be used to
- * post tasks after the isolate gets destructed, but these tasks may not get
- * executed anymore. All tasks posted to a given TaskRunner will be invoked in
- * sequence. Tasks can be posted from any thread.
- */
-class TaskRunner {
- public:
- /**
- * Schedules a task to be invoked by this TaskRunner. The TaskRunner
- * implementation takes ownership of |task|.
- */
- virtual void PostTask(std::unique_ptr<Task> task) = 0;
-
- /**
- * Schedules a task to be invoked by this TaskRunner. The task is scheduled
- * after the given number of seconds |delay_in_seconds|. The TaskRunner
- * implementation takes ownership of |task|.
- */
- virtual void PostDelayedTask(std::unique_ptr<Task> task,
- double delay_in_seconds) = 0;
-
- /**
- * Schedules an idle task to be invoked by this TaskRunner. The task is
- * scheduled when the embedder is idle. Requires that
- * TaskRunner::SupportsIdleTasks(isolate) is true. Idle tasks may be reordered
- * relative to other task types and may be starved for an arbitrarily long
- * time if no idle time is available. The TaskRunner implementation takes
- * ownership of |task|.
- */
- virtual void PostIdleTask(std::unique_ptr<IdleTask> task) = 0;
-
- /**
- * Returns true if idle tasks are enabled for this TaskRunner.
- */
- virtual bool IdleTasksEnabled() = 0;
-
- TaskRunner() = default;
- virtual ~TaskRunner() = default;
-
- private:
- TaskRunner(const TaskRunner&) = delete;
- TaskRunner& operator=(const TaskRunner&) = delete;
-};
-
-/**
* The interface represents complex arguments to trace events.
*/
class ConvertableToTraceFormat {
@@ -196,28 +151,6 @@ class Platform {
virtual size_t NumberOfAvailableBackgroundThreads() { return 0; }
/**
- * Returns a TaskRunner which can be used to post a task on the foreground.
- * This function should only be called from a foreground thread.
- */
- virtual std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
- Isolate* isolate) {
- // TODO(ahaas): Make this function abstract after it got implemented on all
- // platforms.
- return {};
- }
-
- /**
- * Returns a TaskRunner which can be used to post a task on a background.
- * This function should only be called from a foreground thread.
- */
- virtual std::shared_ptr<v8::TaskRunner> GetBackgroundTaskRunner(
- Isolate* isolate) {
- // TODO(ahaas): Make this function abstract after it got implemented on all
- // platforms.
- return {};
- }
-
- /**
* Schedules a task to be invoked on a background thread. |expected_runtime|
* indicates that the task will run a long time. The Platform implementation
* takes ownership of |task|. There is no guarantee about order of execution
@@ -276,10 +209,7 @@ class Platform {
* Current wall-clock time in milliseconds since epoch.
* This function is expected to return at least millisecond-precision values.
*/
- virtual double CurrentClockTimeMillis() {
- // TODO(dats): Make pure virtual after V8 roll in Chromium.
- return 0.0;
- }
+ virtual double CurrentClockTimeMillis() = 0;
typedef void (*StackTracePrinter)();
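
With the default body gone, CurrentClockTimeMillis() becomes part of the mandatory embedder contract: every custom v8::Platform must now override it. A minimal sketch of a conforming body, assuming std::chrono is acceptable to the embedder:

    #include <chrono>

    // Sketch of an override body for an embedder's v8::Platform subclass:
    // wall-clock milliseconds since the epoch, at millisecond precision or
    // better, as the documentation above requires.
    double CurrentClockTimeMillis() /* override */ {
      return std::chrono::duration<double, std::milli>(
                 std::chrono::system_clock::now().time_since_epoch())
          .count();
    }
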
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 2760fe3f29..46bb92f650 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,8 +9,8 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 6
-#define V8_MINOR_VERSION 2
-#define V8_BUILD_NUMBER 414
+#define V8_MINOR_VERSION 3
+#define V8_BUILD_NUMBER 292
#define V8_PATCH_LEVEL 46
// Use 1 for candidates and 0 otherwise.
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index ecabe2aa6d..f100153364 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -150,6 +150,10 @@ template<typename T> class CustomArguments;
class PropertyCallbackArguments;
class FunctionCallbackArguments;
class GlobalHandles;
+
+namespace wasm {
+class StreamingDecoder;
+} // namespace wasm
} // namespace internal
namespace debug {
@@ -986,7 +990,7 @@ class V8_EXPORT Data {
* A container type that holds relevant metadata for module loading.
*
* This is passed back to the embedder as part of
- * HostImportDynamicallyCallback for module loading.
+ * HostImportModuleDynamicallyCallback for module loading.
*/
class V8_EXPORT ScriptOrModule {
public:
@@ -1011,7 +1015,7 @@ class V8_EXPORT ScriptOrModule {
* pass host defined options to the ScriptOptions during compilation.
*
* This is passed back to the embedder as part of
- * HostImportDynamicallyCallback for module loading.
+ * HostImportModuleDynamicallyCallback for module loading.
*
*/
class V8_EXPORT PrimitiveArray {
@@ -1432,6 +1436,7 @@ class V8_EXPORT ScriptCompiler {
kProduceParserCache,
kConsumeParserCache,
kProduceCodeCache,
+ kProduceFullCodeCache,
kConsumeCodeCache
};
@@ -1829,7 +1834,7 @@ class V8_EXPORT JSON {
* \return The corresponding string if successfully stringified.
*/
static V8_WARN_UNUSED_RESULT MaybeLocal<String> Stringify(
- Local<Context> context, Local<Object> json_object,
+ Local<Context> context, Local<Value> json_object,
Local<String> gap = Local<String>());
};
@@ -4264,6 +4269,7 @@ class V8_EXPORT WasmModuleObjectBuilderStreaming final {
#endif
std::vector<Buffer> received_buffers_;
size_t total_size_ = 0;
+ std::shared_ptr<internal::wasm::StreamingDecoder> streaming_decoder_;
};
class V8_EXPORT WasmModuleObjectBuilder final {
@@ -5176,6 +5182,8 @@ typedef void (*NamedPropertyDeleterCallback)(
/**
* Returns an array containing the names of the properties the named
* property getter intercepts.
+ *
+ * Note: The values in the array must be of type v8::Name.
*/
typedef void (*NamedPropertyEnumeratorCallback)(
const PropertyCallbackInfo<Array>& info);
@@ -5299,6 +5307,8 @@ typedef void (*GenericNamedPropertyDeleterCallback)(
/**
* Returns an array containing the names of the properties the named
* property getter intercepts.
+ *
+ * Note: The values in the array must be of type v8::Name.
*/
typedef void (*GenericNamedPropertyEnumeratorCallback)(
const PropertyCallbackInfo<Array>& info);
@@ -5379,7 +5389,10 @@ typedef void (*IndexedPropertyDeleterCallback)(
const PropertyCallbackInfo<Boolean>& info);
/**
- * See `v8::GenericNamedPropertyEnumeratorCallback`.
+ * Returns an array containing the indices of the properties the indexed
+ * property getter intercepts.
+ *
+ * Note: The values in the array must be uint32_t.
*/
typedef void (*IndexedPropertyEnumeratorCallback)(
const PropertyCallbackInfo<Array>& info);
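
The reworded comment spells out a contract that used to be implicit: the enumerator fills an array of property indices, and each entry must be a uint32. A sketch of a conforming callback (the fixed indices are invented for illustration):

    // Sketch: an indexed-property enumerator honoring the documented contract.
    void EnumerateIndices(const v8::PropertyCallbackInfo<v8::Array>& info) {
      v8::Isolate* isolate = info.GetIsolate();
      v8::Local<v8::Context> context = isolate->GetCurrentContext();
      v8::Local<v8::Array> indices = v8::Array::New(isolate, 2);
      // Entries must be uint32 values, per the documentation above.
      indices->Set(context, 0, v8::Integer::NewFromUnsigned(isolate, 0))
          .FromJust();
      indices->Set(context, 1, v8::Integer::NewFromUnsigned(isolate, 1))
          .FromJust();
      info.GetReturnValue().Set(indices);
    }
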
@@ -6182,6 +6195,9 @@ typedef void (*FatalErrorCallback)(const char* location, const char* message);
typedef void (*OOMErrorCallback)(const char* location, bool is_heap_oom);
+typedef void (*DcheckErrorCallback)(const char* file, int line,
+ const char* message);
+
typedef void (*MessageCallback)(Local<Message> message, Local<Value> data);
// --- Tracing ---
@@ -6252,7 +6268,7 @@ typedef void (*CallCompletedCallback)(Isolate*);
typedef void (*DeprecatedCallCompletedCallback)();
/**
- * HostImportDynamicallyCallback is called when we require the
+ * HostImportModuleDynamicallyCallback is called when we require the
* embedder to load a module. This is used as part of the dynamic
* import syntax.
*
@@ -7032,6 +7048,7 @@ class V8_EXPORT Isolate {
kPromiseConstructorReturnedUndefined = 38,
kConstructorNonUndefinedPrimitiveReturn = 39,
kLabeledExpressionStatement = 40,
+ kLineOrParagraphSeparatorAsLineTerminator = 41,
// If you add new values here, you'll also need to update Chromium's:
// UseCounter.h, V8PerIsolateData.cpp, histograms.xml
@@ -7887,6 +7904,9 @@ class V8_EXPORT V8 {
static StartupData WarmUpSnapshotDataBlob(StartupData cold_startup_blob,
const char* warmup_source);
+ /** Set the callback to invoke in case of Dcheck failures. */
+ static void SetDcheckErrorHandler(DcheckErrorCallback that);
+
/**
* Adds a message listener.
*
@@ -7956,9 +7976,8 @@ class V8_EXPORT V8 {
* This function removes callback which was installed by
* AddGCPrologueCallback function.
*/
- V8_INLINE static V8_DEPRECATED(
- "Use isolate version",
- void RemoveGCPrologueCallback(GCCallback callback));
+ static V8_DEPRECATED("Use isolate version",
+ void RemoveGCPrologueCallback(GCCallback callback));
/**
* Enables the host application to receive a notification after a
@@ -7979,9 +7998,8 @@ class V8_EXPORT V8 {
* This function removes callback which was installed by
* AddGCEpilogueCallback function.
*/
- V8_INLINE static V8_DEPRECATED(
- "Use isolate version",
- void RemoveGCEpilogueCallback(GCCallback callback));
+ static V8_DEPRECATED("Use isolate version",
+ void RemoveGCEpilogueCallback(GCCallback callback));
/**
* Initializes V8. This function needs to be called before the first Isolate
@@ -9069,11 +9087,11 @@ class Internals {
static const int kNodeIsIndependentShift = 3;
static const int kNodeIsActiveShift = 4;
- static const int kJSApiObjectType = 0xbd;
- static const int kJSObjectType = 0xbe;
static const int kFirstNonstringType = 0x80;
- static const int kOddballType = 0x82;
- static const int kForeignType = 0x86;
+ static const int kOddballType = 0x83;
+ static const int kForeignType = 0x87;
+ static const int kJSApiObjectType = 0xbf;
+ static const int kJSObjectType = 0xc0;
static const int kUndefinedOddballKind = 5;
static const int kNullOddballKind = 3;
@@ -10365,19 +10383,6 @@ void V8::SetFatalErrorHandler(FatalErrorCallback callback) {
isolate->SetFatalErrorHandler(callback);
}
-void V8::RemoveGCPrologueCallback(GCCallback callback) {
- Isolate* isolate = Isolate::GetCurrent();
- isolate->RemoveGCPrologueCallback(
- reinterpret_cast<Isolate::GCCallback>(callback));
-}
-
-
-void V8::RemoveGCEpilogueCallback(GCCallback callback) {
- Isolate* isolate = Isolate::GetCurrent();
- isolate->RemoveGCEpilogueCallback(
- reinterpret_cast<Isolate::GCCallback>(callback));
-}
-
void V8::TerminateExecution(Isolate* isolate) { isolate->TerminateExecution(); }
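
The new SetDcheckErrorHandler hook lets an embedder observe DCHECK failures instead of letting V8 abort on its own. A minimal sketch of wiring it up (the handler body is invented; it must be installed before a failure can occur, typically during startup):

    #include <cstdio>
    #include <cstdlib>

    // Sketch: route V8 DCHECK failures through the embedder's own reporting.
    void OnV8Dcheck(const char* file, int line, const char* message) {
      std::fprintf(stderr, "V8 DCHECK failed at %s:%d: %s\n",
                   file, line, message);
      std::abort();  // or log and continue, at the embedder's discretion
    }

    // During startup:
    //   v8::V8::SetDcheckErrorHandler(OnV8Dcheck);
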
diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl
index 62bae4bfdc..3cbcddc073 100644
--- a/deps/v8/infra/mb/mb_config.pyl
+++ b/deps/v8/infra/mb/mb_config.pyl
@@ -104,7 +104,7 @@
'V8 Linux - predictable': 'gn_release_x86_predictable',
'V8 Linux - full debug': 'gn_full_debug_x86',
'V8 Linux - interpreted regexp': 'gn_release_x86_interpreted_regexp',
- 'V8 Random Deopt Fuzzer - debug': 'gn_debug_x86',
+ 'V8 Random Deopt Fuzzer - debug': 'gn_debug_x64',
},
'client.v8.clusterfuzz': {
'V8 Mac64 ASAN - release builder':
diff --git a/deps/v8/samples/hello-world.cc b/deps/v8/samples/hello-world.cc
index 8a2122c96b..9d8058da41 100644
--- a/deps/v8/samples/hello-world.cc
+++ b/deps/v8/samples/hello-world.cc
@@ -9,53 +9,53 @@
#include "include/libplatform/libplatform.h"
#include "include/v8.h"
-using namespace v8;
-
int main(int argc, char* argv[]) {
// Initialize V8.
- V8::InitializeICUDefaultLocation(argv[0]);
- V8::InitializeExternalStartupData(argv[0]);
- Platform* platform = platform::CreateDefaultPlatform();
- V8::InitializePlatform(platform);
- V8::Initialize();
+ v8::V8::InitializeICUDefaultLocation(argv[0]);
+ v8::V8::InitializeExternalStartupData(argv[0]);
+ v8::Platform* platform = v8::platform::CreateDefaultPlatform();
+ v8::V8::InitializePlatform(platform);
+ v8::V8::Initialize();
// Create a new Isolate and make it the current one.
- Isolate::CreateParams create_params;
+ v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator =
v8::ArrayBuffer::Allocator::NewDefaultAllocator();
- Isolate* isolate = Isolate::New(create_params);
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
{
- Isolate::Scope isolate_scope(isolate);
+ v8::Isolate::Scope isolate_scope(isolate);
// Create a stack-allocated handle scope.
- HandleScope handle_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
// Create a new context.
- Local<Context> context = Context::New(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
// Enter the context for compiling and running the hello world script.
- Context::Scope context_scope(context);
+ v8::Context::Scope context_scope(context);
// Create a string containing the JavaScript source code.
- Local<String> source =
- String::NewFromUtf8(isolate, "'Hello' + ', World!'",
- NewStringType::kNormal).ToLocalChecked();
+ v8::Local<v8::String> source =
+ v8::String::NewFromUtf8(isolate, "'Hello' + ', World!'",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked();
// Compile the source code.
- Local<Script> script = Script::Compile(context, source).ToLocalChecked();
+ v8::Local<v8::Script> script =
+ v8::Script::Compile(context, source).ToLocalChecked();
// Run the script to get the result.
- Local<Value> result = script->Run(context).ToLocalChecked();
+ v8::Local<v8::Value> result = script->Run(context).ToLocalChecked();
// Convert the result to a UTF-8 string and print it.
- String::Utf8Value utf8(isolate, result);
+ v8::String::Utf8Value utf8(isolate, result);
printf("%s\n", *utf8);
}
// Dispose the isolate and tear down V8.
isolate->Dispose();
- V8::Dispose();
- V8::ShutdownPlatform();
+ v8::V8::Dispose();
+ v8::V8::ShutdownPlatform();
delete platform;
delete create_params.array_buffer_allocator;
return 0;
diff --git a/deps/v8/samples/process.cc b/deps/v8/samples/process.cc
index 5ebe1dbfc4..7ee85a84f9 100644
--- a/deps/v8/samples/process.cc
+++ b/deps/v8/samples/process.cc
@@ -35,8 +35,30 @@
#include <map>
#include <string>
-using namespace std;
-using namespace v8;
+using std::map;
+using std::pair;
+using std::string;
+
+using v8::Context;
+using v8::EscapableHandleScope;
+using v8::External;
+using v8::Function;
+using v8::FunctionTemplate;
+using v8::Global;
+using v8::HandleScope;
+using v8::Isolate;
+using v8::Local;
+using v8::MaybeLocal;
+using v8::Name;
+using v8::NamedPropertyHandlerConfiguration;
+using v8::NewStringType;
+using v8::Object;
+using v8::ObjectTemplate;
+using v8::PropertyCallbackInfo;
+using v8::Script;
+using v8::String;
+using v8::TryCatch;
+using v8::Value;
// These interfaces represent an existing request processing interface.
// The idea is to imagine a real application that uses these interfaces
diff --git a/deps/v8/src/OWNERS b/deps/v8/src/OWNERS
index 83a275c80f..44e4dc517a 100644
--- a/deps/v8/src/OWNERS
+++ b/deps/v8/src/OWNERS
@@ -4,4 +4,4 @@ per-file intl.*=jshin@chromium.org
per-file typing-asm.*=aseemgarg@chromium.org
per-file typing-asm.*=bradnelson@chromium.org
-# COMPONENT: Blink>JavaScript>Runtime
+# COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index ab5d11adb6..fd991f5167 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -727,12 +727,11 @@ void Accessors::FunctionLengthGetter(
HandleScope scope(isolate);
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
- Handle<Object> result;
- if (!JSFunction::GetLength(isolate, function).ToHandle(&result)) {
- result = handle(Smi::kZero, isolate);
+ int length = 0;
+ if (!JSFunction::GetLength(isolate, function).To(&length)) {
isolate->OptionalRescheduleException(false);
}
-
+ Handle<Object> result(Smi::FromInt(length), isolate);
info.GetReturnValue().Set(Utils::ToLocal(result));
}
@@ -851,7 +850,10 @@ Handle<Object> GetFunctionArguments(Isolate* isolate,
}
// Find the frame that holds the actual arguments passed to the function.
- it.AdvanceToArgumentsFrame();
+ if (it.frame()->has_adapted_arguments()) {
+ it.AdvanceOneFrame();
+ DCHECK(it.frame()->is_arguments_adaptor());
+ }
frame = it.frame();
// Get the number of arguments and construct an arguments object
@@ -930,47 +932,99 @@ static inline bool AllowAccessToFunction(Context* current_context,
class FrameFunctionIterator {
public:
explicit FrameFunctionIterator(Isolate* isolate)
- : isolate_(isolate), frame_iterator_(isolate) {
+ : isolate_(isolate), frame_iterator_(isolate), inlined_frame_index_(-1) {
GetFrames();
}
- MaybeHandle<JSFunction> next() {
- while (true) {
- if (frames_.empty()) return MaybeHandle<JSFunction>();
- Handle<JSFunction> next_function =
- frames_.back().AsJavaScript().function();
- frames_.pop_back();
- if (frames_.empty()) {
- GetFrames();
- }
- // Skip functions from other origins.
- if (!AllowAccessToFunction(isolate_->context(), *next_function)) continue;
- return next_function;
- }
- }
// Iterate through functions until the first occurrence of 'function'.
- // Returns true if 'function' is found, and false if the iterator ends
- // without finding it.
+ // Returns true if 'function' is found, and false if the iterator is exhausted first.
bool Find(Handle<JSFunction> function) {
- Handle<JSFunction> next_function;
do {
- if (!next().ToHandle(&next_function)) return false;
- } while (!next_function.is_identical_to(function));
+ if (!next().ToHandle(&function_)) return false;
+ } while (!function_.is_identical_to(function));
+ return true;
+ }
+
+ // Iterate through functions until the next non-toplevel one is found.
+ // Returns true if one is found, and false if the iterator is exhausted first.
+ bool FindNextNonTopLevel() {
+ do {
+ if (!next().ToHandle(&function_)) return false;
+ } while (function_->shared()->is_toplevel());
return true;
}
+ // Iterate through functions until the first native or user-provided function
+ // is found. Functions not defined in user-provided scripts are not visible
+ // unless directly exposed, in which case the native flag is set on them.
+ // Returns true if one is found, and false if the iterator is exhausted first.
+ bool FindFirstNativeOrUserJavaScript() {
+ while (!function_->shared()->native() &&
+ !function_->shared()->IsUserJavaScript()) {
+ if (!next().ToHandle(&function_)) return false;
+ }
+ return true;
+ }
+
+ // In case of inlined frames the function could have been materialized from
+ // deoptimization information. If that is the case, we need to make sure that
+ // subsequent calls see the same function, since we are about to hand the
+ // value out to JavaScript. Store the materialized value and trigger a
+ // deoptimization of the underlying frame.
+ Handle<JSFunction> MaterializeFunction() {
+ if (inlined_frame_index_ == 0) return function_;
+
+ JavaScriptFrame* frame = frame_iterator_.frame();
+ TranslatedState translated_values(frame);
+ translated_values.Prepare(frame->fp());
+
+ TranslatedFrame* translated_frame =
+ translated_values.GetFrameFromJSFrameIndex(inlined_frame_index_);
+ TranslatedFrame::iterator iter = translated_frame->begin();
+
+ // First value is the function.
+ bool should_deoptimize = iter->IsMaterializedObject();
+ Handle<Object> value = iter->GetValue();
+ if (should_deoptimize) {
+ translated_values.StoreMaterializedValuesAndDeopt(frame);
+ }
+
+ return Handle<JSFunction>::cast(value);
+ }
+
private:
+ MaybeHandle<JSFunction> next() {
+ while (true) {
+ inlined_frame_index_--;
+ if (inlined_frame_index_ == -1) {
+ if (!frame_iterator_.done()) {
+ frame_iterator_.Advance();
+ frames_.clear();
+ GetFrames();
+ }
+ if (inlined_frame_index_ == -1) return MaybeHandle<JSFunction>();
+ inlined_frame_index_--;
+ }
+ Handle<JSFunction> next_function =
+ frames_[inlined_frame_index_].AsJavaScript().function();
+ // Skip functions from other origins.
+ if (!AllowAccessToFunction(isolate_->context(), *next_function)) continue;
+ return next_function;
+ }
+ }
void GetFrames() {
- DCHECK(frames_.empty());
+ DCHECK_EQ(-1, inlined_frame_index_);
if (frame_iterator_.done()) return;
JavaScriptFrame* frame = frame_iterator_.frame();
frame->Summarize(&frames_);
- DCHECK(!frames_.empty());
- frame_iterator_.Advance();
+ inlined_frame_index_ = static_cast<int>(frames_.size());
+ DCHECK_LT(0, inlined_frame_index_);
}
Isolate* isolate_;
+ Handle<JSFunction> function_;
JavaScriptFrameIterator frame_iterator_;
std::vector<FrameSummary> frames_;
+ int inlined_frame_index_;
};
@@ -980,28 +1034,27 @@ MaybeHandle<JSFunction> FindCaller(Isolate* isolate,
if (function->shared()->native()) {
return MaybeHandle<JSFunction>();
}
- // Find the function from the frames.
+ // Find the function from the frames. Return null in case no frame
+ // corresponding to the given function was found.
if (!it.Find(function)) {
- // No frame corresponding to the given function found. Return null.
return MaybeHandle<JSFunction>();
}
// Find previously called non-toplevel function.
- Handle<JSFunction> caller;
- do {
- if (!it.next().ToHandle(&caller)) return MaybeHandle<JSFunction>();
- } while (caller->shared()->is_toplevel());
-
- // If caller is not user code and caller's caller is also not user code,
- // use that instead.
- MaybeHandle<JSFunction> potential_caller = caller;
- while (!potential_caller.is_null() &&
- !potential_caller.ToHandleChecked()->shared()->IsUserJavaScript()) {
- caller = potential_caller.ToHandleChecked();
- potential_caller = it.next();
+ if (!it.FindNextNonTopLevel()) {
+ return MaybeHandle<JSFunction>();
}
- if (!caller->shared()->native() && !potential_caller.is_null()) {
- caller = potential_caller.ToHandleChecked();
+ // Find the first user-land JavaScript function (or the entry point into
+ // native JavaScript builtins in case such a builtin was the caller).
+ if (!it.FindFirstNativeOrUserJavaScript()) {
+ return MaybeHandle<JSFunction>();
}
+
+ // Materialize the function that the iterator is currently sitting on. Note
+ // that this might trigger deoptimization in case the function was actually
+ // materialized. Identity of the function must be preserved because we are
+ // going to return it to JavaScript after this point.
+ Handle<JSFunction> caller = it.MaterializeFunction();
+
// Censor if the caller is not a sloppy mode function.
// Change from ES5, which used to throw, see:
// https://bugs.ecmascript.org/show_bug.cgi?id=310
@@ -1056,18 +1109,11 @@ void Accessors::BoundFunctionLengthGetter(
Handle<JSBoundFunction> function =
Handle<JSBoundFunction>::cast(Utils::OpenHandle(*info.Holder()));
- Handle<Smi> target_length;
- Handle<JSFunction> target(JSFunction::cast(function->bound_target_function()),
- isolate);
- if (!JSFunction::GetLength(isolate, target).ToHandle(&target_length)) {
- target_length = handle(Smi::kZero, isolate);
+ int length = 0;
+ if (!JSBoundFunction::GetLength(isolate, function).To(&length)) {
isolate->OptionalRescheduleException(false);
return;
}
-
- int bound_length = function->bound_arguments()->length();
- int length = Max(0, target_length->value() - bound_length);
-
Handle<Object> result(Smi::FromInt(length), isolate);
info.GetReturnValue().Set(Utils::ToLocal(result));
}
diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc
index d64a476b0a..4753d2d855 100644
--- a/deps/v8/src/allocation.cc
+++ b/deps/v8/src/allocation.cc
@@ -6,8 +6,11 @@
#include <stdlib.h> // For free, malloc.
#include "src/base/bits.h"
+#include "src/base/lazy-instance.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
+#include "src/base/utils/random-number-generator.h"
+#include "src/flags.h"
#include "src/utils.h"
#include "src/v8.h"
@@ -15,6 +18,10 @@
#include <malloc.h> // NOLINT
#endif
+#if defined(LEAK_SANITIZER)
+#include <sanitizer/lsan_interface.h>
+#endif
+
namespace v8 {
namespace internal {
@@ -99,32 +106,218 @@ void AlignedFree(void *ptr) {
#endif
}
-bool AllocVirtualMemory(size_t size, void* hint, base::VirtualMemory* result) {
- base::VirtualMemory first_try(size, hint);
+VirtualMemory::VirtualMemory() : address_(nullptr), size_(0) {}
+
+VirtualMemory::VirtualMemory(size_t size, void* hint)
+ : address_(base::OS::ReserveRegion(size, hint)), size_(size) {
+#if defined(LEAK_SANITIZER)
+ __lsan_register_root_region(address_, size_);
+#endif
+}
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
+ : address_(nullptr), size_(0) {
+ address_ = base::OS::ReserveAlignedRegion(size, alignment, hint, &size_);
+#if defined(LEAK_SANITIZER)
+ __lsan_register_root_region(address_, size_);
+#endif
+}
+
+VirtualMemory::~VirtualMemory() {
+ if (IsReserved()) {
+ bool result = base::OS::ReleaseRegion(address(), size());
+ DCHECK(result);
+ USE(result);
+ }
+}
+
+void VirtualMemory::Reset() {
+ address_ = nullptr;
+ size_ = 0;
+}
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ CHECK(InVM(address, size));
+ return base::OS::CommitRegion(address, size, is_executable);
+}
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ CHECK(InVM(address, size));
+ return base::OS::UncommitRegion(address, size);
+}
+
+bool VirtualMemory::Guard(void* address) {
+ CHECK(InVM(address, base::OS::CommitPageSize()));
+ base::OS::Guard(address, base::OS::CommitPageSize());
+ return true;
+}
+
+size_t VirtualMemory::ReleasePartial(void* free_start) {
+ DCHECK(IsReserved());
+ // Notice: Order is important here. The VirtualMemory object might live
+ // inside the allocated region.
+ const size_t free_size = size_ - (reinterpret_cast<size_t>(free_start) -
+ reinterpret_cast<size_t>(address_));
+ CHECK(InVM(free_start, free_size));
+ DCHECK_LT(address_, free_start);
+ DCHECK_LT(free_start, reinterpret_cast<void*>(
+ reinterpret_cast<size_t>(address_) + size_));
+#if defined(LEAK_SANITIZER)
+ __lsan_unregister_root_region(address_, size_);
+ __lsan_register_root_region(address_, size_ - free_size);
+#endif
+ const bool result = base::OS::ReleasePartialRegion(free_start, free_size);
+ USE(result);
+ DCHECK(result);
+ size_ -= free_size;
+ return free_size;
+}
+
+void VirtualMemory::Release() {
+ DCHECK(IsReserved());
+ // Notice: Order is important here. The VirtualMemory object might live
+ // inside the allocated region.
+ void* address = address_;
+ size_t size = size_;
+ CHECK(InVM(address, size));
+ Reset();
+ bool result = base::OS::ReleaseRegion(address, size);
+ USE(result);
+ DCHECK(result);
+}
+
+void VirtualMemory::TakeControl(VirtualMemory* from) {
+ DCHECK(!IsReserved());
+ address_ = from->address_;
+ size_ = from->size_;
+ from->Reset();
+}
+
+bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result) {
+ VirtualMemory first_try(size, hint);
if (first_try.IsReserved()) {
result->TakeControl(&first_try);
return true;
}
V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
- base::VirtualMemory second_try(size, hint);
+ VirtualMemory second_try(size, hint);
result->TakeControl(&second_try);
return result->IsReserved();
}
bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
- base::VirtualMemory* result) {
- base::VirtualMemory first_try(size, alignment, hint);
+ VirtualMemory* result) {
+ VirtualMemory first_try(size, alignment, hint);
if (first_try.IsReserved()) {
result->TakeControl(&first_try);
return true;
}
V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
- base::VirtualMemory second_try(size, alignment, hint);
+ VirtualMemory second_try(size, alignment, hint);
result->TakeControl(&second_try);
return result->IsReserved();
}
+namespace {
+
+struct RNGInitializer {
+ static void Construct(void* mem) {
+ auto rng = new (mem) base::RandomNumberGenerator();
+ int64_t random_seed = FLAG_random_seed;
+ if (random_seed) {
+ rng->SetSeed(random_seed);
+ }
+ }
+};
+
+} // namespace
+
+static base::LazyInstance<base::RandomNumberGenerator, RNGInitializer>::type
+ random_number_generator = LAZY_INSTANCE_INITIALIZER;
+
+void* GetRandomMmapAddr() {
+#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
+ defined(THREAD_SANITIZER)
+ // Dynamic tools do not support custom mmap addresses.
+ return NULL;
+#endif
+ uintptr_t raw_addr;
+ random_number_generator.Pointer()->NextBytes(&raw_addr, sizeof(raw_addr));
+#if V8_OS_POSIX
+#if V8_TARGET_ARCH_X64
+ // Currently available CPUs have 48 bits of virtual addressing. Truncate
+ // the hint address to 46 bits to give the kernel a fighting chance of
+ // fulfilling our placement request.
+ raw_addr &= V8_UINT64_C(0x3ffffffff000);
+#elif V8_TARGET_ARCH_PPC64
+#if V8_OS_AIX
+ // AIX: 64 bits of virtual addressing, but we limit the address range to
+ // a) minimize Segment Lookaside Buffer (SLB) misses and
+ // b) use extra address space to isolate the mmap regions (see below).
+ raw_addr &= V8_UINT64_C(0x3ffff000);
+ // Use extra address space to isolate the mmap regions.
+ raw_addr += V8_UINT64_C(0x400000000000);
+#elif V8_TARGET_BIG_ENDIAN
+ // Big-endian Linux: 44 bits of virtual addressing.
+ raw_addr &= V8_UINT64_C(0x03fffffff000);
+#else
+ // Little-endian Linux: 48 bits of virtual addressing.
+ raw_addr &= V8_UINT64_C(0x3ffffffff000);
+#endif
+#elif V8_TARGET_ARCH_S390X
+ // Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits
+ // of virtual addressing. Truncate to 40 bits to give the kernel a chance
+ // to fulfill the request.
+ raw_addr &= V8_UINT64_C(0xfffffff000);
+#elif V8_TARGET_ARCH_S390
+ // 31 bits of virtual addressing. Truncate to 29 bits to give the kernel a
+ // chance to fulfill the request.
+ raw_addr &= 0x1ffff000;
+#else
+ raw_addr &= 0x3ffff000;
+
+#ifdef __sun
+ // For our Solaris/illumos mmap hint, we pick a random address in the bottom
+ // half of the top half of the address space (that is, the third quarter).
+ // Because we do not MAP_FIXED, this will be treated only as a hint -- the
+ // system will not fail to mmap() because something else happens to already
+ // be mapped at our random address. We deliberately set the hint high enough
+ // to get well above the system's break (that is, the heap); Solaris and
+ // illumos will try the hint and if that fails allocate as if there were
+ // no hint at all. The high hint prevents the break from getting hemmed in
+ // at low values, ceding half of the address space to the system heap.
+ raw_addr += 0x80000000;
+#elif V8_OS_AIX
+ // The range 0x30000000 - 0xD0000000 is available on AIX;
+ // choose the upper range.
+ raw_addr += 0x90000000;
+#else
+ // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
+ // variety of ASLR modes (PAE kernel, NX compat mode, etc.) and on
+ // macOS 10.6 and 10.7.
+ raw_addr += 0x20000000;
+#endif
+#endif
+#else // V8_OS_WIN
+// The address range used to randomize RWX allocations in OS::Allocate.
+// Try not to map pages into the default range where Windows loads DLLs.
+// Use a multiple of 64k to prevent committing unused memory.
+// Note: This does not guarantee that RWX regions will be within the
+// range kAllocationRandomAddressMin to kAllocationRandomAddressMax.
+#ifdef V8_HOST_ARCH_64_BIT
+ static const uintptr_t kAllocationRandomAddressMin = 0x0000000080000000;
+ static const uintptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
+#else
+ static const uintptr_t kAllocationRandomAddressMin = 0x04000000;
+ static const uintptr_t kAllocationRandomAddressMax = 0x3FFF0000;
+#endif
+ raw_addr <<= kPageSizeBits;
+ raw_addr += kAllocationRandomAddressMin;
+ raw_addr &= kAllocationRandomAddressMax;
+#endif // V8_OS_WIN
+ return reinterpret_cast<void*>(raw_addr);
+}
+
} // namespace internal
} // namespace v8
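
Taken together, the new VirtualMemory class and GetRandomMmapAddr() give
allocation.cc a self-contained reserve/commit/release lifecycle. A minimal
usage sketch, assuming only the interfaces shown in this patch (the
surrounding setup is hypothetical):

    // Sketch: reserve an aligned region with a randomized hint, commit
    // part of it, and let RAII release the reservation.
    void Example(size_t size, size_t alignment) {
      v8::internal::VirtualMemory vm;
      // AlignedAllocVirtualMemory retries once, after signaling critical
      // memory pressure to the platform, before giving up.
      if (!v8::internal::AlignedAllocVirtualMemory(
              size, alignment, v8::internal::GetRandomMmapAddr(), &vm)) {
        return;  // both reservation attempts failed
      }
      // Back part of the reservation with accessible pages.
      if (vm.Commit(vm.address(), size, false /* not executable */)) {
        // ... use the memory ...
        vm.Uncommit(vm.address(), size);
      }
      // ~VirtualMemory releases the whole reservation; Release() or
      // ReleasePartial() could be called earlier instead.
    }
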
diff --git a/deps/v8/src/allocation.h b/deps/v8/src/allocation.h
index e0e147bb7c..a78db1a881 100644
--- a/deps/v8/src/allocation.h
+++ b/deps/v8/src/allocation.h
@@ -43,6 +43,14 @@ T* NewArray(size_t size) {
return result;
}
+template <typename T,
+ typename = typename std::enable_if<IS_TRIVIALLY_COPYABLE(T)>::type>
+T* NewArray(size_t size, T default_val) {
+ T* result = reinterpret_cast<T*>(NewArray<uint8_t>(sizeof(T) * size));
+ for (size_t i = 0; i < size; ++i) result[i] = default_val;
+ return result;
+}
+
template <typename T>
void DeleteArray(T* array) {
delete[] array;
@@ -68,9 +76,91 @@ class FreeStoreAllocationPolicy {
void* AlignedAlloc(size_t size, size_t alignment);
void AlignedFree(void *ptr);
-bool AllocVirtualMemory(size_t size, void* hint, base::VirtualMemory* result);
+// Represents and controls an area of reserved memory.
+class V8_EXPORT_PRIVATE VirtualMemory {
+ public:
+ // Empty VirtualMemory object, controlling no reserved memory.
+ VirtualMemory();
+
+ // Reserves virtual memory of the given size.
+ explicit VirtualMemory(size_t size, void* hint);
+
+ // Reserves virtual memory containing an area of the given size that
+ // is aligned to the given alignment. The aligned area may not start at
+ // the position returned by address().
+ VirtualMemory(size_t size, size_t alignment, void* hint);
+
+ // Constructs a VirtualMemory object from an already mapped address and
+ // size.
+ VirtualMemory(void* address, size_t size) : address_(address), size_(size) {}
+
+ // Releases the reserved memory, if any, controlled by this VirtualMemory
+ // object.
+ ~VirtualMemory();
+
+ // Returns whether the memory has been reserved.
+ bool IsReserved() const { return address_ != nullptr; }
+
+ // Initializes or resets an embedded VirtualMemory object.
+ void Reset();
+
+ // Returns the start address of the reserved memory.
+ // If the memory was reserved with an alignment, this address is not
+ // necessarily aligned. The user might need to round it up to a multiple of
+ // the alignment to get the start of the aligned block.
+ void* address() const {
+ DCHECK(IsReserved());
+ return address_;
+ }
+
+ void* end() const {
+ DCHECK(IsReserved());
+ return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address_) +
+ size_);
+ }
+
+ // Returns the size of the reserved memory. The returned value is only
+ // meaningful when IsReserved() returns true.
+ // If the memory was reserved with an alignment, this size may be larger
+ // than the requested size.
+ size_t size() const { return size_; }
+
+ // Commits real memory. Returns whether the operation succeeded.
+ bool Commit(void* address, size_t size, bool is_executable);
+
+ // Uncommits real memory. Returns whether the operation succeeded.
+ bool Uncommit(void* address, size_t size);
+
+ // Creates a single guard page at the given address.
+ bool Guard(void* address);
+
+ // Releases the memory after |free_start|. Returns the number of bytes released.
+ size_t ReleasePartial(void* free_start);
+
+ void Release();
+
+ // Assigns control of the reserved region to a different VirtualMemory object.
+ // The old object is no longer functional (IsReserved() returns false).
+ void TakeControl(VirtualMemory* from);
+
+ bool InVM(void* address, size_t size) {
+ return (reinterpret_cast<uintptr_t>(address_) <=
+ reinterpret_cast<uintptr_t>(address)) &&
+ ((reinterpret_cast<uintptr_t>(address_) + size_) >=
+ (reinterpret_cast<uintptr_t>(address) + size));
+ }
+
+ private:
+ void* address_; // Start address of the virtual memory.
+ size_t size_; // Size of the virtual memory.
+};
+
+bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result);
bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
- base::VirtualMemory* result);
+ VirtualMemory* result);
+
+// Generate a random address to be used for hinting mmap().
+V8_EXPORT_PRIVATE void* GetRandomMmapAddr();
} // namespace internal
} // namespace v8
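
For the new default-initializing NewArray overload, a hypothetical call
site; the enable_if restricts it to trivially copyable T because the
elements are written by plain assignment over raw byte storage. Pairing it
with DeleteArray here is an assumption based on the existing
NewArray/DeleteArray convention in this header:

    double* weights = v8::internal::NewArray<double>(8, 1.0);
    // weights[0..7] all start out as 1.0.
    v8::internal::DeleteArray(weights);  // assumed disposal path
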
diff --git a/deps/v8/src/api-arguments.h b/deps/v8/src/api-arguments.h
index ca7b4833e9..50baed4ab7 100644
--- a/deps/v8/src/api-arguments.h
+++ b/deps/v8/src/api-arguments.h
@@ -63,6 +63,8 @@ Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
return result;
}
+// Note: Calling args.Call() sets the return value on args. When making
+// multiple calls, use a fresh args object for each Call().
class PropertyCallbackArguments
: public CustomArguments<PropertyCallbackInfo<Value> > {
public:
@@ -97,14 +99,14 @@ class PropertyCallbackArguments
DCHECK(values[T::kIsolateIndex]->IsSmi());
}
-/*
- * The following Call functions wrap the calling of all callbacks to handle
- * calling either the old or the new style callbacks depending on which one
- * has been registered.
- * For old callbacks which return an empty handle, the ReturnValue is checked
- * and used if it's been set to anything inside the callback.
- * New style callbacks always use the return value.
- */
+ /*
+ * The following Call functions wrap the calling of all callbacks to handle
+ * calling either the old or the new style callbacks depending on which one
+ * has been registered.
+ * For old callbacks which return an empty handle, the ReturnValue is checked
+ * and used if it's been set to anything inside the callback.
+ * New style callbacks always use the return value.
+ */
Handle<JSObject> Call(IndexedPropertyEnumeratorCallback f);
inline Handle<Object> Call(AccessorNameGetterCallback f, Handle<Name> name);
@@ -139,6 +141,10 @@ class PropertyCallbackArguments
}
bool PerformSideEffectCheck(Isolate* isolate, Address function);
+
+ // Don't copy PropertyCallbackArguments, because they would both have the
+ // same prev_ pointer.
+ DISALLOW_COPY_AND_ASSIGN(PropertyCallbackArguments);
};
class FunctionCallbackArguments
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc
index 8bd9431d20..35759459c6 100644
--- a/deps/v8/src/api-natives.cc
+++ b/deps/v8/src/api-natives.cc
@@ -556,7 +556,7 @@ MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
Handle<Map> object_map = isolate->factory()->NewMap(
JS_SPECIAL_API_OBJECT_TYPE,
JSObject::kHeaderSize + data->embedder_field_count() * kPointerSize,
- HOLEY_SMI_ELEMENTS);
+ TERMINAL_FAST_ELEMENTS_KIND);
object_map->SetConstructor(*constructor);
object_map->set_is_access_check_needed(true);
object_map->set_may_have_interesting_symbols(true);
@@ -689,12 +689,11 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
break;
default:
UNREACHABLE();
- type = JS_OBJECT_TYPE; // Keep the compiler happy.
break;
}
- Handle<Map> map =
- isolate->factory()->NewMap(type, instance_size, HOLEY_SMI_ELEMENTS);
+ Handle<Map> map = isolate->factory()->NewMap(type, instance_size,
+ TERMINAL_FAST_ELEMENTS_KIND);
JSFunction::SetInitialMap(result, map, Handle<JSObject>::cast(prototype));
// Mark as undetectable if needed.
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index a64e43de2c..300b88f70e 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -8,6 +8,9 @@
#ifdef V8_USE_ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif // V8_USE_ADDRESS_SANITIZER
+#if defined(LEAK_SANITIZER)
+#include <sanitizer/lsan_interface.h>
+#endif // defined(LEAK_SANITIZER)
#include <cmath> // For isnan.
#include <limits>
#include <vector>
@@ -20,6 +23,7 @@
#include "src/assert-scope.h"
#include "src/background-parsing-task.h"
#include "src/base/functional.h"
+#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
#include "src/base/safe_conversions.h"
@@ -34,8 +38,10 @@
#include "src/conversions-inl.h"
#include "src/counters.h"
#include "src/debug/debug-coverage.h"
+#include "src/debug/debug-type-profile.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
+#include "src/detachable-vector.h"
#include "src/execution.h"
#include "src/frames-inl.h"
#include "src/gdb-jit.h"
@@ -62,20 +68,23 @@
#include "src/runtime-profiler.h"
#include "src/runtime/runtime.h"
#include "src/simulator.h"
+#include "src/snapshot/builtin-serializer.h"
#include "src/snapshot/code-serializer.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
#include "src/startup-data-util.h"
#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
+#include "src/unicode-cache-inl.h"
#include "src/unicode-inl.h"
#include "src/v8.h"
#include "src/v8threads.h"
#include "src/value-serializer.h"
#include "src/version.h"
#include "src/vm-state-inl.h"
-#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/compilation-manager.h"
+#include "src/wasm/streaming-decoder.h"
+#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
namespace v8 {
@@ -480,8 +489,11 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
virtual void Free(void* data, size_t) { free(data); }
virtual void* Reserve(size_t length) {
- return base::VirtualMemory::ReserveRegion(length,
- base::OS::GetRandomMmapAddr());
+ void* address = base::OS::ReserveRegion(length, i::GetRandomMmapAddr());
+#if defined(LEAK_SANITIZER)
+ __lsan_register_root_region(address, length);
+#endif
+ return address;
}
virtual void Free(void* data, size_t length,
@@ -491,7 +503,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
return Free(data, length);
}
case v8::ArrayBuffer::Allocator::AllocationMode::kReservation: {
- base::VirtualMemory::ReleaseRegion(data, length);
+ base::OS::ReleaseRegion(data, length);
return;
}
}
@@ -502,12 +514,11 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
v8::ArrayBuffer::Allocator::Protection protection) {
switch (protection) {
case v8::ArrayBuffer::Allocator::Protection::kNoAccess: {
- base::VirtualMemory::UncommitRegion(data, length);
+ base::OS::Guard(data, length);
return;
}
case v8::ArrayBuffer::Allocator::Protection::kReadWrite: {
- const bool is_executable = false;
- base::VirtualMemory::CommitRegion(data, length, is_executable);
+ base::OS::Unprotect(data, length);
return;
}
}
@@ -680,7 +691,8 @@ StartupData SnapshotCreator::CreateBlob(
i::DisallowHeapAllocation no_gc_from_here_on;
- i::List<i::Object*> contexts(num_additional_contexts);
+ std::vector<i::Object*> contexts;
+ contexts.reserve(num_additional_contexts);
i::Object* default_context;
{
i::HandleScope scope(isolate);
@@ -690,7 +702,7 @@ StartupData SnapshotCreator::CreateBlob(
for (int i = 0; i < num_additional_contexts; i++) {
i::Handle<i::Context> context =
v8::Utils::OpenHandle(*data->contexts_.Get(i));
- contexts.Add(*context);
+ contexts.push_back(*context);
}
data->contexts_.Clear();
}
@@ -732,12 +744,19 @@ StartupData SnapshotCreator::CreateBlob(
context_snapshots.push_back(new i::SnapshotData(&partial_serializer));
}
+ // Builtin serialization places additional objects into the partial snapshot
+ // cache and thus needs to happen before SerializeWeakReferencesAndDeferred
+ // is called below.
+ i::BuiltinSerializer builtin_serializer(isolate, &startup_serializer);
+ builtin_serializer.SerializeBuiltins();
+
startup_serializer.SerializeWeakReferencesAndDeferred();
can_be_rehashed = can_be_rehashed && startup_serializer.can_be_rehashed();
i::SnapshotData startup_snapshot(&startup_serializer);
+ i::BuiltinSnapshotData builtin_snapshot(&builtin_serializer);
StartupData result = i::Snapshot::CreateSnapshotBlob(
- &startup_snapshot, context_snapshots, can_be_rehashed);
+ &startup_snapshot, &builtin_snapshot, context_snapshots, can_be_rehashed);
// Delete heap-allocated context snapshot instances.
for (const auto context_snapshot : context_snapshots) {
@@ -818,6 +837,9 @@ StartupData V8::WarmUpSnapshotDataBlob(StartupData cold_snapshot_blob,
return result;
}
+void V8::SetDcheckErrorHandler(DcheckErrorCallback that) {
+ v8::base::SetDcheckFunction(that);
+}
void V8::SetFlagsFromString(const char* str, int length) {
i::FlagList::SetFlagsFromString(str, length);
@@ -1340,7 +1362,7 @@ static Local<FunctionTemplate> FunctionTemplateNew(
v8::Local<Signature> signature, int length, bool do_not_cache,
v8::Local<Private> cached_property_name = v8::Local<Private>()) {
i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::FUNCTION_TEMPLATE_INFO_TYPE);
+ isolate->factory()->NewStruct(i::FUNCTION_TEMPLATE_INFO_TYPE, i::TENURED);
i::Handle<i::FunctionTemplateInfo> obj =
i::Handle<i::FunctionTemplateInfo>::cast(struct_obj);
InitializeFunctionTemplate(obj);
@@ -1434,7 +1456,7 @@ void FunctionTemplate::SetCallHandler(FunctionCallback callback,
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::TUPLE2_TYPE);
+ isolate->factory()->NewStruct(i::TUPLE2_TYPE, i::TENURED);
i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj);
SET_FIELD_WRAPPED(obj, set_callback, callback);
@@ -1583,7 +1605,7 @@ static Local<ObjectTemplate> ObjectTemplateNew(
LOG_API(isolate, ObjectTemplate, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::OBJECT_TEMPLATE_INFO_TYPE);
+ isolate->factory()->NewStruct(i::OBJECT_TEMPLATE_INFO_TYPE, i::TENURED);
i::Handle<i::ObjectTemplateInfo> obj =
i::Handle<i::ObjectTemplateInfo>::cast(struct_obj);
InitializeTemplate(obj, Consts::OBJECT_TEMPLATE);
@@ -1734,7 +1756,7 @@ static i::Handle<i::InterceptorInfo> CreateInterceptorInfo(
definer ==
nullptr); // Only use descriptor callback with definer callback.
auto obj = i::Handle<i::InterceptorInfo>::cast(
- isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE));
+ isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE, i::TENURED));
obj->set_flags(0);
if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
@@ -1812,7 +1834,7 @@ void ObjectTemplate::SetAccessCheckCallback(AccessCheckCallback callback,
EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetAccessCheckCallback");
i::Handle<i::Struct> struct_info =
- isolate->factory()->NewStruct(i::ACCESS_CHECK_INFO_TYPE);
+ isolate->factory()->NewStruct(i::ACCESS_CHECK_INFO_TYPE, i::TENURED);
i::Handle<i::AccessCheckInfo> info =
i::Handle<i::AccessCheckInfo>::cast(struct_info);
@@ -1842,7 +1864,7 @@ void ObjectTemplate::SetAccessCheckCallbackAndHandler(
cons, "v8::ObjectTemplate::SetAccessCheckCallbackWithHandler");
i::Handle<i::Struct> struct_info =
- isolate->factory()->NewStruct(i::ACCESS_CHECK_INFO_TYPE);
+ isolate->factory()->NewStruct(i::ACCESS_CHECK_INFO_TYPE, i::TENURED);
i::Handle<i::AccessCheckInfo> info =
i::Handle<i::AccessCheckInfo>::cast(struct_info);
@@ -1891,7 +1913,7 @@ void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback,
auto cons = EnsureConstructor(isolate, this);
EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetCallAsFunctionHandler");
i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::TUPLE2_TYPE);
+ isolate->factory()->NewStruct(i::TUPLE2_TYPE, i::TENURED);
i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj);
SET_FIELD_WRAPPED(obj, set_callback, callback);
@@ -2261,10 +2283,12 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
ENTER_V8_NO_SCRIPT(isolate, v8_isolate->GetCurrentContext(), ScriptCompiler,
CompileUnbound, MaybeLocal<UnboundScript>(),
InternalEscapableScope);
+ bool produce_cache = options == kProduceParserCache ||
+ options == kProduceCodeCache ||
+ options == kProduceFullCodeCache;
// Don't try to produce any kind of cache when the debugger is loaded.
- if (isolate->debug()->is_loaded() &&
- (options == kProduceParserCache || options == kProduceCodeCache)) {
+ if (isolate->debug()->is_loaded() && produce_cache) {
options = kNoCompileOptions;
}
@@ -2281,9 +2305,9 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
{
i::HistogramTimerScope total(isolate->counters()->compile_script(), true);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileScript");
- i::Handle<i::Object> name_obj;
- i::Handle<i::Object> source_map_url;
- i::Handle<i::FixedArray> host_defined_options =
+ i::MaybeHandle<i::Object> name_obj;
+ i::MaybeHandle<i::Object> source_map_url;
+ i::MaybeHandle<i::FixedArray> host_defined_options =
isolate->factory()->empty_fixed_array();
int line_offset = 0;
int column_offset = 0;
@@ -2303,11 +2327,12 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
if (!source->source_map_url.IsEmpty()) {
source_map_url = Utils::OpenHandle(*(source->source_map_url));
}
- result = i::Compiler::GetSharedFunctionInfoForScript(
- str, name_obj, line_offset, column_offset, source->resource_options,
- source_map_url, isolate->native_context(), NULL, &script_data, options,
- i::NOT_NATIVES_CODE, host_defined_options);
- has_pending_exception = result.is_null();
+ i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
+ i::Compiler::GetSharedFunctionInfoForScript(
+ str, name_obj, line_offset, column_offset, source->resource_options,
+ source_map_url, isolate->native_context(), NULL, &script_data,
+ options, i::NOT_NATIVES_CODE, host_defined_options);
+ has_pending_exception = !maybe_function_info.ToHandle(&result);
if (has_pending_exception && script_data != NULL) {
// This case won't happen during normal operation; we have compiled
// successfully and produced cached data, but the second compilation
@@ -2317,8 +2342,7 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
}
RETURN_ON_FAILED_EXECUTION(UnboundScript);
- if ((options == kProduceParserCache || options == kProduceCodeCache) &&
- script_data != NULL) {
+ if (produce_cache && script_data != NULL) {
// script_data now contains the data that was generated. source will
// take ownership of it.
source->cached_data = new CachedData(
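
The compile path above now threads i::MaybeHandle through instead of
checking is_null() on a plain handle. A minimal sketch of that idiom with
hypothetical names (only MaybeHandle::ToHandle is taken from the patch):

    // Hypothetical producer; returns an empty MaybeHandle on failure.
    i::MaybeHandle<i::SharedFunctionInfo> TryCompile();

    void Caller() {
      i::Handle<i::SharedFunctionInfo> result;
      // ToHandle() returns false for the empty (failure) case, so the
      // pending-exception flag falls out of the conversion itself.
      bool has_pending_exception = !TryCompile().ToHandle(&result);
      if (has_pending_exception) return;
      // ... use result ...
    }
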
@@ -3267,7 +3291,7 @@ Local<Value> JSON::Parse(Local<String> json_string) {
}
MaybeLocal<String> JSON::Stringify(Local<Context> context,
- Local<Object> json_object,
+ Local<Value> json_object,
Local<String> gap) {
PREPARE_FOR_EXECUTION(context, JSON, Stringify, String);
i::Handle<i::Object> object = Utils::OpenHandle(*json_object);
@@ -4492,8 +4516,6 @@ Maybe<bool> v8::Object::DefineOwnProperty(v8::Local<v8::Context> context,
v8::Local<Value> value,
v8::PropertyAttribute attributes) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
- ENTER_V8(isolate, context, Object, DefineOwnProperty, Nothing<bool>(),
- i::HandleScope);
i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
@@ -4503,11 +4525,25 @@ Maybe<bool> v8::Object::DefineOwnProperty(v8::Local<v8::Context> context,
desc.set_enumerable(!(attributes & v8::DontEnum));
desc.set_configurable(!(attributes & v8::DontDelete));
desc.set_value(value_obj);
- Maybe<bool> success = i::JSReceiver::DefineOwnProperty(
- isolate, self, key_obj, &desc, i::Object::DONT_THROW);
- // Even though we said DONT_THROW, there might be accessors that do throw.
- RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
- return success;
+
+ if (self->IsJSProxy()) {
+ ENTER_V8(isolate, context, Object, DefineOwnProperty, Nothing<bool>(),
+ i::HandleScope);
+ Maybe<bool> success = i::JSReceiver::DefineOwnProperty(
+ isolate, self, key_obj, &desc, i::Object::DONT_THROW);
+ // Even though we said DONT_THROW, there might be accessors that do throw.
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return success;
+ } else {
+ // If it's not a JSProxy, i::JSReceiver::DefineOwnProperty should never run
+ // a script.
+ ENTER_V8_NO_SCRIPT(isolate, context, Object, DefineOwnProperty,
+ Nothing<bool>(), i::HandleScope);
+ Maybe<bool> success = i::JSReceiver::DefineOwnProperty(
+ isolate, self, key_obj, &desc, i::Object::DONT_THROW);
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return success;
+ }
}
Maybe<bool> v8::Object::DefineProperty(v8::Local<v8::Context> context,
@@ -4750,7 +4786,7 @@ MaybeLocal<Array> v8::Object::GetPropertyNames(Local<Context> context,
value = accumulator.GetKeys(i::GetKeysConversion::kKeepNumbers);
DCHECK(self->map()->EnumLength() == i::kInvalidEnumCacheSentinel ||
self->map()->EnumLength() == 0 ||
- self->map()->instance_descriptors()->GetEnumCache() != *value);
+ self->map()->instance_descriptors()->GetEnumCache()->keys() != *value);
auto result = isolate->factory()->NewJSArrayWithElements(value);
RETURN_ESCAPED(Utils::ToLocal(result));
}
@@ -4819,29 +4855,49 @@ Maybe<bool> v8::Object::SetIntegrityLevel(Local<Context> context,
Maybe<bool> v8::Object::Delete(Local<Context> context, Local<Value> key) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
- ENTER_V8(isolate, context, Object, Delete, Nothing<bool>(), i::HandleScope);
auto self = Utils::OpenHandle(this);
auto key_obj = Utils::OpenHandle(*key);
- Maybe<bool> result =
- i::Runtime::DeleteObjectProperty(isolate, self, key_obj, i::SLOPPY);
- has_pending_exception = result.IsNothing();
- RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
- return result;
+ if (self->IsJSProxy()) {
+ ENTER_V8(isolate, context, Object, Delete, Nothing<bool>(), i::HandleScope);
+ Maybe<bool> result =
+ i::Runtime::DeleteObjectProperty(isolate, self, key_obj, i::SLOPPY);
+ has_pending_exception = result.IsNothing();
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return result;
+ } else {
+ // If it's not a JSProxy, i::Runtime::DeleteObjectProperty should never run
+ // a script.
+ ENTER_V8_NO_SCRIPT(isolate, context, Object, Delete, Nothing<bool>(),
+ i::HandleScope);
+ Maybe<bool> result =
+ i::Runtime::DeleteObjectProperty(isolate, self, key_obj, i::SLOPPY);
+ has_pending_exception = result.IsNothing();
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return result;
+ }
}
-
bool v8::Object::Delete(v8::Local<Value> key) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
return Delete(context, key).FromMaybe(false);
}
-
Maybe<bool> v8::Object::DeletePrivate(Local<Context> context,
Local<Private> key) {
- return Delete(context, Local<Value>(reinterpret_cast<Value*>(*key)));
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ // In case of private symbols, i::Runtime::DeleteObjectProperty does not run
+ // any author script.
+ ENTER_V8_NO_SCRIPT(isolate, context, Object, Delete, Nothing<bool>(),
+ i::HandleScope);
+ auto self = Utils::OpenHandle(this);
+ auto key_obj = Utils::OpenHandle(*key);
+ Maybe<bool> result =
+ i::Runtime::DeleteObjectProperty(isolate, self, key_obj, i::SLOPPY);
+ has_pending_exception = result.IsNothing();
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return result;
}
-
Maybe<bool> v8::Object::Has(Local<Context> context, Local<Value> key) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
ENTER_V8(isolate, context, Object, Has, Nothing<bool>(), i::HandleScope);
@@ -5239,6 +5295,7 @@ Local<v8::Context> v8::Object::CreationContext() {
int v8::Object::GetIdentityHash() {
+ i::DisallowHeapAllocation no_gc;
auto isolate = Utils::OpenHandle(this)->GetIsolate();
i::HandleScope scope(isolate);
auto self = Utils::OpenHandle(this);
@@ -7860,7 +7917,10 @@ MaybeLocal<WasmCompiledModule> WasmCompiledModule::Compile(Isolate* isolate,
const uint8_t* start,
size_t length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::wasm::ErrorThrower thrower(i_isolate, "WasmCompiledModule::Deserialize()");
+ i::wasm::ErrorThrower thrower(i_isolate, "WasmCompiledModule::Compile()");
+ if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) {
+ return MaybeLocal<WasmCompiledModule>();
+ }
i::MaybeHandle<i::JSObject> maybe_compiled = i::wasm::SyncCompile(
i_isolate, &thrower, i::wasm::ModuleWireBytes(start, start + length));
if (maybe_compiled.is_null()) return MaybeLocal<WasmCompiledModule>();
@@ -7871,13 +7931,17 @@ MaybeLocal<WasmCompiledModule> WasmCompiledModule::Compile(Isolate* isolate,
WasmModuleObjectBuilderStreaming::WasmModuleObjectBuilderStreaming(
Isolate* isolate)
: isolate_(isolate) {
- MaybeLocal<Promise::Resolver> maybe_promise =
+ MaybeLocal<Promise::Resolver> maybe_resolver =
Promise::Resolver::New(isolate->GetCurrentContext());
- Local<Promise::Resolver> promise;
- if (maybe_promise.ToLocal(&promise)) {
- promise_.Reset(isolate, promise->GetPromise());
- } else {
- UNREACHABLE();
+ Local<Promise::Resolver> resolver = maybe_resolver.ToLocalChecked();
+ promise_.Reset(isolate, resolver->GetPromise());
+
+ if (i::FLAG_wasm_stream_compilation) {
+ i::Handle<i::JSPromise> promise = Utils::OpenHandle(*GetPromise());
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ streaming_decoder_ =
+ i_isolate->wasm_compilation_manager()->StartStreamingCompilation(
+ i_isolate, handle(i_isolate->context()), promise);
}
}
@@ -7887,6 +7951,10 @@ Local<Promise> WasmModuleObjectBuilderStreaming::GetPromise() {
void WasmModuleObjectBuilderStreaming::OnBytesReceived(const uint8_t* bytes,
size_t size) {
+ if (i::FLAG_wasm_stream_compilation) {
+ streaming_decoder_->OnBytesReceived(i::Vector<const uint8_t>(bytes, size));
+ return;
+ }
std::unique_ptr<uint8_t[]> cloned_bytes(new uint8_t[size]);
memcpy(cloned_bytes.get(), bytes, size);
received_buffers_.push_back(
@@ -7897,6 +7965,10 @@ void WasmModuleObjectBuilderStreaming::OnBytesReceived(const uint8_t* bytes,
}
void WasmModuleObjectBuilderStreaming::Finish() {
+ if (i::FLAG_wasm_stream_compilation) {
+ streaming_decoder_->Finish();
+ return;
+ }
std::unique_ptr<uint8_t[]> wire_bytes(new uint8_t[total_size_]);
uint8_t* insert_at = wire_bytes.get();
@@ -7913,8 +7985,13 @@ void WasmModuleObjectBuilderStreaming::Finish() {
}
void WasmModuleObjectBuilderStreaming::Abort(Local<Value> exception) {
- Local<Promise::Resolver> resolver =
- promise_.Get(isolate_).As<Promise::Resolver>();
+ Local<Promise> promise = GetPromise();
+ // Do nothing if the promise has already been resolved, e.g. because of a
+ // compilation error.
+ if (promise->State() != v8::Promise::kPending) return;
+ if (i::FLAG_wasm_stream_compilation) streaming_decoder_->Abort();
+
+ Local<Promise::Resolver> resolver = promise.As<Promise::Resolver>();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate_);
i::HandleScope scope(i_isolate);
Local<Context> context = Utils::ToLocal(handle(i_isolate->context()));
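
For the streaming-builder changes above, a hedged sketch of how an embedder
might drive the API when --wasm-stream-compilation is on; ChunkSource and
its members are hypothetical stand-ins for a real byte stream:

    // Hypothetical embedder loop over WasmModuleObjectBuilderStreaming.
    void FeedModule(v8::Isolate* isolate, ChunkSource* source) {
      v8::WasmModuleObjectBuilderStreaming builder(isolate);
      v8::Local<v8::Promise> promise = builder.GetPromise();
      while (source->HasNext()) {
        Chunk c = source->Next();
        // Forwarded straight to the streaming decoder when the flag is
        // set; otherwise buffered until Finish().
        builder.OnBytesReceived(c.data, c.size);
      }
      builder.Finish();
      // |promise| resolves with the compiled module or rejects on error;
      // Abort() rejects it early, but only while it is still pending.
    }
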
@@ -8508,6 +8585,18 @@ void V8::AddGCEpilogueCallback(v8::GCCallback callback, GCType gc_type) {
data, gc_type);
}
+void V8::RemoveGCPrologueCallback(GCCallback callback) {
+ void* data = reinterpret_cast<void*>(callback);
+ Isolate::GetCurrent()->RemoveGCPrologueCallback(CallGCCallbackWithoutIsolate,
+ data);
+}
+
+void V8::RemoveGCEpilogueCallback(GCCallback callback) {
+ void* data = reinterpret_cast<void*>(callback);
+ Isolate::GetCurrent()->RemoveGCEpilogueCallback(CallGCCallbackWithoutIsolate,
+ data);
+}
+
void Isolate::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->heap()->SetEmbedderHeapTracer(tracer);
@@ -8917,7 +9006,7 @@ void Isolate::EnqueueMicrotask(MicrotaskCallback microtask, void* data) {
i::HandleScope scope(isolate);
i::Handle<i::CallHandlerInfo> callback_info =
i::Handle<i::CallHandlerInfo>::cast(
- isolate->factory()->NewStruct(i::TUPLE2_TYPE));
+ isolate->factory()->NewStruct(i::TUPLE2_TYPE, i::NOT_TENURED));
SET_FIELD_WRAPPED(callback_info, set_callback, microtask);
SET_FIELD_WRAPPED(callback_info, set_data, data);
isolate->EnqueueMicrotask(callback_info);
@@ -9890,14 +9979,15 @@ MaybeLocal<UnboundScript> debug::CompileInspectorScript(Isolate* v8_isolate,
i::Handle<i::SharedFunctionInfo> result;
{
ScriptOriginOptions origin_options;
- result = i::Compiler::GetSharedFunctionInfoForScript(
- str, i::Handle<i::Object>(), 0, 0, origin_options,
- i::Handle<i::Object>(), isolate->native_context(), NULL, &script_data,
- ScriptCompiler::kNoCompileOptions,
- i::FLAG_expose_inspector_scripts ? i::NOT_NATIVES_CODE
- : i::INSPECTOR_CODE,
- i::Handle<i::FixedArray>());
- has_pending_exception = result.is_null();
+ i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
+ i::Compiler::GetSharedFunctionInfoForScript(
+ str, i::MaybeHandle<i::Object>(), 0, 0, origin_options,
+ i::MaybeHandle<i::Object>(), isolate->native_context(), NULL,
+ &script_data, ScriptCompiler::kNoCompileOptions,
+ i::FLAG_expose_inspector_scripts ? i::NOT_NATIVES_CODE
+ : i::INSPECTOR_CODE,
+ i::MaybeHandle<i::FixedArray>());
+ has_pending_exception = !maybe_function_info.ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(UnboundScript);
}
RETURN_ESCAPED(ToApiHandle<UnboundScript>(result));
@@ -9999,6 +10089,9 @@ Local<Function> debug::GetBuiltin(Isolate* v8_isolate, Builtin builtin) {
i::Handle<i::JSFunction> fun =
isolate->factory()->NewFunctionWithoutPrototype(
isolate->factory()->empty_string(), call_code, i::SLOPPY);
+ if (i::Builtins::IsLazy(name)) {
+ fun->shared()->set_lazy_deserialization_builtin_id(name);
+ }
fun->shared()->DontAdaptArguments();
return Utils::ToLocal(handle_scope.CloseAndEscape(fun));
}
@@ -10122,7 +10215,7 @@ bool debug::Coverage::FunctionData::HasBlockCoverage() const {
debug::Coverage::BlockData debug::Coverage::FunctionData::GetBlockData(
size_t i) const {
- return BlockData(&function_->blocks.at(i));
+ return BlockData(&function_->blocks.at(i), coverage_);
}
Local<debug::Script> debug::Coverage::ScriptData::GetScript() const {
@@ -10135,15 +10228,17 @@ size_t debug::Coverage::ScriptData::FunctionCount() const {
debug::Coverage::FunctionData debug::Coverage::ScriptData::GetFunctionData(
size_t i) const {
- return FunctionData(&script_->functions.at(i));
+ return FunctionData(&script_->functions.at(i), coverage_);
}
-debug::Coverage::~Coverage() { delete coverage_; }
+debug::Coverage::ScriptData::ScriptData(size_t index,
+ std::shared_ptr<i::Coverage> coverage)
+ : script_(&coverage->at(index)), coverage_(std::move(coverage)) {}
size_t debug::Coverage::ScriptCount() const { return coverage_->size(); }
debug::Coverage::ScriptData debug::Coverage::GetScriptData(size_t i) const {
- return ScriptData(&coverage_->at(i));
+ return ScriptData(i, coverage_);
}
debug::Coverage debug::Coverage::CollectPrecise(Isolate* isolate) {
@@ -10160,6 +10255,53 @@ void debug::Coverage::SelectMode(Isolate* isolate, debug::Coverage::Mode mode) {
i::Coverage::SelectMode(reinterpret_cast<i::Isolate*>(isolate), mode);
}
+int debug::TypeProfile::Entry::SourcePosition() const {
+ return entry_->position;
+}
+
+std::vector<MaybeLocal<String>> debug::TypeProfile::Entry::Types() const {
+ std::vector<MaybeLocal<String>> result;
+ for (const internal::Handle<internal::String>& type : entry_->types) {
+ result.emplace_back(ToApiHandle<String>(type));
+ }
+ return result;
+}
+
+debug::TypeProfile::ScriptData::ScriptData(
+ size_t index, std::shared_ptr<i::TypeProfile> type_profile)
+ : script_(&type_profile->at(index)),
+ type_profile_(std::move(type_profile)) {}
+
+Local<debug::Script> debug::TypeProfile::ScriptData::GetScript() const {
+ return ToApiHandle<debug::Script>(script_->script);
+}
+
+std::vector<debug::TypeProfile::Entry> debug::TypeProfile::ScriptData::Entries()
+ const {
+ std::vector<debug::TypeProfile::Entry> result;
+ for (const internal::TypeProfileEntry& entry : script_->entries) {
+ result.push_back(debug::TypeProfile::Entry(&entry, type_profile_));
+ }
+ return result;
+}
+
+debug::TypeProfile debug::TypeProfile::Collect(Isolate* isolate) {
+ return TypeProfile(
+ i::TypeProfile::Collect(reinterpret_cast<i::Isolate*>(isolate)));
+}
+
+void debug::TypeProfile::SelectMode(Isolate* isolate,
+ debug::TypeProfile::Mode mode) {
+ i::TypeProfile::SelectMode(reinterpret_cast<i::Isolate*>(isolate), mode);
+}
+
+size_t debug::TypeProfile::ScriptCount() const { return type_profile_->size(); }
+
+debug::TypeProfile::ScriptData debug::TypeProfile::GetScriptData(
+ size_t i) const {
+ return ScriptData(i, type_profile_);
+}
+
const char* CpuProfileNode::GetFunctionNameStr() const {
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
return node->entry()->name();
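
The Coverage and TypeProfile ScriptData constructors above now capture a
std::shared_ptr to the backing container alongside the raw element pointer,
so handing a ScriptData to the embedder keeps the whole collection alive. A
minimal reduction of the pattern, with hypothetical names:

    #include <memory>
    #include <vector>

    // Hypothetical reduction of the shared-ownership pattern used above.
    class Data {
     public:
      Data(size_t index, std::shared_ptr<std::vector<int>> backing)
          : item_(&backing->at(index)), backing_(std::move(backing)) {}
      int value() const { return *item_; }

     private:
      int* item_;                                   // raw view into element
      std::shared_ptr<std::vector<int>> backing_;   // keeps vector alive
    };
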
@@ -10646,7 +10788,6 @@ void Testing::DeoptimizeAll(Isolate* isolate) {
namespace internal {
-
void HandleScopeImplementer::FreeThreadResources() {
Free();
}
@@ -10680,7 +10821,7 @@ void HandleScopeImplementer::IterateThis(RootVisitor* v) {
bool found_block_before_deferred = false;
#endif
// Iterate over all handles in the blocks except for the last.
- for (int i = blocks()->length() - 2; i >= 0; --i) {
+ for (int i = static_cast<int>(blocks()->size()) - 2; i >= 0; --i) {
Object** block = blocks()->at(i);
if (last_handle_before_deferred_block_ != NULL &&
(last_handle_before_deferred_block_ <= &block[kHandleBlockSize]) &&
@@ -10700,17 +10841,18 @@ void HandleScopeImplementer::IterateThis(RootVisitor* v) {
found_block_before_deferred);
// Iterate over live handles in the last block (if any).
- if (!blocks()->is_empty()) {
- v->VisitRootPointers(Root::kHandleScope, blocks()->last(),
+ if (!blocks()->empty()) {
+ v->VisitRootPointers(Root::kHandleScope, blocks()->back(),
handle_scope_data_.next);
}
- List<Context*>* context_lists[2] = { &saved_contexts_, &entered_contexts_};
+ DetachableVector<Context*>* context_lists[2] = {&saved_contexts_,
+ &entered_contexts_};
for (unsigned i = 0; i < arraysize(context_lists); i++) {
- if (context_lists[i]->is_empty()) continue;
- Object** start = reinterpret_cast<Object**>(&context_lists[i]->first());
+ if (context_lists[i]->empty()) continue;
+ Object** start = reinterpret_cast<Object**>(&context_lists[i]->front());
v->VisitRootPointers(Root::kHandleScope, start,
- start + context_lists[i]->length());
+ start + context_lists[i]->size());
}
if (microtask_context_) {
v->VisitRootPointer(Root::kHandleScope,
@@ -10736,24 +10878,24 @@ DeferredHandles* HandleScopeImplementer::Detach(Object** prev_limit) {
DeferredHandles* deferred =
new DeferredHandles(isolate()->handle_scope_data()->next, isolate());
- while (!blocks_.is_empty()) {
- Object** block_start = blocks_.last();
+ while (!blocks_.empty()) {
+ Object** block_start = blocks_.back();
Object** block_limit = &block_start[kHandleBlockSize];
// We should not need to check for SealHandleScope here. Assert this.
DCHECK(prev_limit == block_limit ||
!(block_start <= prev_limit && prev_limit <= block_limit));
if (prev_limit == block_limit) break;
- deferred->blocks_.Add(blocks_.last());
- blocks_.RemoveLast();
+ deferred->blocks_.push_back(blocks_.back());
+ blocks_.pop_back();
}
// deferred->blocks_ now contains the blocks installed on the
// HandleScope stack since BeginDeferredScope was called, but in
// reverse order.
- DCHECK(prev_limit == NULL || !blocks_.is_empty());
+ DCHECK(prev_limit == NULL || !blocks_.empty());
- DCHECK(!blocks_.is_empty() && prev_limit != NULL);
+ DCHECK(!blocks_.empty() && prev_limit != NULL);
DCHECK(last_handle_before_deferred_block_ != NULL);
last_handle_before_deferred_block_ = NULL;
return deferred;
@@ -10769,7 +10911,7 @@ void HandleScopeImplementer::BeginDeferredScope() {
DeferredHandles::~DeferredHandles() {
isolate_->UnlinkDeferredHandles(this);
- for (int i = 0; i < blocks_.length(); i++) {
+ for (size_t i = 0; i < blocks_.size(); i++) {
#ifdef ENABLE_HANDLE_ZAPPING
HandleScope::ZapRange(blocks_[i], &blocks_[i][kHandleBlockSize]);
#endif
@@ -10778,14 +10920,14 @@ DeferredHandles::~DeferredHandles() {
}
void DeferredHandles::Iterate(RootVisitor* v) {
- DCHECK(!blocks_.is_empty());
+ DCHECK(!blocks_.empty());
- DCHECK((first_block_limit_ >= blocks_.first()) &&
- (first_block_limit_ <= &(blocks_.first())[kHandleBlockSize]));
+ DCHECK((first_block_limit_ >= blocks_.front()) &&
+ (first_block_limit_ <= &(blocks_.front())[kHandleBlockSize]));
- v->VisitRootPointers(Root::kHandleScope, blocks_.first(), first_block_limit_);
+ v->VisitRootPointers(Root::kHandleScope, blocks_.front(), first_block_limit_);
- for (int i = 1; i < blocks_.length(); i++) {
+ for (size_t i = 1; i < blocks_.size(); i++) {
v->VisitRootPointers(Root::kHandleScope, blocks_[i],
&blocks_[i][kHandleBlockSize]);
}
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 33ce26eec4..92025ee0ca 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -8,9 +8,9 @@
#include "include/v8-testing.h"
#include "src/contexts.h"
#include "src/debug/debug-interface.h"
+#include "src/detachable-vector.h"
#include "src/factory.h"
#include "src/isolate.h"
-#include "src/list.h"
namespace v8 {
@@ -379,7 +379,7 @@ class V8_EXPORT_PRIVATE DeferredHandles {
void Iterate(RootVisitor* v);
- List<Object**> blocks_;
+ std::vector<Object**> blocks_;
DeferredHandles* next_;
DeferredHandles* previous_;
Object** first_block_limit_;
@@ -403,9 +403,6 @@ class HandleScopeImplementer {
public:
explicit HandleScopeImplementer(Isolate* isolate)
: isolate_(isolate),
- blocks_(0),
- entered_contexts_(0),
- saved_contexts_(0),
microtask_context_(nullptr),
spare_(NULL),
call_depth_(0),
@@ -416,7 +413,8 @@ class HandleScopeImplementer {
debug_microtasks_depth_(0),
#endif
microtasks_policy_(v8::MicrotasksPolicy::kAuto),
- last_handle_before_deferred_block_(NULL) { }
+ last_handle_before_deferred_block_(NULL) {
+ }
~HandleScopeImplementer() {
DeleteArray(spare_);
@@ -478,15 +476,14 @@ class HandleScopeImplementer {
inline Handle<Context> MicrotaskContext();
inline bool MicrotaskContextIsLastEnteredContext() const {
return microtask_context_ &&
- entered_context_count_during_microtasks_ ==
- entered_contexts_.length();
+ entered_context_count_during_microtasks_ == entered_contexts_.size();
}
inline void SaveContext(Context* context);
inline Context* RestoreContext();
inline bool HasSavedContexts();
- inline List<internal::Object**>* blocks() { return &blocks_; }
+ inline DetachableVector<Object**>* blocks() { return &blocks_; }
Isolate* isolate() const { return isolate_; }
void ReturnBlock(Object** block) {
@@ -497,9 +494,9 @@ class HandleScopeImplementer {
private:
void ResetAfterArchive() {
- blocks_.Initialize(0);
- entered_contexts_.Initialize(0);
- saved_contexts_.Initialize(0);
+ blocks_.detach();
+ entered_contexts_.detach();
+ saved_contexts_.detach();
microtask_context_ = nullptr;
entered_context_count_during_microtasks_ = 0;
spare_ = NULL;
@@ -508,13 +505,14 @@ class HandleScopeImplementer {
}
void Free() {
- DCHECK(blocks_.length() == 0);
- DCHECK(entered_contexts_.length() == 0);
- DCHECK(saved_contexts_.length() == 0);
+ DCHECK(blocks_.empty());
+ DCHECK(entered_contexts_.empty());
+ DCHECK(saved_contexts_.empty());
DCHECK(!microtask_context_);
- blocks_.Free();
- entered_contexts_.Free();
- saved_contexts_.Free();
+
+ blocks_.free();
+ entered_contexts_.free();
+ saved_contexts_.free();
if (spare_ != NULL) {
DeleteArray(spare_);
spare_ = NULL;
@@ -526,17 +524,17 @@ class HandleScopeImplementer {
DeferredHandles* Detach(Object** prev_limit);
Isolate* isolate_;
- List<internal::Object**> blocks_;
+ DetachableVector<Object**> blocks_;
// Used as a stack to keep track of entered contexts.
- List<Context*> entered_contexts_;
+ DetachableVector<Context*> entered_contexts_;
// Used as a stack to keep track of saved contexts.
- List<Context*> saved_contexts_;
+ DetachableVector<Context*> saved_contexts_;
Context* microtask_context_;
Object** spare_;
int call_depth_;
int microtasks_depth_;
int microtasks_suppressions_;
- int entered_context_count_during_microtasks_;
+ size_t entered_context_count_during_microtasks_;
#ifdef DEBUG
int debug_microtasks_depth_;
#endif
@@ -571,44 +569,42 @@ v8::MicrotasksPolicy HandleScopeImplementer::microtasks_policy() const {
void HandleScopeImplementer::SaveContext(Context* context) {
- saved_contexts_.Add(context);
+ saved_contexts_.push_back(context);
}
Context* HandleScopeImplementer::RestoreContext() {
- return saved_contexts_.RemoveLast();
+ Context* last_context = saved_contexts_.back();
+ saved_contexts_.pop_back();
+ return last_context;
}
bool HandleScopeImplementer::HasSavedContexts() {
- return !saved_contexts_.is_empty();
+ return !saved_contexts_.empty();
}
void HandleScopeImplementer::EnterContext(Handle<Context> context) {
- entered_contexts_.Add(*context);
-}
-
-
-void HandleScopeImplementer::LeaveContext() {
- entered_contexts_.RemoveLast();
+ entered_contexts_.push_back(*context);
}
+void HandleScopeImplementer::LeaveContext() { entered_contexts_.pop_back(); }
bool HandleScopeImplementer::LastEnteredContextWas(Handle<Context> context) {
- return !entered_contexts_.is_empty() && entered_contexts_.last() == *context;
+ return !entered_contexts_.empty() && entered_contexts_.back() == *context;
}
Handle<Context> HandleScopeImplementer::LastEnteredContext() {
- if (entered_contexts_.is_empty()) return Handle<Context>::null();
- return Handle<Context>(entered_contexts_.last());
+ if (entered_contexts_.empty()) return Handle<Context>::null();
+ return Handle<Context>(entered_contexts_.back());
}
void HandleScopeImplementer::EnterMicrotaskContext(Handle<Context> context) {
DCHECK(!microtask_context_);
microtask_context_ = *context;
- entered_context_count_during_microtasks_ = entered_contexts_.length();
+ entered_context_count_during_microtasks_ = entered_contexts_.size();
}
void HandleScopeImplementer::LeaveMicrotaskContext() {
@@ -633,8 +629,8 @@ internal::Object** HandleScopeImplementer::GetSpareOrNewBlock() {
void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
- while (!blocks_.is_empty()) {
- internal::Object** block_start = blocks_.last();
+ while (!blocks_.empty()) {
+ internal::Object** block_start = blocks_.back();
internal::Object** block_limit = block_start + kHandleBlockSize;
// SealHandleScope may make the prev_limit to point inside the block.
@@ -645,7 +641,7 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
break;
}
- blocks_.RemoveLast();
+ blocks_.pop_back();
#ifdef ENABLE_HANDLE_ZAPPING
internal::HandleScope::ZapRange(block_start, block_limit);
#endif
@@ -654,8 +650,8 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
}
spare_ = block_start;
}
- DCHECK((blocks_.is_empty() && prev_limit == NULL) ||
- (!blocks_.is_empty() && prev_limit != NULL));
+ DCHECK((blocks_.empty() && prev_limit == NULL) ||
+ (!blocks_.empty() && prev_limit != NULL));
}
// Interceptor functions called from generated inline caches to notify
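
The api.h changes above replace i::List with DetachableVector, whose
detach() and free() calls stand in for List::Initialize(0) and List::Free()
in ResetAfterArchive() and Free(). A sketch of what such a wrapper could
look like, inferred purely from these call sites; the real class is in
src/detachable-vector.h and this shape is an assumption:

    #include <vector>

    // Assumed shape of DetachableVector, reconstructed from usage only.
    template <typename T>
    class DetachableVector {
     public:
      DetachableVector() : vec_(new std::vector<T>()) {}
      ~DetachableVector() { delete vec_; }

      // Forget the current storage without destroying it: after a thread
      // is archived, the byte-copied archive still points at the old
      // vector, so the live object must simply start over.
      void detach() { vec_ = new std::vector<T>(); }

      // Actually release the storage (the Free() path).
      void free() { delete vec_; vec_ = new std::vector<T>(); }

      void push_back(const T& v) { vec_->push_back(v); }
      void pop_back() { vec_->pop_back(); }
      T& back() { return vec_->back(); }
      T& front() { return vec_->front(); }
      T& at(size_t i) { return vec_->at(i); }
      size_t size() const { return vec_->size(); }
      bool empty() const { return vec_->empty(); }

     private:
      std::vector<T>* vec_;  // intentionally abandoned by detach()
    };
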
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index f3fcb8edb0..3d58b8249b 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -107,8 +107,6 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
#define RUNTIME_FUNCTION(Name) RUNTIME_FUNCTION_RETURNS_TYPE(Object*, Name)
#define RUNTIME_FUNCTION_RETURN_PAIR(Name) \
RUNTIME_FUNCTION_RETURNS_TYPE(ObjectPair, Name)
-#define RUNTIME_FUNCTION_RETURN_TRIPLE(Name) \
- RUNTIME_FUNCTION_RETURNS_TYPE(ObjectTriple, Name)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 08a9d4d308..4261943325 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -179,35 +179,22 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
}
}
-Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
- rm_ = no_reg;
+Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) : rmode_(rmode) {
value_.immediate = immediate;
- rmode_ = rmode;
}
Operand Operand::Zero() { return Operand(static_cast<int32_t>(0)); }
-Operand::Operand(const ExternalReference& f) {
- rm_ = no_reg;
+Operand::Operand(const ExternalReference& f)
+ : rmode_(RelocInfo::EXTERNAL_REFERENCE) {
value_.immediate = reinterpret_cast<int32_t>(f.address());
- rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
-
-Operand::Operand(Smi* value) {
- rm_ = no_reg;
+Operand::Operand(Smi* value) : rmode_(RelocInfo::NONE32) {
value_.immediate = reinterpret_cast<intptr_t>(value);
- rmode_ = RelocInfo::NONE32;
-}
-
-
-Operand::Operand(Register rm) {
- rm_ = rm;
- rs_ = no_reg;
- shift_op_ = LSL;
- shift_imm_ = 0;
}
+Operand::Operand(Register rm) : rm_(rm), shift_op_(LSL), shift_imm_(0) {}
void Assembler::CheckBuffer() {
if (buffer_space() <= kGap) {
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index c4d65829db..c9aa9ef015 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -413,52 +413,37 @@ Operand Operand::EmbeddedCode(CodeStub* stub) {
return result;
}
-MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
- rn_ = rn;
- rm_ = no_reg;
- offset_ = offset;
- am_ = am;
-
+MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am)
+ : rn_(rn), rm_(no_reg), offset_(offset), am_(am) {
// Accesses below the stack pointer are not safe, and are prohibited by the
// ABI. We can check obvious violations here.
- if (rn.is(sp)) {
+ if (rn == sp) {
if (am == Offset) DCHECK_LE(0, offset);
if (am == NegOffset) DCHECK_GE(0, offset);
}
}
+MemOperand::MemOperand(Register rn, Register rm, AddrMode am)
+ : rn_(rn), rm_(rm), shift_op_(LSL), shift_imm_(0), am_(am) {}
-MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
- rn_ = rn;
- rm_ = rm;
- shift_op_ = LSL;
- shift_imm_ = 0;
- am_ = am;
-}
-
-
-MemOperand::MemOperand(Register rn, Register rm,
- ShiftOp shift_op, int shift_imm, AddrMode am) {
+MemOperand::MemOperand(Register rn, Register rm, ShiftOp shift_op,
+ int shift_imm, AddrMode am)
+ : rn_(rn),
+ rm_(rm),
+ shift_op_(shift_op),
+ shift_imm_(shift_imm & 31),
+ am_(am) {
DCHECK(is_uint5(shift_imm));
- rn_ = rn;
- rm_ = rm;
- shift_op_ = shift_op;
- shift_imm_ = shift_imm & 31;
- am_ = am;
}
-
-NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align) {
+NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align)
+ : rn_(rn), rm_(am == Offset ? pc : sp) {
DCHECK((am == Offset) || (am == PostIndex));
- rn_ = rn;
- rm_ = (am == Offset) ? pc : sp;
SetAlignment(align);
}
-
-NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align) {
- rn_ = rn;
- rm_ = rm;
+NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align)
+ : rn_(rn), rm_(rm) {
SetAlignment(align);
}
@@ -507,18 +492,16 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
-const Instr kPushRegPattern =
- al | B26 | 4 | NegPreIndex | Register::kCode_sp * B16;
+const Instr kPushRegPattern = al | B26 | 4 | NegPreIndex | sp.code() * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
-const Instr kPopRegPattern =
- al | B26 | L | 4 | PostIndex | Register::kCode_sp * B16;
+const Instr kPopRegPattern = al | B26 | L | 4 | PostIndex | sp.code() * B16;
// ldr rd, [pc, #offset]
const Instr kLdrPCImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
-const Instr kLdrPCImmedPattern = 5 * B24 | L | Register::kCode_pc * B16;
+const Instr kLdrPCImmedPattern = 5 * B24 | L | pc.code() * B16;
// vldr dd, [pc, #offset]
const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
-const Instr kVldrDPCPattern = 13 * B24 | L | Register::kCode_pc * B16 | 11 * B8;
+const Instr kVldrDPCPattern = 13 * B24 | L | pc.code() * B16 | 11 * B8;
// blxcc rm
const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
@@ -544,14 +527,11 @@ const Instr kAddSubFlip = 0x6 * B21;
const Instr kAndBicFlip = 0xe * B21;
// A mask for the Rd register for push, pop, ldr, str instructions.
-const Instr kLdrRegFpOffsetPattern =
- al | B26 | L | Offset | Register::kCode_fp * B16;
-const Instr kStrRegFpOffsetPattern =
- al | B26 | Offset | Register::kCode_fp * B16;
+const Instr kLdrRegFpOffsetPattern = al | B26 | L | Offset | fp.code() * B16;
+const Instr kStrRegFpOffsetPattern = al | B26 | Offset | fp.code() * B16;
const Instr kLdrRegFpNegOffsetPattern =
- al | B26 | L | NegOffset | Register::kCode_fp * B16;
-const Instr kStrRegFpNegOffsetPattern =
- al | B26 | NegOffset | Register::kCode_fp * B16;
+ al | B26 | L | NegOffset | fp.code() * B16;
+const Instr kStrRegFpNegOffsetPattern = al | B26 | NegOffset | fp.code() * B16;
const Instr kLdrStrInstrTypeMask = 0xffff0000;
Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
@@ -722,23 +702,17 @@ Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
Register Assembler::GetRd(Instr instr) {
- Register reg;
- reg.reg_code = Instruction::RdValue(instr);
- return reg;
+ return Register::from_code(Instruction::RdValue(instr));
}
Register Assembler::GetRn(Instr instr) {
- Register reg;
- reg.reg_code = Instruction::RnValue(instr);
- return reg;
+ return Register::from_code(Instruction::RnValue(instr));
}
Register Assembler::GetRm(Instr instr) {
- Register reg;
- reg.reg_code = Instruction::RmValue(instr);
- return reg;
+ return Register::from_code(Instruction::RmValue(instr));
}
@@ -1182,7 +1156,7 @@ void Assembler::Move32BitImmediate(Register rd, const Operand& x,
DCHECK(!x.MustOutputRelocInfo(this));
UseScratchRegisterScope temps(this);
// Re-use the destination register as a scratch if possible.
- Register target = !rd.is(pc) ? rd : temps.Acquire();
+ Register target = rd != pc ? rd : temps.Acquire();
if (CpuFeatures::IsSupported(ARMv7)) {
uint32_t imm32 = static_cast<uint32_t>(x.immediate());
CpuFeatureScope scope(this, ARMv7);
@@ -1240,7 +1214,7 @@ void Assembler::AddrMode1(Instr instr, Register rd, Register rn,
UseScratchRegisterScope temps(this);
// Re-use the destination register if possible.
Register scratch =
- (rd.is_valid() && !rd.is(rn) && !rd.is(pc)) ? rd : temps.Acquire();
+ (rd.is_valid() && rd != rn && rd != pc) ? rd : temps.Acquire();
mov(scratch, x, LeaveCC, cond);
AddrMode1(instr, rd, rn, Operand(scratch));
}
@@ -1252,12 +1226,12 @@ void Assembler::AddrMode1(Instr instr, Register rd, Register rn,
} else if (!rn.is_valid()) {
// Emit a move instruction. If the operand is a register-shifted register,
// then prevent the destination from being PC as this is unpredictable.
- DCHECK(!x.IsRegisterShiftedRegister() || !rd.is(pc));
+ DCHECK(!x.IsRegisterShiftedRegister() || rd != pc);
emit(instr | rd.code() * B12);
} else {
emit(instr | rn.code() * B16 | rd.code() * B12);
}
- if (rn.is(pc) || x.rm_.is(pc)) {
+ if (rn == pc || x.rm_ == pc) {
// Block constant pool emission for one instruction after reading pc.
BlockConstPoolFor(1);
}
@@ -1279,7 +1253,7 @@ bool Assembler::AddrMode1TryEncodeOperand(Instr* instr, const Operand& x) {
} else {
DCHECK(x.IsRegisterShiftedRegister());
// It is unpredictable to use the PC in this case.
- DCHECK(!x.rm_.is(pc) && !x.rs_.is(pc));
+ DCHECK(x.rm_ != pc && x.rs_ != pc);
*instr |= x.rs_.code() * B8 | x.shift_op_ | B4 | x.rm_.code();
}
@@ -1303,7 +1277,7 @@ void Assembler::AddrMode2(Instr instr, Register rd, const MemOperand& x) {
// Allow re-using rd for load instructions if possible.
bool is_load = (instr & L) == L;
Register scratch =
- (is_load && !rd.is(x.rn_) && !rd.is(pc)) ? rd : temps.Acquire();
+ (is_load && rd != x.rn_ && rd != pc) ? rd : temps.Acquire();
mov(scratch, Operand(x.offset_), LeaveCC,
Instruction::ConditionField(instr));
AddrMode2(instr, rd, MemOperand(x.rn_, scratch, x.am_));
@@ -1315,10 +1289,10 @@ void Assembler::AddrMode2(Instr instr, Register rd, const MemOperand& x) {
// Register offset (shift_imm_ and shift_op_ are 0) or scaled
// register offset the constructors make sure than both shift_imm_
// and shift_op_ are initialized.
- DCHECK(!x.rm_.is(pc));
+ DCHECK(x.rm_ != pc);
instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
}
- DCHECK((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
+ DCHECK((am & (P | W)) == P || x.rn_ != pc); // no pc base with writeback
emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
@@ -1340,7 +1314,7 @@ void Assembler::AddrMode3(Instr instr, Register rd, const MemOperand& x) {
UseScratchRegisterScope temps(this);
// Allow re-using rd for load instructions if possible.
Register scratch =
- (is_load && !rd.is(x.rn_) && !rd.is(pc)) ? rd : temps.Acquire();
+ (is_load && rd != x.rn_ && rd != pc) ? rd : temps.Acquire();
mov(scratch, Operand(x.offset_), LeaveCC,
Instruction::ConditionField(instr));
AddrMode3(instr, rd, MemOperand(x.rn_, scratch, x.am_));
@@ -1354,24 +1328,24 @@ void Assembler::AddrMode3(Instr instr, Register rd, const MemOperand& x) {
UseScratchRegisterScope temps(this);
// Allow re-using rd for load instructions if possible.
Register scratch =
- (is_load && !rd.is(x.rn_) && !rd.is(pc)) ? rd : temps.Acquire();
+ (is_load && rd != x.rn_ && rd != pc) ? rd : temps.Acquire();
mov(scratch, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
Instruction::ConditionField(instr));
AddrMode3(instr, rd, MemOperand(x.rn_, scratch, x.am_));
return;
} else {
// Register offset.
- DCHECK((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback
+ DCHECK((am & (P | W)) == P || x.rm_ != pc); // no pc index with writeback
instr |= x.rm_.code();
}
- DCHECK((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
+ DCHECK((am & (P | W)) == P || x.rn_ != pc); // no pc base with writeback
emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
void Assembler::AddrMode4(Instr instr, Register rn, RegList rl) {
DCHECK((instr & ~(kCondMask | P | U | W | L)) == B27);
DCHECK(rl != 0);
- DCHECK(!rn.is(pc));
+ DCHECK(rn != pc);
emit(instr | rn.code()*B16 | rl);
}
@@ -1389,7 +1363,7 @@ void Assembler::AddrMode5(Instr instr, CRegister crd, const MemOperand& x) {
am ^= U;
}
DCHECK(is_uint8(offset_8)); // unsigned word offset must fit in a byte
- DCHECK((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
+ DCHECK((am & (P | W)) == P || x.rn_ != pc); // no pc base with writeback
  // Post-indexed addressing requires W == 1, unlike in AddrMode2/3.
if ((am & P) == 0)
@@ -1453,12 +1427,12 @@ void Assembler::blx(int branch_offset) {
}
void Assembler::blx(Register target, Condition cond) {
- DCHECK(!target.is(pc));
+ DCHECK(target != pc);
emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
}
void Assembler::bx(Register target, Condition cond) {
- DCHECK(!target.is(pc)); // use of pc is actually allowed, but discouraged
+ DCHECK(target != pc); // use of pc is actually allowed, but discouraged
emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
}
@@ -1585,7 +1559,7 @@ void Assembler::orr(Register dst, Register src1, Register src2, SBit s,
void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
// Don't allow nop instructions in the form mov rn, rn to be generated using
// the mov instruction. They must be generated using nop(int/NopMarkerTypes).
- DCHECK(!(src.IsRegister() && src.rm().is(dst) && s == LeaveCC && cond == al));
+ DCHECK(!(src.IsRegister() && src.rm() == dst && s == LeaveCC && cond == al));
AddrMode1(cond | MOV | s, dst, no_reg, src);
}
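
The DCHECK above reserves the encoding "mov rn, rn" (with LeaveCC and al) for marker nops. A minimal sketch of what it rejects and the sanctioned alternatives, assuming the usual MacroAssembler context where __ expands to the masm pointer:

    // __ mov(r0, r0);            // would trip the DCHECK: reserved nop encoding
    __ nop();                     // canonical no-op, via nop(int/NopMarkerTypes)
    __ mov(r0, r0, SetCC);        // allowed: s != LeaveCC
    __ mov(r0, r0, LeaveCC, ne);  // allowed: cond != al
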
@@ -1684,7 +1658,7 @@ void Assembler::lsr(Register dst, Register src1, const Operand& src2, SBit s,
// Multiply instructions.
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
SBit s, Condition cond) {
- DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
+ DCHECK(dst != pc && src1 != pc && src2 != pc && srcA != pc);
emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -1692,7 +1666,7 @@ void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
Condition cond) {
- DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
+ DCHECK(dst != pc && src1 != pc && src2 != pc && srcA != pc);
DCHECK(IsEnabled(ARMv7));
emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
@@ -1701,7 +1675,7 @@ void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
void Assembler::sdiv(Register dst, Register src1, Register src2,
Condition cond) {
- DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
+ DCHECK(dst != pc && src1 != pc && src2 != pc);
DCHECK(IsEnabled(SUDIV));
  emit(cond | B26 | B25 | B24 | B20 | dst.code()*B16 | 0xf * B12 |
src2.code()*B8 | B4 | src1.code());
@@ -1710,7 +1684,7 @@ void Assembler::sdiv(Register dst, Register src1, Register src2,
void Assembler::udiv(Register dst, Register src1, Register src2,
Condition cond) {
- DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
+ DCHECK(dst != pc && src1 != pc && src2 != pc);
DCHECK(IsEnabled(SUDIV));
emit(cond | B26 | B25 | B24 | B21 | B20 | dst.code() * B16 | 0xf * B12 |
src2.code() * B8 | B4 | src1.code());
@@ -1719,7 +1693,7 @@ void Assembler::udiv(Register dst, Register src1, Register src2,
void Assembler::mul(Register dst, Register src1, Register src2, SBit s,
Condition cond) {
- DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
+ DCHECK(dst != pc && src1 != pc && src2 != pc);
// dst goes in bits 16-19 for this instruction!
emit(cond | s | dst.code() * B16 | src2.code() * B8 | B7 | B4 | src1.code());
}
@@ -1727,7 +1701,7 @@ void Assembler::mul(Register dst, Register src1, Register src2, SBit s,
void Assembler::smmla(Register dst, Register src1, Register src2, Register srcA,
Condition cond) {
- DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
+ DCHECK(dst != pc && src1 != pc && src2 != pc && srcA != pc);
emit(cond | B26 | B25 | B24 | B22 | B20 | dst.code() * B16 |
srcA.code() * B12 | src2.code() * B8 | B4 | src1.code());
}
@@ -1735,7 +1709,7 @@ void Assembler::smmla(Register dst, Register src1, Register src2, Register srcA,
void Assembler::smmul(Register dst, Register src1, Register src2,
Condition cond) {
- DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
+ DCHECK(dst != pc && src1 != pc && src2 != pc);
emit(cond | B26 | B25 | B24 | B22 | B20 | dst.code() * B16 | 0xf * B12 |
src2.code() * B8 | B4 | src1.code());
}
@@ -1747,8 +1721,8 @@ void Assembler::smlal(Register dstL,
Register src2,
SBit s,
Condition cond) {
- DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- DCHECK(!dstL.is(dstH));
+ DCHECK(dstL != pc && dstH != pc && src1 != pc && src2 != pc);
+ DCHECK(dstL != dstH);
emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -1760,8 +1734,8 @@ void Assembler::smull(Register dstL,
Register src2,
SBit s,
Condition cond) {
- DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- DCHECK(!dstL.is(dstH));
+ DCHECK(dstL != pc && dstH != pc && src1 != pc && src2 != pc);
+ DCHECK(dstL != dstH);
emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -1773,8 +1747,8 @@ void Assembler::umlal(Register dstL,
Register src2,
SBit s,
Condition cond) {
- DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- DCHECK(!dstL.is(dstH));
+ DCHECK(dstL != pc && dstH != pc && src1 != pc && src2 != pc);
+ DCHECK(dstL != dstH);
emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -1786,8 +1760,8 @@ void Assembler::umull(Register dstL,
Register src2,
SBit s,
Condition cond) {
- DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- DCHECK(!dstL.is(dstH));
+ DCHECK(dstL != pc && dstH != pc && src1 != pc && src2 != pc);
+ DCHECK(dstL != dstH);
emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -1795,7 +1769,7 @@ void Assembler::umull(Register dstL,
// Miscellaneous arithmetic instructions.
void Assembler::clz(Register dst, Register src, Condition cond) {
- DCHECK(!dst.is(pc) && !src.is(pc));
+ DCHECK(dst != pc && src != pc);
emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
15*B8 | CLZ | src.code());
}
@@ -1808,7 +1782,7 @@ void Assembler::usat(Register dst,
int satpos,
const Operand& src,
Condition cond) {
- DCHECK(!dst.is(pc) && !src.rm_.is(pc));
+ DCHECK(dst != pc && src.rm_ != pc);
DCHECK((satpos >= 0) && (satpos <= 31));
DCHECK(src.IsImmediateShiftedRegister());
DCHECK((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
@@ -1835,7 +1809,7 @@ void Assembler::ubfx(Register dst,
int width,
Condition cond) {
DCHECK(IsEnabled(ARMv7));
- DCHECK(!dst.is(pc) && !src.is(pc));
+ DCHECK(dst != pc && src != pc);
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK((width >= 1) && (width <= (32 - lsb)));
emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
@@ -1854,7 +1828,7 @@ void Assembler::sbfx(Register dst,
int width,
Condition cond) {
DCHECK(IsEnabled(ARMv7));
- DCHECK(!dst.is(pc) && !src.is(pc));
+ DCHECK(dst != pc && src != pc);
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK((width >= 1) && (width <= (32 - lsb)));
emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
@@ -1868,7 +1842,7 @@ void Assembler::sbfx(Register dst,
// bfc dst, #lsb, #width
void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
DCHECK(IsEnabled(ARMv7));
- DCHECK(!dst.is(pc));
+ DCHECK(dst != pc);
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK((width >= 1) && (width <= (32 - lsb)));
int msb = lsb + width - 1;
@@ -1886,7 +1860,7 @@ void Assembler::bfi(Register dst,
int width,
Condition cond) {
DCHECK(IsEnabled(ARMv7));
- DCHECK(!dst.is(pc) && !src.is(pc));
+ DCHECK(dst != pc && src != pc);
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK((width >= 1) && (width <= (32 - lsb)));
int msb = lsb + width - 1;
@@ -1902,10 +1876,10 @@ void Assembler::pkhbt(Register dst,
// Instruction details available in ARM DDI 0406C.b, A8.8.125.
// cond(31-28) | 01101000(27-20) | Rn(19-16) |
// Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0)
- DCHECK(!dst.is(pc));
- DCHECK(!src1.is(pc));
+ DCHECK(dst != pc);
+ DCHECK(src1 != pc);
DCHECK(src2.IsImmediateShiftedRegister());
- DCHECK(!src2.rm().is(pc));
+ DCHECK(src2.rm() != pc);
DCHECK((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
DCHECK(src2.shift_op() == LSL);
emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
@@ -1920,10 +1894,10 @@ void Assembler::pkhtb(Register dst,
// Instruction details available in ARM DDI 0406C.b, A8.8.125.
// cond(31-28) | 01101000(27-20) | Rn(19-16) |
// Rd(15-12) | imm5(11-7) | 1(6) | 01(5-4) | Rm(3-0)
- DCHECK(!dst.is(pc));
- DCHECK(!src1.is(pc));
+ DCHECK(dst != pc);
+ DCHECK(src1 != pc);
DCHECK(src2.IsImmediateShiftedRegister());
- DCHECK(!src2.rm().is(pc));
+ DCHECK(src2.rm() != pc);
DCHECK((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
DCHECK(src2.shift_op() == ASR);
int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_;
@@ -1936,8 +1910,8 @@ void Assembler::sxtb(Register dst, Register src, int rotate, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.233.
// cond(31-28) | 01101010(27-20) | 1111(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
- DCHECK(!dst.is(pc));
- DCHECK(!src.is(pc));
+ DCHECK(dst != pc);
+ DCHECK(src != pc);
DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
emit(cond | 0x6A * B20 | 0xF * B16 | dst.code() * B12 |
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
@@ -1949,9 +1923,9 @@ void Assembler::sxtab(Register dst, Register src1, Register src2, int rotate,
// Instruction details available in ARM DDI 0406C.b, A8.8.233.
// cond(31-28) | 01101010(27-20) | Rn(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
- DCHECK(!dst.is(pc));
- DCHECK(!src1.is(pc));
- DCHECK(!src2.is(pc));
+ DCHECK(dst != pc);
+ DCHECK(src1 != pc);
+ DCHECK(src2 != pc);
DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
emit(cond | 0x6A * B20 | src1.code() * B16 | dst.code() * B12 |
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
@@ -1962,8 +1936,8 @@ void Assembler::sxth(Register dst, Register src, int rotate, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.235.
// cond(31-28) | 01101011(27-20) | 1111(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
- DCHECK(!dst.is(pc));
- DCHECK(!src.is(pc));
+ DCHECK(dst != pc);
+ DCHECK(src != pc);
DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
emit(cond | 0x6B * B20 | 0xF * B16 | dst.code() * B12 |
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
@@ -1975,9 +1949,9 @@ void Assembler::sxtah(Register dst, Register src1, Register src2, int rotate,
// Instruction details available in ARM DDI 0406C.b, A8.8.235.
// cond(31-28) | 01101011(27-20) | Rn(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
- DCHECK(!dst.is(pc));
- DCHECK(!src1.is(pc));
- DCHECK(!src2.is(pc));
+ DCHECK(dst != pc);
+ DCHECK(src1 != pc);
+ DCHECK(src2 != pc);
DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
emit(cond | 0x6B * B20 | src1.code() * B16 | dst.code() * B12 |
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
@@ -1988,8 +1962,8 @@ void Assembler::uxtb(Register dst, Register src, int rotate, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.274.
// cond(31-28) | 01101110(27-20) | 1111(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
- DCHECK(!dst.is(pc));
- DCHECK(!src.is(pc));
+ DCHECK(dst != pc);
+ DCHECK(src != pc);
DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
emit(cond | 0x6E * B20 | 0xF * B16 | dst.code() * B12 |
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
@@ -2001,9 +1975,9 @@ void Assembler::uxtab(Register dst, Register src1, Register src2, int rotate,
// Instruction details available in ARM DDI 0406C.b, A8.8.271.
// cond(31-28) | 01101110(27-20) | Rn(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
- DCHECK(!dst.is(pc));
- DCHECK(!src1.is(pc));
- DCHECK(!src2.is(pc));
+ DCHECK(dst != pc);
+ DCHECK(src1 != pc);
+ DCHECK(src2 != pc);
DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
emit(cond | 0x6E * B20 | src1.code() * B16 | dst.code() * B12 |
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
@@ -2014,8 +1988,8 @@ void Assembler::uxtb16(Register dst, Register src, int rotate, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.275.
// cond(31-28) | 01101100(27-20) | 1111(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
- DCHECK(!dst.is(pc));
- DCHECK(!src.is(pc));
+ DCHECK(dst != pc);
+ DCHECK(src != pc);
DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
emit(cond | 0x6C * B20 | 0xF * B16 | dst.code() * B12 |
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
@@ -2026,8 +2000,8 @@ void Assembler::uxth(Register dst, Register src, int rotate, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.276.
// cond(31-28) | 01101111(27-20) | 1111(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
- DCHECK(!dst.is(pc));
- DCHECK(!src.is(pc));
+ DCHECK(dst != pc);
+ DCHECK(src != pc);
DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
emit(cond | 0x6F * B20 | 0xF * B16 | dst.code() * B12 |
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
@@ -2039,9 +2013,9 @@ void Assembler::uxtah(Register dst, Register src1, Register src2, int rotate,
// Instruction details available in ARM DDI 0406C.b, A8.8.273.
// cond(31-28) | 01101111(27-20) | Rn(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
- DCHECK(!dst.is(pc));
- DCHECK(!src1.is(pc));
- DCHECK(!src2.is(pc));
+ DCHECK(dst != pc);
+ DCHECK(src1 != pc);
+ DCHECK(src2 != pc);
DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
emit(cond | 0x6F * B20 | src1.code() * B16 | dst.code() * B12 |
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
@@ -2052,15 +2026,15 @@ void Assembler::rbit(Register dst, Register src, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.144.
// cond(31-28) | 011011111111(27-16) | Rd(15-12) | 11110011(11-4) | Rm(3-0)
DCHECK(IsEnabled(ARMv7));
- DCHECK(!dst.is(pc));
- DCHECK(!src.is(pc));
+ DCHECK(dst != pc);
+ DCHECK(src != pc);
emit(cond | 0x6FF * B16 | dst.code() * B12 | 0xF3 * B4 | src.code());
}
// Status register access instructions.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
- DCHECK(!dst.is(pc));
+ DCHECK(dst != pc);
emit(cond | B24 | s | 15*B16 | dst.code()*B12);
}
@@ -2136,8 +2110,8 @@ void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
void Assembler::ldrd(Register dst1, Register dst2,
const MemOperand& src, Condition cond) {
- DCHECK(src.rm().is(no_reg));
- DCHECK(!dst1.is(lr)); // r14.
+ DCHECK(src.rm() == no_reg);
+ DCHECK(dst1 != lr); // r14.
DCHECK_EQ(0, dst1.code() % 2);
DCHECK_EQ(dst1.code() + 1, dst2.code());
AddrMode3(cond | B7 | B6 | B4, dst1, src);
@@ -2146,8 +2120,8 @@ void Assembler::ldrd(Register dst1, Register dst2,
void Assembler::strd(Register src1, Register src2,
const MemOperand& dst, Condition cond) {
- DCHECK(dst.rm().is(no_reg));
- DCHECK(!src1.is(lr)); // r14.
+ DCHECK(dst.rm() == no_reg);
+ DCHECK(src1 != lr); // r14.
DCHECK_EQ(0, src1.code() % 2);
DCHECK_EQ(src1.code() + 1, src2.code());
AddrMode3(cond | B7 | B6 | B5 | B4, src1, dst);
@@ -2157,6 +2131,8 @@ void Assembler::strd(Register src1, Register src2,
void Assembler::ldrex(Register dst, Register src, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.75.
// cond(31-28) | 00011001(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
+ DCHECK(dst != pc);
+ DCHECK(src != pc);
emit(cond | B24 | B23 | B20 | src.code() * B16 | dst.code() * B12 | 0xf9f);
}
@@ -2165,6 +2141,11 @@ void Assembler::strex(Register src1, Register src2, Register dst,
// Instruction details available in ARM DDI 0406C.b, A8.8.212.
// cond(31-28) | 00011000(27-20) | Rn(19-16) | Rd(15-12) | 11111001(11-4) |
// Rt(3-0)
+ DCHECK(dst != pc);
+ DCHECK(src1 != pc);
+ DCHECK(src2 != pc);
+ DCHECK(src1 != dst);
+ DCHECK(src1 != src2);
emit(cond | B24 | B23 | dst.code() * B16 | src1.code() * B12 | 0xf9 * B4 |
src2.code());
}
@@ -2172,6 +2153,8 @@ void Assembler::strex(Register src1, Register src2, Register dst,
void Assembler::ldrexb(Register dst, Register src, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.76.
// cond(31-28) | 00011101(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
+ DCHECK(dst != pc);
+ DCHECK(src != pc);
emit(cond | B24 | B23 | B22 | B20 | src.code() * B16 | dst.code() * B12 |
0xf9f);
}
@@ -2181,6 +2164,11 @@ void Assembler::strexb(Register src1, Register src2, Register dst,
// Instruction details available in ARM DDI 0406C.b, A8.8.213.
// cond(31-28) | 00011100(27-20) | Rn(19-16) | Rd(15-12) | 11111001(11-4) |
// Rt(3-0)
+ DCHECK(dst != pc);
+ DCHECK(src1 != pc);
+ DCHECK(src2 != pc);
+ DCHECK(src1 != dst);
+ DCHECK(src1 != src2);
emit(cond | B24 | B23 | B22 | dst.code() * B16 | src1.code() * B12 |
0xf9 * B4 | src2.code());
}
@@ -2188,6 +2176,8 @@ void Assembler::strexb(Register src1, Register src2, Register dst,
void Assembler::ldrexh(Register dst, Register src, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.78.
// cond(31-28) | 00011111(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
+ DCHECK(dst != pc);
+ DCHECK(src != pc);
emit(cond | B24 | B23 | B22 | B21 | B20 | src.code() * B16 |
dst.code() * B12 | 0xf9f);
}
@@ -2197,6 +2187,11 @@ void Assembler::strexh(Register src1, Register src2, Register dst,
// Instruction details available in ARM DDI 0406C.b, A8.8.215.
// cond(31-28) | 00011110(27-20) | Rn(19-16) | Rd(15-12) | 11111001(11-4) |
// Rt(3-0)
+ DCHECK(dst != pc);
+ DCHECK(src1 != pc);
+ DCHECK(src2 != pc);
+ DCHECK(src1 != dst);
+ DCHECK(src1 != src2);
emit(cond | B24 | B23 | B22 | B21 | dst.code() * B16 | src1.code() * B12 |
0xf9 * B4 | src2.code());
}
@@ -2206,7 +2201,7 @@ void Assembler::pld(const MemOperand& address) {
// Instruction details available in ARM DDI 0406C.b, A8.8.128.
// 1111(31-28) | 0111(27-24) | U(23) | R(22) | 01(21-20) | Rn(19-16) |
// 1111(15-12) | imm5(11-07) | type(6-5) | 0(4)| Rm(3-0) |
- DCHECK(address.rm().is(no_reg));
+ DCHECK(address.rm() == no_reg);
DCHECK(address.am() == Offset);
int U = B23;
int offset = address.offset();
@@ -2226,7 +2221,7 @@ void Assembler::ldm(BlockAddrMode am,
RegList dst,
Condition cond) {
// ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
- DCHECK(base.is(sp) || (dst & sp.bit()) == 0);
+ DCHECK(base == sp || (dst & sp.bit()) == 0);
AddrMode4(cond | B27 | am | L, base, dst);
@@ -2440,7 +2435,7 @@ void Assembler::vldr(const DwVfpRegister dst,
Register scratch = temps.Acquire();
// Larger offsets must be handled by computing the correct address in a
// scratch register.
- DCHECK(!base.is(scratch));
+ DCHECK(base != scratch);
if (u == 1) {
add(scratch, base, Operand(offset));
} else {
@@ -2494,7 +2489,7 @@ void Assembler::vldr(const SwVfpRegister dst,
// scratch register.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!base.is(scratch));
+ DCHECK(base != scratch);
if (u == 1) {
add(scratch, base, Operand(offset));
} else {
@@ -2549,7 +2544,7 @@ void Assembler::vstr(const DwVfpRegister src,
// scratch register.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!base.is(scratch));
+ DCHECK(base != scratch);
if (u == 1) {
add(scratch, base, Operand(offset));
} else {
@@ -2603,7 +2598,7 @@ void Assembler::vstr(const SwVfpRegister src,
// scratch register.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!base.is(scratch));
+ DCHECK(base != scratch);
if (u == 1) {
add(scratch, base, Operand(offset));
} else {
@@ -2638,7 +2633,7 @@ void Assembler::vldm(BlockAddrMode am, Register base, DwVfpRegister first,
DCHECK_LE(first.code(), last.code());
DCHECK(VfpRegisterIsAvailable(last));
DCHECK(am == ia || am == ia_w || am == db_w);
- DCHECK(!base.is(pc));
+ DCHECK(base != pc);
int sd, d;
first.split_code(&sd, &d);
@@ -2656,7 +2651,7 @@ void Assembler::vstm(BlockAddrMode am, Register base, DwVfpRegister first,
DCHECK_LE(first.code(), last.code());
DCHECK(VfpRegisterIsAvailable(last));
DCHECK(am == ia || am == ia_w || am == db_w);
- DCHECK(!base.is(pc));
+ DCHECK(base != pc);
int sd, d;
first.split_code(&sd, &d);
@@ -2673,7 +2668,7 @@ void Assembler::vldm(BlockAddrMode am, Register base, SwVfpRegister first,
// first(15-12) | 1010(11-8) | (count/2)
DCHECK_LE(first.code(), last.code());
DCHECK(am == ia || am == ia_w || am == db_w);
- DCHECK(!base.is(pc));
+ DCHECK(base != pc);
int sd, d;
first.split_code(&sd, &d);
@@ -2689,7 +2684,7 @@ void Assembler::vstm(BlockAddrMode am, Register base, SwVfpRegister first,
// first(15-12) | 1011(11-8) | (count/2)
DCHECK_LE(first.code(), last.code());
DCHECK(am == ia || am == ia_w || am == db_w);
- DCHECK(!base.is(pc));
+ DCHECK(base != pc);
int sd, d;
first.split_code(&sd, &d);
@@ -2821,7 +2816,7 @@ void Assembler::vmov(const DwVfpRegister dst, Double imm,
// instruction.
mov(scratch, Operand(lo));
vmov(dst, scratch, scratch);
- } else if (extra_scratch.is(no_reg)) {
+ } else if (extra_scratch == no_reg) {
// We only have one spare scratch register.
mov(scratch, Operand(lo));
vmov(dst, VmovIndexLo, scratch);
@@ -2915,7 +2910,7 @@ void Assembler::vmov(const DwVfpRegister dst,
// cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
DCHECK(VfpRegisterIsAvailable(dst));
- DCHECK(!src1.is(pc) && !src2.is(pc));
+ DCHECK(src1 != pc && src2 != pc);
int vm, m;
dst.split_code(&vm, &m);
emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
@@ -2932,7 +2927,7 @@ void Assembler::vmov(const Register dst1,
// cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
DCHECK(VfpRegisterIsAvailable(src));
- DCHECK(!dst1.is(pc) && !dst2.is(pc));
+ DCHECK(dst1 != pc && dst2 != pc);
int vm, m;
src.split_code(&vm, &m);
emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
@@ -2947,7 +2942,7 @@ void Assembler::vmov(const SwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
- DCHECK(!src.is(pc));
+ DCHECK(src != pc);
int sn, n;
dst.split_code(&sn, &n);
emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
@@ -2961,7 +2956,7 @@ void Assembler::vmov(const Register dst,
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
- DCHECK(!dst.is(pc));
+ DCHECK(dst != pc);
int sn, n;
src.split_code(&sn, &n);
emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 54a6faa0c9..536731978b 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -65,12 +65,17 @@ namespace internal {
V(s16) V(s17) V(s18) V(s19) V(s20) V(s21) V(s22) V(s23) \
V(s24) V(s25) V(s26) V(s27) V(s28) V(s29) V(s30) V(s31)
-#define DOUBLE_REGISTERS(V) \
+#define LOW_DOUBLE_REGISTERS(V) \
V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
- V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15) \
+ V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15)
+
+#define NON_LOW_DOUBLE_REGISTERS(V) \
V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
+#define DOUBLE_REGISTERS(V) \
+ LOW_DOUBLE_REGISTERS(V) NON_LOW_DOUBLE_REGISTERS(V)
+
#define SIMD128_REGISTERS(V) \
V(q0) V(q1) V(q2) V(q3) V(q4) V(q5) V(q6) V(q7) \
V(q8) V(q9) V(q10) V(q11) V(q12) V(q13) V(q14) V(q15)
@@ -83,7 +88,11 @@ namespace internal {
#define ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(V) \
V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
- V(d8) V(d9) V(d10) V(d11) V(d12) V(d15) \
+ V(d8) V(d9) V(d10) V(d11) V(d12) V(d15)
+
+#define C_REGISTERS(V) \
+ V(cr0) V(cr1) V(cr2) V(cr3) V(cr4) V(cr5) V(cr6) V(cr7) \
+ V(cr8) V(cr9) V(cr10) V(cr11) V(cr12) V(cr15)
// clang-format on
// The ARM ABI does not specify the usage of register r9, which may be reserved
@@ -140,248 +149,142 @@ const int kNumSafepointRegisters = 16;
const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
-// CPU Registers.
-//
-// 1) We would prefer to use an enum, but enum values are assignment-
-// compatible with int, which has caused code-generation bugs.
-//
-// 2) We would prefer to use a class instead of a struct but we don't like
-// the register initialization to depend on the particular initialization
-// order (which appears to be different on OS X, Linux, and Windows for the
-// installed versions of C++ we tried). Using a struct permits C-style
-// "initialization". Also, the Register objects cannot be const as this
-// forces initialization stubs in MSVC, making us dependent on initialization
-// order.
-//
-// 3) By not using an enum, we are possibly preventing the compiler from
-// doing certain constant folds, which may significantly reduce the
-// code generated for some assembly instructions (because they boil down
-// to a few constants). If this is a problem, we could change the code
-// such that we use an enum in optimized mode, and the struct in debug
-// mode. This way we get the compile-time error checking in debug mode
-// and best performance in optimized code.
-
-struct Register {
- enum Code {
-#define REGISTER_CODE(R) kCode_##R,
- GENERAL_REGISTERS(REGISTER_CODE)
+enum RegisterCode {
+#define REGISTER_CODE(R) kRegCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
- kAfterLast,
- kCode_no_reg = -1
- };
-
- static constexpr int kNumRegisters = Code::kAfterLast;
-
- static Register from_code(int code) {
- DCHECK(code >= 0);
- DCHECK(code < kNumRegisters);
- Register r = {code};
- return r;
- }
- bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
- bool is(Register reg) const { return reg_code == reg.reg_code; }
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
- int bit() const {
- DCHECK(is_valid());
- return 1 << reg_code;
- }
- void set_code(int code) {
- reg_code = code;
- DCHECK(is_valid());
- }
+ kRegAfterLast
+};
- // Unfortunately we can't make this private in a struct.
- int reg_code;
+class Register : public RegisterBase<Register, kRegAfterLast> {
+ friend class RegisterBase;
+ explicit constexpr Register(int code) : RegisterBase(code) {}
};
+static_assert(IS_TRIVIALLY_COPYABLE(Register) &&
+ sizeof(Register) == sizeof(int),
+ "Register can efficiently be passed by value");
+
// r7: context register
// r9: lithium scratch
-#define DECLARE_REGISTER(R) constexpr Register R = {Register::kCode_##R};
+#define DECLARE_REGISTER(R) \
+ constexpr Register R = Register::from_code<kRegCode_##R>();
GENERAL_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
-constexpr Register no_reg = {Register::kCode_no_reg};
+constexpr Register no_reg = Register::no_reg();
constexpr bool kSimpleFPAliasing = false;
constexpr bool kSimdMaskRegisters = false;
-// Single word VFP register.
-struct SwVfpRegister {
- enum Code {
-#define REGISTER_CODE(R) kCode_##R,
- FLOAT_REGISTERS(REGISTER_CODE)
+enum SwVfpRegisterCode {
+#define REGISTER_CODE(R) kSwVfpCode_##R,
+ FLOAT_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
- kAfterLast,
- kCode_no_reg = -1
- };
-
- static constexpr int kMaxNumRegisters = Code::kAfterLast;
+ kSwVfpAfterLast
+};
+// Single word VFP register.
+class SwVfpRegister : public RegisterBase<SwVfpRegister, kSwVfpAfterLast> {
+ public:
static constexpr int kSizeInBytes = 4;
- bool is_valid() const { return 0 <= reg_code && reg_code < 32; }
- bool is(SwVfpRegister reg) const { return reg_code == reg.reg_code; }
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
- int bit() const {
- DCHECK(is_valid());
- return 1 << reg_code;
- }
- static SwVfpRegister from_code(int code) {
- SwVfpRegister r = {code};
- return r;
- }
static void split_code(int reg_code, int* vm, int* m) {
DCHECK(from_code(reg_code).is_valid());
*m = reg_code & 0x1;
*vm = reg_code >> 1;
}
- void split_code(int* vm, int* m) const {
- split_code(reg_code, vm, m);
- }
+ void split_code(int* vm, int* m) const { split_code(code(), vm, m); }
- int reg_code;
+ private:
+ friend class RegisterBase;
+ explicit constexpr SwVfpRegister(int code) : RegisterBase(code) {}
};
+static_assert(IS_TRIVIALLY_COPYABLE(SwVfpRegister) &&
+ sizeof(SwVfpRegister) == sizeof(int),
+ "SwVfpRegister can efficiently be passed by value");
+
typedef SwVfpRegister FloatRegister;
-// Double word VFP register.
-struct DwVfpRegister {
- enum Code {
-#define REGISTER_CODE(R) kCode_##R,
- DOUBLE_REGISTERS(REGISTER_CODE)
+enum DoubleRegisterCode {
+#define REGISTER_CODE(R) kDoubleCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
- kAfterLast,
- kCode_no_reg = -1
- };
-
- static constexpr int kMaxNumRegisters = Code::kAfterLast;
-
- inline static int NumRegisters();
+ kDoubleAfterLast
+};
+// Double word VFP register.
+class DwVfpRegister : public RegisterBase<DwVfpRegister, kDoubleAfterLast> {
+ public:
// A few double registers are reserved: one as a scratch register and one to
  // hold 0.0, which does not fit in the immediate field of vmov instructions.
// d14: 0.0
// d15: scratch register.
static constexpr int kSizeInBytes = 8;
- bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
- bool is(DwVfpRegister reg) const { return reg_code == reg.reg_code; }
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
- int bit() const {
- DCHECK(is_valid());
- return 1 << reg_code;
- }
+ inline static int NumRegisters();
- static DwVfpRegister from_code(int code) {
- DwVfpRegister r = {code};
- return r;
- }
static void split_code(int reg_code, int* vm, int* m) {
DCHECK(from_code(reg_code).is_valid());
*m = (reg_code & 0x10) >> 4;
*vm = reg_code & 0x0F;
}
- void split_code(int* vm, int* m) const {
- split_code(reg_code, vm, m);
- }
+ void split_code(int* vm, int* m) const { split_code(code(), vm, m); }
- int reg_code;
+ private:
+ friend class RegisterBase;
+ friend class LowDwVfpRegister;
+ explicit constexpr DwVfpRegister(int code) : RegisterBase(code) {}
};
+static_assert(IS_TRIVIALLY_COPYABLE(DwVfpRegister) &&
+ sizeof(DwVfpRegister) == sizeof(int),
+ "DwVfpRegister can efficiently be passed by value");
typedef DwVfpRegister DoubleRegister;
// Double word VFP register d0-15.
-struct LowDwVfpRegister {
+class LowDwVfpRegister
+ : public RegisterBase<LowDwVfpRegister, kDoubleCode_d16> {
public:
- static constexpr int kMaxNumLowRegisters = 16;
- constexpr operator DwVfpRegister() const {
- return DwVfpRegister { reg_code };
- }
- static LowDwVfpRegister from_code(int code) {
- LowDwVfpRegister r = { code };
- return r;
- }
-
- bool is_valid() const {
- return 0 <= reg_code && reg_code < kMaxNumLowRegisters;
- }
- bool is(DwVfpRegister reg) const { return reg_code == reg.reg_code; }
- bool is(LowDwVfpRegister reg) const { return reg_code == reg.reg_code; }
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
- SwVfpRegister low() const {
- SwVfpRegister reg;
- reg.reg_code = reg_code * 2;
+ constexpr operator DwVfpRegister() const { return DwVfpRegister(reg_code_); }
- DCHECK(reg.is_valid());
- return reg;
- }
+ SwVfpRegister low() const { return SwVfpRegister::from_code(code() * 2); }
SwVfpRegister high() const {
- SwVfpRegister reg;
- reg.reg_code = (reg_code * 2) + 1;
-
- DCHECK(reg.is_valid());
- return reg;
+ return SwVfpRegister::from_code(code() * 2 + 1);
}
- int reg_code;
+ private:
+ friend class RegisterBase;
+ explicit constexpr LowDwVfpRegister(int code) : RegisterBase(code) {}
};
+enum Simd128RegisterCode {
+#define REGISTER_CODE(R) kSimd128Code_##R,
+ SIMD128_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kSimd128AfterLast
+};
// Quad word NEON register.
-struct QwNeonRegister {
- static constexpr int kMaxNumRegisters = 16;
-
- static QwNeonRegister from_code(int code) {
- QwNeonRegister r = { code };
- return r;
- }
-
- bool is_valid() const {
- return (0 <= reg_code) && (reg_code < kMaxNumRegisters);
- }
- bool is(QwNeonRegister reg) const { return reg_code == reg.reg_code; }
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
+class QwNeonRegister : public RegisterBase<QwNeonRegister, kSimd128AfterLast> {
+ public:
static void split_code(int reg_code, int* vm, int* m) {
DCHECK(from_code(reg_code).is_valid());
int encoded_code = reg_code << 1;
*m = (encoded_code & 0x10) >> 4;
*vm = encoded_code & 0x0F;
}
- void split_code(int* vm, int* m) const {
- split_code(reg_code, vm, m);
- }
- DwVfpRegister low() const {
- DwVfpRegister reg;
- reg.reg_code = reg_code * 2;
-
- DCHECK(reg.is_valid());
- return reg;
- }
+ void split_code(int* vm, int* m) const { split_code(code(), vm, m); }
+ DwVfpRegister low() const { return DwVfpRegister::from_code(code() * 2); }
DwVfpRegister high() const {
- DwVfpRegister reg;
- reg.reg_code = reg_code * 2 + 1;
-
- DCHECK(reg.is_valid());
- return reg;
+ return DwVfpRegister::from_code(code() * 2 + 1);
}
- int reg_code;
+ private:
+ friend class RegisterBase;
+ explicit constexpr QwNeonRegister(int code) : RegisterBase(code) {}
};
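
All five register kinds above now derive from a common RegisterBase<SubType, kAfterLastCode> template defined elsewhere in the tree. Its exact definition is not part of this diff; what follows is a rough sketch of the interface the code here relies on (from_code, no_reg, code, is_valid, bit, and the comparison operators that replace .is()), under the assumption that it is implemented along these lines:

    template <typename SubType, int kAfterLastRegister>
    class RegisterBase {
     public:
      static constexpr int kNumRegisters = kAfterLastRegister;
      static constexpr SubType no_reg() { return SubType{kCode_no_reg}; }

      template <int code>
      static constexpr SubType from_code() {
        static_assert(code >= 0 && code < kNumRegisters, "must be a valid code");
        return SubType{code};
      }
      static SubType from_code(int code) {
        DCHECK_LE(0, code);
        DCHECK_GT(kNumRegisters, code);
        return SubType{code};
      }

      bool is_valid() const { return reg_code_ != kCode_no_reg; }
      int code() const {
        DCHECK(is_valid());
        return reg_code_;
      }
      int bit() const { return 1 << code(); }

      bool operator==(SubType other) const { return reg_code_ == other.reg_code_; }
      bool operator!=(SubType other) const { return !(*this == other); }

     protected:
      static constexpr int kCode_no_reg = -1;
      explicit constexpr RegisterBase(int code) : reg_code_(code) {}
      int reg_code_;  // each subclass befriends RegisterBase so the factory can construct it
    };
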
@@ -389,92 +292,42 @@ typedef QwNeonRegister QuadRegister;
typedef QwNeonRegister Simd128Register;
+enum CRegisterCode {
+#define REGISTER_CODE(R) kCCode_##R,
+ C_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kCAfterLast
+};
+
+// Coprocessor register
+class CRegister : public RegisterBase<CRegister, kCAfterLast> {
+ friend class RegisterBase;
+ explicit constexpr CRegister(int code) : RegisterBase(code) {}
+};
+
// Support for the VFP registers s0 to s31 (d0 to d15).
// Note that "s(N):s(N+1)" is the same as "d(N/2)".
-constexpr SwVfpRegister s0 = { 0 };
-constexpr SwVfpRegister s1 = { 1 };
-constexpr SwVfpRegister s2 = { 2 };
-constexpr SwVfpRegister s3 = { 3 };
-constexpr SwVfpRegister s4 = { 4 };
-constexpr SwVfpRegister s5 = { 5 };
-constexpr SwVfpRegister s6 = { 6 };
-constexpr SwVfpRegister s7 = { 7 };
-constexpr SwVfpRegister s8 = { 8 };
-constexpr SwVfpRegister s9 = { 9 };
-constexpr SwVfpRegister s10 = { 10 };
-constexpr SwVfpRegister s11 = { 11 };
-constexpr SwVfpRegister s12 = { 12 };
-constexpr SwVfpRegister s13 = { 13 };
-constexpr SwVfpRegister s14 = { 14 };
-constexpr SwVfpRegister s15 = { 15 };
-constexpr SwVfpRegister s16 = { 16 };
-constexpr SwVfpRegister s17 = { 17 };
-constexpr SwVfpRegister s18 = { 18 };
-constexpr SwVfpRegister s19 = { 19 };
-constexpr SwVfpRegister s20 = { 20 };
-constexpr SwVfpRegister s21 = { 21 };
-constexpr SwVfpRegister s22 = { 22 };
-constexpr SwVfpRegister s23 = { 23 };
-constexpr SwVfpRegister s24 = { 24 };
-constexpr SwVfpRegister s25 = { 25 };
-constexpr SwVfpRegister s26 = { 26 };
-constexpr SwVfpRegister s27 = { 27 };
-constexpr SwVfpRegister s28 = { 28 };
-constexpr SwVfpRegister s29 = { 29 };
-constexpr SwVfpRegister s30 = { 30 };
-constexpr SwVfpRegister s31 = { 31 };
-
-constexpr DwVfpRegister no_dreg = { -1 };
-constexpr LowDwVfpRegister d0 = { 0 };
-constexpr LowDwVfpRegister d1 = { 1 };
-constexpr LowDwVfpRegister d2 = { 2 };
-constexpr LowDwVfpRegister d3 = { 3 };
-constexpr LowDwVfpRegister d4 = { 4 };
-constexpr LowDwVfpRegister d5 = { 5 };
-constexpr LowDwVfpRegister d6 = { 6 };
-constexpr LowDwVfpRegister d7 = { 7 };
-constexpr LowDwVfpRegister d8 = { 8 };
-constexpr LowDwVfpRegister d9 = { 9 };
-constexpr LowDwVfpRegister d10 = { 10 };
-constexpr LowDwVfpRegister d11 = { 11 };
-constexpr LowDwVfpRegister d12 = { 12 };
-constexpr LowDwVfpRegister d13 = { 13 };
-constexpr LowDwVfpRegister d14 = { 14 };
-constexpr LowDwVfpRegister d15 = { 15 };
-constexpr DwVfpRegister d16 = { 16 };
-constexpr DwVfpRegister d17 = { 17 };
-constexpr DwVfpRegister d18 = { 18 };
-constexpr DwVfpRegister d19 = { 19 };
-constexpr DwVfpRegister d20 = { 20 };
-constexpr DwVfpRegister d21 = { 21 };
-constexpr DwVfpRegister d22 = { 22 };
-constexpr DwVfpRegister d23 = { 23 };
-constexpr DwVfpRegister d24 = { 24 };
-constexpr DwVfpRegister d25 = { 25 };
-constexpr DwVfpRegister d26 = { 26 };
-constexpr DwVfpRegister d27 = { 27 };
-constexpr DwVfpRegister d28 = { 28 };
-constexpr DwVfpRegister d29 = { 29 };
-constexpr DwVfpRegister d30 = { 30 };
-constexpr DwVfpRegister d31 = { 31 };
-
-constexpr QwNeonRegister q0 = { 0 };
-constexpr QwNeonRegister q1 = { 1 };
-constexpr QwNeonRegister q2 = { 2 };
-constexpr QwNeonRegister q3 = { 3 };
-constexpr QwNeonRegister q4 = { 4 };
-constexpr QwNeonRegister q5 = { 5 };
-constexpr QwNeonRegister q6 = { 6 };
-constexpr QwNeonRegister q7 = { 7 };
-constexpr QwNeonRegister q8 = { 8 };
-constexpr QwNeonRegister q9 = { 9 };
-constexpr QwNeonRegister q10 = { 10 };
-constexpr QwNeonRegister q11 = { 11 };
-constexpr QwNeonRegister q12 = { 12 };
-constexpr QwNeonRegister q13 = { 13 };
-constexpr QwNeonRegister q14 = { 14 };
-constexpr QwNeonRegister q15 = { 15 };
+#define DECLARE_FLOAT_REGISTER(R) \
+ constexpr SwVfpRegister R = SwVfpRegister::from_code<kSwVfpCode_##R>();
+FLOAT_REGISTERS(DECLARE_FLOAT_REGISTER)
+#undef DECLARE_FLOAT_REGISTER
+
+#define DECLARE_LOW_DOUBLE_REGISTER(R) \
+ constexpr LowDwVfpRegister R = LowDwVfpRegister::from_code<kDoubleCode_##R>();
+LOW_DOUBLE_REGISTERS(DECLARE_LOW_DOUBLE_REGISTER)
+#undef DECLARE_LOW_DOUBLE_REGISTER
+#define DECLARE_DOUBLE_REGISTER(R) \
+ constexpr DwVfpRegister R = DwVfpRegister::from_code<kDoubleCode_##R>();
+NON_LOW_DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
+#undef DECLARE_DOUBLE_REGISTER
+
+constexpr DwVfpRegister no_dreg = DwVfpRegister::no_reg();
+
+#define DECLARE_SIMD128_REGISTER(R) \
+ constexpr Simd128Register R = Simd128Register::from_code<kSimd128Code_##R>();
+SIMD128_REGISTERS(DECLARE_SIMD128_REGISTER)
+#undef DECLARE_SIMD128_REGISTER
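
Each DECLARE_* macro above expands to one constexpr definition per register name; applied to d0 and q0, for instance, the expansions read:

    constexpr LowDwVfpRegister d0 = LowDwVfpRegister::from_code<kDoubleCode_d0>();
    constexpr Simd128Register q0 = Simd128Register::from_code<kSimd128Code_q0>();
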
// Aliases for double registers.
constexpr LowDwVfpRegister kFirstCalleeSavedDoubleReg = d8;
@@ -487,43 +340,12 @@ constexpr LowDwVfpRegister kScratchDoubleReg = d14;
constexpr QwNeonRegister kScratchQuadReg = q7;
constexpr LowDwVfpRegister kScratchDoubleReg2 = d15;
-// Coprocessor register
-struct CRegister {
- bool is_valid() const { return 0 <= reg_code && reg_code < 16; }
- bool is(CRegister creg) const { return reg_code == creg.reg_code; }
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
- int bit() const {
- DCHECK(is_valid());
- return 1 << reg_code;
- }
-
- // Unfortunately we can't make this private in a struct.
- int reg_code;
-};
-
-
-constexpr CRegister no_creg = { -1 };
-
-constexpr CRegister cr0 = { 0 };
-constexpr CRegister cr1 = { 1 };
-constexpr CRegister cr2 = { 2 };
-constexpr CRegister cr3 = { 3 };
-constexpr CRegister cr4 = { 4 };
-constexpr CRegister cr5 = { 5 };
-constexpr CRegister cr6 = { 6 };
-constexpr CRegister cr7 = { 7 };
-constexpr CRegister cr8 = { 8 };
-constexpr CRegister cr9 = { 9 };
-constexpr CRegister cr10 = { 10 };
-constexpr CRegister cr11 = { 11 };
-constexpr CRegister cr12 = { 12 };
-constexpr CRegister cr13 = { 13 };
-constexpr CRegister cr14 = { 14 };
-constexpr CRegister cr15 = { 15 };
+constexpr CRegister no_creg = CRegister::no_reg();
+#define DECLARE_C_REGISTER(R) \
+ constexpr CRegister R = CRegister::from_code<kCCode_##R>();
+C_REGISTERS(DECLARE_C_REGISTER)
+#undef DECLARE_C_REGISTER
// Coprocessor number
enum Coprocessor {
@@ -584,10 +406,8 @@ class Operand BASE_EMBEDDED {
// Return true if this is a register operand.
bool IsRegister() const {
- return rm_.is_valid() &&
- rs_.is(no_reg) &&
- shift_op_ == LSL &&
- shift_imm_ == 0;
+ return rm_.is_valid() && rs_ == no_reg && shift_op_ == LSL &&
+ shift_imm_ == 0;
}
// Return true if this is a register operand shifted with an immediate.
bool IsImmediateShiftedRegister() const {
@@ -639,8 +459,8 @@ class Operand BASE_EMBEDDED {
private:
- Register rm_;
- Register rs_;
+ Register rm_ = no_reg;
+ Register rs_ = no_reg;
ShiftOp shift_op_;
int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
union Value {
@@ -684,13 +504,13 @@ class MemOperand BASE_EMBEDDED {
}
void set_offset(int32_t offset) {
- DCHECK(rm_.is(no_reg));
- offset_ = offset;
+ DCHECK(rm_ == no_reg);
+ offset_ = offset;
}
uint32_t offset() const {
- DCHECK(rm_.is(no_reg));
- return offset_;
+ DCHECK(rm_ == no_reg);
+ return offset_;
}
Register rn() const { return rn_; }
@@ -1791,13 +1611,13 @@ class Assembler : public AssemblerBase {
bool VfpRegisterIsAvailable(DwVfpRegister reg) {
DCHECK(reg.is_valid());
return IsEnabled(VFP32DREGS) ||
- (reg.reg_code < LowDwVfpRegister::kMaxNumLowRegisters);
+ (reg.code() < LowDwVfpRegister::kNumRegisters);
}
bool VfpRegisterIsAvailable(QwNeonRegister reg) {
DCHECK(reg.is_valid());
return IsEnabled(VFP32DREGS) ||
- (reg.reg_code < LowDwVfpRegister::kMaxNumLowRegisters / 2);
+ (reg.code() < LowDwVfpRegister::kNumRegisters / 2);
}
inline void emit(Instr x);
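
The availability checks above make d0-d15 (and q0-q7) usable unconditionally, while the upper half of the register file requires the VFP32DREGS feature. An illustrative sketch, as called from within assembler code:

    // LowDwVfpRegister::kNumRegisters == 16 (d0..d15)
    VfpRegisterIsAvailable(d8);   // always true: low D registers need no feature
    VfpRegisterIsAvailable(d20);  // true only when IsEnabled(VFP32DREGS)
    VfpRegisterIsAvailable(q4);   // q4 aliases d8:d9    -> always true
    VfpRegisterIsAvailable(q10);  // q10 aliases d20:d21 -> needs VFP32DREGS
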
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index c64858038f..c2aa0d4bed 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -49,7 +49,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
int double_offset = offset();
// Account for saved regs if input is sp.
- if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
+ if (input_reg == sp) double_offset += 3 * kPointerSize;
Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
Register scratch_low =
@@ -174,7 +174,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
void MathPowStub::Generate(MacroAssembler* masm) {
const Register exponent = MathPowTaggedDescriptor::exponent();
- DCHECK(exponent.is(r2));
+ DCHECK(exponent == r2);
const LowDwVfpRegister double_base = d0;
const LowDwVfpRegister double_exponent = d1;
const LowDwVfpRegister double_result = d2;
@@ -326,9 +326,9 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// r1: pointer to the first argument (C callee-saved)
// r5: pointer to builtin function (C callee-saved)
+#if V8_HOST_ARCH_ARM
int frame_alignment = MacroAssembler::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
-#if V8_HOST_ARCH_ARM
if (FLAG_debug_code) {
if (frame_alignment > kPointerSize) {
Label alignment_as_expected;
@@ -343,25 +343,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
#endif
// Call C built-in.
- int result_stack_size;
- if (result_size() <= 2) {
- // r0 = argc, r1 = argv, r2 = isolate
- __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
- result_stack_size = 0;
- } else {
- DCHECK_EQ(3, result_size());
- // Allocate additional space for the result.
- result_stack_size =
- ((result_size() * kPointerSize) + frame_alignment_mask) &
- ~frame_alignment_mask;
- __ sub(sp, sp, Operand(result_stack_size));
-
- // r0 = hidden result argument, r1 = argc, r2 = argv, r3 = isolate.
- __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
- __ mov(r2, Operand(r1));
- __ mov(r1, Operand(r0));
- __ mov(r0, Operand(sp));
- }
+ // r0 = argc, r1 = argv, r2 = isolate
+ __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
// To let the GC traverse the return address of the exit frames, we need to
// know where the return address is. The CEntryStub is unmovable, so
@@ -374,17 +357,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Prevent literal pool emission before return address.
Assembler::BlockConstPoolScope block_const_pool(masm);
__ add(lr, pc, Operand(4));
- __ str(lr, MemOperand(sp, result_stack_size));
+ __ str(lr, MemOperand(sp));
__ Call(r5);
}
- if (result_size() > 2) {
- DCHECK_EQ(3, result_size());
- // Read result values stored on stack.
- __ ldr(r2, MemOperand(sp, 2 * kPointerSize));
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
- __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
- }
- // Result returned in r0, r1:r0 or r2:r1:r0 - do not destroy these registers!
+
+ // Result returned in r0 or r1:r0 - do not destroy these registers!
// Check result for exception sentinel.
Label exception_returned;
@@ -410,14 +387,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// r0:r1: result
// sp: stack pointer
// fp: frame pointer
- Register argc;
- if (argv_in_register()) {
- // We don't want to pop arguments so set argc to no_reg.
- argc = no_reg;
- } else {
- // Callee-saved register r4 still holds argc.
- argc = r4;
- }
+ Register argc = argv_in_register()
+ // We don't want to pop arguments so set argc to no_reg.
+ ? no_reg
+ // Callee-saved register r4 still holds argc.
+ : r4;
__ LeaveExitFrame(save_doubles(), argc, true);
__ mov(pc, lr);
@@ -778,7 +752,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ add(tmp, properties, Operand(index, LSL, 1));
__ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
- DCHECK(!tmp.is(entity_name));
+ DCHECK(tmp != entity_name);
__ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
__ cmp(entity_name, tmp);
__ b(eq, done);
@@ -922,6 +896,49 @@ void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
stub2.GetCode();
}
+RecordWriteStub::Mode RecordWriteStub::GetMode(Code* stub) {
+ Instr first_instruction = Assembler::instr_at(stub->instruction_start());
+ Instr second_instruction =
+ Assembler::instr_at(stub->instruction_start() + Assembler::kInstrSize);
+
+ if (Assembler::IsBranch(first_instruction)) {
+ return INCREMENTAL;
+ }
+
+ DCHECK(Assembler::IsTstImmediate(first_instruction));
+
+ if (Assembler::IsBranch(second_instruction)) {
+ return INCREMENTAL_COMPACTION;
+ }
+
+ DCHECK(Assembler::IsTstImmediate(second_instruction));
+
+ return STORE_BUFFER_ONLY;
+}
+
+void RecordWriteStub::Patch(Code* stub, Mode mode) {
+ MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
+ stub->instruction_size(), CodeObjectRequired::kNo);
+ switch (mode) {
+ case STORE_BUFFER_ONLY:
+ DCHECK(GetMode(stub) == INCREMENTAL ||
+ GetMode(stub) == INCREMENTAL_COMPACTION);
+ PatchBranchIntoNop(&masm, 0);
+ PatchBranchIntoNop(&masm, Assembler::kInstrSize);
+ break;
+ case INCREMENTAL:
+ DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
+ PatchNopIntoBranch(&masm, 0);
+ break;
+ case INCREMENTAL_COMPACTION:
+ DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
+ PatchNopIntoBranch(&masm, Assembler::kInstrSize);
+ break;
+ }
+ DCHECK(GetMode(stub) == mode);
+ Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(),
+ 2 * Assembler::kInstrSize);
+}
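
GetMode and Patch, moved out of the header into this file, treat the stub's first two instructions as a three-state machine. The mapping, read off the code above, together with an illustrative round-trip (the DCHECKs in Patch enforce legal transitions):

    // instr[0] branch                -> INCREMENTAL
    // instr[0] tst, instr[1] branch  -> INCREMENTAL_COMPACTION
    // instr[0] tst, instr[1] tst     -> STORE_BUFFER_ONLY
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
    DCHECK_EQ(RecordWriteStub::INCREMENTAL, RecordWriteStub::GetMode(stub));
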
// Takes the input in 3 registers: address_ value_ and object_. A pointer to
// the value has just been written into the object, now this stub makes sure
@@ -945,8 +962,7 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
}
if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
}
__ Ret();
@@ -985,8 +1001,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
InformIncrementalMarker(masm);
regs_.Restore(masm);
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
__ bind(&dont_need_remembered_set);
}
@@ -1003,10 +1018,9 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
int argument_count = 3;
__ PrepareCallCFunction(argument_count);
- Register address =
- r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
- DCHECK(!address.is(regs_.object()));
- DCHECK(!address.is(r0));
+ Register address = r0 == regs_.address() ? regs_.scratch0() : regs_.address();
+ DCHECK(address != regs_.object());
+ DCHECK(address != r0);
__ Move(address, regs_.address());
__ Move(r0, regs_.object());
__ Move(r1, address);
@@ -1038,8 +1052,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
} else {
__ Ret();
}
@@ -1080,8 +1093,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
} else {
__ Ret();
}
@@ -1484,7 +1496,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
const int kLevelOffset = AddressOffset(
ExternalReference::handle_scope_level_address(isolate), next_address);
- DCHECK(function_address.is(r1) || function_address.is(r2));
+ DCHECK(function_address == r1 || function_address == r2);
Label profiler_disabled;
Label end_profiler_check;
@@ -1696,7 +1708,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
- DCHECK(!api_function_address.is(r0) && !scratch0.is(r0));
+ DCHECK(api_function_address != r0 && scratch0 != r0);
// r0 = FunctionCallbackInfo&
// Arguments is after the return address.
__ add(r0, sp, Operand(1 * kPointerSize));
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index 404dc2fb2b..3407ff9573 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -72,49 +72,9 @@ class RecordWriteStub: public PlatformCodeStub {
DCHECK(Assembler::IsBranch(masm->instr_at(pos)));
}
- static Mode GetMode(Code* stub) {
- Instr first_instruction = Assembler::instr_at(stub->instruction_start());
- Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
- Assembler::kInstrSize);
+ static Mode GetMode(Code* stub);
- if (Assembler::IsBranch(first_instruction)) {
- return INCREMENTAL;
- }
-
- DCHECK(Assembler::IsTstImmediate(first_instruction));
-
- if (Assembler::IsBranch(second_instruction)) {
- return INCREMENTAL_COMPACTION;
- }
-
- DCHECK(Assembler::IsTstImmediate(second_instruction));
-
- return STORE_BUFFER_ONLY;
- }
-
- static void Patch(Code* stub, Mode mode) {
- MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
- stub->instruction_size(), CodeObjectRequired::kNo);
- switch (mode) {
- case STORE_BUFFER_ONLY:
- DCHECK(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
- PatchBranchIntoNop(&masm, 0);
- PatchBranchIntoNop(&masm, Assembler::kInstrSize);
- break;
- case INCREMENTAL:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, 0);
- break;
- case INCREMENTAL_COMPACTION:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, Assembler::kInstrSize);
- break;
- }
- DCHECK(GetMode(stub) == mode);
- Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(),
- 2 * Assembler::kInstrSize);
- }
+ static void Patch(Code* stub, Mode mode);
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
@@ -124,12 +84,11 @@ class RecordWriteStub: public PlatformCodeStub {
// the caller.
class RegisterAllocation {
public:
- RegisterAllocation(Register object,
- Register address,
- Register scratch0)
+ RegisterAllocation(Register object, Register address, Register scratch0)
: object_(object),
address_(address),
- scratch0_(scratch0) {
+ scratch0_(scratch0),
+ scratch1_(no_reg) {
DCHECK(!AreAliased(scratch0, object, address, no_reg));
scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
}
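
A note on the scratch1_(no_reg) initializer added above: Register's only constructor is now private and explicit (see assembler-arm.h earlier in this diff), so a Register member can no longer be left default-initialized the way the old plain struct could be. Sketched:

    // Without the placeholder this would no longer compile:
    //   : object_(object), address_(address), scratch0_(scratch0) { ... }
    //   // error: scratch1_ has no accessible default constructor
    // Hence scratch1_(no_reg), overwritten immediately in the body:
    //   scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
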
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 974d5a9dc0..d633d910fb 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -27,8 +27,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Everything but pc, lr and ip which will be saved but not restored.
RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
- const int kDoubleRegsSize = kDoubleSize * DwVfpRegister::kMaxNumRegisters;
- const int kFloatRegsSize = kFloatSize * SwVfpRegister::kMaxNumRegisters;
+ const int kDoubleRegsSize = kDoubleSize * DwVfpRegister::kNumRegisters;
+ const int kFloatRegsSize = kFloatSize * SwVfpRegister::kNumRegisters;
// Save all allocatable VFP registers before messing with them.
DCHECK(kDoubleRegZero.code() == 13);
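
The renamed size constants in the hunk above keep their old values, since both VFP register files still have 32 entries; spelled out (assuming the usual kDoubleSize == 8 and kFloatSize == 4):

    // kDoubleRegsSize = kDoubleSize * DwVfpRegister::kNumRegisters = 8 * 32 = 256
    // kFloatRegsSize  = kFloatSize  * SwVfpRegister::kNumRegisters = 4 * 32 = 128
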
@@ -115,7 +115,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
// Copy VFP registers to
- // double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
+ // double_registers_[DoubleRegister::kNumAllocatableRegisters]
int double_regs_offset = FrameDescription::double_registers_offset();
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
@@ -128,7 +128,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
// Copy VFP registers to
- // float_registers_[FloatRegister::kMaxNumAllocatableRegisters]
+ // float_registers_[FloatRegister::kNumAllocatableRegisters]
int float_regs_offset = FrameDescription::float_registers_offset();
for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
int code = config->GetAllocatableFloatCode(i);
@@ -210,9 +210,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ vldr(reg, r1, src_offset);
}
- // Push state, pc, and continuation from the last output frame.
- __ ldr(r6, MemOperand(r2, FrameDescription::state_offset()));
- __ push(r6);
+ // Push pc and continuation from the last output frame.
__ ldr(r6, MemOperand(r2, FrameDescription::pc_offset()));
__ push(r6);
__ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset()));
@@ -295,6 +293,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ push(scratch);
}
+bool Deoptimizer::PadTopOfStackRegister() { return false; }
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
diff --git a/deps/v8/src/arm/eh-frame-arm.cc b/deps/v8/src/arm/eh-frame-arm.cc
index 7405b1365f..f0902691bc 100644
--- a/deps/v8/src/arm/eh-frame-arm.cc
+++ b/deps/v8/src/arm/eh-frame-arm.cc
@@ -27,13 +27,13 @@ void EhFrameWriter::WriteInitialStateInCie() {
// static
int EhFrameWriter::RegisterToDwarfCode(Register name) {
switch (name.code()) {
- case Register::kCode_fp:
+ case kRegCode_fp:
return kFpDwarfCode;
- case Register::kCode_sp:
+ case kRegCode_sp:
return kSpDwarfCode;
- case Register::kCode_lr:
+ case kRegCode_lr:
return kLrDwarfCode;
- case Register::kCode_r0:
+ case kRegCode_r0:
return kR0DwarfCode;
default:
UNIMPLEMENTED();
diff --git a/deps/v8/src/arm/frame-constants-arm.cc b/deps/v8/src/arm/frame-constants-arm.cc
index cff79e753a..b83dd38a9a 100644
--- a/deps/v8/src/arm/frame-constants-arm.cc
+++ b/deps/v8/src/arm/frame-constants-arm.cc
@@ -20,6 +20,10 @@ Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); }
+int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
+ return register_count;
+}
+
} // namespace internal
} // namespace v8
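
On arm the new hook maps interpreter registers to stack slots 1:1, matching PadTopOfStackRegister() returning false in the deoptimizer hunk above. A port that pads for stack alignment would round up instead; a hypothetical sketch (not from this patch):

  int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
    // Hypothetical port with 16-byte stack alignment and 8-byte slots:
    return RoundUp(register_count, 2);
  }
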
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index fbe6be022c..fb7076d33f 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -24,9 +24,14 @@ void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
void RecordWriteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // TODO(albertnetymk): Use default for now; should call
- // RestrictAllocatableRegisters like src/x64/interface-descriptors-x64.cc
- DefaultInitializePlatformSpecific(data, kParameterCount);
+ const Register default_stub_registers[] = {r0, r1, r2, r3, r4};
+
+ data->RestrictAllocatableRegisters(default_stub_registers,
+ arraysize(default_stub_registers));
+
+ CHECK_LE(static_cast<size_t>(kParameterCount),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
}
const Register FastNewFunctionContextDescriptor::FunctionRegister() {
@@ -85,27 +90,6 @@ void TypeofDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void FastCloneRegExpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r3, r2, r1, r0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r3, r2, r1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r3, r2, r1, r0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1};
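
Replacing the default initialization here pins the RecordWrite builtin's parameters to a fixed, statically known register set. Call sites can then recover each parameter register by name; a minimal sketch of the consumer side (the same pattern appears in the CallRecordWriteStub hunk below):

  Callable callable =
      Builtins::CallableFor(isolate, Builtins::kRecordWrite);
  // With {r0, r1, r2, r3, r4} above, kObject presumably maps to r0.
  Register object_reg = callable.descriptor().GetRegisterParameter(
      RecordWriteDescriptor::kObject);
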
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 86eac5db1a..2950de0a0c 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -11,6 +11,7 @@
#include "src/base/division-by-constant.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
+#include "src/callable.h"
#include "src/codegen.h"
#include "src/counters.h"
#include "src/debug/debug.h"
@@ -30,45 +31,94 @@ MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: TurboAssembler(isolate, buffer, size, create_code_object) {}
-void TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
- Register exclusion1, Register exclusion2,
- Register exclusion3) {
+TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
+ CodeObjectRequired create_code_object)
+ : Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
+ if (create_code_object == CodeObjectRequired::kYes) {
+ code_object_ =
+ Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
+ }
+}
+
+int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1,
+ Register exclusion2,
+ Register exclusion3) const {
+ int bytes = 0;
RegList exclusions = 0;
- if (!exclusion1.is(no_reg)) {
+ if (exclusion1 != no_reg) {
exclusions |= exclusion1.bit();
- if (!exclusion2.is(no_reg)) {
+ if (exclusion2 != no_reg) {
exclusions |= exclusion2.bit();
- if (!exclusion3.is(no_reg)) {
+ if (exclusion3 != no_reg) {
exclusions |= exclusion3.bit();
}
}
}
- stm(db_w, sp, (kCallerSaved | lr.bit()) & ~exclusions);
+ RegList list = (kCallerSaved | lr.bit()) & ~exclusions;
+
+ bytes += NumRegs(list) * kPointerSize;
if (fp_mode == kSaveFPRegs) {
- SaveFPRegs(sp, lr);
+ bytes += DwVfpRegister::NumRegisters() * DwVfpRegister::kSizeInBytes;
}
+
+ return bytes;
}
-void TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
+ int bytes = 0;
+ RegList exclusions = 0;
+ if (exclusion1 != no_reg) {
+ exclusions |= exclusion1.bit();
+ if (exclusion2 != no_reg) {
+ exclusions |= exclusion2.bit();
+ if (exclusion3 != no_reg) {
+ exclusions |= exclusion3.bit();
+ }
+ }
+ }
+
+ RegList list = (kCallerSaved | lr.bit()) & ~exclusions;
+ stm(db_w, sp, list);
+
+ bytes += NumRegs(list) * kPointerSize;
+
+ if (fp_mode == kSaveFPRegs) {
+ SaveFPRegs(sp, lr);
+ bytes += DwVfpRegister::NumRegisters() * DwVfpRegister::kSizeInBytes;
+ }
+
+ return bytes;
+}
+
+int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+ Register exclusion2, Register exclusion3) {
+ int bytes = 0;
if (fp_mode == kSaveFPRegs) {
RestoreFPRegs(sp, lr);
+ bytes += DwVfpRegister::NumRegisters() * DwVfpRegister::kSizeInBytes;
}
RegList exclusions = 0;
- if (!exclusion1.is(no_reg)) {
+ if (exclusion1 != no_reg) {
exclusions |= exclusion1.bit();
- if (!exclusion2.is(no_reg)) {
+ if (exclusion2 != no_reg) {
exclusions |= exclusion2.bit();
- if (!exclusion3.is(no_reg)) {
+ if (exclusion3 != no_reg) {
exclusions |= exclusion3.bit();
}
}
}
- ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~exclusions);
+ RegList list = (kCallerSaved | lr.bit()) & ~exclusions;
+ ldm(ia_w, sp, list);
+
+ bytes += NumRegs(list) * kPointerSize;
+
+ return bytes;
}
void TurboAssembler::Jump(Register target, Condition cond) { bx(target, cond); }
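
Push/PopCallerSaved now return the number of bytes by which sp moved, and RequiredStackSizeForCallerSaved computes that size without emitting code, so callers can do frame bookkeeping around a call. A usage sketch (call site invented for illustration):

  // Inside some TurboAssembler member emitting a runtime call:
  int expected = RequiredStackSizeForCallerSaved(kSaveFPRegs);
  int pushed = PushCallerSaved(kSaveFPRegs);
  DCHECK_EQ(expected, pushed);
  // ... emit the call that may clobber caller-saved registers ...
  int popped = PopCallerSaved(kSaveFPRegs);
  DCHECK_EQ(expected, popped);
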
@@ -234,7 +284,7 @@ void MacroAssembler::Swap(Register reg1,
Register reg2,
Register scratch,
Condition cond) {
- if (scratch.is(no_reg)) {
+ if (scratch == no_reg) {
eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
@@ -268,33 +318,33 @@ void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
}
void TurboAssembler::Move(Register dst, Register src, Condition cond) {
- if (!dst.is(src)) {
+ if (dst != src) {
mov(dst, src, LeaveCC, cond);
}
}
void TurboAssembler::Move(SwVfpRegister dst, SwVfpRegister src,
Condition cond) {
- if (!dst.is(src)) {
+ if (dst != src) {
vmov(dst, src, cond);
}
}
void TurboAssembler::Move(DwVfpRegister dst, DwVfpRegister src,
Condition cond) {
- if (!dst.is(src)) {
+ if (dst != src) {
vmov(dst, src, cond);
}
}
void TurboAssembler::Move(QwNeonRegister dst, QwNeonRegister src) {
- if (!dst.is(src)) {
+ if (dst != src) {
vmov(dst, src);
}
}
void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
- if (srcdst0.is(srcdst1)) return; // Swapping aliased registers emits nothing.
+ if (srcdst0 == srcdst1) return; // Swapping aliased registers emits nothing.
DCHECK(VfpRegisterIsAvailable(srcdst0));
DCHECK(VfpRegisterIsAvailable(srcdst1));
@@ -302,8 +352,8 @@ void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
if (CpuFeatures::IsSupported(NEON)) {
vswp(srcdst0, srcdst1);
} else {
- DCHECK(!srcdst0.is(kScratchDoubleReg));
- DCHECK(!srcdst1.is(kScratchDoubleReg));
+ DCHECK(srcdst0 != kScratchDoubleReg);
+ DCHECK(srcdst1 != kScratchDoubleReg);
vmov(kScratchDoubleReg, srcdst0);
vmov(srcdst0, srcdst1);
vmov(srcdst1, kScratchDoubleReg);
@@ -311,7 +361,7 @@ void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
}
void TurboAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) {
- if (!srcdst0.is(srcdst1)) {
+ if (srcdst0 != srcdst1) {
vswp(srcdst0, srcdst1);
}
}
@@ -324,7 +374,7 @@ void MacroAssembler::Mls(Register dst, Register src1, Register src2,
} else {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!srcA.is(scratch));
+ DCHECK(srcA != scratch);
mul(scratch, src1, src2, LeaveCC, cond);
sub(dst, srcA, scratch, LeaveCC, cond);
}
@@ -448,17 +498,12 @@ void MacroAssembler::InNewSpace(Register object,
CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cond, branch);
}
-
-void MacroAssembler::RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register dst,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check,
- PointersToHereCheck pointers_to_here_check_for_value) {
+void MacroAssembler::RecordWriteField(Register object, int offset,
+ Register value, Register dst,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -481,14 +526,8 @@ void MacroAssembler::RecordWriteField(
bind(&ok);
}
- RecordWrite(object,
- dst,
- value,
- lr_status,
- save_fp,
- remembered_set_action,
- OMIT_SMI_CHECK,
- pointers_to_here_check_for_value);
+ RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
+ OMIT_SMI_CHECK);
bind(&done);
@@ -500,94 +539,78 @@ void MacroAssembler::RecordWriteField(
}
}
-// Will clobber 3 registers: object, map and dst. The register 'object' contains
-// a heap object pointer. A scratch register also needs to be available.
-void MacroAssembler::RecordWriteForMap(Register object,
- Register map,
- Register dst,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode fp_mode) {
- if (emit_debug_code()) {
- ldr(dst, FieldMemOperand(map, HeapObject::kMapOffset));
- cmp(dst, Operand(isolate()->factory()->meta_map()));
- Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+void TurboAssembler::SaveRegisters(RegList registers) {
+ DCHECK(NumRegs(registers) > 0);
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
}
- if (!FLAG_incremental_marking) {
- return;
- }
+ stm(db_w, sp, regs);
+}
- if (emit_debug_code()) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- cmp(scratch, map);
- Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+void TurboAssembler::RestoreRegisters(RegList registers) {
+ DCHECK(NumRegs(registers) > 0);
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
}
+ ldm(ia_w, sp, regs);
+}
- Label done;
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
+  // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
+  // i.e. we always emit the remembered set and save FP registers in
+  // RecordWriteStub. If a large performance regression is observed, we should
+  // use these values to avoid unnecessary work.

- // A single check of the map's pages interesting flag suffices, since it is
- // only set during incremental collection, and then it's also guaranteed that
- // the from object's page's interesting flag is also set. This optimization
- // relies on the fact that maps can never be in new space.
- CheckPageFlag(map,
- map, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- eq,
- &done);
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
+ RegList registers = callable.descriptor().allocatable_registers();
- add(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
- if (emit_debug_code()) {
- Label ok;
- tst(dst, Operand(kPointerSize - 1));
- b(eq, &ok);
- stop("Unaligned cell in write barrier");
- bind(&ok);
- }
+ SaveRegisters(registers);
- // Record the actual write.
- if (lr_status == kLRHasNotBeenSaved) {
- push(lr);
- }
- RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
- fp_mode);
- CallStub(&stub);
- if (lr_status == kLRHasNotBeenSaved) {
- pop(lr);
- }
+ Register object_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kObject));
+ Register slot_parameter(
+ callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
+ Register isolate_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kIsolate));
+ Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kRememberedSet));
+ Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kFPMode));
- bind(&done);
+ Push(object);
+ Push(address);
- // Count number of write barriers in generated code.
- isolate()->counters()->write_barriers_static()->Increment();
- {
- UseScratchRegisterScope temps(this);
- IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1,
- temps.Acquire(), dst);
- }
+ Pop(slot_parameter);
+ Pop(object_parameter);
- // Clobber clobbered registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- mov(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
- mov(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
- }
+ Move(isolate_parameter,
+ Operand(ExternalReference::isolate_address(isolate())));
+ Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
+ Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
+ Call(callable.code(), RelocInfo::CODE_TARGET);
+
+ RestoreRegisters(registers);
}
// Will clobber 3 registers: object, address, and value. The register 'object'
// contains a heap object pointer. The heap object tag is shifted away.
// A scratch register also needs to be available.
-void MacroAssembler::RecordWrite(
- Register object,
- Register address,
- Register value,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check,
- PointersToHereCheck pointers_to_here_check_for_value) {
- DCHECK(!object.is(value));
+void MacroAssembler::RecordWrite(Register object, Register address,
+ Register value, LinkRegisterStatus lr_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ DCHECK(object != value);
if (emit_debug_code()) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
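
A detail worth noting in the CallRecordWriteStub body above: object and address are moved into the descriptor's parameter registers through the stack. Pushing both values and then popping into the targets implements a parallel move that stays correct even when address aliases object_parameter (or object aliases slot_parameter); in isolation:

  Push(object);            // stack, top to bottom: address, object
  Push(address);
  Pop(slot_parameter);     // receives address
  Pop(object_parameter);   // receives object
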
@@ -609,13 +632,9 @@ void MacroAssembler::RecordWrite(
JumpIfSmi(value, &done);
}
- if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- eq,
- &done);
- }
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
CheckPageFlag(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask,
@@ -626,9 +645,13 @@ void MacroAssembler::RecordWrite(
if (lr_status == kLRHasNotBeenSaved) {
push(lr);
}
+#ifdef V8_CSA_WRITE_BARRIER
+ CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
+#else
RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
fp_mode);
CallStub(&stub);
+#endif
if (lr_status == kLRHasNotBeenSaved) {
pop(lr);
}
@@ -652,10 +675,8 @@ void MacroAssembler::RecordWrite(
}
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
- Register address,
- Register scratch,
- SaveFPRegsMode fp_mode,
- RememberedSetFinalAction and_then) {
+ Register address, Register scratch,
+ SaveFPRegsMode fp_mode) {
Label done;
if (emit_debug_code()) {
Label ok;
@@ -677,20 +698,13 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
// Call stub on end of buffer.
// Check for end of buffer.
tst(scratch, Operand(StoreBuffer::kStoreBufferMask));
- if (and_then == kFallThroughAtEnd) {
- b(ne, &done);
- } else {
- DCHECK(and_then == kReturnAtEnd);
- Ret(ne);
- }
+ Ret(ne);
push(lr);
StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
CallStub(&store_buffer_overflow);
pop(lr);
bind(&done);
- if (and_then == kReturnAtEnd) {
- Ret();
- }
+ Ret();
}
void TurboAssembler::PushCommonFrame(Register marker_reg) {
@@ -854,8 +868,8 @@ void TurboAssembler::VmovLow(DwVfpRegister dst, Register src) {
}
void TurboAssembler::VmovExtended(Register dst, int src_code) {
- DCHECK_LE(SwVfpRegister::kMaxNumRegisters, src_code);
- DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, src_code);
+ DCHECK_LE(SwVfpRegister::kNumRegisters, src_code);
+ DCHECK_GT(SwVfpRegister::kNumRegisters * 2, src_code);
if (src_code & 0x1) {
VmovHigh(dst, DwVfpRegister::from_code(src_code / 2));
} else {
@@ -864,8 +878,8 @@ void TurboAssembler::VmovExtended(Register dst, int src_code) {
}
void TurboAssembler::VmovExtended(int dst_code, Register src) {
- DCHECK_LE(SwVfpRegister::kMaxNumRegisters, dst_code);
- DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, dst_code);
+ DCHECK_LE(SwVfpRegister::kNumRegisters, dst_code);
+ DCHECK_GT(SwVfpRegister::kNumRegisters * 2, dst_code);
if (dst_code & 0x1) {
VmovHigh(DwVfpRegister::from_code(dst_code / 2), src);
} else {
@@ -876,8 +890,8 @@ void TurboAssembler::VmovExtended(int dst_code, Register src) {
void TurboAssembler::VmovExtended(int dst_code, int src_code) {
if (src_code == dst_code) return;
- if (src_code < SwVfpRegister::kMaxNumRegisters &&
- dst_code < SwVfpRegister::kMaxNumRegisters) {
+ if (src_code < SwVfpRegister::kNumRegisters &&
+ dst_code < SwVfpRegister::kNumRegisters) {
// src and dst are both s-registers.
vmov(SwVfpRegister::from_code(dst_code),
SwVfpRegister::from_code(src_code));
@@ -896,13 +910,13 @@ void TurboAssembler::VmovExtended(int dst_code, int src_code) {
src_offset = dst_offset ^ 1;
}
if (dst_offset) {
- if (dst_d_reg.is(src_d_reg)) {
+ if (dst_d_reg == src_d_reg) {
vdup(Neon32, dst_d_reg, src_d_reg, 0);
} else {
vsli(Neon64, dst_d_reg, src_d_reg, 32);
}
} else {
- if (dst_d_reg.is(src_d_reg)) {
+ if (dst_d_reg == src_d_reg) {
vdup(Neon32, dst_d_reg, src_d_reg, 1);
} else {
vsri(Neon64, dst_d_reg, src_d_reg, 32);
@@ -915,13 +929,13 @@ void TurboAssembler::VmovExtended(int dst_code, int src_code) {
// s-registers.
int scratchSCode = kScratchDoubleReg.low().code();
int scratchSCode2 = kScratchDoubleReg2.low().code();
- if (src_code < SwVfpRegister::kMaxNumRegisters) {
+ if (src_code < SwVfpRegister::kNumRegisters) {
// src is an s-register, dst is not.
vmov(kScratchDoubleReg, dst_d_reg);
vmov(SwVfpRegister::from_code(scratchSCode + dst_offset),
SwVfpRegister::from_code(src_code));
vmov(dst_d_reg, kScratchDoubleReg);
- } else if (dst_code < SwVfpRegister::kMaxNumRegisters) {
+ } else if (dst_code < SwVfpRegister::kNumRegisters) {
// dst is an s-register, src is not.
vmov(kScratchDoubleReg, src_d_reg);
vmov(SwVfpRegister::from_code(dst_code),
@@ -938,7 +952,7 @@ void TurboAssembler::VmovExtended(int dst_code, int src_code) {
}
void TurboAssembler::VmovExtended(int dst_code, const MemOperand& src) {
- if (dst_code < SwVfpRegister::kMaxNumRegisters) {
+ if (dst_code < SwVfpRegister::kNumRegisters) {
vldr(SwVfpRegister::from_code(dst_code), src);
} else {
// TODO(bbudge) If Neon supported, use load single lane form of vld1.
@@ -950,7 +964,7 @@ void TurboAssembler::VmovExtended(int dst_code, const MemOperand& src) {
}
void TurboAssembler::VmovExtended(const MemOperand& dst, int src_code) {
- if (src_code < SwVfpRegister::kMaxNumRegisters) {
+ if (src_code < SwVfpRegister::kNumRegisters) {
vstr(SwVfpRegister::from_code(src_code), dst);
} else {
// TODO(bbudge) If Neon supported, use store single lane form of vst1.
@@ -1181,19 +1195,6 @@ int TurboAssembler::LeaveFrame(StackFrame::Type type) {
return frame_ends;
}
-void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
- Register argc) {
- Push(lr, fp, context, target);
- add(fp, sp, Operand(2 * kPointerSize));
- Push(argc);
-}
-
-void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
- Register argc) {
- Pop(argc);
- Pop(lr, fp, context, target);
-}
-
void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StackFrame::Type frame_type) {
DCHECK(frame_type == StackFrame::EXIT ||
@@ -1229,7 +1230,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
SaveFPRegs(sp, scratch);
// Note that d0 will be accessible at
// fp - ExitFrameConstants::kFrameSize -
- // DwVfpRegister::kMaxNumRegisters * kDoubleSize,
+ // DwVfpRegister::kNumRegisters * kDoubleSize,
// since the sp slot and code slot were pushed after the fp.
}
@@ -1276,8 +1277,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
if (save_doubles) {
// Calculate the stack location of the saved doubles and restore them.
const int offset = ExitFrameConstants::kFixedFrameSizeFromFp;
- sub(r3, fp,
- Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
+ sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize));
RestoreFPRegs(r3, scratch);
}
@@ -1401,8 +1401,8 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// The code below is made a lot easier because the calling code already sets
// up actual and expected registers according to the contract if values are
// passed in registers.
- DCHECK(actual.is_immediate() || actual.reg().is(r0));
- DCHECK(expected.is_immediate() || expected.reg().is(r2));
+ DCHECK(actual.is_immediate() || actual.reg() == r0);
+ DCHECK(expected.is_immediate() || expected.reg() == r2);
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
@@ -1496,8 +1496,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
InvokeFlag flag) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- DCHECK(function.is(r1));
- DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r3));
+ DCHECK(function == r1);
+ DCHECK_IMPLIES(new_target.is_valid(), new_target == r3);
// On function call, call into the debugger if necessary.
CheckDebugHook(function, new_target, expected, actual);
@@ -1537,7 +1537,7 @@ void MacroAssembler::InvokeFunction(Register fun, Register new_target,
DCHECK(flag == JUMP_FUNCTION || has_frame());
// Contract with called JS functions requires that function is passed in r1.
- DCHECK(fun.is(r1));
+ DCHECK(fun == r1);
Register expected_reg = r2;
Register temp_reg = r4;
@@ -1560,7 +1560,7 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK(flag == JUMP_FUNCTION || has_frame());
// Contract with called JS functions requires that function is passed in r1.
- DCHECK(function.is(r1));
+ DCHECK(function == r1);
// Get the function and setup the context.
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
@@ -1615,118 +1615,12 @@ void MacroAssembler::PopStackHandler() {
}
-void MacroAssembler::Allocate(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
- DCHECK(object_size <= kMaxRegularHeapObjectSize);
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- mov(result, Operand(0x7091));
- mov(scratch1, Operand(0x7191));
- mov(scratch2, Operand(0x7291));
- }
- jmp(gc_required);
- return;
- }
-
- DCHECK(!AreAliased(result, scratch1, scratch2));
-
- // Make object size into bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- object_size *= kPointerSize;
- }
- DCHECK_EQ(0, object_size & kObjectAlignmentMask);
-
- // Check relative positions of allocation top and limit addresses.
- // The values must be adjacent in memory to allow the use of LDM.
- // Also, assert that the registers are numbered such that the values
- // are loaded in the correct order.
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
- ExternalReference allocation_limit =
- AllocationUtils::GetAllocationLimitReference(isolate(), flags);
-
- intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
- intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
- DCHECK((limit - top) == kPointerSize);
-
- UseScratchRegisterScope temps(this);
-
- // Set up allocation top address register.
- Register top_address = scratch1;
- Register alloc_limit = temps.Acquire();
- Register result_end = scratch2;
- mov(top_address, Operand(allocation_top));
-
- if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into alloc_limit.
- ldm(ia, top_address, result.bit() | alloc_limit.bit());
- } else {
- if (emit_debug_code()) {
- // Assert that result actually contains top on entry.
- ldr(alloc_limit, MemOperand(top_address));
- cmp(result, alloc_limit);
- Check(eq, kUnexpectedAllocationTop);
- }
- // Load allocation limit. Result already contains allocation top.
- ldr(alloc_limit, MemOperand(top_address, limit - top));
- }
-
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- // Align the next allocation. Storing the filler map without checking top is
- // safe in new-space because the limit of the heap is aligned there.
- STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
- and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
- Label aligned;
- b(eq, &aligned);
- if ((flags & PRETENURE) != 0) {
- cmp(result, Operand(alloc_limit));
- b(hs, gc_required);
- }
- mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
- str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
- bind(&aligned);
- }
-
- // Calculate new top and bail out if new space is exhausted. Use result
- // to calculate the new top. We have already acquired the scratch register at
- // this point, so we cannot just use add().
- DCHECK(object_size > 0);
- Register source = result;
- int shift = 0;
- while (object_size != 0) {
- if (((object_size >> shift) & 0x03) == 0) {
- shift += 2;
- } else {
- int bits = object_size & (0xff << shift);
- object_size -= bits;
- shift += 8;
- Operand bits_operand(bits);
- DCHECK(bits_operand.InstructionsRequired(this) == 1);
- add(result_end, source, bits_operand);
- source = result_end;
- }
- }
-
- cmp(result_end, Operand(alloc_limit));
- b(hi, gc_required);
-
- str(result_end, MemOperand(top_address));
-
- // Tag object.
- add(result, result, Operand(kHeapObjectTag));
-}
-
void MacroAssembler::CompareObjectType(Register object,
Register map,
Register type_reg,
InstanceType type) {
UseScratchRegisterScope temps(this);
- const Register temp = type_reg.is(no_reg) ? temps.Acquire() : type_reg;
+ const Register temp = type_reg == no_reg ? temps.Acquire() : type_reg;
ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
CompareInstanceType(map, temp, type);
@@ -1745,57 +1639,11 @@ void MacroAssembler::CompareRoot(Register obj,
Heap::RootListIndex index) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!obj.is(scratch));
+ DCHECK(obj != scratch);
LoadRoot(scratch, index);
cmp(obj, scratch);
}
-void MacroAssembler::CompareMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* early_success) {
- ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- CompareMap(scratch, map, early_success);
-}
-
-
-void MacroAssembler::CompareMap(Register obj_map,
- Handle<Map> map,
- Label* early_success) {
- cmp(obj_map, Operand(map));
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
-
- Label success;
- CompareMap(obj, scratch, map, &success);
- b(ne, fail);
- bind(&success);
-}
-
-void MacroAssembler::CheckMap(Register obj, Register scratch,
- Heap::RootListIndex index, Label* fail,
- SmiCheckType smi_check_type) {
- UseScratchRegisterScope temps(this);
- Register root_register = temps.Acquire();
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
- ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- LoadRoot(root_register, index);
- cmp(scratch, root_register);
- b(ne, fail);
-}
-
-
void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
mov(value, Operand(cell));
ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
@@ -1884,7 +1732,7 @@ void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
void MacroAssembler::TryDoubleToInt32Exact(Register result,
DwVfpRegister double_input,
LowDwVfpRegister double_scratch) {
- DCHECK(!double_input.is(double_scratch));
+ DCHECK(double_input != double_scratch);
vcvt_s32_f64(double_scratch.low(), double_input);
vmov(result, double_scratch.low());
vcvt_f64_s32(double_scratch, double_scratch.low());
@@ -2066,21 +1914,6 @@ void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
}
-void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
- Register map,
- Register scratch) {
- // Load the initial map. The global functions all have initial maps.
- ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (emit_debug_code()) {
- Label ok, fail;
- CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
- b(&ok);
- bind(&fail);
- Abort(kGlobalFunctionsMustHaveInitialMap);
- bind(&ok);
- }
-}
-
void TurboAssembler::InitializeRootRegister() {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
@@ -2219,20 +2052,6 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
-void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
- Register first, Register second, Register scratch1, Register scratch2,
- Label* failure) {
- // Test that both first and second are sequential one-byte strings.
- // Assume that they are non-smis.
- ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
- ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
- ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
-
- JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
- scratch2, failure);
-}
-
void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
Label* not_unique_name) {
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
@@ -2245,28 +2064,6 @@ void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
bind(&succeed);
}
-void MacroAssembler::AllocateJSValue(Register result, Register constructor,
- Register value, Register scratch1,
- Register scratch2, Label* gc_required) {
- DCHECK(!result.is(constructor));
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!result.is(value));
-
- // Allocate JSValue in new space.
- Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Initialize the JSValue.
- LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
- str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
- LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
- str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOrHashOffset));
- str(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
- str(value, FieldMemOperand(result, JSValue::kValueOffset));
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
-}
-
void TurboAssembler::CheckFor32DRegs(Register scratch) {
mov(scratch, Operand(ExternalReference::cpu_features()));
ldr(scratch, MemOperand(scratch));
@@ -2294,7 +2091,7 @@ void TurboAssembler::FloatMaxHelper(T result, T left, T right,
Label* out_of_line) {
// This trivial case is caught sooner, so that the out-of-line code can be
// completely avoided.
- DCHECK(!left.is(right));
+ DCHECK(left != right);
if (CpuFeatures::IsSupported(ARMv8)) {
CpuFeatureScope scope(this, ARMv8);
@@ -2306,7 +2103,7 @@ void TurboAssembler::FloatMaxHelper(T result, T left, T right,
VFPCompareAndSetFlags(left, right);
b(vs, out_of_line);
// Avoid a conditional instruction if the result register is unique.
- bool aliased_result_reg = result.is(left) || result.is(right);
+ bool aliased_result_reg = result == left || result == right;
Move(result, right, aliased_result_reg ? mi : al);
Move(result, left, gt);
b(ne, &done);
@@ -2322,7 +2119,7 @@ void TurboAssembler::FloatMaxHelper(T result, T left, T right,
template <typename T>
void TurboAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) {
- DCHECK(!left.is(right));
+ DCHECK(left != right);
// ARMv8: At least one of left and right is a NaN.
// Anything else: At least one of left and right is a NaN, or both left and
@@ -2338,7 +2135,7 @@ void TurboAssembler::FloatMinHelper(T result, T left, T right,
Label* out_of_line) {
// This trivial case is caught sooner, so that the out-of-line code can be
// completely avoided.
- DCHECK(!left.is(right));
+ DCHECK(left != right);
if (CpuFeatures::IsSupported(ARMv8)) {
CpuFeatureScope scope(this, ARMv8);
@@ -2350,7 +2147,7 @@ void TurboAssembler::FloatMinHelper(T result, T left, T right,
VFPCompareAndSetFlags(left, right);
b(vs, out_of_line);
// Avoid a conditional instruction if the result register is unique.
- bool aliased_result_reg = result.is(left) || result.is(right);
+ bool aliased_result_reg = result == left || result == right;
Move(result, left, aliased_result_reg ? mi : al);
Move(result, right, gt);
b(ne, &done);
@@ -2364,13 +2161,13 @@ void TurboAssembler::FloatMinHelper(T result, T left, T right,
// We could use a single 'vorr' instruction here if we had NEON support.
// The algorithm used is -((-L) + (-R)), which is most efficiently expressed
// as -((-L) - R).
- if (left.is(result)) {
- DCHECK(!right.is(result));
+ if (left == result) {
+ DCHECK(right != result);
vneg(result, left);
vsub(result, result, right);
vneg(result, result);
} else {
- DCHECK(!left.is(result));
+ DCHECK(left != result);
vneg(result, right);
vsub(result, result, left);
vneg(result, result);
@@ -2381,7 +2178,7 @@ void TurboAssembler::FloatMinHelper(T result, T left, T right,
template <typename T>
void TurboAssembler::FloatMinOutOfLineHelper(T result, T left, T right) {
- DCHECK(!left.is(right));
+ DCHECK(left != right);
// At least one of left and right is a NaN. Use vadd to propagate the NaN
// appropriately. +/-0 is handled inline.
@@ -2428,21 +2225,6 @@ void TurboAssembler::FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
FloatMinOutOfLineHelper(result, left, right);
}
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
- Register first, Register second, Register scratch1, Register scratch2,
- Label* failure) {
- const int kFlatOneByteStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatOneByteStringTag =
- kStringTag | kOneByteStringTag | kSeqStringTag;
- and_(scratch1, first, Operand(kFlatOneByteStringMask));
- and_(scratch2, second, Operand(kFlatOneByteStringMask));
- cmp(scratch1, Operand(kFlatOneByteStringTag));
- // Ignore second test if first test failed.
- cmp(scratch2, Operand(kFlatOneByteStringTag), eq);
- b(ne, failure);
-}
-
static const int kRegisterPassedArguments = 4;
int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
@@ -2488,7 +2270,7 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
}
void TurboAssembler::MovToFloatParameter(DwVfpRegister src) {
- DCHECK(src.is(d0));
+ DCHECK(src == d0);
if (!use_eabi_hardfloat()) {
vmov(r0, r1, src);
}
@@ -2502,8 +2284,8 @@ void TurboAssembler::MovToFloatResult(DwVfpRegister src) {
void TurboAssembler::MovToFloatParameters(DwVfpRegister src1,
DwVfpRegister src2) {
- DCHECK(src1.is(d0));
- DCHECK(src2.is(d1));
+ DCHECK(src1 == d0);
+ DCHECK(src2 == d1);
if (!use_eabi_hardfloat()) {
vmov(r0, r1, src1);
vmov(r2, r3, src2);
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index b929a46232..d8dded8cc1 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -14,20 +14,20 @@ namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
-const Register kReturnRegister0 = {Register::kCode_r0};
-const Register kReturnRegister1 = {Register::kCode_r1};
-const Register kReturnRegister2 = {Register::kCode_r2};
-const Register kJSFunctionRegister = {Register::kCode_r1};
-const Register kContextRegister = {Register::kCode_r7};
-const Register kAllocateSizeRegister = {Register::kCode_r1};
-const Register kInterpreterAccumulatorRegister = {Register::kCode_r0};
-const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r5};
-const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r6};
-const Register kInterpreterDispatchTableRegister = {Register::kCode_r8};
-const Register kJavaScriptCallArgCountRegister = {Register::kCode_r0};
-const Register kJavaScriptCallNewTargetRegister = {Register::kCode_r3};
-const Register kRuntimeCallFunctionRegister = {Register::kCode_r1};
-const Register kRuntimeCallArgCountRegister = {Register::kCode_r0};
+constexpr Register kReturnRegister0 = r0;
+constexpr Register kReturnRegister1 = r1;
+constexpr Register kReturnRegister2 = r2;
+constexpr Register kJSFunctionRegister = r1;
+constexpr Register kContextRegister = r7;
+constexpr Register kAllocateSizeRegister = r1;
+constexpr Register kInterpreterAccumulatorRegister = r0;
+constexpr Register kInterpreterBytecodeOffsetRegister = r5;
+constexpr Register kInterpreterBytecodeArrayRegister = r6;
+constexpr Register kInterpreterDispatchTableRegister = r8;
+constexpr Register kJavaScriptCallArgCountRegister = r0;
+constexpr Register kJavaScriptCallNewTargetRegister = r3;
+constexpr Register kRuntimeCallFunctionRegister = r1;
+constexpr Register kRuntimeCallArgCountRegister = r0;
// ----------------------------------------------------------------------------
// Static helper functions
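
Because these aliases are now constexpr (and, assuming Register's RegisterBase rework gives it constexpr comparison operators, as the arm64 hunks below suggest), register identities become compile-time facts; a small sketch:

  static_assert(kReturnRegister0 == r0, "arm calling convention alias");
  constexpr Register kScratchAlias = r9;  // hypothetical local alias
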
@@ -39,8 +39,8 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
// Give alias names to registers
-const Register cp = {Register::kCode_r7}; // JavaScript context pointer.
-const Register kRootRegister = {Register::kCode_r10}; // Roots array pointer.
+constexpr Register cp = r7; // JavaScript context pointer.
+constexpr Register kRootRegister = r10; // Roots array pointer.
// Flags used for AllocateHeapNumber
enum TaggingMode {
@@ -53,10 +53,6 @@ enum TaggingMode {
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-enum PointersToHereCheck {
- kPointersToHereMaybeInteresting,
- kPointersToHereAreAlwaysInteresting
-};
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
@@ -88,13 +84,7 @@ enum TargetAddressStorageMode {
class TurboAssembler : public Assembler {
public:
TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
- CodeObjectRequired create_code_object)
- : Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
- if (create_code_object == CodeObjectRequired::kYes) {
- code_object_ =
- Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
- }
- }
+ CodeObjectRequired create_code_object);
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() const { return has_frame_; }
@@ -209,7 +199,7 @@ class TurboAssembler : public Assembler {
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Condition cond = al) {
- DCHECK(!src1.is(src2));
+ DCHECK(src1 != src2);
if (src1.code() > src2.code()) {
ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
} else {
@@ -399,6 +389,13 @@ class TurboAssembler : public Assembler {
// Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
void CheckFor32DRegs(Register scratch);
+ void SaveRegisters(RegList registers);
+ void RestoreRegisters(RegList registers);
+
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode);
+
// Does a runtime check for 16/32 FP registers. Either way, pushes 32 double
// values to location, saving [d0..(d15|d31)].
void SaveFPRegs(Register location, Register scratch);
@@ -407,12 +404,23 @@ class TurboAssembler : public Assembler {
// values to location, restoring [d0..(d15|d31)].
void RestoreFPRegs(Register location, Register scratch);
- void PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
- Register exclusion2 = no_reg,
- Register exclusion3 = no_reg);
- void PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+  // Calculate how much stack space (in bytes) is required to store
+  // caller-saved registers, excluding those specified in the arguments.
+ int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg) const;
+
+  // Push caller-saved registers on the stack, and return the number of bytes
+  // by which the stack pointer is adjusted.
+ int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
+  // Restore caller-saved registers from the stack, and return the number of
+  // bytes by which the stack pointer is adjusted.
+ int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
void Jump(Register target, Condition cond = al);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
@@ -422,7 +430,7 @@ class TurboAssembler : public Assembler {
// NaNs or +/-0.0, are expected to be rare and are handled in out-of-line
// code. The specific behaviour depends on supported instructions.
//
- // These functions assume (and assert) that !left.is(right). It is permitted
+  // These functions assume (and assert) that left != right. It is permitted
// for the result to alias either input register.
void FloatMax(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right,
Label* out_of_line);
@@ -457,7 +465,7 @@ class TurboAssembler : public Assembler {
void Move(Register dst, Register src, Condition cond = al);
void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
Condition cond = al) {
- if (!src.IsRegister() || !src.rm().is(dst) || sbit != LeaveCC) {
+ if (!src.IsRegister() || src.rm() != dst || sbit != LeaveCC) {
mov(dst, src, sbit, cond);
}
}
@@ -607,15 +615,12 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// GC Support
- enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
-
// Record in the remembered set the fact that we have a pointer to new space
// at the address pointed to by the addr register. Only works if addr is not
// in new space.
void RememberedSetHelper(Register object, // Used for debug code.
Register addr, Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetFinalAction and_then);
+ SaveFPRegsMode save_fp);
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
@@ -650,26 +655,7 @@ class MacroAssembler : public TurboAssembler {
Register object, int offset, Register value, Register scratch,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting);
-
- // As above, but the offset has the tag presubtracted. For use with
- // MemOperand(reg, off).
- inline void RecordWriteContextSlot(
- Register context, int offset, Register value, Register scratch,
- LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting) {
- RecordWriteField(context, offset + kHeapObjectTag, value, scratch,
- lr_status, save_fp, remembered_set_action, smi_check,
- pointers_to_here_check_for_value);
- }
-
- void RecordWriteForMap(Register object, Register map, Register dst,
- LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);
+ SmiCheck smi_check = INLINE_SMI_CHECK);
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
@@ -678,9 +664,7 @@ class MacroAssembler : public TurboAssembler {
Register object, Register address, Register value,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting);
+ SmiCheck smi_check = INLINE_SMI_CHECK);
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
@@ -699,11 +683,6 @@ class MacroAssembler : public TurboAssembler {
bool restore_context,
bool argument_count_is_length = false);
- // Load the global object from the current context.
- void LoadGlobalObject(Register dst) {
- LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
- }
-
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst) {
LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
@@ -711,12 +690,6 @@ class MacroAssembler : public TurboAssembler {
void LoadNativeContextSlot(int index, Register dst);
- // Load the initial map from the global function. The registers
- // function and map can be the same, function is then overwritten.
- void LoadGlobalFunctionInitialMap(Register function,
- Register map,
- Register scratch);
-
// ---------------------------------------------------------------------------
// JavaScript invokes
@@ -755,29 +728,6 @@ class MacroAssembler : public TurboAssembler {
void PopStackHandler();
// ---------------------------------------------------------------------------
- // Allocation support
-
- // Allocate an object in new space or old space. The object_size is
- // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
- // is passed. If the space is exhausted control continues at the gc_required
- // label. The allocated object is returned in result. If the flag
- // tag_allocated_object is true the result is tagged as as a heap object.
- // All registers are clobbered also when control continues at the gc_required
- // label.
- void Allocate(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
-
- // Allocate and initialize a JSValue wrapper with the specified {constructor}
- // and {value}.
- void AllocateJSValue(Register result, Register constructor, Register value,
- Register scratch1, Register scratch2,
- Label* gc_required);
-
- // ---------------------------------------------------------------------------
// Support functions.
// Machine code version of Map::GetConstructor().
@@ -805,38 +755,6 @@ class MacroAssembler : public TurboAssembler {
Register type_reg,
InstanceType type);
- // Compare an object's map with the specified map and its transitioned
- // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
- // set with result of map compare. If multiple map compares are required, the
- // compare sequences branches to early_success.
- void CompareMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* early_success);
-
- // As above, but the map of the object is already loaded into the register
- // which is preserved by the code generated.
- void CompareMap(Register obj_map,
- Handle<Map> map,
- Label* early_success);
-
- // Check if the map of an object is equal to a specified map and branch to
- // label if not. Skip the smi check if not required (object is known to be a
- // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
- // against maps that are ElementsKind transition maps of the specified map.
- void CheckMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type);
-
-
- void CheckMap(Register obj,
- Register scratch,
- Heap::RootListIndex index,
- Label* fail,
- SmiCheckType smi_check_type);
-
void GetWeakValue(Register value, Handle<WeakCell> cell);
// Load the value of the weak cell in the value register. Branch to the given
@@ -961,20 +879,6 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// String utilities
- // Checks if both objects are sequential one-byte strings and jumps to label
- // if either is not. Assumes that neither object is a smi.
- void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
- Register object2,
- Register scratch1,
- Register scratch2,
- Label* failure);
-
- // Checks if both instance types are sequential one-byte strings and jumps to
- // label if either is not.
- void JumpIfBothInstanceTypesAreNotSequentialOneByte(
- Register first_object_instance_type, Register second_object_instance_type,
- Register scratch1, Register scratch2, Label* failure);
-
void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
void LoadInstanceDescriptors(Register map, Register descriptors);
@@ -991,9 +895,6 @@ class MacroAssembler : public TurboAssembler {
DecodeField<Field>(reg, reg);
}
- void EnterBuiltinFrame(Register context, Register target, Register argc);
- void LeaveBuiltinFrame(Register context, Register target, Register argc);
-
private:
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 81867e6c79..f83d6f2a2a 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -1713,10 +1713,6 @@ typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0, int32_t arg1,
int32_t arg6, int32_t arg7,
int32_t arg8);
-typedef ObjectTriple (*SimulatorRuntimeTripleCall)(int32_t arg0, int32_t arg1,
- int32_t arg2, int32_t arg3,
- int32_t arg4);
-
// These prototypes handle the four types of FP calls.
typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
@@ -1907,34 +1903,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(
external);
target(arg0, arg1, Redirection::ReverseRedirection(arg2));
- } else if (redirection->type() ==
- ExternalReference::BUILTIN_CALL_TRIPLE) {
- // builtin call returning ObjectTriple.
- SimulatorRuntimeTripleCall target =
- reinterpret_cast<SimulatorRuntimeTripleCall>(external);
- if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
- PrintF(
- "Call to host triple returning runtime function %p "
- "args %08x, %08x, %08x, %08x, %08x",
- static_cast<void*>(FUNCTION_ADDR(target)), arg1, arg2, arg3, arg4,
- arg5);
- if (!stack_aligned) {
- PrintF(" with unaligned stack %08x\n", get_register(sp));
- }
- PrintF("\n");
- }
- CHECK(stack_aligned);
- // arg0 is a hidden argument pointing to the return location, so don't
- // pass it to the target function.
- ObjectTriple result = target(arg1, arg2, arg3, arg4, arg5);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned { %p, %p, %p }\n", static_cast<void*>(result.x),
- static_cast<void*>(result.y), static_cast<void*>(result.z));
- }
- // Return is passed back in address pointed to by hidden first argument.
- ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(arg0);
- *sim_result = result;
- set_register(r0, arg0);
} else {
// builtin call.
DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
@@ -2208,6 +2176,8 @@ void Simulator::DecodeType01(Instruction* instr) {
int rd = instr->RdValue();
int rt = instr->RmValue();
int rn = instr->RnValue();
+ DCHECK_NE(rd, rn);
+ DCHECK_NE(rd, rt);
int32_t addr = get_register(rn);
switch (instr->Bits(22, 21)) {
case 0: {
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index 06036205ff..ea2e5b1571 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -27,132 +27,21 @@ void RelocInfo::apply(intptr_t delta) {
*p += delta; // Relocate entry.
}
-inline int CPURegister::code() const {
- DCHECK(IsValid());
- return reg_code;
-}
-
-
-inline CPURegister::RegisterType CPURegister::type() const {
- DCHECK(IsValidOrNone());
- return reg_type;
-}
-
-inline RegList CPURegister::bit() const {
- DCHECK(static_cast<size_t>(reg_code) < (sizeof(RegList) * kBitsPerByte));
- return IsValid() ? 1UL << reg_code : 0;
-}
-
-
-inline int CPURegister::SizeInBits() const {
- DCHECK(IsValid());
- return reg_size;
-}
-
-
-inline int CPURegister::SizeInBytes() const {
- DCHECK(IsValid());
- DCHECK(SizeInBits() % 8 == 0);
- return reg_size / 8;
-}
-
-inline bool CPURegister::Is8Bits() const {
- DCHECK(IsValid());
- return reg_size == 8;
-}
-
-inline bool CPURegister::Is16Bits() const {
- DCHECK(IsValid());
- return reg_size == 16;
-}
-
-inline bool CPURegister::Is32Bits() const {
- DCHECK(IsValid());
- return reg_size == 32;
-}
-
-
-inline bool CPURegister::Is64Bits() const {
- DCHECK(IsValid());
- return reg_size == 64;
-}
-
-inline bool CPURegister::Is128Bits() const {
- DCHECK(IsValid());
- return reg_size == 128;
-}
-
-inline bool CPURegister::IsValid() const {
- if (IsValidRegister() || IsValidVRegister()) {
- DCHECK(!IsNone());
- return true;
- } else {
- DCHECK(IsNone());
- return false;
- }
-}
-
-
-inline bool CPURegister::IsValidRegister() const {
- return IsRegister() &&
- ((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits)) &&
- ((reg_code < kNumberOfRegisters) || (reg_code == kSPRegInternalCode));
-}
-
-inline bool CPURegister::IsValidVRegister() const {
- return IsVRegister() &&
- ((reg_size == kBRegSizeInBits) || (reg_size == kHRegSizeInBits) ||
- (reg_size == kSRegSizeInBits) || (reg_size == kDRegSizeInBits) ||
- (reg_size == kQRegSizeInBits)) &&
- (reg_code < kNumberOfVRegisters);
-}
-
-inline bool CPURegister::IsNone() const {
- // kNoRegister types should always have size 0 and code 0.
- DCHECK((reg_type != kNoRegister) || (reg_code == 0));
- DCHECK((reg_type != kNoRegister) || (reg_size == 0));
-
- return reg_type == kNoRegister;
-}
-
-
-inline bool CPURegister::Is(const CPURegister& other) const {
- DCHECK(IsValidOrNone() && other.IsValidOrNone());
- return Aliases(other) && (reg_size == other.reg_size);
-}
-
-
-inline bool CPURegister::Aliases(const CPURegister& other) const {
- DCHECK(IsValidOrNone() && other.IsValidOrNone());
- return (reg_code == other.reg_code) && (reg_type == other.reg_type);
-}
-
-
-inline bool CPURegister::IsRegister() const {
- return reg_type == kRegister;
-}
-
-inline bool CPURegister::IsVRegister() const { return reg_type == kVRegister; }
inline bool CPURegister::IsSameSizeAndType(const CPURegister& other) const {
- return (reg_size == other.reg_size) && (reg_type == other.reg_type);
-}
-
-
-inline bool CPURegister::IsValidOrNone() const {
- return IsValid() || IsNone();
+ return (reg_size_ == other.reg_size_) && (reg_type_ == other.reg_type_);
}
inline bool CPURegister::IsZero() const {
DCHECK(IsValid());
- return IsRegister() && (reg_code == kZeroRegCode);
+ return IsRegister() && (reg_code_ == kZeroRegCode);
}
inline bool CPURegister::IsSP() const {
DCHECK(IsValid());
- return IsRegister() && (reg_code == kSPRegInternalCode);
+ return IsRegister() && (reg_code_ == kSPRegInternalCode);
}
@@ -254,44 +143,53 @@ inline VRegister VRegister::VRegFromCode(unsigned code) {
}
inline Register CPURegister::W() const {
- DCHECK(IsValidRegister());
- return Register::WRegFromCode(reg_code);
+ DCHECK(IsRegister());
+ return Register::WRegFromCode(reg_code_);
}
+inline Register CPURegister::Reg() const {
+ DCHECK(IsRegister());
+ return Register::Create(reg_code_, reg_size_);
+}
+
+inline VRegister CPURegister::VReg() const {
+ DCHECK(IsVRegister());
+ return VRegister::Create(reg_code_, reg_size_);
+}
inline Register CPURegister::X() const {
- DCHECK(IsValidRegister());
- return Register::XRegFromCode(reg_code);
+ DCHECK(IsRegister());
+ return Register::XRegFromCode(reg_code_);
}
inline VRegister CPURegister::V() const {
- DCHECK(IsValidVRegister());
- return VRegister::VRegFromCode(reg_code);
+ DCHECK(IsVRegister());
+ return VRegister::VRegFromCode(reg_code_);
}
inline VRegister CPURegister::B() const {
- DCHECK(IsValidVRegister());
- return VRegister::BRegFromCode(reg_code);
+ DCHECK(IsVRegister());
+ return VRegister::BRegFromCode(reg_code_);
}
inline VRegister CPURegister::H() const {
- DCHECK(IsValidVRegister());
- return VRegister::HRegFromCode(reg_code);
+ DCHECK(IsVRegister());
+ return VRegister::HRegFromCode(reg_code_);
}
inline VRegister CPURegister::S() const {
- DCHECK(IsValidVRegister());
- return VRegister::SRegFromCode(reg_code);
+ DCHECK(IsVRegister());
+ return VRegister::SRegFromCode(reg_code_);
}
inline VRegister CPURegister::D() const {
- DCHECK(IsValidVRegister());
- return VRegister::DRegFromCode(reg_code);
+ DCHECK(IsVRegister());
+ return VRegister::DRegFromCode(reg_code_);
}
inline VRegister CPURegister::Q() const {
- DCHECK(IsValidVRegister());
- return VRegister::QRegFromCode(reg_code);
+ DCHECK(IsVRegister());
+ return VRegister::QRegFromCode(reg_code_);
}
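The accessors above only reinterpret the register code at another width, and since registers can no longer be constructed in an invalid state, the weaker IsRegister()/IsVRegister() checks suffice. A minimal usage sketch, assuming the Create() overloads defined in assembler-arm64.h:
  Register x = Register::Create(0, kXRegSizeInBits);    // x0
  Register w = x.W();                                   // w0: same code, 32 bits
  VRegister d = VRegister::Create(0, kDRegSizeInBits);  // d0
  VRegister q = d.Q();                                  // q0: same code, 128 bits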
@@ -541,15 +439,12 @@ MemOperand::MemOperand(Register base,
DCHECK(shift == LSL);
}
-
MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
- : base_(base), addrmode_(addrmode) {
+ : base_(base), regoffset_(NoReg), addrmode_(addrmode) {
DCHECK(base.Is64Bits() && !base.IsZero());
if (offset.IsImmediate()) {
offset_ = offset.ImmediateValue();
-
- regoffset_ = NoReg;
} else if (offset.IsShiftedRegister()) {
DCHECK((addrmode == Offset) || (addrmode == PostIndex));
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index 5fa5872da6..e851fa5d78 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -3498,7 +3498,7 @@ void Assembler::cmlt(const VRegister& vd, const VRegister& vn, int value) {
NEON_3SAME_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC
-#define NEON_FP3SAME_LIST(V) \
+#define NEON_FP3SAME_LIST_V2(V) \
V(fadd, NEON_FADD, FADD) \
V(fsub, NEON_FSUB, FSUB) \
V(fmul, NEON_FMUL, FMUL) \
@@ -3538,7 +3538,7 @@ NEON_3SAME_LIST(DEFINE_ASM_FUNC)
} \
NEONFP3Same(vd, vn, vm, op); \
}
-NEON_FP3SAME_LIST(DEFINE_ASM_FUNC)
+NEON_FP3SAME_LIST_V2(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC
void Assembler::addp(const VRegister& vd, const VRegister& vn) {
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index caaf1c6953..47ce6667c8 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -84,74 +84,96 @@ const int kNumSafepointRegisters = 32;
// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
#define kSafepointSavedRegisters CPURegList::GetSafepointSavedRegisters().list()
#define kNumSafepointSavedRegisters \
- CPURegList::GetSafepointSavedRegisters().Count();
+ CPURegList::GetSafepointSavedRegisters().Count()
// Some CPURegister methods can return Register and VRegister types, so we
// need to declare them in advance.
-struct Register;
-struct VRegister;
+class Register;
+class VRegister;
-struct CPURegister {
- enum Code {
-#define REGISTER_CODE(R) kCode_##R,
- GENERAL_REGISTERS(REGISTER_CODE)
+enum RegisterCode {
+#define REGISTER_CODE(R) kRegCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
- kAfterLast,
- kCode_no_reg = -1
- };
+ kRegAfterLast
+};
+class CPURegister : public RegisterBase<CPURegister, kRegAfterLast> {
+ public:
enum RegisterType {
- // The kInvalid value is used to detect uninitialized static instances,
- // which are always zero-initialized before any constructors are called.
- kInvalid = 0,
kRegister,
kVRegister,
kNoRegister
};
- constexpr CPURegister() : CPURegister(0, 0, CPURegister::kNoRegister) {}
-
- constexpr CPURegister(int reg_code, int reg_size, RegisterType reg_type,
- int lane_count = 1)
- : reg_code(reg_code),
- reg_size(reg_size),
- reg_type(reg_type),
- lane_count(lane_count) {}
-
- static CPURegister Create(int reg_code, int reg_size, RegisterType reg_type,
- int lane_count = 1) {
- CPURegister r = {reg_code, reg_size, reg_type, lane_count};
- return r;
- }
-
- int code() const;
- RegisterType type() const;
- RegList bit() const;
- int SizeInBits() const;
- int SizeInBytes() const;
- bool Is8Bits() const;
- bool Is16Bits() const;
- bool Is32Bits() const;
- bool Is64Bits() const;
- bool Is128Bits() const;
- bool IsValid() const;
- bool IsValidOrNone() const;
- bool IsValidRegister() const;
- bool IsValidVRegister() const;
- bool IsNone() const;
- bool Is(const CPURegister& other) const;
- bool Aliases(const CPURegister& other) const;
+ static constexpr CPURegister no_reg() {
+ return CPURegister{0, 0, kNoRegister};
+ }
+
+ template <int code, int size, RegisterType type>
+ static constexpr CPURegister Create() {
+ static_assert(IsValid(code, size, type), "Cannot create invalid registers");
+ return CPURegister{code, size, type};
+ }
+
+ static CPURegister Create(int code, int size, RegisterType type) {
+ DCHECK(IsValid(code, size, type));
+ return CPURegister{code, size, type};
+ }
+
+ RegisterType type() const { return reg_type_; }
+ RegList bit() const {
+ DCHECK(static_cast<size_t>(reg_code_) < (sizeof(RegList) * kBitsPerByte));
+ return IsValid() ? 1UL << reg_code_ : 0;
+ }
+ int SizeInBits() const {
+ DCHECK(IsValid());
+ return reg_size_;
+ }
+ int SizeInBytes() const {
+ DCHECK(IsValid());
+ DCHECK(SizeInBits() % 8 == 0);
+ return reg_size_ / 8;
+ }
+ bool Is8Bits() const {
+ DCHECK(IsValid());
+ return reg_size_ == 8;
+ }
+ bool Is16Bits() const {
+ DCHECK(IsValid());
+ return reg_size_ == 16;
+ }
+ bool Is32Bits() const {
+ DCHECK(IsValid());
+ return reg_size_ == 32;
+ }
+ bool Is64Bits() const {
+ DCHECK(IsValid());
+ return reg_size_ == 64;
+ }
+ bool Is128Bits() const {
+ DCHECK(IsValid());
+ return reg_size_ == 128;
+ }
+ bool IsValid() const { return reg_type_ != kNoRegister; }
+ bool IsNone() const { return reg_type_ == kNoRegister; }
+ bool Is(const CPURegister& other) const {
+ return Aliases(other) && (reg_size_ == other.reg_size_);
+ }
+ bool Aliases(const CPURegister& other) const {
+ return (reg_code_ == other.reg_code_) && (reg_type_ == other.reg_type_);
+ }
bool IsZero() const;
bool IsSP() const;
- bool IsRegister() const;
- bool IsVRegister() const;
+ bool IsRegister() const { return reg_type_ == kRegister; }
+ bool IsVRegister() const { return reg_type_ == kVRegister; }
bool IsFPRegister() const { return IsS() || IsD(); }
- bool IsW() const { return IsValidRegister() && Is32Bits(); }
- bool IsX() const { return IsValidRegister() && Is64Bits(); }
+ bool IsW() const { return IsRegister() && Is32Bits(); }
+ bool IsX() const { return IsRegister() && Is64Bits(); }
// These assertions ensure that the size and type of the register are as
// described. They do not consider the number of lanes that make up a vector.
@@ -166,6 +188,9 @@ struct CPURegister {
bool IsD() const { return IsV() && Is64Bits(); }
bool IsQ() const { return IsV() && Is128Bits(); }
+ Register Reg() const;
+ VRegister VReg() const;
+
Register X() const;
Register W() const;
VRegister V() const;
@@ -181,25 +206,51 @@ struct CPURegister {
bool is(const CPURegister& other) const { return Is(other); }
bool is_valid() const { return IsValid(); }
- int reg_code;
- int reg_size;
- RegisterType reg_type;
- int lane_count;
-};
+ protected:
+ int reg_size_;
+ RegisterType reg_type_;
+ friend class RegisterBase;
-struct Register : public CPURegister {
- static Register Create(int code, int size) {
- return Register(CPURegister::Create(code, size, CPURegister::kRegister));
+ constexpr CPURegister(int code, int size, RegisterType type)
+ : RegisterBase(code), reg_size_(size), reg_type_(type) {}
+
+ static constexpr bool IsValidRegister(int code, int size) {
+ return (size == kWRegSizeInBits || size == kXRegSizeInBits) &&
+ (code < kNumberOfRegisters || code == kSPRegInternalCode);
}
- constexpr Register() : CPURegister() {}
+ static constexpr bool IsValidVRegister(int code, int size) {
+ return (size == kBRegSizeInBits || size == kHRegSizeInBits ||
+ size == kSRegSizeInBits || size == kDRegSizeInBits ||
+ size == kQRegSizeInBits) &&
+ code < kNumberOfVRegisters;
+ }
- constexpr explicit Register(const CPURegister& r) : CPURegister(r) {}
+ static constexpr bool IsValid(int code, int size, RegisterType type) {
+ return (type == kRegister && IsValidRegister(code, size)) ||
+ (type == kVRegister && IsValidVRegister(code, size));
+ }
- bool IsValid() const {
- DCHECK(IsRegister() || IsNone());
- return IsValidRegister();
+ static constexpr bool IsNone(int code, int size, RegisterType type) {
+ return type == kNoRegister && code == 0 && size == 0;
+ }
+};
+
+static_assert(IS_TRIVIALLY_COPYABLE(CPURegister),
+ "CPURegister can efficiently be passed by value");
+
+class Register : public CPURegister {
+ public:
+ static constexpr Register no_reg() { return Register(CPURegister::no_reg()); }
+
+ template <int code, int size>
+ static constexpr Register Create() {
+ return Register(CPURegister::Create<code, size, CPURegister::kRegister>());
+ }
+
+ static Register Create(int code, int size) {
+ return Register(CPURegister::Create(code, size, CPURegister::kRegister));
}
static Register XRegFromCode(unsigned code);
@@ -209,10 +260,6 @@ struct Register : public CPURegister {
// These members are necessary for compilation.
// A few of them may be unused for now.
- static constexpr int kNumRegisters = kNumberOfRegisters;
- STATIC_ASSERT(kNumRegisters == Code::kAfterLast);
- static int NumRegisters() { return kNumRegisters; }
-
// We allow crankshaft to use the following registers:
// - x0 to x15
// - x18 to x24
@@ -234,26 +281,40 @@ struct Register : public CPURegister {
}
// End of V8 compatibility section -----------------------
+ //
+ private:
+ constexpr explicit Register(const CPURegister& r) : CPURegister(r) {}
};
+static_assert(IS_TRIVIALLY_COPYABLE(Register),
+ "Register can efficiently be passed by value");
+
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
-struct VRegister : public CPURegister {
- enum Code {
-#define REGISTER_CODE(R) kCode_##R,
- DOUBLE_REGISTERS(REGISTER_CODE)
+enum DoubleRegisterCode {
+#define REGISTER_CODE(R) kDoubleCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
- kAfterLast,
- kCode_no_reg = -1
- };
+ kDoubleAfterLast
+};
+
+class VRegister : public CPURegister {
+ public:
+ static constexpr VRegister no_reg() {
+ return VRegister(CPURegister::no_reg(), 0);
+ }
+
+ template <int code, int size, int lane_count = 1>
+ static constexpr VRegister Create() {
+ static_assert(IsValidLaneCount(lane_count), "Invalid lane count");
+ return VRegister(CPURegister::Create<code, size, kVRegister>(), lane_count);
+ }
- static VRegister Create(int reg_code, int reg_size, int lane_count = 1) {
- DCHECK(base::bits::IsPowerOfTwo(lane_count) && (lane_count <= 16));
- VRegister v(CPURegister::Create(reg_code, reg_size, CPURegister::kVRegister,
- lane_count));
- DCHECK(v.IsValidVRegister());
- return v;
+ static VRegister Create(int code, int size, int lane_count = 1) {
+ DCHECK(IsValidLaneCount(lane_count));
+ return VRegister(CPURegister::Create(code, size, CPURegister::kVRegister),
+ lane_count);
}
static VRegister Create(int reg_code, VectorFormat format) {
@@ -262,15 +323,6 @@ struct VRegister : public CPURegister {
return VRegister::Create(reg_code, reg_size, reg_count);
}
- constexpr VRegister() : CPURegister() {}
-
- constexpr explicit VRegister(const CPURegister& r) : CPURegister(r) {}
-
- bool IsValid() const {
- DCHECK(IsVRegister() || IsNone());
- return IsValidVRegister();
- }
-
static VRegister BRegFromCode(unsigned code);
static VRegister HRegFromCode(unsigned code);
static VRegister SRegFromCode(unsigned code);
@@ -303,14 +355,14 @@ struct VRegister : public CPURegister {
return VRegister::Create(code(), kDRegSizeInBits, 1);
}
- bool Is8B() const { return (Is64Bits() && (lane_count == 8)); }
- bool Is16B() const { return (Is128Bits() && (lane_count == 16)); }
- bool Is4H() const { return (Is64Bits() && (lane_count == 4)); }
- bool Is8H() const { return (Is128Bits() && (lane_count == 8)); }
- bool Is2S() const { return (Is64Bits() && (lane_count == 2)); }
- bool Is4S() const { return (Is128Bits() && (lane_count == 4)); }
- bool Is1D() const { return (Is64Bits() && (lane_count == 1)); }
- bool Is2D() const { return (Is128Bits() && (lane_count == 2)); }
+ bool Is8B() const { return (Is64Bits() && (lane_count_ == 8)); }
+ bool Is16B() const { return (Is128Bits() && (lane_count_ == 16)); }
+ bool Is4H() const { return (Is64Bits() && (lane_count_ == 4)); }
+ bool Is8H() const { return (Is128Bits() && (lane_count_ == 8)); }
+ bool Is2S() const { return (Is64Bits() && (lane_count_ == 2)); }
+ bool Is4S() const { return (Is128Bits() && (lane_count_ == 4)); }
+ bool Is1D() const { return (Is64Bits() && (lane_count_ == 1)); }
+ bool Is2D() const { return (Is128Bits() && (lane_count_ == 2)); }
// For consistency, we assert the number of lanes of these scalar registers,
// even though there are no vectors of equivalent total size with which they
@@ -333,22 +385,22 @@ struct VRegister : public CPURegister {
bool IsLaneSizeS() const { return LaneSizeInBits() == kSRegSizeInBits; }
bool IsLaneSizeD() const { return LaneSizeInBits() == kDRegSizeInBits; }
- bool IsScalar() const { return lane_count == 1; }
- bool IsVector() const { return lane_count > 1; }
+ bool IsScalar() const { return lane_count_ == 1; }
+ bool IsVector() const { return lane_count_ > 1; }
bool IsSameFormat(const VRegister& other) const {
- return (reg_size == other.reg_size) && (lane_count == other.lane_count);
+ return (reg_size_ == other.reg_size_) && (lane_count_ == other.lane_count_);
}
- int LaneCount() const { return lane_count; }
+ int LaneCount() const { return lane_count_; }
- unsigned LaneSizeInBytes() const { return SizeInBytes() / lane_count; }
+ unsigned LaneSizeInBytes() const { return SizeInBytes() / lane_count_; }
unsigned LaneSizeInBits() const { return LaneSizeInBytes() * 8; }
// Start of V8 compatibility section ---------------------
static constexpr int kMaxNumRegisters = kNumberOfVRegisters;
- STATIC_ASSERT(kMaxNumRegisters == Code::kAfterLast);
+ STATIC_ASSERT(kMaxNumRegisters == kDoubleAfterLast);
// Crankshaft can use all the V registers except:
// - d15 which is used to keep the 0 double value
@@ -359,51 +411,52 @@ struct VRegister : public CPURegister {
return VRegister::Create(code, kDRegSizeInBits);
}
// End of V8 compatibility section -----------------------
-};
-static_assert(sizeof(CPURegister) == sizeof(Register),
- "CPURegister must be same size as Register");
-static_assert(sizeof(CPURegister) == sizeof(VRegister),
- "CPURegister must be same size as VRegister");
+ private:
+ int lane_count_;
-#define DEFINE_REGISTER(register_class, name, code, size, type) \
- constexpr register_class name { CPURegister(code, size, type) }
-#define ALIAS_REGISTER(register_class, alias, name) \
- constexpr register_class alias = name
+ constexpr explicit VRegister(const CPURegister& r, int lane_count)
+ : CPURegister(r), lane_count_(lane_count) {}
+
+ static constexpr bool IsValidLaneCount(int lane_count) {
+ return base::bits::IsPowerOfTwo(lane_count) && lane_count <= 16;
+ }
+};
+
+static_assert(IS_TRIVIALLY_COPYABLE(VRegister),
+ "VRegister can efficiently be passed by value");
// No*Reg is used to indicate an unused argument, or an error case. Note that
// these all compare equal (using the Is() method). The Register and VRegister
// variants are provided for convenience.
-DEFINE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister);
-DEFINE_REGISTER(VRegister, NoVReg, 0, 0, CPURegister::kNoRegister);
-DEFINE_REGISTER(CPURegister, NoCPUReg, 0, 0, CPURegister::kNoRegister);
+constexpr Register NoReg = Register::no_reg();
+constexpr VRegister NoVReg = VRegister::no_reg();
+constexpr CPURegister NoCPUReg = CPURegister::no_reg();
// v8 compatibility.
-DEFINE_REGISTER(Register, no_reg, 0, 0, CPURegister::kNoRegister);
+constexpr Register no_reg = NoReg;
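As the comment notes, the No*Reg constants all compare equal through Is(); a short sketch of that behaviour, given the definitions above:
  DCHECK(NoReg.Is(NoCPUReg));   // same code (0), size (0), type (kNoRegister)
  DCHECK(NoVReg.Is(NoCPUReg));
  DCHECK(NoReg.IsNone());       // but none of them is a usable register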
-#define DEFINE_REGISTERS(N) \
- DEFINE_REGISTER(Register, w##N, N, kWRegSizeInBits, CPURegister::kRegister); \
- DEFINE_REGISTER(Register, x##N, N, kXRegSizeInBits, CPURegister::kRegister);
+#define DEFINE_REGISTER(register_class, name, ...) \
+ constexpr register_class name = register_class::Create<__VA_ARGS__>()
+#define ALIAS_REGISTER(register_class, alias, name) \
+ constexpr register_class alias = name
+
+#define DEFINE_REGISTERS(N) \
+ DEFINE_REGISTER(Register, w##N, N, kWRegSizeInBits); \
+ DEFINE_REGISTER(Register, x##N, N, kXRegSizeInBits);
GENERAL_REGISTER_CODE_LIST(DEFINE_REGISTERS)
#undef DEFINE_REGISTERS
-DEFINE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits,
- CPURegister::kRegister);
-DEFINE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits,
- CPURegister::kRegister);
-
-#define DEFINE_VREGISTERS(N) \
- DEFINE_REGISTER(VRegister, b##N, N, kBRegSizeInBits, \
- CPURegister::kVRegister); \
- DEFINE_REGISTER(VRegister, h##N, N, kHRegSizeInBits, \
- CPURegister::kVRegister); \
- DEFINE_REGISTER(VRegister, s##N, N, kSRegSizeInBits, \
- CPURegister::kVRegister); \
- DEFINE_REGISTER(VRegister, d##N, N, kDRegSizeInBits, \
- CPURegister::kVRegister); \
- DEFINE_REGISTER(VRegister, q##N, N, kQRegSizeInBits, \
- CPURegister::kVRegister); \
- DEFINE_REGISTER(VRegister, v##N, N, kQRegSizeInBits, CPURegister::kVRegister);
+DEFINE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits);
+DEFINE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits);
+
+#define DEFINE_VREGISTERS(N) \
+ DEFINE_REGISTER(VRegister, b##N, N, kBRegSizeInBits); \
+ DEFINE_REGISTER(VRegister, h##N, N, kHRegSizeInBits); \
+ DEFINE_REGISTER(VRegister, s##N, N, kSRegSizeInBits); \
+ DEFINE_REGISTER(VRegister, d##N, N, kDRegSizeInBits); \
+ DEFINE_REGISTER(VRegister, q##N, N, kQRegSizeInBits); \
+ DEFINE_REGISTER(VRegister, v##N, N, kQRegSizeInBits);
GENERAL_REGISTER_CODE_LIST(DEFINE_VREGISTERS)
#undef DEFINE_VREGISTERS
@@ -432,6 +485,9 @@ ALIAS_REGISTER(Register, lr, x30);
ALIAS_REGISTER(Register, xzr, x31);
ALIAS_REGISTER(Register, wzr, w31);
+// Register used for padding stack slots.
+ALIAS_REGISTER(Register, padreg, x31);
+
// Keeps the 0 double value.
ALIAS_REGISTER(VRegister, fp_zero, d15);
// MacroAssembler fixed V Registers.
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index 957eb0ae4e..2a994fca01 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -33,8 +33,7 @@ namespace internal {
void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ Mov(x5, Operand(x0, LSL, kPointerSizeLog2));
__ Str(x1, MemOperand(jssp, x5));
- __ Push(x1);
- __ Push(x2);
+ __ Push(x1, x2);
__ Add(x0, x0, Operand(3));
__ TailCallRuntime(Runtime::kNewArray);
}
@@ -130,6 +129,8 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// We don't care if MacroAssembler scratch registers are corrupted.
saved_regs.Remove(*(masm->TmpList()));
saved_fp_regs.Remove(*(masm->FPTmpList()));
+ DCHECK_EQ(saved_regs.Count() % 2, 0);
+ DCHECK_EQ(saved_fp_regs.Count() % 2, 0);
__ PushCPURegList(saved_regs);
if (save_doubles()) {
@@ -373,10 +374,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Sub(temp_argv, temp_argv, 1 * kPointerSize);
}
- // Reserve three slots to preserve x21-x23 callee-saved registers. If the
- // result size is too large to be returned in registers then also reserve
- // space for the return value.
- int extra_stack_space = 3 + (result_size() <= 2 ? 0 : result_size());
+ // Reserve three slots to preserve x21-x23 callee-saved registers.
+ int extra_stack_space = 3;
// Enter the exit frame.
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(
@@ -389,11 +388,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Poke(argc, 2 * kPointerSize);
__ Poke(target, 3 * kPointerSize);
- if (result_size() > 2) {
- // Save the location of the return value into x8 for call.
- __ Add(x8, __ StackPointer(), Operand(4 * kPointerSize));
- }
-
// We normally only keep tagged values in callee-saved registers, as they
// could be pushed onto the stack by called stubs and functions, and on the
// stack they can confuse the GC. However, we're only calling C functions
@@ -463,18 +457,10 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Blr(target);
__ Bind(&return_location);
- if (result_size() > 2) {
- DCHECK_EQ(3, result_size());
- // Read result values stored on stack.
- __ Ldr(x0, MemOperand(__ StackPointer(), 4 * kPointerSize));
- __ Ldr(x1, MemOperand(__ StackPointer(), 5 * kPointerSize));
- __ Ldr(x2, MemOperand(__ StackPointer(), 6 * kPointerSize));
- }
- // Result returned in x0, x1:x0 or x2:x1:x0 - do not destroy these registers!
+ // Result returned in x0 or x1:x0 - do not destroy these registers!
// x0 result0 The return code from the call.
- // x1 result1 For calls which return ObjectPair or ObjectTriple.
- // x2 result2 For calls which return ObjectTriple.
+ // x1 result1 For calls which return ObjectPair.
// x21 argv
// x22 argc
// x23 target
@@ -620,25 +606,26 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
ExternalReference js_entry_sp(IsolateAddressId::kJSEntrySPAddress, isolate());
__ Mov(x10, ExternalReference(js_entry_sp));
__ Ldr(x11, MemOperand(x10));
- __ Cbnz(x11, &non_outermost_js);
- __ Str(fp, MemOperand(x10));
- __ Mov(x12, StackFrame::OUTERMOST_JSENTRY_FRAME);
- __ Push(x12);
- __ B(&done);
- __ Bind(&non_outermost_js);
- // We spare one instruction by pushing xzr since the marker is 0.
+
+ // Select between the inner and outermost frame marker, based on the JS entry
+ // sp. We assert that the inner marker is zero, so we can use xzr to save a
+ // move instruction.
DCHECK(StackFrame::INNER_JSENTRY_FRAME == 0);
- __ Push(xzr);
+ __ Cmp(x11, 0); // If x11 is zero, this is the outermost frame.
+ __ Csel(x12, xzr, StackFrame::OUTERMOST_JSENTRY_FRAME, ne);
+ __ B(ne, &done);
+ __ Str(fp, MemOperand(x10));
+
__ Bind(&done);
+ __ Push(x12);
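// Annotation (illustrative, not part of the patch): the branchless sequence
// above computes
//   x12 = (js_entry_sp == 0) ? OUTERMOST_JSENTRY_FRAME
//                            : INNER_JSENTRY_FRAME (== 0, hence xzr)
// and records fp in js_entry_sp only for the outermost frame.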
// The frame set up looks like this:
// jssp[0] : JS entry frame marker.
// jssp[1] : C entry FP.
// jssp[2] : stack frame marker.
- // jssp[3] : stack frmae marker.
+ // jssp[3] : stack frame marker.
// jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
-
// Jump to a faked try block that does the invoke, with a faked catch
// block that sets the pending exception.
__ B(&invoke);
@@ -665,7 +652,22 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Invoke: Link this frame into the handler chain.
__ Bind(&invoke);
- __ PushStackHandler();
+
+ // Push new stack handler.
+ DCHECK(jssp.Is(__ StackPointer()));
+ static_assert(StackHandlerConstants::kSize == 1 * kPointerSize,
+ "Unexpected offset for StackHandlerConstants::kSize");
+ static_assert(StackHandlerConstants::kNextOffset == 0 * kPointerSize,
+ "Unexpected offset for StackHandlerConstants::kNextOffset");
+
+ // Link the current handler as the next handler.
+ __ Mov(x11, ExternalReference(IsolateAddressId::kHandlerAddress, isolate()));
+ __ Ldr(x10, MemOperand(x11));
+ __ Push(x10);
+
+ // Set this new handler as the current one.
+ __ Str(jssp, MemOperand(x11));
+
// If an exception not caught by another handler occurs, this handler
// returns control to the code after the B(&invoke) above, which
// restores all callee-saved registers (including cp and fp) to their
@@ -689,9 +691,13 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Call(BUILTIN_CODE(isolate(), JSEntryTrampoline), RelocInfo::CODE_TARGET);
}
- // Unlink this frame from the handler chain.
- __ PopStackHandler();
-
+ // Pop the stack handler and unlink this frame from the handler chain.
+ static_assert(StackHandlerConstants::kNextOffset == 0 * kPointerSize,
+ "Unexpected offset for StackHandlerConstants::kNextOffset");
+ __ Pop(x10);
+ __ Mov(x11, ExternalReference(IsolateAddressId::kHandlerAddress, isolate()));
+ __ Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes);
+ __ Str(x10, MemOperand(x11));
__ Bind(&exit);
// x0 holds the result.
@@ -705,17 +711,20 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Check if the current stack frame is marked as the outermost JS frame.
Label non_outermost_js_2;
- __ Pop(x10);
- __ Cmp(x10, StackFrame::OUTERMOST_JSENTRY_FRAME);
- __ B(ne, &non_outermost_js_2);
- __ Mov(x11, ExternalReference(js_entry_sp));
- __ Str(xzr, MemOperand(x11));
- __ Bind(&non_outermost_js_2);
-
- // Restore the top frame descriptors from the stack.
- __ Pop(x10);
- __ Mov(x11, ExternalReference(IsolateAddressId::kCEntryFPAddress, isolate()));
- __ Str(x10, MemOperand(x11));
+ {
+ Register c_entry_fp = x11;
+ __ Pop(x10, c_entry_fp);
+ __ Cmp(x10, StackFrame::OUTERMOST_JSENTRY_FRAME);
+ __ B(ne, &non_outermost_js_2);
+ __ Mov(x12, ExternalReference(js_entry_sp));
+ __ Str(xzr, MemOperand(x12));
+ __ Bind(&non_outermost_js_2);
+
+ // Restore the top frame descriptors from the stack.
+ __ Mov(x12,
+ ExternalReference(IsolateAddressId::kCEntryFPAddress, isolate()));
+ __ Str(c_entry_fp, MemOperand(x12));
+ }
// Reset the stack to the callee saved registers.
__ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes);
@@ -859,8 +868,8 @@ RecordWriteStub::RegisterAllocation::RegisterAllocation(Register object,
CPURegList pool_available = GetValidRegistersForAllocation();
CPURegList used_regs(object, address, scratch);
pool_available.Remove(used_regs);
- scratch1_ = Register(pool_available.PopLowestIndex());
- scratch2_ = Register(pool_available.PopLowestIndex());
+ scratch1_ = pool_available.PopLowestIndex().Reg();
+ scratch2_ = pool_available.PopLowestIndex().Reg();
// The scratch registers will be restored by other means so we don't need
// to save them with the other caller saved registers.
@@ -869,6 +878,70 @@ RecordWriteStub::RegisterAllocation::RegisterAllocation(Register object,
saved_regs_.Remove(scratch2_);
}
+RecordWriteStub::Mode RecordWriteStub::GetMode(Code* stub) {
+ // Find the mode depending on the first two instructions.
+ Instruction* instr1 =
+ reinterpret_cast<Instruction*>(stub->instruction_start());
+ Instruction* instr2 = instr1->following();
+
+ if (instr1->IsUncondBranchImm()) {
+ DCHECK(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
+ return INCREMENTAL;
+ }
+
+ DCHECK(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));
+
+ if (instr2->IsUncondBranchImm()) {
+ return INCREMENTAL_COMPACTION;
+ }
+
+ DCHECK(instr2->IsPCRelAddressing());
+
+ return STORE_BUFFER_ONLY;
+}
+
+// We patch the first two instructions of the stub back and forth between an
+// adr and branch when we start and stop incremental heap marking.
+// The branch is
+// b label
+// The adr is
+// adr xzr label
+// so effectively a nop.
+void RecordWriteStub::Patch(Code* stub, Mode mode) {
+ // We are going to patch the first two instructions of the stub.
+ PatchingAssembler patcher(stub->GetIsolate(), stub->instruction_start(), 2);
+ Instruction* instr1 = patcher.InstructionAt(0);
+ Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
+ // Instructions must be either 'adr' or 'b'.
+ DCHECK(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
+ DCHECK(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
+ // Retrieve the offsets to the labels.
+ auto offset_to_incremental_noncompacting =
+ static_cast<int32_t>(instr1->ImmPCOffset());
+ auto offset_to_incremental_compacting =
+ static_cast<int32_t>(instr2->ImmPCOffset());
+
+ switch (mode) {
+ case STORE_BUFFER_ONLY:
+ DCHECK(GetMode(stub) == INCREMENTAL ||
+ GetMode(stub) == INCREMENTAL_COMPACTION);
+ patcher.adr(xzr, offset_to_incremental_noncompacting);
+ patcher.adr(xzr, offset_to_incremental_compacting);
+ break;
+ case INCREMENTAL:
+ DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
+ patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2);
+ patcher.adr(xzr, offset_to_incremental_compacting);
+ break;
+ case INCREMENTAL_COMPACTION:
+ DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
+ patcher.adr(xzr, offset_to_incremental_noncompacting);
+ patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2);
+ break;
+ }
+ DCHECK(GetMode(stub) == mode);
+}
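// Annotation (illustrative, not part of the patch): the two patched
// instructions can only ever be in one of three states,
//   STORE_BUFFER_ONLY:       adr xzr, L1 ; adr xzr, L2   (two effective nops)
//   INCREMENTAL:             b   L1      ; adr xzr, L2
//   INCREMENTAL_COMPACTION:  adr xzr, L1 ; b   L2
// which is exactly the encoding GetMode() decodes above.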
+
void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
// We need some extra registers for this stub, they have been allocated
// but we need to save them before using them.
@@ -892,7 +965,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
__ RememberedSetHelper(object(), address(),
value(), // scratch1
- save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
+ save_fp_regs_mode());
__ Bind(&dont_need_remembered_set);
}
@@ -945,7 +1018,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
__ RememberedSetHelper(object(), address(),
value(), // scratch1
- save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
+ save_fp_regs_mode());
} else {
__ Ret();
}
@@ -987,7 +1060,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
__ RememberedSetHelper(object(), address(),
value(), // scratch1
- save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
+ save_fp_regs_mode());
} else {
__ Ret();
}
@@ -1018,7 +1091,7 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
if (remembered_set_action() == EMIT_REMEMBERED_SET) {
__ RememberedSetHelper(object(), address(),
value(), // scratch1
- save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
+ save_fp_regs_mode());
}
__ Ret();
@@ -1042,11 +1115,11 @@ void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
DontEmitDebugCodeScope no_debug_code(tasm);
Label entry_hook_call_start;
tasm->Bind(&entry_hook_call_start);
- tasm->Push(lr);
+ tasm->Push(padreg, lr);
tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
DCHECK(tasm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
kProfileEntryHookCallSize);
- tasm->Pop(lr);
+ tasm->Pop(lr, padreg);
}
}
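Pairing lr with padreg keeps jssp 16-byte aligned, since pushing the link register alone would leave only 8-byte alignment. A sketch of the invariant, assuming 8-byte X registers:
  __ Push(padreg, lr);   // 2 * 8 = 16 bytes pushed; alignment preserved
  // ... make the call ...
  __ Pop(lr, padreg);    // popped in reverse order; the pad slot is discarded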
@@ -1057,11 +1130,11 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
DontEmitDebugCodeScope no_debug_code(masm);
Label entry_hook_call_start;
__ Bind(&entry_hook_call_start);
- __ Push(lr);
+ __ Push(padreg, lr);
__ CallStub(&stub);
DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
kProfileEntryHookCallSize);
- __ Pop(lr);
+ __ Pop(lr, padreg);
}
}
@@ -1075,6 +1148,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
__ PushCPURegList(kCallerSaved);
DCHECK(kCallerSaved.IncludesAliasOf(lr));
const int kNumSavedRegs = kCallerSaved.Count();
+ DCHECK_EQ(kNumSavedRegs % 2, 0);
// Compute the function's address as the first argument.
__ Sub(x0, lr, kProfileEntryHookCallSize);
@@ -1195,8 +1269,10 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
}
CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
- spill_list.Combine(lr);
spill_list.Remove(scratch0); // Scratch registers don't need to be preserved.
+ spill_list.Combine(lr);
+ spill_list.Combine(padreg); // Add padreg to make the list of even length.
+ DCHECK_EQ(spill_list.Count() % 2, 0);
__ PushCPURegList(spill_list);
@@ -1624,18 +1700,18 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return static_cast<int>(ref0.address() - ref1.address());
}
-
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions.
// 'stack_space' is the space to be unwound on exit (includes the call JS
// arguments space and the additional space allocated for the fast call).
// 'spill_offset' is the offset from the stack pointer where
// CallApiFunctionAndReturn can spill registers.
-static void CallApiFunctionAndReturn(
- MacroAssembler* masm, Register function_address,
- ExternalReference thunk_ref, int stack_space,
- MemOperand* stack_space_operand, int spill_offset,
- MemOperand return_value_operand, MemOperand* context_restore_operand) {
+static void CallApiFunctionAndReturn(MacroAssembler* masm,
+ Register function_address,
+ ExternalReference thunk_ref,
+ int stack_space, int spill_offset,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand) {
ASM_LOCATION("CallApiFunctionAndReturn");
Isolate* isolate = masm->isolate();
ExternalReference next_address =
@@ -1742,10 +1818,6 @@ static void CallApiFunctionAndReturn(
__ Ldr(cp, *context_restore_operand);
}
- if (stack_space_operand != NULL) {
- __ Ldr(w2, *stack_space_operand);
- }
-
__ LeaveExitFrame(false, x1, !restore_context);
// Check if the function scheduled an exception.
@@ -1754,11 +1826,7 @@ static void CallApiFunctionAndReturn(
__ JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex,
&promote_scheduled_exception);
- if (stack_space_operand != NULL) {
- __ Drop(x2, 1);
- } else {
- __ Drop(stack_space);
- }
+ __ DropSlots(stack_space);
__ Ret();
// Re-throw by promoting a scheduled exception.
@@ -1811,22 +1879,20 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(FCA::kIsolateIndex == 1);
STATIC_ASSERT(FCA::kHolderIndex == 0);
- // new target
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ Register undef = x7;
+ __ LoadRoot(undef, Heap::kUndefinedValueRootIndex);
- // context, callee and call data.
- __ Push(context, callee, call_data);
+ // Push new target, context, callee and call data.
+ __ Push(undef, context, callee, call_data);
- Register scratch = call_data;
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
Register isolate_reg = x5;
__ Mov(isolate_reg, ExternalReference::isolate_address(masm->isolate()));
// FunctionCallbackArguments:
// return value, return value default, isolate, holder.
- __ Push(scratch, scratch, isolate_reg, holder);
+ __ Push(undef, undef, isolate_reg, holder);
- // Enter a new context
+ // Enter a new context.
if (is_lazy()) {
// ----------- S t a t e -------------------------------------
// -- sp[0] : holder
@@ -1839,8 +1905,9 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// -- sp[(FCA::kArgsLength + argc + 1) * 8] : accessor_holder
// -----------------------------------------------------------
- // Load context from accessor_holder
+ // Load context from accessor_holder.
Register accessor_holder = context;
+ Register scratch = undef;
Register scratch2 = callee;
__ Ldr(accessor_holder,
MemOperand(__ StackPointer(),
@@ -1852,10 +1919,10 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
__ Tst(scratch2, Operand(1 << Map::kIsConstructor));
__ B(ne, &skip_looking_for_constructor);
__ GetMapConstructor(context, scratch, scratch, scratch2);
- __ bind(&skip_looking_for_constructor);
+ __ Bind(&skip_looking_for_constructor);
__ Ldr(context, FieldMemOperand(context, JSFunction::kContextOffset));
} else {
- // Load context from callee
+ // Load context from callee.
__ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
}
@@ -1867,8 +1934,8 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// not controlled by GC.
const int kApiStackSpace = 3;
- // Allocate space for CallApiFunctionAndReturn can store some scratch
- // registeres on the stack.
+ // Allocate space so that CallApiFunctionAndReturn can store some scratch
+ // registers on the stack.
const int kCallApiFunctionSpillSpace = 4;
FrameScope frame_scope(masm, StackFrame::MANUAL);
@@ -1899,19 +1966,19 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
return_value_offset = 2 + FCA::kReturnValueOffset;
}
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
+ // The number of arguments might be odd, but will be padded when calling the
+ // stub. We do not round up stack_space here; that is done in
+ // CallApiFunctionAndReturn.
const int stack_space = argc() + FCA::kArgsLength + 2;
- MemOperand* stack_space_operand = nullptr;
-
+ DCHECK_EQ((stack_space - argc()) % 2, 0);
const int spill_offset = 1 + kApiStackSpace;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
- stack_space_operand, spill_offset,
- return_value_operand, &context_restore_operand);
+ spill_offset, return_value_operand,
+ &context_restore_operand);
}
void CallApiGetterStub::Generate(MacroAssembler* masm) {
- // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
- // name below the exit frame to make GC aware of them.
STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
@@ -1924,23 +1991,31 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
Register receiver = ApiGetterDescriptor::ReceiverRegister();
Register holder = ApiGetterDescriptor::HolderRegister();
Register callback = ApiGetterDescriptor::CallbackRegister();
- Register scratch = x4;
- Register scratch2 = x5;
- Register scratch3 = x6;
- DCHECK(!AreAliased(receiver, holder, callback, scratch));
-
- __ Push(receiver);
-
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- __ Mov(scratch2, Operand(ExternalReference::isolate_address(isolate())));
- __ Ldr(scratch3, FieldMemOperand(callback, AccessorInfo::kDataOffset));
- __ Push(scratch3, scratch, scratch, scratch2, holder);
- __ Push(Smi::kZero); // should_throw_on_error -> false
- __ Ldr(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
- __ Push(scratch);
+ Register data = x4;
+ Register undef = x5;
+ Register isolate_address = x6;
+ Register name = x7;
+ DCHECK(!AreAliased(receiver, holder, callback, data, undef, isolate_address,
+ name));
+
+ __ Ldr(data, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ LoadRoot(undef, Heap::kUndefinedValueRootIndex);
+ __ Mov(isolate_address,
+ Operand(ExternalReference::isolate_address(isolate())));
+ __ Ldr(name, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+
+ // PropertyCallbackArguments:
+ // receiver, data, return value, return value default, isolate, holder,
+ // should_throw_on_error
+ // These are followed by the property name, which is also pushed below the
+ // exit frame to make the GC aware of it.
+ __ Push(receiver, data, undef, undef, isolate_address, holder, xzr, name);
// v8::PropertyCallbackInfo::args_ array and name handle.
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+ static const int kStackUnwindSpace =
+ PropertyCallbackArguments::kArgsLength + 1;
+ static_assert(kStackUnwindSpace % 2 == 0,
+ "slots must be a multiple of 2 for stack pointer alignment");
// Load address of v8::PropertyAccessorInfo::args_ array and name handle.
__ Mov(x0, masm->StackPointer()); // x0 = Handle<Name>
@@ -1948,8 +2023,8 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
const int kApiStackSpace = 1;
- // Allocate space for CallApiFunctionAndReturn can store some scratch
- // registeres on the stack.
+ // Allocate space so that CallApiFunctionAndReturn can store some scratch
+ // registers on the stack.
const int kCallApiFunctionSpillSpace = 4;
FrameScope frame_scope(masm, StackFrame::MANUAL);
@@ -1965,16 +2040,17 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
ExternalReference::invoke_accessor_getter_callback(isolate());
Register api_function_address = x2;
- __ Ldr(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+ Register js_getter = x4;
+ __ Ldr(js_getter, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
__ Ldr(api_function_address,
- FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
+ FieldMemOperand(js_getter, Foreign::kForeignAddressOffset));
const int spill_offset = 1 + kApiStackSpace;
// +3 is to skip prolog, return address and name handle.
MemOperand return_value_operand(
fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
- kStackUnwindSpace, NULL, spill_offset,
+ kStackUnwindSpace, spill_offset,
return_value_operand, NULL);
}
diff --git a/deps/v8/src/arm64/code-stubs-arm64.h b/deps/v8/src/arm64/code-stubs-arm64.h
index 6cedfadeb9..0713d3a319 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.h
+++ b/deps/v8/src/arm64/code-stubs-arm64.h
@@ -68,69 +68,9 @@ class RecordWriteStub: public PlatformCodeStub {
bool SometimesSetsUpAFrame() override { return false; }
- static Mode GetMode(Code* stub) {
- // Find the mode depending on the first two instructions.
- Instruction* instr1 =
- reinterpret_cast<Instruction*>(stub->instruction_start());
- Instruction* instr2 = instr1->following();
-
- if (instr1->IsUncondBranchImm()) {
- DCHECK(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
- return INCREMENTAL;
- }
-
- DCHECK(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));
-
- if (instr2->IsUncondBranchImm()) {
- return INCREMENTAL_COMPACTION;
- }
-
- DCHECK(instr2->IsPCRelAddressing());
+ static Mode GetMode(Code* stub);
- return STORE_BUFFER_ONLY;
- }
-
- // We patch the two first instructions of the stub back and forth between an
- // adr and branch when we start and stop incremental heap marking.
- // The branch is
- // b label
- // The adr is
- // adr xzr label
- // so effectively a nop.
- static void Patch(Code* stub, Mode mode) {
- // We are going to patch the two first instructions of the stub.
- PatchingAssembler patcher(stub->GetIsolate(), stub->instruction_start(), 2);
- Instruction* instr1 = patcher.InstructionAt(0);
- Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
- // Instructions must be either 'adr' or 'b'.
- DCHECK(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
- DCHECK(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
- // Retrieve the offsets to the labels.
- auto offset_to_incremental_noncompacting =
- static_cast<int32_t>(instr1->ImmPCOffset());
- auto offset_to_incremental_compacting =
- static_cast<int32_t>(instr2->ImmPCOffset());
-
- switch (mode) {
- case STORE_BUFFER_ONLY:
- DCHECK(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
- patcher.adr(xzr, offset_to_incremental_noncompacting);
- patcher.adr(xzr, offset_to_incremental_compacting);
- break;
- case INCREMENTAL:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2);
- patcher.adr(xzr, offset_to_incremental_compacting);
- break;
- case INCREMENTAL_COMPACTION:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- patcher.adr(xzr, offset_to_incremental_noncompacting);
- patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2);
- break;
- }
- DCHECK(GetMode(stub) == mode);
- }
+ static void Patch(Code* stub, Mode mode);
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
@@ -181,8 +121,8 @@ class RecordWriteStub: public PlatformCodeStub {
Register object_;
Register address_;
Register scratch0_;
- Register scratch1_;
- Register scratch2_;
+ Register scratch1_ = NoReg;
+ Register scratch2_ = NoReg;
CPURegList saved_regs_;
CPURegList saved_fp_regs_;
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index 065a0ded2d..0180797215 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -202,10 +202,6 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Ldr(reg, MemOperand(x1, src_offset));
}
- // Push state from the last output frame.
- __ Ldr(x6, MemOperand(current_frame, FrameDescription::state_offset()));
- __ Push(x6);
-
// TODO(all): ARM copies a lot (if not all) of the last output frame onto the
// stack, then pops it all into registers. Here, we try to load it directly
// into the relevant registers. Is this correct? If so, we should improve the
@@ -276,6 +272,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ Push(entry_id);
}
+bool Deoptimizer::PadTopOfStackRegister() { return true; }
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
diff --git a/deps/v8/src/arm64/eh-frame-arm64.cc b/deps/v8/src/arm64/eh-frame-arm64.cc
index 09a3ccb709..507cbd1c2b 100644
--- a/deps/v8/src/arm64/eh-frame-arm64.cc
+++ b/deps/v8/src/arm64/eh-frame-arm64.cc
@@ -29,15 +29,15 @@ void EhFrameWriter::WriteInitialStateInCie() {
// static
int EhFrameWriter::RegisterToDwarfCode(Register name) {
switch (name.code()) {
- case Register::kCode_x28:
+ case kRegCode_x28:
return kJsSpDwarfCode;
- case Register::kCode_x29:
+ case kRegCode_x29:
return kFpDwarfCode;
- case Register::kCode_x30:
+ case kRegCode_x30:
return kLrDwarfCode;
- case Register::kCode_x31:
+ case kRegCode_x31:
return kCSpDwarfCode;
- case Register::kCode_x0:
+ case kRegCode_x0:
return kX0DwarfCode;
default:
UNIMPLEMENTED();
diff --git a/deps/v8/src/arm64/frame-constants-arm64.cc b/deps/v8/src/arm64/frame-constants-arm64.cc
index 02cd6839b8..327c0ed188 100644
--- a/deps/v8/src/arm64/frame-constants-arm64.cc
+++ b/deps/v8/src/arm64/frame-constants-arm64.cc
@@ -19,6 +19,11 @@ Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); }
+int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
+ // Round up to a multiple of two, to make the frame a multiple of 16 bytes.
+ return RoundUp(register_count, 2);
+}
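A worked example of the rounding, assuming 8-byte stack slots on arm64:
  // RegisterStackSlotCount(3) == 4  -> 4 * 8 = 32 bytes (multiple of 16)
  // RegisterStackSlotCount(4) == 4  -> 32 bytes, already aligned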
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm64/frame-constants-arm64.h b/deps/v8/src/arm64/frame-constants-arm64.h
index 0d5a066359..882a57a851 100644
--- a/deps/v8/src/arm64/frame-constants-arm64.h
+++ b/deps/v8/src/arm64/frame-constants-arm64.h
@@ -18,8 +18,9 @@ class ExitFrameConstants : public TypedFrameConstants {
public:
static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
- DEFINE_TYPED_FRAME_SIZES(2);
- static const int kLastExitFrameField = kCodeOffset;
+ static const int kPaddingOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
+ DEFINE_TYPED_FRAME_SIZES(3);
+ static const int kLastExitFrameField = kPaddingOffset;
static const int kConstantPoolOffset = 0; // Not used
};
diff --git a/deps/v8/src/arm64/instructions-arm64-constants.cc b/deps/v8/src/arm64/instructions-arm64-constants.cc
new file mode 100644
index 0000000000..5f1b49fbdc
--- /dev/null
+++ b/deps/v8/src/arm64/instructions-arm64-constants.cc
@@ -0,0 +1,46 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cstdint>
+
+namespace v8 {
+namespace internal {
+
+// ISA constants. --------------------------------------------------------------
+
+// The following code initializes float/double variables with bit patterns
+// without using static initializers (which is surprisingly difficult in
+// C++). These variables are used by client code as extern float16,
+// extern float and extern double types, which works because (I think) the
+// linker ignores the types. This is kept in a separate source file to
+// avoid breaking jumbo builds.
+//
+// TODO(mostynb): replace these with std::numeric_limits constexpr's where
+// possible, and figure out how to replace *DefaultNaN with something clean,
+// then move this code back into instructions-arm64.cc with the same types
+// that client code uses.
+
+extern const uint16_t kFP16PositiveInfinity = 0x7c00;
+extern const uint16_t kFP16NegativeInfinity = 0xfc00;
+extern const uint32_t kFP32PositiveInfinity = 0x7f800000;
+extern const uint32_t kFP32NegativeInfinity = 0xff800000;
+extern const uint64_t kFP64PositiveInfinity = 0x7ff0000000000000UL;
+extern const uint64_t kFP64NegativeInfinity = 0xfff0000000000000UL;
+
+// This value is a signalling NaN as both a double and as a float (taking the
+// least-significant word).
+extern const uint64_t kFP64SignallingNaN = 0x7ff000007f800001;
+extern const uint32_t kFP32SignallingNaN = 0x7f800001;
+
+// A similar value, but as a quiet NaN.
+extern const uint64_t kFP64QuietNaN = 0x7ff800007fc00001;
+extern const uint32_t kFP32QuietNaN = 0x7fc00001;
+
+// The default NaN values (for FPCR.DN=1).
+extern const uint64_t kFP64DefaultNaN = 0x7ff8000000000000UL;
+extern const uint32_t kFP32DefaultNaN = 0x7fc00000;
+extern const uint16_t kFP16DefaultNaN = 0x7e00;
+
+} // namespace internal
+} // namespace v8
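A standalone sketch of the linker-level pun the comment describes (hypothetical names; it deliberately mismatches the declared types, which is the wart the TODO wants to clean up):
  // bits.cc
  extern const uint64_t kSketchDefaultNaN = 0x7ff8000000000000UL;
  // client.cc
  extern const double kSketchDefaultNaN;  // same symbol, viewed as a double
  double d = kSketchDefaultNaN;           // reads the bit pattern as an FP value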
diff --git a/deps/v8/src/arm64/instructions-arm64.cc b/deps/v8/src/arm64/instructions-arm64.cc
index f4dbd75533..d6f106b800 100644
--- a/deps/v8/src/arm64/instructions-arm64.cc
+++ b/deps/v8/src/arm64/instructions-arm64.cc
@@ -4,15 +4,12 @@
#if V8_TARGET_ARCH_ARM64
-#define ARM64_DEFINE_FP_STATICS
-
#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/instructions-arm64.h"
namespace v8 {
namespace internal {
-
bool Instruction::IsLoad() const {
if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
return false;
diff --git a/deps/v8/src/arm64/instructions-arm64.h b/deps/v8/src/arm64/instructions-arm64.h
index b6b38166bf..0c59a425cc 100644
--- a/deps/v8/src/arm64/instructions-arm64.h
+++ b/deps/v8/src/arm64/instructions-arm64.h
@@ -18,44 +18,26 @@ namespace internal {
typedef uint32_t Instr;
-// The following macros initialize a float/double variable with a bit pattern
-// without using static initializers: If ARM64_DEFINE_FP_STATICS is defined, the
-// symbol is defined as uint32_t/uint64_t initialized with the desired bit
-// pattern. Otherwise, the same symbol is declared as an external float/double.
-#if defined(ARM64_DEFINE_FP_STATICS)
-#define DEFINE_FLOAT16(name, value) extern const uint16_t name = value
-#define DEFINE_FLOAT(name, value) extern const uint32_t name = value
-#define DEFINE_DOUBLE(name, value) extern const uint64_t name = value
-#else
-#define DEFINE_FLOAT16(name, value) extern const float16 name
-#define DEFINE_FLOAT(name, value) extern const float name
-#define DEFINE_DOUBLE(name, value) extern const double name
-#endif // defined(ARM64_DEFINE_FP_STATICS)
-
-DEFINE_FLOAT16(kFP16PositiveInfinity, 0x7c00);
-DEFINE_FLOAT16(kFP16NegativeInfinity, 0xfc00);
-DEFINE_FLOAT(kFP32PositiveInfinity, 0x7f800000);
-DEFINE_FLOAT(kFP32NegativeInfinity, 0xff800000);
-DEFINE_DOUBLE(kFP64PositiveInfinity, 0x7ff0000000000000UL);
-DEFINE_DOUBLE(kFP64NegativeInfinity, 0xfff0000000000000UL);
+extern const float16 kFP16PositiveInfinity;
+extern const float16 kFP16NegativeInfinity;
+extern const float kFP32PositiveInfinity;
+extern const float kFP32NegativeInfinity;
+extern const double kFP64PositiveInfinity;
+extern const double kFP64NegativeInfinity;
// This value is a signalling NaN as both a double and as a float (taking the
// least-significant word).
-DEFINE_DOUBLE(kFP64SignallingNaN, 0x7ff000007f800001);
-DEFINE_FLOAT(kFP32SignallingNaN, 0x7f800001);
+extern const double kFP64SignallingNaN;
+extern const float kFP32SignallingNaN;
// A similar value, but as a quiet NaN.
-DEFINE_DOUBLE(kFP64QuietNaN, 0x7ff800007fc00001);
-DEFINE_FLOAT(kFP32QuietNaN, 0x7fc00001);
+extern const double kFP64QuietNaN;
+extern const float kFP32QuietNaN;
// The default NaN values (for FPCR.DN=1).
-DEFINE_DOUBLE(kFP64DefaultNaN, 0x7ff8000000000000UL);
-DEFINE_FLOAT(kFP32DefaultNaN, 0x7fc00000);
-DEFINE_FLOAT16(kFP16DefaultNaN, 0x7e00);
-
-#undef DEFINE_FLOAT16
-#undef DEFINE_FLOAT
-#undef DEFINE_DOUBLE
+extern const double kFP64DefaultNaN;
+extern const float kFP32DefaultNaN;
+extern const float16 kFP16DefaultNaN;
unsigned CalcLSDataSize(LoadStoreOp op);
unsigned CalcLSPairDataSize(LoadStorePairOp op);
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index b66477b74e..6f0a600aa2 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -24,9 +24,14 @@ void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
void RecordWriteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // TODO(albertnetymk): Use default for now; should call
- // RestrictAllocatableRegisters like src/x64/interface-descriptors-x64.cc
- DefaultInitializePlatformSpecific(data, kParameterCount);
+ const Register default_stub_registers[] = {x0, x1, x2, x3, x4};
+
+ data->RestrictAllocatableRegisters(default_stub_registers,
+ arraysize(default_stub_registers));
+
+ CHECK_LE(static_cast<size_t>(kParameterCount),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
}
const Register FastNewFunctionContextDescriptor::FunctionRegister() {
@@ -87,38 +92,6 @@ void TypeofDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void FastCloneRegExpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x3: closure
- // x2: object literal index
- // x1: constant properties
- // x0: object literal flags
- Register registers[] = {x3, x2, x1, x0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x3: closure
- // x2: array literal index
- // x1: constant elements
- Register registers[] = {x3, x2, x1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x3: closure
- // x2: object literal index
- // x1: constant properties
- // x0: object literal flags
- Register registers[] = {x3, x2, x1, x0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1 function the function to call
diff --git a/deps/v8/src/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
index 72676acebd..c9da9d12d0 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
@@ -1124,19 +1124,17 @@ void TurboAssembler::SmiUntag(Register dst, Register src) {
void TurboAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); }
-void MacroAssembler::SmiUntagToDouble(VRegister dst, Register src,
- UntagMode mode) {
+void MacroAssembler::SmiUntagToDouble(VRegister dst, Register src) {
DCHECK(dst.Is64Bits() && src.Is64Bits());
- if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
+ if (FLAG_enable_slow_asserts) {
AssertSmi(src);
}
Scvtf(dst, src, kSmiShift);
}
-void MacroAssembler::SmiUntagToFloat(VRegister dst, Register src,
- UntagMode mode) {
+void MacroAssembler::SmiUntagToFloat(VRegister dst, Register src) {
DCHECK(dst.Is32Bits() && src.Is64Bits());
- if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
+ if (FLAG_enable_slow_asserts) {
AssertSmi(src);
}
Scvtf(dst, src, kSmiShift);
@@ -1355,6 +1353,13 @@ void TurboAssembler::Drop(const Register& count, uint64_t unit_size) {
}
}
+void TurboAssembler::DropArguments(const Register& count, uint64_t unit_size) {
+ Drop(count, unit_size);
+}
+
+void TurboAssembler::DropSlots(int64_t count, uint64_t unit_size) {
+ Drop(count, unit_size);
+}
void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
DCHECK(unit_size == 0 || base::bits::IsPowerOfTwo(unit_size));
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 071de92cc4..f10ddceab5 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -8,6 +8,7 @@
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
+#include "src/callable.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
@@ -34,27 +35,67 @@ CPURegList TurboAssembler::DefaultFPTmpList() {
return CPURegList(fp_scratch1, fp_scratch2);
}
-void TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
- Register exclusion1, Register exclusion2,
- Register exclusion3) {
+TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
+ CodeObjectRequired create_code_object)
+ : Assembler(isolate, buffer, buffer_size),
+ isolate_(isolate),
+#if DEBUG
+ allow_macro_instructions_(true),
+#endif
+ tmp_list_(DefaultTmpList()),
+ fptmp_list_(DefaultFPTmpList()),
+ sp_(jssp),
+ use_real_aborts_(true) {
+ if (create_code_object == CodeObjectRequired::kYes) {
+ code_object_ =
+ Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
+ }
+}
+
+int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1,
+ Register exclusion2,
+ Register exclusion3) const {
+ int bytes = 0;
+ auto list = kCallerSaved;
+ list.Remove(exclusion1, exclusion2, exclusion3);
+ bytes += list.Count() * kXRegSizeInBits / 8;
+
+ if (fp_mode == kSaveFPRegs) {
+ bytes += kCallerSavedV.Count() * kDRegSizeInBits / 8;
+ }
+ return bytes;
+}
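// Annotation (illustrative, not part of the patch): with no exclusions this
// returns
//   kCallerSaved.Count() * 8
//       + (fp_mode == kSaveFPRegs ? kCallerSavedV.Count() * 8 : 0)
// bytes, matching what PushCallerSaved()/PopCallerSaved() below actually
// push and pop.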
+
+int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+ Register exclusion2, Register exclusion3) {
+ int bytes = 0;
auto list = kCallerSaved;
list.Remove(exclusion1, exclusion2, exclusion3);
PushCPURegList(list);
+ bytes += list.Count() * kXRegSizeInBits / 8;
if (fp_mode == kSaveFPRegs) {
PushCPURegList(kCallerSavedV);
+ bytes += kCallerSavedV.Count() * kDRegSizeInBits / 8;
}
+ return bytes;
}
-void TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
- Register exclusion2, Register exclusion3) {
+int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+ Register exclusion2, Register exclusion3) {
+ int bytes = 0;
if (fp_mode == kSaveFPRegs) {
PopCPURegList(kCallerSavedV);
+ bytes += kCallerSavedV.Count() * kDRegSizeInBits / 8;
}
auto list = kCallerSaved;
list.Remove(exclusion1, exclusion2, exclusion3);
PopCPURegList(list);
+ bytes += list.Count() * kXRegSizeInBits / 8;
+
+ return bytes;
}
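A minimal usage sketch of the new byte-count return values (illustrative only; the assembler pointer and the excluded register are hypothetical, not from this patch):

    int bytes = tasm->PushCallerSaved(kSaveFPRegs, x2);  // spill caller-saved regs
    // ... emit a call that may clobber caller-saved registers ...
    int restored = tasm->PopCallerSaved(kSaveFPRegs, x2);
    DCHECK_EQ(bytes, restored);  // push and pop adjust the stack symmetrically
    DCHECK_EQ(bytes, tasm->RequiredStackSizeForCallerSaved(kSaveFPRegs, x2));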
void TurboAssembler::LogicalMacro(const Register& rd, const Register& rn,
@@ -1143,45 +1184,6 @@ void TurboAssembler::PopCPURegList(CPURegList registers) {
PopPostamble(registers.Count(), size);
}
-
-void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
- int size = src.SizeInBytes();
-
- PushPreamble(count, size);
-
- if (FLAG_optimize_for_size && count > 8) {
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
-
- Label loop;
- Mov(temp, count / 2);
- Bind(&loop);
- PushHelper(2, size, src, src, NoReg, NoReg);
- Subs(temp, temp, 1);
- B(ne, &loop);
-
- count %= 2;
- }
-
- // Push up to four registers at a time if possible because if the current
- // stack pointer is csp and the register size is 32, registers must be pushed
- // in blocks of four in order to maintain the 16-byte alignment for csp.
- while (count >= 4) {
- PushHelper(4, size, src, src, src, src);
- count -= 4;
- }
- if (count >= 2) {
- PushHelper(2, size, src, src, NoReg, NoReg);
- count -= 2;
- }
- if (count == 1) {
- PushHelper(1, size, src, NoReg, NoReg, NoReg);
- count -= 1;
- }
- DCHECK(count == 0);
-}
-
-
void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
PushPreamble(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));
@@ -1304,6 +1306,8 @@ void TurboAssembler::PopHelper(int count, int size, const CPURegister& dst0,
}
void TurboAssembler::PushPreamble(Operand total_size) {
+ if (total_size.IsZero()) return;
+
if (csp.Is(StackPointer())) {
// If the current stack pointer is csp, then it must be aligned to 16 bytes
// on entry and the total size of the specified registers must also be a
@@ -1323,6 +1327,8 @@ void TurboAssembler::PushPreamble(Operand total_size) {
}
void TurboAssembler::PopPostamble(Operand total_size) {
+ if (total_size.IsZero()) return;
+
if (csp.Is(StackPointer())) {
// If the current stack pointer is csp, then it must be aligned to 16 bytes
// on entry and the total size of the specified registers must also be a
@@ -2040,22 +2046,6 @@ void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, VRegister value,
}
}
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
- Register first, Register second, Register scratch1, Register scratch2,
- Label* failure) {
- DCHECK(!AreAliased(first, second, scratch1, scratch2));
- const int kFlatOneByteStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatOneByteStringTag =
- kStringTag | kOneByteStringTag | kSeqStringTag;
- And(scratch1, first, kFlatOneByteStringMask);
- And(scratch2, second, kFlatOneByteStringMask);
- Cmp(scratch1, kFlatOneByteStringTag);
- Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
- B(ne, failure);
-}
-
-
void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
Label* not_unique_name) {
STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
@@ -2417,10 +2407,8 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
if (type == StackFrame::INTERNAL) {
DCHECK(jssp.Is(StackPointer()));
Mov(type_reg, StackFrame::TypeToMarker(type));
- Push(lr, fp);
- Push(type_reg);
Mov(code_reg, Operand(CodeObject()));
- Push(code_reg);
+ Push(lr, fp, type_reg, code_reg);
Add(fp, jssp, InternalFrameConstants::kFixedFrameSizeFromFp);
// jssp[4] : lr
// jssp[3] : fp
@@ -2431,7 +2419,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
Mov(type_reg, StackFrame::TypeToMarker(type));
Push(lr, fp);
Mov(fp, csp);
- Push(type_reg, xzr);
+ Push(type_reg, padreg);
// csp[3] : lr
// csp[2] : fp
// csp[1] : type
@@ -2439,8 +2427,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
} else {
DCHECK(jssp.Is(StackPointer()));
Mov(type_reg, StackFrame::TypeToMarker(type));
- Push(lr, fp);
- Push(type_reg);
+ Push(lr, fp, type_reg);
Add(fp, jssp, TypedFrameConstants::kFixedFrameSizeFromFp);
// jssp[2] : lr
// jssp[1] : fp
@@ -2466,6 +2453,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
void MacroAssembler::ExitFramePreserveFPRegs() {
+ DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
PushCPURegList(kCallerSavedV);
}
@@ -2485,19 +2473,6 @@ void MacroAssembler::ExitFrameRestoreFPRegs() {
}
}
-void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
- Register argc) {
- Push(lr, fp, context, target);
- add(fp, jssp, Operand(2 * kPointerSize));
- Push(argc);
-}
-
-void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
- Register argc) {
- Pop(argc);
- Pop(target, context, fp, lr);
-}
-
void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
int extra_space,
StackFrame::Type frame_type) {
@@ -2509,20 +2484,21 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
Push(lr, fp);
Mov(fp, StackPointer());
Mov(scratch, StackFrame::TypeToMarker(frame_type));
- Push(scratch);
- Push(xzr);
+ Push(scratch, xzr);
Mov(scratch, Operand(CodeObject()));
- Push(scratch);
+ Push(scratch, padreg);
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
// fp[-8]: STUB marker
// fp[-16]: Space reserved for SPOffset.
- // jssp -> fp[-24]: CodeObject()
+ // fp[-24]: CodeObject()
+ // jssp -> fp[-32]: padding
STATIC_ASSERT((2 * kPointerSize) == ExitFrameConstants::kCallerSPOffset);
STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kSPOffset);
STATIC_ASSERT((-3 * kPointerSize) == ExitFrameConstants::kCodeOffset);
+ STATIC_ASSERT((-4 * kPointerSize) == ExitFrameConstants::kPaddingOffset);
// Save the frame pointer and context pointer in the top frame.
Mov(scratch, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
@@ -2532,15 +2508,18 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
Str(cp, MemOperand(scratch));
- STATIC_ASSERT((-3 * kPointerSize) == ExitFrameConstants::kLastExitFrameField);
+ STATIC_ASSERT((-4 * kPointerSize) == ExitFrameConstants::kLastExitFrameField);
if (save_doubles) {
ExitFramePreserveFPRegs();
}
+ // Round the number of slots we need to claim to a multiple of two.
+ int slots_to_claim = RoundUp(extra_space + 1, 2);
+
// Reserve space for the return address and for user requested memory.
// We do this before aligning to make sure that we end up correctly
// aligned with the minimum of wasted space.
- Claim(extra_space + 1, kXRegSize);
+ Claim(slots_to_claim, kXRegSize);
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
// fp[-8]: STUB marker
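A worked example of the rounding above, assuming kXRegSize == 8 and a csp that must stay 16-byte aligned:

    int extra_space = 2;                               // caller-requested slots
    int slots_to_claim = RoundUp(extra_space + 1, 2);  // +1 return-address slot; RoundUp(3, 2) == 4
    // 4 slots * 8 bytes == 32 bytes, a multiple of 16, so csp stays aligned.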
@@ -2640,110 +2619,6 @@ void MacroAssembler::MaybeDropFrames() {
ne);
}
-void MacroAssembler::PushStackHandler() {
- DCHECK(jssp.Is(StackPointer()));
- // Adjust this code if the asserts don't hold.
- STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
-
- // For the JSEntry handler, we must preserve the live registers x0-x4.
- // (See JSEntryStub::GenerateBody().)
-
- // Link the current handler as the next handler.
- Mov(x11, ExternalReference(IsolateAddressId::kHandlerAddress, isolate()));
- Ldr(x10, MemOperand(x11));
- Push(x10);
-
- // Set this new handler as the current one.
- Str(jssp, MemOperand(x11));
-}
-
-
-void MacroAssembler::PopStackHandler() {
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- Pop(x10);
- Mov(x11, ExternalReference(IsolateAddressId::kHandlerAddress, isolate()));
- Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes);
- Str(x10, MemOperand(x11));
-}
-
-
-void MacroAssembler::Allocate(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
- DCHECK(object_size <= kMaxRegularHeapObjectSize);
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- // We apply salt to the original zap value to easily spot the values.
- Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
- Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
- Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
- }
- B(gc_required);
- return;
- }
-
- UseScratchRegisterScope temps(this);
- Register scratch3 = temps.AcquireX();
-
- DCHECK(!AreAliased(result, scratch1, scratch2, scratch3));
- DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
-
- // Make object size into bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- object_size *= kPointerSize;
- }
- DCHECK(0 == (object_size & kObjectAlignmentMask));
-
- // Check relative positions of allocation top and limit addresses.
- // The values must be adjacent in memory to allow the use of LDP.
- ExternalReference heap_allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
- ExternalReference heap_allocation_limit =
- AllocationUtils::GetAllocationLimitReference(isolate(), flags);
- intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
- intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
- DCHECK((limit - top) == kPointerSize);
-
- // Set up allocation top address and allocation limit registers.
- Register top_address = scratch1;
- Register alloc_limit = scratch2;
- Register result_end = scratch3;
- Mov(top_address, Operand(heap_allocation_top));
-
- if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into alloc_limit.
- Ldp(result, alloc_limit, MemOperand(top_address));
- } else {
- if (emit_debug_code()) {
- // Assert that result actually contains top on entry.
- Ldr(alloc_limit, MemOperand(top_address));
- Cmp(result, alloc_limit);
- Check(eq, kUnexpectedAllocationTop);
- }
- // Load allocation limit. Result already contains allocation top.
- Ldr(alloc_limit, MemOperand(top_address, limit - top));
- }
-
- // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
- // the same alignment on ARM64.
- STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
-
- // Calculate new top and bail out if new space is exhausted.
- Adds(result_end, result, object_size);
- Ccmp(result_end, alloc_limit, NoFlag, cc);
- B(hi, gc_required);
-
- Str(result_end, MemOperand(top_address));
-
- // Tag the object.
- ObjectTag(result, result);
-}
-
void MacroAssembler::JumpIfObjectType(Register object,
Register map,
Register type_reg,
@@ -2755,38 +2630,6 @@ void MacroAssembler::JumpIfObjectType(Register object,
}
-void MacroAssembler::AllocateJSValue(Register result, Register constructor,
- Register value, Register scratch1,
- Register scratch2, Label* gc_required) {
- DCHECK(!result.is(constructor));
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!result.is(value));
-
- // Allocate JSValue in new space.
- Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Initialize the JSValue.
- LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
- Str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
- LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
- Str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOrHashOffset));
- Str(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
- Str(value, FieldMemOperand(result, JSValue::kValueOffset));
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
-}
-
-
-void MacroAssembler::JumpIfNotObjectType(Register object,
- Register map,
- Register type_reg,
- InstanceType type,
- Label* if_not_object) {
- JumpIfObjectType(object, map, type_reg, type, if_not_object, ne);
-}
-
-
// Sets condition flags based on comparison, and returns type in type_reg.
void MacroAssembler::CompareObjectType(Register object,
Register map,
@@ -2806,67 +2649,6 @@ void MacroAssembler::CompareInstanceType(Register map,
}
-void MacroAssembler::CompareObjectMap(Register obj, Heap::RootListIndex index) {
- UseScratchRegisterScope temps(this);
- Register obj_map = temps.AcquireX();
- Ldr(obj_map, FieldMemOperand(obj, HeapObject::kMapOffset));
- CompareRoot(obj_map, index);
-}
-
-
-void MacroAssembler::CompareObjectMap(Register obj, Register scratch,
- Handle<Map> map) {
- Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- CompareMap(scratch, map);
-}
-
-
-void MacroAssembler::CompareMap(Register obj_map,
- Handle<Map> map) {
- Cmp(obj_map, Operand(map));
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
-
- CompareObjectMap(obj, scratch, map);
- B(ne, fail);
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Register scratch,
- Heap::RootListIndex index,
- Label* fail,
- SmiCheckType smi_check_type) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
- Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- JumpIfNotRoot(scratch, index, fail);
-}
-
-
-void MacroAssembler::CheckMap(Register obj_map,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj_map, fail);
- }
-
- CompareMap(obj_map, map);
- B(ne, fail);
-}
-
-
void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
Mov(value, Operand(cell));
Ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
@@ -2899,14 +2681,6 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
Bind(&done);
}
-void MacroAssembler::PushRoot(Heap::RootListIndex index) {
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
- LoadRoot(temp, index);
- Push(temp);
-}
-
-
void MacroAssembler::CompareRoot(const Register& obj,
Heap::RootListIndex index) {
UseScratchRegisterScope temps(this);
@@ -2978,10 +2752,8 @@ bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
}
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
- Register address,
- Register scratch1,
- SaveFPRegsMode fp_mode,
- RememberedSetFinalAction and_then) {
+ Register address, Register scratch1,
+ SaveFPRegsMode fp_mode) {
DCHECK(!AreAliased(object, address, scratch1));
Label done, store_buffer_overflow;
if (emit_debug_code()) {
@@ -3003,13 +2775,8 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
// Call stub on end of buffer.
// Check for end of buffer.
Tst(scratch1, StoreBuffer::kStoreBufferMask);
- if (and_then == kFallThroughAtEnd) {
- B(ne, &done);
- } else {
- DCHECK(and_then == kReturnAtEnd);
- B(eq, &store_buffer_overflow);
- Ret();
- }
+ B(eq, &store_buffer_overflow);
+ Ret();
Bind(&store_buffer_overflow);
Push(lr);
@@ -3018,14 +2785,15 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Pop(lr);
Bind(&done);
- if (and_then == kReturnAtEnd) {
- Ret();
- }
+ Ret();
}
void MacroAssembler::PopSafepointRegisters() {
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+ DCHECK_GE(num_unsaved, 0);
+ DCHECK_EQ(num_unsaved % 2, 0);
+ DCHECK_EQ(kSafepointSavedRegisters % 2, 0);
PopXRegList(kSafepointSavedRegisters);
Drop(num_unsaved);
}
@@ -3035,7 +2803,9 @@ void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of kNumSafepointRegisters values on the stack, so
// adjust the stack for unsaved registers.
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- DCHECK(num_unsaved >= 0);
+ DCHECK_GE(num_unsaved, 0);
+ DCHECK_EQ(num_unsaved % 2, 0);
+ DCHECK_EQ(kSafepointSavedRegisters % 2, 0);
Claim(num_unsaved);
PushXRegList(kSafepointSavedRegisters);
}
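The evenness DCHECKs encode an alignment argument: each X-register slot is 8 bytes and csp must remain 16-byte aligned, so both the unsaved filler and the saved set need even counts. A self-contained check with hypothetical counts:

    int num_unsaved = 2;   // filler slots claimed but not stored
    int num_saved = 14;    // registers in kSafepointSavedRegisters
    assert(num_unsaved % 2 == 0 && num_saved % 2 == 0);
    assert(((num_unsaved + num_saved) * 8) % 16 == 0);  // stack stays aligned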
@@ -3097,17 +2867,12 @@ void TurboAssembler::CheckPageFlagClear(const Register& object,
TestAndBranchIfAllClear(scratch, mask, if_all_clear);
}
-
-void MacroAssembler::RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register scratch,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check,
- PointersToHereCheck pointers_to_here_check_for_value) {
+void MacroAssembler::RecordWriteField(Register object, int offset,
+ Register value, Register scratch,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -3130,14 +2895,8 @@ void MacroAssembler::RecordWriteField(
Bind(&ok);
}
- RecordWrite(object,
- scratch,
- value,
- lr_status,
- save_fp,
- remembered_set_action,
- OMIT_SMI_CHECK,
- pointers_to_here_check_for_value);
+ RecordWrite(object, scratch, value, lr_status, save_fp, remembered_set_action,
+ OMIT_SMI_CHECK);
Bind(&done);
@@ -3149,93 +2908,79 @@ void MacroAssembler::RecordWriteField(
}
}
+void TurboAssembler::SaveRegisters(RegList registers) {
+ DCHECK(NumRegs(registers) > 0);
+ CPURegList regs(lr);
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs.Combine(Register::XRegFromCode(i));
+ }
+ }
-// Will clobber: object, map, dst.
-// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
-void MacroAssembler::RecordWriteForMap(Register object,
- Register map,
- Register dst,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode fp_mode) {
- ASM_LOCATION_IN_ASSEMBLER("MacroAssembler::RecordWrite");
- DCHECK(!AreAliased(object, map));
-
- if (emit_debug_code()) {
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
+ PushCPURegList(regs);
+}
- CompareObjectMap(map, temp, isolate()->factory()->meta_map());
- Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+void TurboAssembler::RestoreRegisters(RegList registers) {
+ DCHECK(NumRegs(registers) > 0);
+ CPURegList regs(lr);
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs.Combine(Register::XRegFromCode(i));
+ }
}
- if (!FLAG_incremental_marking) {
- return;
- }
+ PopCPURegList(regs);
+}
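SaveRegisters/RestoreRegisters walk a RegList bitmask and collect the set bits into a CPURegList. A standalone sketch of the same scan, assuming RegList is a plain register bitmask as on arm64:

    uint64_t registers = (1ull << 0) | (1ull << 3);  // {x0, x3}
    for (int i = 0; i < 64; ++i) {
      if ((registers >> i) & 1u) {
        // Register::XRegFromCode(i) is Combine()d into the CPURegList here.
      }
    }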
- if (emit_debug_code()) {
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
+ // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
+ // i.e. we always emit the remembered set and save FP registers in
+ // RecordWriteStub. If a large performance regression is observed, we should
+ // use these values to avoid unnecessary work.
- Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
- Cmp(temp, map);
- Check(eq, kWrongAddressOrValuePassedToRecordWrite);
- }
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
+ RegList registers = callable.descriptor().allocatable_registers();
- // First, check if a write barrier is even needed. The tests below
- // catch stores of smis and stores into the young generation.
- Label done;
+ SaveRegisters(registers);
- // A single check of the map's pages interesting flag suffices, since it is
- // only set during incremental collection, and then it's also guaranteed that
- // the from object's page's interesting flag is also set. This optimization
- // relies on the fact that maps can never be in new space.
- CheckPageFlagClear(map,
- map, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- &done);
+ Register object_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kObject));
+ Register slot_parameter(
+ callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
+ Register isolate_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kIsolate));
+ Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kRememberedSet));
+ Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kFPMode));
- // Record the actual write.
- if (lr_status == kLRHasNotBeenSaved) {
- Push(lr);
- }
- Add(dst, object, HeapObject::kMapOffset - kHeapObjectTag);
- RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
- fp_mode);
- CallStub(&stub);
- if (lr_status == kLRHasNotBeenSaved) {
- Pop(lr);
- }
+ Push(object);
+ Push(address);
- Bind(&done);
+ Pop(slot_parameter);
+ Pop(object_parameter);
- // Count number of write barriers in generated code.
- isolate()->counters()->write_barriers_static()->Increment();
- IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, map,
- dst);
+ Mov(isolate_parameter, ExternalReference::isolate_address(isolate()));
+ Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
+ Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
+ Call(callable.code(), RelocInfo::CODE_TARGET);
- // Clobber clobbered registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- Mov(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
- Mov(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
- }
+ RestoreRegisters(registers);
}
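Note the Push/Pop shuffle in the middle of CallRecordWriteStub: it routes {object, address} into the descriptor's fixed parameter registers even if the source and destination registers alias. A plain-C analogue of the permutation (values illustrative):

    long stack[2];
    int sp = 0;
    long object = 0x1000, address = 0x2000;
    stack[sp++] = object;                 // Push(object)
    stack[sp++] = address;                // Push(address)
    long slot_parameter = stack[--sp];    // Pop(slot_parameter)   == address
    long object_parameter = stack[--sp];  // Pop(object_parameter) == object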
-
// Will clobber: object, address, value.
// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
//
// The register 'object' contains a heap object pointer. The heap object tag is
// shifted away.
-void MacroAssembler::RecordWrite(
- Register object,
- Register address,
- Register value,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check,
- PointersToHereCheck pointers_to_here_check_for_value) {
+void MacroAssembler::RecordWrite(Register object, Register address,
+ Register value, LinkRegisterStatus lr_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
ASM_LOCATION_IN_ASSEMBLER("MacroAssembler::RecordWrite");
DCHECK(!AreAliased(object, value));
@@ -3257,12 +3002,9 @@ void MacroAssembler::RecordWrite(
JumpIfSmi(value, &done);
}
- if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
- CheckPageFlagClear(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- &done);
- }
+ CheckPageFlagClear(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask, &done);
CheckPageFlagClear(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask,
@@ -3270,13 +3012,17 @@ void MacroAssembler::RecordWrite(
// Record the actual write.
if (lr_status == kLRHasNotBeenSaved) {
- Push(lr);
+ Push(padreg, lr);
}
+#ifdef V8_CSA_WRITE_BARRIER
+ CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
+#else
RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
fp_mode);
CallStub(&stub);
+#endif
if (lr_status == kLRHasNotBeenSaved) {
- Pop(lr);
+ Pop(lr, padreg);
}
Bind(&done);
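As in EnterFrame above, lr is paired with padreg here because a lone 8-byte push would leave csp misaligned. A hedged emission sketch (the surrounding code is hypothetical):

    __ Push(padreg, lr);  // two 8-byte slots keep csp 16-byte aligned
    // ... emit the write barrier call, which clobbers lr ...
    __ Pop(lr, padreg);   // reverse order: restore lr, discard the padding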
@@ -3498,22 +3244,6 @@ void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
}
-void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
- Register map,
- Register scratch) {
- // Load the initial map. The global functions all have initial maps.
- Ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (emit_debug_code()) {
- Label ok, fail;
- CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
- B(&ok);
- Bind(&fail);
- Abort(kGlobalFunctionsMustHaveInitialMap);
- Bind(&ok);
- }
-}
-
-
// This is the main Printf implementation. All other Printf variants call
// PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
void MacroAssembler::PrintfNoPreserve(const char * format,
@@ -3587,12 +3317,12 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
if (kPCSVarargs.IncludesAliasOf(args[i]) ||
kPCSVarargsFP.IncludesAliasOf(args[i])) {
if (args[i].IsRegister()) {
- Register old_arg = Register(args[i]);
+ Register old_arg = args[i].Reg();
Register new_arg = temps.AcquireSameSizeAs(old_arg);
Mov(new_arg, old_arg);
args[i] = new_arg;
} else {
- VRegister old_arg = VRegister(args[i]);
+ VRegister old_arg = args[i].VReg();
VRegister new_arg = temps.AcquireSameSizeAs(old_arg);
Fmov(new_arg, old_arg);
args[i] = new_arg;
@@ -3605,13 +3335,13 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
for (int i = 0; i < arg_count; i++) {
DCHECK(pcs[i].type() == args[i].type());
if (pcs[i].IsRegister()) {
- Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg);
+ Mov(pcs[i].Reg(), args[i].Reg(), kDiscardForSameWReg);
} else {
DCHECK(pcs[i].IsVRegister());
if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) {
- Fmov(VRegister(pcs[i]), VRegister(args[i]));
+ Fmov(pcs[i].VReg(), args[i].VReg());
} else {
- Fcvt(VRegister(pcs[i]), VRegister(args[i]));
+ Fcvt(pcs[i].VReg(), args[i].VReg());
}
}
}
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index 4c1fc3d4b8..170266ca9d 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -151,20 +151,12 @@ inline BranchType InvertBranchType(BranchType type) {
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-enum PointersToHereCheck {
- kPointersToHereMaybeInteresting,
- kPointersToHereAreAlwaysInteresting
-};
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
enum TargetAddressStorageMode {
CAN_INLINE_TARGET_ADDRESS,
NEVER_INLINE_TARGET_ADDRESS
};
-enum UntagMode { kNotSpeculativeUntag, kSpeculativeUntag };
-enum ArrayHasHoles { kArrayCantHaveHoles, kArrayCanHaveHoles };
-enum CopyHint { kCopyUnknown, kCopyShort, kCopyLong };
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
-enum SeqStringSetCharCheckIndexType { kIndexIsSmi, kIndexIsInteger32 };
// The macro assembler supports moving automatically pre-shifted immediates for
// arithmetic and logical instructions, and then applying a post shift in the
@@ -184,21 +176,7 @@ enum PreShiftImmMode {
class TurboAssembler : public Assembler {
public:
TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
- CodeObjectRequired create_code_object)
- : Assembler(isolate, buffer, buffer_size),
- isolate_(isolate),
-#if DEBUG
- allow_macro_instructions_(true),
-#endif
- tmp_list_(DefaultTmpList()),
- fptmp_list_(DefaultFPTmpList()),
- sp_(jssp),
- use_real_aborts_(true) {
- if (create_code_object == CodeObjectRequired::kYes) {
- code_object_ =
- Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
- }
- }
+ CodeObjectRequired create_code_object);
// The Abort method should call a V8 runtime function, but the CallRuntime
// mechanism depends on CEntryStub. If use_real_aborts is false, Abort will
@@ -702,6 +680,20 @@ class TurboAssembler : public Assembler {
inline void Drop(int64_t count, uint64_t unit_size = kXRegSize);
inline void Drop(const Register& count, uint64_t unit_size = kXRegSize);
+ // Drop arguments from the stack without actually accessing memory.
+ // This currently drops 'count' arguments of the given size from the
+ // stack.
+ // TODO(arm64): Update this to round up the number of bytes dropped to
+ // a multiple of 16, so that we can remove jssp.
+ inline void DropArguments(const Register& count,
+ uint64_t unit_size = kXRegSize);
+
+ // Drop slots from the stack without actually accessing memory.
+ // This currently drops 'count' slots of the given size from the stack.
+ // TODO(arm64): Update this to round up the number of bytes dropped to
+ // a multiple of 16, so that we can remove jssp.
+ inline void DropSlots(int64_t count, uint64_t unit_size = kXRegSize);
+
// Re-synchronizes the system stack pointer (csp) with the current stack
// pointer (according to StackPointer()).
//
@@ -787,6 +779,13 @@ class TurboAssembler : public Assembler {
inline void push(Register src) { Push(src); }
inline void pop(Register dst) { Pop(dst); }
+ void SaveRegisters(RegList registers);
+ void RestoreRegisters(RegList registers);
+
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode);
+
// Alternative forms of Push and Pop, taking a RegList or CPURegList that
// specifies the registers that are to be pushed or popped. Higher-numbered
// registers are associated with higher memory addresses (as in the A32 push
@@ -800,12 +799,23 @@ class TurboAssembler : public Assembler {
void PushCPURegList(CPURegList registers);
void PopCPURegList(CPURegList registers);
- void PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
- Register exclusion2 = no_reg,
- Register exclusion3 = no_reg);
- void PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ // Calculate how much stack space (in bytes) is required to store caller-saved
+ // registers, excluding those specified in the arguments.
+ int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg) const;
+
+ // Push caller-saved registers on the stack and return the number of bytes by
+ // which the stack pointer is adjusted.
+ int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
+ // Restore caller-saved registers from the stack and return the number of
+ // bytes by which the stack pointer is adjusted.
+ int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
// Move an immediate into register dst, and return an Operand object for use
// with a subsequent instruction that accepts a shift. The value moved into
@@ -1576,7 +1586,6 @@ class MacroAssembler : public TurboAssembler {
// Push the specified register 'count' times.
void PushMultipleTimes(CPURegister src, Register count);
- void PushMultipleTimes(CPURegister src, int count);
// Sometimes callers need to push or pop multiple registers in a way that is
// difficult to structure efficiently for fixed Push or Pop calls. This scope
@@ -1724,10 +1733,8 @@ class MacroAssembler : public TurboAssembler {
inline void SmiTag(Register dst, Register src);
inline void SmiTag(Register smi);
- inline void SmiUntagToDouble(VRegister dst, Register src,
- UntagMode mode = kNotSpeculativeUntag);
- inline void SmiUntagToFloat(VRegister dst, Register src,
- UntagMode mode = kNotSpeculativeUntag);
+ inline void SmiUntagToDouble(VRegister dst, Register src);
+ inline void SmiUntagToFloat(VRegister dst, Register src);
// Tag and push in one step.
inline void SmiTagAndPush(Register src);
@@ -1794,12 +1801,6 @@ class MacroAssembler : public TurboAssembler {
// ---- String Utilities ----
- // Checks if both instance types are sequential one-byte strings and jumps to
- // label if either is not.
- void JumpIfBothInstanceTypesAreNotSequentialOneByte(
- Register first_object_instance_type, Register second_object_instance_type,
- Register scratch1, Register scratch2, Label* failure);
-
void JumpIfNotUniqueNameInstanceType(Register type, Label* not_unique_name);
// ---- Calling / Jumping helpers ----
@@ -1863,38 +1864,6 @@ class MacroAssembler : public TurboAssembler {
// Frame restart support
void MaybeDropFrames();
- // Exception handling
-
- // Push a new stack handler and link into stack handler chain.
- void PushStackHandler();
-
- // Unlink the stack handler on top of the stack from the stack handler chain.
- // Must preserve the result register.
- void PopStackHandler();
-
-
- // ---------------------------------------------------------------------------
- // Allocation support
-
- // Allocate an object in new space or old space. The object_size is
- // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
- // is passed. The allocated object is returned in result.
- //
- // If the new space is exhausted control continues at the gc_required label.
- // In this case, the result and scratch registers may still be clobbered.
- void Allocate(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
-
- // Allocate and initialize a JSValue wrapper with the specified {constructor}
- // and {value}.
- void AllocateJSValue(Register result, Register constructor, Register value,
- Register scratch1, Register scratch2,
- Label* gc_required);
-
// ---------------------------------------------------------------------------
// Support functions.
@@ -1930,12 +1899,6 @@ class MacroAssembler : public TurboAssembler {
Label* if_cond_pass,
Condition cond = eq);
- void JumpIfNotObjectType(Register object,
- Register map,
- Register type_reg,
- InstanceType type,
- Label* if_not_object);
-
// Compare instance type in a map. map contains a valid map object whose
// object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
@@ -1943,43 +1906,6 @@ class MacroAssembler : public TurboAssembler {
Register type_reg,
InstanceType type);
- // Compare an object's map with the specified map. Condition flags are set
- // with result of map compare.
- void CompareObjectMap(Register obj, Heap::RootListIndex index);
-
- // Compare an object's map with the specified map. Condition flags are set
- // with result of map compare.
- void CompareObjectMap(Register obj, Register scratch, Handle<Map> map);
-
- // As above, but the map of the object is already loaded into the register
- // which is preserved by the code generated.
- void CompareMap(Register obj_map,
- Handle<Map> map);
-
- // Check if the map of an object is equal to a specified map and branch to
- // label if not. Skip the smi check if not required (object is known to be a
- // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
- // against maps that are ElementsKind transition maps of the specified map.
- void CheckMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type);
-
-
- void CheckMap(Register obj,
- Register scratch,
- Heap::RootListIndex index,
- Label* fail,
- SmiCheckType smi_check_type);
-
- // As above, but the map of the object is already loaded into obj_map, and is
- // preserved.
- void CheckMap(Register obj_map,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type);
-
void GetWeakValue(Register value, Handle<WeakCell> cell);
// Load the value of the weak cell in the value register. Branch to the given
@@ -1990,9 +1916,6 @@ class MacroAssembler : public TurboAssembler {
// register.
void LoadElementsKindFromMap(Register result, Register map);
- // Load the value from the root list and push it onto the stack.
- void PushRoot(Heap::RootListIndex index);
-
// Compare the object in a register to a value from the root list.
void CompareRoot(const Register& obj, Heap::RootListIndex index);
@@ -2026,9 +1949,6 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Frames.
- void EnterBuiltinFrame(Register context, Register target, Register argc);
- void LeaveBuiltinFrame(Register context, Register target, Register argc);
-
// The stack pointer has to switch between csp and jssp when setting up and
// destroying the exit frame. Hence preserving/restoring the registers is
// slightly more complicated than simple push/pop operations.
@@ -2077,11 +1997,6 @@ class MacroAssembler : public TurboAssembler {
const Register& scratch,
bool restore_context);
- // Load the global object from the current context.
- void LoadGlobalObject(Register dst) {
- LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
- }
-
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst) {
LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
@@ -2098,19 +2013,12 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Garbage collector support (GC).
- enum RememberedSetFinalAction {
- kReturnAtEnd,
- kFallThroughAtEnd
- };
-
// Record in the remembered set the fact that we have a pointer to new space
// at the address pointed to by the addr register. Only works if addr is not
// in new space.
void RememberedSetHelper(Register object, // Used for debug code.
- Register addr,
- Register scratch1,
- SaveFPRegsMode save_fp,
- RememberedSetFinalAction and_then);
+ Register addr, Register scratch1,
+ SaveFPRegsMode save_fp);
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
@@ -2138,61 +2046,19 @@ class MacroAssembler : public TurboAssembler {
// The offset is the offset from the start of the object, not the offset from
// the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
void RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register scratch,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting);
-
- // As above, but the offset has the tag presubtracted. For use with
- // MemOperand(reg, off).
- inline void RecordWriteContextSlot(
- Register context,
- int offset,
- Register value,
- Register scratch,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode save_fp,
+ Register object, int offset, Register value, Register scratch,
+ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting) {
- RecordWriteField(context,
- offset + kHeapObjectTag,
- value,
- scratch,
- lr_status,
- save_fp,
- remembered_set_action,
- smi_check,
- pointers_to_here_check_for_value);
- }
-
- void RecordWriteForMap(
- Register object,
- Register map,
- Register dst,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode save_fp);
+ SmiCheck smi_check = INLINE_SMI_CHECK);
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
// address registers are clobbered by the operation.
void RecordWrite(
- Register object,
- Register address,
- Register value,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode save_fp,
+ Register object, Register address, Register value,
+ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting);
+ SmiCheck smi_check = INLINE_SMI_CHECK);
// Checks the color of an object. If the object is white we jump to the
// incremental marker.
@@ -2240,12 +2106,6 @@ class MacroAssembler : public TurboAssembler {
void LoadNativeContextSlot(int index, Register dst);
- // Load the initial map from the global function. The registers function and
- // map can be the same, function is then overwritten.
- void LoadGlobalFunctionInitialMap(Register function,
- Register map,
- Register scratch);
-
// Like printf, but print at run-time from generated code.
//
// The caller must ensure that arguments for floating-point placeholders
@@ -2281,11 +2141,6 @@ class MacroAssembler : public TurboAssembler {
const CPURegister& arg2 = NoCPUReg,
const CPURegister& arg3 = NoCPUReg);
- // Return true if the sequence is a young sequence generated by
- // EmitFrameSetupForCodeAgePatching. Otherwise, this method asserts that the
- // sequence is a code age sequence (emitted by EmitCodeAgeSequence).
- static bool IsYoungSequence(Isolate* isolate, byte* sequence);
-
private:
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object,
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index 231f4efd98..9881bae26b 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -231,8 +231,8 @@ void Simulator::CheckPCSComplianceAndRun() {
isolate_->stack_guard()->AdjustStackLimitForSimulator();
#ifdef DEBUG
- CHECK_EQ(kNumberOfCalleeSavedRegisters, kCalleeSaved.Count());
- CHECK_EQ(kNumberOfCalleeSavedVRegisters, kCalleeSavedV.Count());
+ DCHECK_EQ(kNumberOfCalleeSavedRegisters, kCalleeSaved.Count());
+ DCHECK_EQ(kNumberOfCalleeSavedVRegisters, kCalleeSavedV.Count());
int64_t saved_registers[kNumberOfCalleeSavedRegisters];
uint64_t saved_fpregisters[kNumberOfCalleeSavedVRegisters];
@@ -254,12 +254,12 @@ void Simulator::CheckPCSComplianceAndRun() {
// Start the simulation!
Run();
#ifdef DEBUG
- CHECK_EQ(original_stack, sp());
+ DCHECK_EQ(original_stack, sp());
// Check that callee-saved registers have been preserved.
register_list = kCalleeSaved;
fpregister_list = kCalleeSavedV;
for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
- CHECK_EQ(saved_registers[i], xreg(register_list.PopLowestIndex().code()));
+ DCHECK_EQ(saved_registers[i], xreg(register_list.PopLowestIndex().code()));
}
for (int i = 0; i < kNumberOfCalleeSavedVRegisters; i++) {
DCHECK(saved_fpregisters[i] ==
@@ -549,11 +549,6 @@ typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0, int64_t arg1,
int64_t arg6, int64_t arg7,
int64_t arg8);
-typedef ObjectTriple (*SimulatorRuntimeTripleCall)(int64_t arg0, int64_t arg1,
- int64_t arg2, int64_t arg3,
- int64_t arg4, int64_t arg5,
- int64_t arg6, int64_t arg7);
-
typedef int64_t (*SimulatorRuntimeCompareCall)(double arg1, double arg2);
typedef double (*SimulatorRuntimeFPFPCall)(double arg1, double arg2);
typedef double (*SimulatorRuntimeFPCall)(double arg1);
@@ -643,38 +638,6 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
break;
}
- case ExternalReference::BUILTIN_CALL_TRIPLE: {
- // ObjectTriple f(v8::internal::Arguments).
- TraceSim("Type: BUILTIN_CALL TRIPLE\n");
- SimulatorRuntimeTripleCall target =
- reinterpret_cast<SimulatorRuntimeTripleCall>(external);
-
- // We don't know how many arguments are being passed, but we can
- // pass 8 without touching the stack. They will be ignored by the
- // host function if they aren't used.
- TraceSim(
- "Arguments: "
- "0x%016" PRIx64 ", 0x%016" PRIx64
- ", "
- "0x%016" PRIx64 ", 0x%016" PRIx64
- ", "
- "0x%016" PRIx64 ", 0x%016" PRIx64
- ", "
- "0x%016" PRIx64 ", 0x%016" PRIx64,
- arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
- // Return location passed in x8.
- ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(xreg(8));
- ObjectTriple result =
- target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
- TraceSim("Returned: {%p, %p, %p}\n", static_cast<void*>(result.x),
- static_cast<void*>(result.y), static_cast<void*>(result.z));
-#ifdef DEBUG
- CorruptAllCallerSavedCPURegisters();
-#endif
- *sim_result = result;
- break;
- }
-
case ExternalReference::DIRECT_API_CALL: {
// void f(v8::FunctionCallbackInfo&)
TraceSim("Type: DIRECT_API_CALL\n");
diff --git a/deps/v8/src/arm64/utils-arm64.cc b/deps/v8/src/arm64/utils-arm64.cc
index 38ec8478fc..26369d9875 100644
--- a/deps/v8/src/arm64/utils-arm64.cc
+++ b/deps/v8/src/arm64/utils-arm64.cc
@@ -132,6 +132,7 @@ int MaskToBit(uint64_t mask) {
return CountTrailingZeros(mask, 64);
}
+#undef __
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc
index 2017bf6f3e..97da2c2af2 100644
--- a/deps/v8/src/asmjs/asm-js.cc
+++ b/deps/v8/src/asmjs/asm-js.cc
@@ -21,11 +21,11 @@
#include "src/parsing/scanner-character-streams.h"
#include "src/parsing/scanner.h"
+#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-module-builder.h"
-#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
namespace v8 {
@@ -248,8 +248,7 @@ CompilationJob::Status AsmJsCompilationJob::ExecuteJobImpl() {
allow_deref.emplace();
DCHECK(!compilation_info()->isolate()->has_pending_exception());
- ReportCompilationFailure(compilation_info()->script(),
- parser.failure_location(),
+ ReportCompilationFailure(parse_info()->script(), parser.failure_location(),
parser.failure_message());
return FAILED;
}
@@ -268,6 +267,21 @@ CompilationJob::Status AsmJsCompilationJob::ExecuteJobImpl() {
->asm_wasm_translation_peak_memory_bytes()
->AddSample(static_cast<int>(translate_zone_size));
translate_time_ = translate_timer.Elapsed().InMillisecondsF();
+ int module_size = compilation_info()->literal()->end_position() -
+ compilation_info()->literal()->start_position();
+ compilation_info()->isolate()->counters()->asm_module_size_bytes()->AddSample(
+ module_size);
+ int64_t translate_time_micro = translate_timer.Elapsed().InMicroseconds();
+ int translation_throughput =
+ translate_time_micro != 0
+ ? static_cast<int>(static_cast<int64_t>(module_size) /
+ translate_time_micro)
+ : 0;
+ compilation_info()
+ ->isolate()
+ ->counters()
+ ->asm_wasm_translation_throughput()
+ ->AddSample(translation_throughput);
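The throughput sample works out to source bytes per microsecond of translation; a worked example with illustrative numbers:

    int module_size = 1 << 20;            // 1 MiB of asm.js source
    int64_t translate_time_micro = 4000;  // 4 ms
    int translation_throughput =
        translate_time_micro != 0
            ? static_cast<int>(module_size / translate_time_micro)
            : 0;                          // 1048576 / 4000 == 262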
if (FLAG_trace_asm_parser) {
PrintF(
"[asm.js translation successful: time=%0.3fms, "
@@ -291,7 +305,7 @@ CompilationJob::Status AsmJsCompilationJob::FinalizeJobImpl() {
SyncCompileTranslatedAsmJs(
compilation_info()->isolate(), &thrower,
wasm::ModuleWireBytes(module_->begin(), module_->end()),
- compilation_info()->script(),
+ parse_info()->script(),
Vector<const byte>(asm_offsets_->begin(), asm_offsets_->size()))
.ToHandleChecked();
DCHECK(!thrower.error());
@@ -307,7 +321,7 @@ CompilationJob::Status AsmJsCompilationJob::FinalizeJobImpl() {
compilation_info()->SetCode(
BUILTIN_CODE(compilation_info()->isolate(), InstantiateAsmJs));
- ReportCompilationSuccess(compilation_info()->script(),
+ ReportCompilationSuccess(parse_info()->script(),
compilation_info()->literal()->position(),
translate_time_, compile_time_, module_->size());
return SUCCEEDED;
diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc
index b5d852fbe4..d0eb1050f6 100644
--- a/deps/v8/src/asmjs/asm-parser.cc
+++ b/deps/v8/src/asmjs/asm-parser.cc
@@ -12,8 +12,9 @@
#include "src/asmjs/asm-js.h"
#include "src/asmjs/asm-types.h"
#include "src/base/optional.h"
-#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker.
+#include "src/flags.h"
#include "src/parsing/scanner.h"
+#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-opcodes.h"
namespace v8 {
@@ -103,13 +104,15 @@ void AsmJsParser::InitializeStdlibTypes() {
stdlib_dqdq2d_->AsFunctionType()->AddArgument(dq);
auto* f = AsmType::Float();
+ auto* fh = AsmType::Floatish();
auto* fq = AsmType::FloatQ();
- stdlib_fq2f_ = AsmType::Function(zone(), f);
- stdlib_fq2f_->AsFunctionType()->AddArgument(fq);
+ auto* fq2fh = AsmType::Function(zone(), fh);
+ fq2fh->AsFunctionType()->AddArgument(fq);
auto* s = AsmType::Signed();
- auto* s2s = AsmType::Function(zone(), s);
- s2s->AsFunctionType()->AddArgument(s);
+ auto* u = AsmType::Unsigned();
+ auto* s2u = AsmType::Function(zone(), u);
+ s2u->AsFunctionType()->AddArgument(s);
auto* i = AsmType::Int();
stdlib_i2s_ = AsmType::Function(zone_, s);
@@ -119,24 +122,36 @@ void AsmJsParser::InitializeStdlibTypes() {
stdlib_ii2s_->AsFunctionType()->AddArgument(i);
stdlib_ii2s_->AsFunctionType()->AddArgument(i);
+ // The signatures in "9 Standard Library" of the spec draft are outdated and
+ // have been superseded with the following by an errata:
+ // - Math.min/max : (signed, signed...) -> signed
+ // (double, double...) -> double
+ // (float, float...) -> float
auto* minmax_d = AsmType::MinMaxType(zone(), d, d);
- // *VIOLATION* The float variant is not part of the spec, but firefox accepts
- // it.
auto* minmax_f = AsmType::MinMaxType(zone(), f, f);
- auto* minmax_i = AsmType::MinMaxType(zone(), s, i);
+ auto* minmax_s = AsmType::MinMaxType(zone(), s, s);
stdlib_minmax_ = AsmType::OverloadedFunction(zone());
- stdlib_minmax_->AsOverloadedFunctionType()->AddOverload(minmax_i);
+ stdlib_minmax_->AsOverloadedFunctionType()->AddOverload(minmax_s);
stdlib_minmax_->AsOverloadedFunctionType()->AddOverload(minmax_f);
stdlib_minmax_->AsOverloadedFunctionType()->AddOverload(minmax_d);
+ // The signatures in "9 Standard Library" of the spec draft are outdated and
+ // have been superseded with the following by an errata:
+ // - Math.abs : (signed) -> unsigned
+ // (double?) -> double
+ // (float?) -> floatish
stdlib_abs_ = AsmType::OverloadedFunction(zone());
- stdlib_abs_->AsOverloadedFunctionType()->AddOverload(s2s);
+ stdlib_abs_->AsOverloadedFunctionType()->AddOverload(s2u);
stdlib_abs_->AsOverloadedFunctionType()->AddOverload(stdlib_dq2d_);
- stdlib_abs_->AsOverloadedFunctionType()->AddOverload(stdlib_fq2f_);
+ stdlib_abs_->AsOverloadedFunctionType()->AddOverload(fq2fh);
+ // The signatures in "9 Standard Library" of the spec draft are outdated and
+ // have been superseded with the following by an errata:
+ // - Math.ceil/floor/sqrt : (double?) -> double
+ // (float?) -> floatish
stdlib_ceil_like_ = AsmType::OverloadedFunction(zone());
stdlib_ceil_like_->AsOverloadedFunctionType()->AddOverload(stdlib_dq2d_);
- stdlib_ceil_like_->AsOverloadedFunctionType()->AddOverload(stdlib_fq2f_);
+ stdlib_ceil_like_->AsOverloadedFunctionType()->AddOverload(fq2fh);
stdlib_fround_ = AsmType::FroundType(zone());
}
@@ -767,6 +782,11 @@ void AsmJsParser::ValidateFunction() {
current_function_builder_->AddLocal(kWasmI32);
}
+ // Check against limit on number of local variables.
+ if (locals.size() + function_temp_locals_used_ > kV8MaxWasmFunctionLocals) {
+ FAIL("Number of local variables exceeds internal limit");
+ }
+
// End function
current_function_builder_->Emit(kExprEnd);
@@ -852,6 +872,7 @@ void AsmJsParser::ValidateFunctionParams(ZoneVector<AsmType*>* params) {
// 6.4 ValidateFunction - locals
void AsmJsParser::ValidateFunctionLocals(size_t param_count,
ZoneVector<ValueType>* locals) {
+ DCHECK(locals->empty());
// Local Variables.
while (Peek(TOK(var))) {
scanner_.EnterLocalScope();
@@ -2200,12 +2221,18 @@ AsmType* AsmJsParser::ValidateCall() {
} else if (callable->CanBeInvokedWith(AsmType::Float(),
param_specific_types)) {
return_type = AsmType::Float();
+ } else if (callable->CanBeInvokedWith(AsmType::Floatish(),
+ param_specific_types)) {
+ return_type = AsmType::Floatish();
} else if (callable->CanBeInvokedWith(AsmType::Double(),
param_specific_types)) {
return_type = AsmType::Double();
} else if (callable->CanBeInvokedWith(AsmType::Signed(),
param_specific_types)) {
return_type = AsmType::Signed();
+ } else if (callable->CanBeInvokedWith(AsmType::Unsigned(),
+ param_specific_types)) {
+ return_type = AsmType::Unsigned();
} else {
FAILn("Function use doesn't match definition");
}
@@ -2248,7 +2275,7 @@ AsmType* AsmJsParser::ValidateCall() {
current_function_builder_->Emit(kExprF32Max);
}
}
- } else if (param_specific_types[0]->IsA(AsmType::Int())) {
+ } else if (param_specific_types[0]->IsA(AsmType::Signed())) {
TemporaryVariableScope tmp_x(this);
TemporaryVariableScope tmp_y(this);
for (size_t i = 1; i < param_specific_types.size(); ++i) {
@@ -2275,14 +2302,13 @@ AsmType* AsmJsParser::ValidateCall() {
if (param_specific_types[0]->IsA(AsmType::Signed())) {
TemporaryVariableScope tmp(this);
current_function_builder_->EmitTeeLocal(tmp.get());
- current_function_builder_->Emit(kExprI32Clz);
- current_function_builder_->EmitWithU8(kExprIf, kLocalI32);
current_function_builder_->EmitGetLocal(tmp.get());
- current_function_builder_->Emit(kExprElse);
- current_function_builder_->EmitI32Const(0);
+ current_function_builder_->EmitI32Const(31);
+ current_function_builder_->Emit(kExprI32ShrS);
+ current_function_builder_->EmitTeeLocal(tmp.get());
+ current_function_builder_->Emit(kExprI32Xor);
current_function_builder_->EmitGetLocal(tmp.get());
current_function_builder_->Emit(kExprI32Sub);
- current_function_builder_->Emit(kExprEnd);
} else if (param_specific_types[0]->IsA(AsmType::DoubleQ())) {
current_function_builder_->Emit(kExprF64Abs);
} else if (param_specific_types[0]->IsA(AsmType::FloatQ())) {
@@ -2293,12 +2319,9 @@ AsmType* AsmJsParser::ValidateCall() {
break;
case VarKind::kMathFround:
- if (param_specific_types[0]->IsA(AsmType::DoubleQ())) {
- current_function_builder_->Emit(kExprF32ConvertF64);
- } else {
- DCHECK(param_specific_types[0]->IsA(AsmType::FloatQ()));
- }
- break;
+ // NOTE: Handled specially in {AsmJsParser::CallExpression} and treated
+ // as a coercion to the "float" type. Cannot be reached as a call here.
+ UNREACHABLE();
default:
UNREACHABLE();
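The new kMathAbs opcode sequence above is a branchless absolute value. Equivalent C, assuming arithmetic right shift on int32_t (which wasm's i32.shr_s guarantees):

    int32_t x = -5;
    int32_t t = x >> 31;          // 0 if x >= 0, -1 if x < 0
    int32_t abs_x = (x ^ t) - t;  // (-5 ^ -1) - (-1) == 4 + 1 == 5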
diff --git a/deps/v8/src/asmjs/asm-parser.h b/deps/v8/src/asmjs/asm-parser.h
index 1b0539ee38..bddb8c62e9 100644
--- a/deps/v8/src/asmjs/asm-parser.h
+++ b/deps/v8/src/asmjs/asm-parser.h
@@ -183,7 +183,6 @@ class AsmJsParser {
// Types used for stdlib function and their set up.
AsmType* stdlib_dq2d_;
AsmType* stdlib_dqdq2d_;
- AsmType* stdlib_fq2f_;
AsmType* stdlib_i2s_;
AsmType* stdlib_ii2s_;
AsmType* stdlib_minmax_;
diff --git a/deps/v8/src/asmjs/asm-scanner.cc b/deps/v8/src/asmjs/asm-scanner.cc
index 5d2c8b1fd1..fe9cabf9d6 100644
--- a/deps/v8/src/asmjs/asm-scanner.cc
+++ b/deps/v8/src/asmjs/asm-scanner.cc
@@ -203,6 +203,7 @@ std::string AsmJsScanner::Name(token_t token) const {
SPECIAL_TOKEN_LIST(V)
default:
break;
+#undef V
}
UNREACHABLE();
}
diff --git a/deps/v8/src/asmjs/switch-logic.cc b/deps/v8/src/asmjs/switch-logic.cc
index 93544da9b4..e12b3a33f1 100644
--- a/deps/v8/src/asmjs/switch-logic.cc
+++ b/deps/v8/src/asmjs/switch-logic.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <algorithm>
+
#include "src/asmjs/switch-logic.h"
namespace v8 {
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 35238081f9..b36c494129 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -52,6 +52,7 @@
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/disassembler.h"
+#include "src/elements.h"
#include "src/execution.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
@@ -300,14 +301,10 @@ const int kLastChunkTagBits = 1;
const int kLastChunkTagMask = 1;
const int kLastChunkTag = 1;
-void RelocInfo::update_wasm_memory_reference(
- Isolate* isolate, Address old_base, Address new_base,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsWasmMemoryReference(rmode_));
- Address updated_reference = new_base + (wasm_memory_reference() - old_base);
- // The reference is not checked here but at runtime. Validity of references
- // may change over time.
- set_embedded_address(isolate, updated_reference, icache_flush_mode);
+void RelocInfo::set_wasm_context_reference(Isolate* isolate, Address address,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsWasmContextReference(rmode_));
+ set_embedded_address(isolate, address, icache_flush_mode);
}
void RelocInfo::set_global_handle(Isolate* isolate, Address address,
@@ -321,16 +318,6 @@ Address RelocInfo::global_handle() const {
return embedded_address();
}
-void RelocInfo::update_wasm_memory_size(Isolate* isolate, uint32_t old_size,
- uint32_t new_size,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsWasmMemorySizeReference(rmode_));
- uint32_t current_size_reference = wasm_memory_size_reference();
- uint32_t updated_size_reference =
- new_size + (current_size_reference - old_size);
- set_embedded_size(isolate, updated_size_reference, icache_flush_mode);
-}
-
void RelocInfo::update_wasm_global_reference(
Isolate* isolate, Address old_base, Address new_base,
ICacheFlushMode icache_flush_mode) {
@@ -352,13 +339,8 @@ uint32_t RelocInfo::wasm_function_table_size_reference() const {
return embedded_size();
}
-uint32_t RelocInfo::wasm_memory_size_reference() const {
- DCHECK(IsWasmMemorySizeReference(rmode_));
- return embedded_size();
-}
-
-Address RelocInfo::wasm_memory_reference() const {
- DCHECK(IsWasmMemoryReference(rmode_));
+Address RelocInfo::wasm_context_reference() const {
+ DCHECK(IsWasmContextReference(rmode_));
return embedded_address();
}
@@ -659,10 +641,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "constant pool";
case VENEER_POOL:
return "veneer pool";
- case WASM_MEMORY_REFERENCE:
- return "wasm memory reference";
- case WASM_MEMORY_SIZE_REFERENCE:
- return "wasm memory size reference";
+ case WASM_CONTEXT_REFERENCE:
+ return "wasm context reference";
case WASM_GLOBAL_REFERENCE:
return "wasm global value reference";
case WASM_FUNCTION_TABLE_SIZE_REFERENCE:
@@ -750,8 +730,7 @@ void RelocInfo::Verify(Isolate* isolate) {
case DEOPT_ID:
case CONST_POOL:
case VENEER_POOL:
- case WASM_MEMORY_REFERENCE:
- case WASM_MEMORY_SIZE_REFERENCE:
+ case WASM_CONTEXT_REFERENCE:
case WASM_GLOBAL_REFERENCE:
case WASM_FUNCTION_TABLE_SIZE_REFERENCE:
case WASM_GLOBAL_HANDLE:
@@ -776,8 +755,6 @@ static ExternalReference::Type BuiltinCallTypeForResultSize(int result_size) {
return ExternalReference::BUILTIN_CALL;
case 2:
return ExternalReference::BUILTIN_CALL_PAIR;
- case 3:
- return ExternalReference::BUILTIN_CALL_TRIPLE;
}
UNREACHABLE();
}
@@ -813,6 +790,10 @@ ExternalReference ExternalReference::isolate_address(Isolate* isolate) {
return ExternalReference(isolate);
}
+ExternalReference ExternalReference::builtins_address(Isolate* isolate) {
+ return ExternalReference(isolate->builtins()->builtins_table_address());
+}
+
ExternalReference ExternalReference::interpreter_dispatch_table_address(
Isolate* isolate) {
return ExternalReference(isolate->interpreter()->dispatch_table_address());
@@ -1033,6 +1014,18 @@ ExternalReference ExternalReference::wasm_float64_pow(Isolate* isolate) {
Redirect(isolate, FUNCTION_ADDR(wasm::float64_pow_wrapper)));
}
+ExternalReference ExternalReference::wasm_set_thread_in_wasm_flag(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::set_thread_in_wasm_flag)));
+}
+
+ExternalReference ExternalReference::wasm_clear_thread_in_wasm_flag(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::clear_thread_in_wasm_flag)));
+}
+
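
The two new external references let generated code flip the trap handler's thread-local "in wasm" flag, so a fault raised by a wasm memory access can be told apart from a genuine VM crash. A hedged sketch of how a JS-to-wasm wrapper might use them (CallCFunction and both getters exist; the emission sequence here is illustrative only):

    ExternalReference set_flag =
        ExternalReference::wasm_set_thread_in_wasm_flag(isolate);
    __ CallCFunction(set_flag, 0);    // entering wasm: faults become wasm traps
    // ... call into the wasm function ...
    ExternalReference clear_flag =
        ExternalReference::wasm_clear_thread_in_wasm_flag(isolate);
    __ CallCFunction(clear_flag, 0);  // leaving wasm: restore normal handling
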
static void f64_mod_wrapper(double* param0, double* param1) {
WriteDoubleValue(param0,
modulo(ReadDoubleValue(param0), ReadDoubleValue(param1)));
@@ -1422,6 +1415,19 @@ ExternalReference ExternalReference::get_or_create_hash_raw(Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f)));
}
+ExternalReference
+ExternalReference::copy_fast_number_jsarray_elements_to_typed_array(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate, FUNCTION_ADDR(CopyFastNumberJSArrayElementsToTypedArray)));
+}
+
+ExternalReference ExternalReference::copy_typed_array_elements_to_typed_array(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(CopyTypedArrayElementsToTypedArray)));
+}
+
ExternalReference ExternalReference::try_internalize_string_function(
Isolate* isolate) {
return ExternalReference(Redirect(
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index aeecaa167c..2ebe88d534 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -361,11 +361,10 @@ class RelocInfo {
CODE_TARGET,
EMBEDDED_OBJECT,
    // Wasm entries are used to relocate pointers into the wasm memory embedded in
- // wasm code. Everything after WASM_MEMORY_REFERENCE (inclusive) is not
+ // wasm code. Everything after WASM_CONTEXT_REFERENCE (inclusive) is not
// GC'ed.
- WASM_MEMORY_REFERENCE,
+ WASM_CONTEXT_REFERENCE,
WASM_GLOBAL_REFERENCE,
- WASM_MEMORY_SIZE_REFERENCE,
WASM_FUNCTION_TABLE_SIZE_REFERENCE,
WASM_PROTECTED_INSTRUCTION_LANDING,
WASM_GLOBAL_HANDLE,
@@ -395,8 +394,8 @@ class RelocInfo {
// Pseudo-types
NUMBER_OF_MODES,
- NONE32, // never recorded 32-bit value
- NONE64, // never recorded 64-bit value
+ NONE32, // never recorded 32-bit value
+ NONE64, // never recorded 64-bit value
FIRST_REAL_RELOC_MODE = CODE_TARGET,
LAST_REAL_RELOC_MODE = VENEER_POOL,
@@ -458,11 +457,8 @@ class RelocInfo {
static inline bool IsNone(Mode mode) {
return mode == NONE32 || mode == NONE64;
}
- static inline bool IsWasmMemoryReference(Mode mode) {
- return mode == WASM_MEMORY_REFERENCE;
- }
- static inline bool IsWasmMemorySizeReference(Mode mode) {
- return mode == WASM_MEMORY_SIZE_REFERENCE;
+ static inline bool IsWasmContextReference(Mode mode) {
+ return mode == WASM_CONTEXT_REFERENCE;
}
static inline bool IsWasmGlobalReference(Mode mode) {
return mode == WASM_GLOBAL_REFERENCE;
@@ -474,11 +470,10 @@ class RelocInfo {
return IsWasmPtrReference(mode) || IsWasmSizeReference(mode);
}
static inline bool IsWasmSizeReference(Mode mode) {
- return mode == WASM_MEMORY_SIZE_REFERENCE ||
- mode == WASM_FUNCTION_TABLE_SIZE_REFERENCE;
+ return IsWasmFunctionTableSizeReference(mode);
}
static inline bool IsWasmPtrReference(Mode mode) {
- return mode == WASM_MEMORY_REFERENCE || mode == WASM_GLOBAL_REFERENCE ||
+ return mode == WASM_CONTEXT_REFERENCE || mode == WASM_GLOBAL_REFERENCE ||
mode == WASM_GLOBAL_HANDLE;
}
static inline bool IsWasmProtectedLanding(Mode mode) {
@@ -510,17 +505,14 @@ class RelocInfo {
// constant pool, otherwise the pointer is embedded in the instruction stream.
bool IsInConstantPool();
- Address wasm_memory_reference() const;
+ Address wasm_context_reference() const;
Address wasm_global_reference() const;
uint32_t wasm_function_table_size_reference() const;
uint32_t wasm_memory_size_reference() const;
Address global_handle() const;
- void update_wasm_memory_reference(
- Isolate* isolate, Address old_base, Address new_base,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- void update_wasm_memory_size(
- Isolate* isolate, uint32_t old_size, uint32_t new_size,
+ void set_wasm_context_reference(
+ Isolate* isolate, Address address,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void update_wasm_global_reference(
Isolate* isolate, Address old_base, Address new_base,
@@ -767,10 +759,6 @@ class ExternalReference BASE_EMBEDDED {
// ObjectPair f(v8::internal::Arguments).
BUILTIN_CALL_PAIR,
-  // Builtin call that returns a triple of values.
- // ObjectTriple f(v8::internal::Arguments).
- BUILTIN_CALL_TRIPLE,
-
  // Builtin that takes floating-point arguments and returns an int.
// int f(double, double).
BUILTIN_COMPARE_CALL,
@@ -830,6 +818,9 @@ class ExternalReference BASE_EMBEDDED {
// Isolate as an external reference.
static ExternalReference isolate_address(Isolate* isolate);
+ // The builtins table as an external reference, used by lazy deserialization.
+ static ExternalReference builtins_address(Isolate* isolate);
+
// One-of-a-kind references. These references are not part of a general
// pattern. This means that they have to be added to the
// ExternalReferenceTable in serialize.cc manually.
@@ -876,6 +867,8 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference wasm_word32_popcnt(Isolate* isolate);
static ExternalReference wasm_word64_popcnt(Isolate* isolate);
static ExternalReference wasm_float64_pow(Isolate* isolate);
+ static ExternalReference wasm_set_thread_in_wasm_flag(Isolate* isolate);
+ static ExternalReference wasm_clear_thread_in_wasm_flag(Isolate* isolate);
static ExternalReference f64_acos_wrapper_function(Isolate* isolate);
static ExternalReference f64_asin_wrapper_function(Isolate* isolate);
@@ -992,6 +985,11 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference get_or_create_hash_raw(Isolate* isolate);
+ static ExternalReference copy_fast_number_jsarray_elements_to_typed_array(
+ Isolate* isolate);
+ static ExternalReference copy_typed_array_elements_to_typed_array(
+ Isolate* isolate);
+
static ExternalReference page_flags(Page* page);
static ExternalReference ForDeoptEntry(Address entry);
@@ -1251,6 +1249,57 @@ class HeapObjectRequest {
int offset_;
};
+// Base type for CPU registers.
+//
+// 1) We would prefer to use an enum for registers, but enum values are
+// assignment-compatible with int, which has caused code-generation bugs.
+//
+// 2) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the class in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+template <typename SubType, int kAfterLastRegister>
+class RegisterBase {
+ public:
+ static constexpr int kCode_no_reg = -1;
+ static constexpr int kNumRegisters = kAfterLastRegister;
+
+ static constexpr SubType no_reg() { return SubType{kCode_no_reg}; }
+
+ template <int code>
+ static constexpr SubType from_code() {
+ static_assert(code >= 0 && code < kNumRegisters, "must be valid reg code");
+ return SubType{code};
+ }
+
+ static SubType from_code(int code) {
+ DCHECK_LE(0, code);
+ DCHECK_GT(kNumRegisters, code);
+ return SubType{code};
+ }
+
+ bool is_valid() const { return reg_code_ != kCode_no_reg; }
+
+ int code() const {
+ DCHECK(is_valid());
+ return reg_code_;
+ }
+
+ int bit() const { return 1 << code(); }
+
+ inline bool operator==(SubType other) const {
+ return reg_code_ == other.reg_code_;
+ }
+ inline bool operator!=(SubType other) const { return !(*this == other); }
+
+ protected:
+ explicit constexpr RegisterBase(int code) : reg_code_(code) {}
+ int reg_code_;
+};
+
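
For context, a port defines its concrete register type on top of RegisterBase via CRTP; a minimal sketch with invented names (not part of this patch):

    enum RegisterCode { kRegCode_r0, kRegCode_r1, kRegAfterLast };

    class MyRegister : public RegisterBase<MyRegister, kRegAfterLast> {
      // RegisterBase must reach the private constructor from from_code().
      friend class RegisterBase<MyRegister, kRegAfterLast>;
      explicit constexpr MyRegister(int code) : RegisterBase(code) {}
    };

    // Checked at compile time: from_code<kRegAfterLast>() would not compile.
    constexpr MyRegister r0 = MyRegister::from_code<kRegCode_r0>();
    static_assert(MyRegister::kNumRegisters == 2, "two allocatable registers");
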
} // namespace internal
} // namespace v8
#endif // V8_ASSEMBLER_H_
diff --git a/deps/v8/src/ast/ast-expression-rewriter.cc b/deps/v8/src/ast/ast-expression-rewriter.cc
index 5a446442fd..02a4408a60 100644
--- a/deps/v8/src/ast/ast-expression-rewriter.cc
+++ b/deps/v8/src/ast/ast-expression-rewriter.cc
@@ -116,9 +116,11 @@ void AstExpressionRewriter::VisitWithStatement(WithStatement* node) {
void AstExpressionRewriter::VisitSwitchStatement(SwitchStatement* node) {
AST_REWRITE_PROPERTY(Expression, node, tag);
- ZoneList<CaseClause*>* clauses = node->cases();
- for (int i = 0; i < clauses->length(); i++) {
- AST_REWRITE_LIST_ELEMENT(CaseClause, clauses, i);
+ for (CaseClause* clause : *node->cases()) {
+ if (!clause->is_default()) {
+ AST_REWRITE_PROPERTY(Expression, clause, label);
+ }
+ VisitStatements(clause->statements());
}
}
@@ -372,14 +374,6 @@ void AstExpressionRewriter::VisitSuperCallReference(SuperCallReference* node) {
}
-void AstExpressionRewriter::VisitCaseClause(CaseClause* node) {
- if (!node->is_default()) {
- AST_REWRITE_PROPERTY(Expression, node, label);
- }
- VisitStatements(node->statements());
-}
-
-
void AstExpressionRewriter::VisitEmptyParentheses(EmptyParentheses* node) {
NOTHING();
}
@@ -388,6 +382,10 @@ void AstExpressionRewriter::VisitGetIterator(GetIterator* node) {
AST_REWRITE_PROPERTY(Expression, node, iterable);
}
+void AstExpressionRewriter::VisitGetTemplateObject(GetTemplateObject* node) {
+ NOTHING();
+}
+
void AstExpressionRewriter::VisitImportCallExpression(
ImportCallExpression* node) {
REWRITE_THIS(node);
diff --git a/deps/v8/src/ast/ast-numbering.cc b/deps/v8/src/ast/ast-numbering.cc
index dc8cbd97a2..3df7aae861 100644
--- a/deps/v8/src/ast/ast-numbering.cc
+++ b/deps/v8/src/ast/ast-numbering.cc
@@ -19,7 +19,6 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
bool collect_type_profile = false)
: zone_(zone),
eager_literals_(eager_literals),
- next_id_(BailoutId::FirstUsable().ToInt()),
suspend_count_(0),
properties_(zone),
language_mode_(SLOPPY),
@@ -49,12 +48,6 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
void VisitArguments(ZoneList<Expression*>* arguments);
void VisitLiteralProperty(LiteralProperty* property);
- int ReserveId() {
- int tmp = next_id_;
- next_id_ += 1;
- return tmp;
- }
-
void DisableOptimization(BailoutReason reason) {
dont_optimize_reason_ = reason;
}
@@ -84,7 +77,6 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
Zone* zone_;
Compiler::EagerInnerFunctionLiterals* eager_literals_;
- int next_id_;
int suspend_count_;
AstProperties properties_;
LanguageMode language_mode_;
@@ -221,6 +213,7 @@ void AstNumberingVisitor::VisitUnaryOperation(UnaryOperation* node) {
} else {
Visit(node->expression());
}
+ ReserveFeedbackSlots(node);
}
@@ -265,7 +258,6 @@ void AstNumberingVisitor::VisitWithStatement(WithStatement* node) {
void AstNumberingVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
- node->set_osr_id(ReserveId());
node->set_first_suspend_id(suspend_count_);
Visit(node->body());
Visit(node->cond());
@@ -274,7 +266,6 @@ void AstNumberingVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
- node->set_osr_id(ReserveId());
node->set_first_suspend_id(suspend_count_);
Visit(node->cond());
Visit(node->body());
@@ -354,13 +345,14 @@ void AstNumberingVisitor::VisitGetIterator(GetIterator* node) {
ReserveFeedbackSlots(node);
}
+void AstNumberingVisitor::VisitGetTemplateObject(GetTemplateObject* node) {}
+
void AstNumberingVisitor::VisitImportCallExpression(
ImportCallExpression* node) {
Visit(node->argument());
}
void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
- node->set_osr_id(ReserveId());
Visit(node->enumerable()); // Not part of loop.
node->set_first_suspend_id(suspend_count_);
Visit(node->each());
@@ -371,7 +363,6 @@ void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
- node->set_osr_id(ReserveId());
Visit(node->assign_iterator()); // Not part of loop.
node->set_first_suspend_id(suspend_count_);
Visit(node->next_result());
@@ -400,22 +391,15 @@ void AstNumberingVisitor::VisitIfStatement(IfStatement* node) {
void AstNumberingVisitor::VisitSwitchStatement(SwitchStatement* node) {
Visit(node->tag());
- ZoneList<CaseClause*>* cases = node->cases();
- for (int i = 0; i < cases->length(); i++) {
- VisitCaseClause(cases->at(i));
+ for (CaseClause* clause : *node->cases()) {
+ if (!clause->is_default()) Visit(clause->label());
+ VisitStatements(clause->statements());
+ ReserveFeedbackSlots(clause);
}
}
-void AstNumberingVisitor::VisitCaseClause(CaseClause* node) {
- if (!node->is_default()) Visit(node->label());
- VisitStatements(node->statements());
- ReserveFeedbackSlots(node);
-}
-
-
void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
- node->set_osr_id(ReserveId());
if (node->init() != NULL) Visit(node->init()); // Not part of loop.
node->set_first_suspend_id(suspend_count_);
if (node->cond() != NULL) Visit(node->cond());
@@ -429,9 +413,6 @@ void AstNumberingVisitor::VisitClassLiteral(ClassLiteral* node) {
LanguageModeScope language_mode_scope(this, STRICT);
if (node->extends()) Visit(node->extends());
if (node->constructor()) Visit(node->constructor());
- if (node->class_variable_proxy()) {
- VisitVariableProxy(node->class_variable_proxy());
- }
for (int i = 0; i < node->properties()->length(); i++) {
VisitLiteralProperty(node->properties()->at(i));
}
diff --git a/deps/v8/src/ast/ast-source-ranges.h b/deps/v8/src/ast/ast-source-ranges.h
index c915c5b99f..55554b1043 100644
--- a/deps/v8/src/ast/ast-source-ranges.h
+++ b/deps/v8/src/ast/ast-source-ranges.h
@@ -230,7 +230,7 @@ class SourceRangeMap final : public ZoneObject {
public:
explicit SourceRangeMap(Zone* zone) : map_(zone) {}
- AstNodeSourceRanges* Find(AstNode* node) {
+ AstNodeSourceRanges* Find(ZoneObject* node) {
auto it = map_.find(node);
if (it == map_.end()) return nullptr;
return it->second;
@@ -246,7 +246,7 @@ class SourceRangeMap final : public ZoneObject {
#undef DEFINE_MAP_INSERT
private:
- ZoneMap<AstNode*, AstNodeSourceRanges*> map_;
+ ZoneMap<ZoneObject*, AstNodeSourceRanges*> map_;
};
} // namespace internal
diff --git a/deps/v8/src/ast/ast-traversal-visitor.h b/deps/v8/src/ast/ast-traversal-visitor.h
index 0f09ce831a..0fec89a58c 100644
--- a/deps/v8/src/ast/ast-traversal-visitor.h
+++ b/deps/v8/src/ast/ast-traversal-visitor.h
@@ -211,11 +211,6 @@ void AstTraversalVisitor<Subclass>::VisitSwitchStatement(
}
template <class Subclass>
-void AstTraversalVisitor<Subclass>::VisitCaseClause(CaseClause* clause) {
- UNREACHABLE();
-}
-
-template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitDoWhileStatement(
DoWhileStatement* stmt) {
PROCESS_NODE(stmt);
@@ -497,6 +492,12 @@ void AstTraversalVisitor<Subclass>::VisitGetIterator(GetIterator* expr) {
}
template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitGetTemplateObject(
+ GetTemplateObject* expr) {
+ PROCESS_EXPRESSION(expr);
+}
+
+template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitImportCallExpression(
ImportCallExpression* expr) {
PROCESS_EXPRESSION(expr);
diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc
index c9c89d7745..b83ed4547e 100644
--- a/deps/v8/src/ast/ast-value-factory.cc
+++ b/deps/v8/src/ast/ast-value-factory.cc
@@ -168,6 +168,30 @@ void AstConsString::Internalize(Isolate* isolate) {
set_string(tmp);
}
+AstValue::AstValue(double n) : next_(nullptr) {
+ int int_value;
+ if (DoubleToSmiInteger(n, &int_value)) {
+ type_ = SMI;
+ smi_ = int_value;
+ } else {
+ type_ = NUMBER;
+ number_ = n;
+ }
+}
+
+bool AstValue::ToUint32(uint32_t* value) const {
+ if (IsSmi()) {
+ int num = smi_;
+ if (num < 0) return false;
+ *value = static_cast<uint32_t>(num);
+ return true;
+ }
+ if (IsHeapNumber()) {
+ return DoubleToUint32IfEqualToSelf(number_, value);
+ }
+ return false;
+}
+
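
The conversion succeeds only for values that round-trip exactly to uint32_t; a hypothetical spot check (the harness and factory variable are illustrative, AstValueFactory::NewNumber is the real allocation path):

    uint32_t out;
    CHECK(ast_value_factory->NewNumber(42)->ToUint32(&out));            // Smi path
    CHECK(ast_value_factory->NewNumber(4294967295.0)->ToUint32(&out));  // max uint32
    CHECK(!ast_value_factory->NewNumber(-1)->ToUint32(&out));           // negative
    CHECK(!ast_value_factory->NewNumber(0.5)->ToUint32(&out));          // fractional
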
bool AstValue::IsPropertyName() const {
if (type_ == STRING) {
uint32_t index;
@@ -242,6 +266,31 @@ void AstValue::Internalize(Isolate* isolate) {
}
}
+AstStringConstants::AstStringConstants(Isolate* isolate, uint32_t hash_seed)
+ : zone_(isolate->allocator(), ZONE_NAME),
+ string_table_(AstRawString::Compare),
+ hash_seed_(hash_seed) {
+ DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
+#define F(name, str) \
+ { \
+ const char* data = str; \
+ Vector<const uint8_t> literal(reinterpret_cast<const uint8_t*>(data), \
+ static_cast<int>(strlen(data))); \
+ uint32_t hash_field = StringHasher::HashSequentialString<uint8_t>( \
+ literal.start(), literal.length(), hash_seed_); \
+ name##_string_ = new (&zone_) AstRawString(true, literal, hash_field); \
+ /* The Handle returned by the factory is located on the roots */ \
+ /* array, not on the temporary HandleScope, so this is safe. */ \
+ name##_string_->set_string(isolate->factory()->name##_string()); \
+ base::HashMap::Entry* entry = \
+ string_table_.InsertNew(name##_string_, name##_string_->Hash()); \
+ DCHECK_NULL(entry->value); \
+ entry->value = reinterpret_cast<void*>(1); \
+ }
+ AST_STRING_CONSTANTS(F)
+#undef F
+}
+
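
Hand-expanding one list entry makes the X-macro above easier to follow; F(async, "async") reduces to roughly this (simplified, the comments dropped):

    Vector<const uint8_t> literal(reinterpret_cast<const uint8_t*>("async"), 5);
    uint32_t hash_field = StringHasher::HashSequentialString<uint8_t>(
        literal.start(), literal.length(), hash_seed_);
    async_string_ = new (&zone_) AstRawString(true, literal, hash_field);
    async_string_->set_string(isolate->factory()->async_string());
    string_table_.InsertNew(async_string_, async_string_->Hash());
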
AstRawString* AstValueFactory::GetOneByteStringInternal(
Vector<const uint8_t> literal) {
if (literal.length() == 1 && IsInRange(literal[0], 'a', 'z')) {
diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h
index b72e34a36c..e67c87b4c0 100644
--- a/deps/v8/src/ast/ast-value-factory.h
+++ b/deps/v8/src/ast/ast-value-factory.h
@@ -209,18 +209,7 @@ class AstValue : public ZoneObject {
return Smi::FromInt(smi_);
}
- bool ToUint32(uint32_t* value) const {
- if (IsSmi()) {
- int num = smi_;
- if (num < 0) return false;
- *value = static_cast<uint32_t>(num);
- return true;
- }
- if (IsHeapNumber()) {
- return DoubleToUint32IfEqualToSelf(number_, value);
- }
- return false;
- }
+ bool ToUint32(uint32_t* value) const;
bool EqualsString(const AstRawString* string) const {
return type_ == STRING && string_ == string;
@@ -274,16 +263,7 @@ class AstValue : public ZoneObject {
symbol_ = symbol;
}
- explicit AstValue(double n) : next_(nullptr) {
- int int_value;
- if (DoubleToSmiInteger(n, &int_value)) {
- type_ = SMI;
- smi_ = int_value;
- } else {
- type_ = NUMBER;
- number_ = n;
- }
- }
+ explicit AstValue(double n);
AstValue(Type t, int i) : type_(t), next_(nullptr) {
DCHECK(type_ == SMI);
@@ -316,7 +296,7 @@ class AstValue : public ZoneObject {
};
// For generating constants.
-#define STRING_CONSTANTS(F) \
+#define AST_STRING_CONSTANTS(F) \
F(anonymous_function, "(anonymous function)") \
F(arguments, "arguments") \
F(async, "async") \
@@ -361,34 +341,11 @@ class AstValue : public ZoneObject {
class AstStringConstants final {
public:
- AstStringConstants(Isolate* isolate, uint32_t hash_seed)
- : zone_(isolate->allocator(), ZONE_NAME),
- string_table_(AstRawString::Compare),
- hash_seed_(hash_seed) {
- DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
-#define F(name, str) \
- { \
- const char* data = str; \
- Vector<const uint8_t> literal(reinterpret_cast<const uint8_t*>(data), \
- static_cast<int>(strlen(data))); \
- uint32_t hash_field = StringHasher::HashSequentialString<uint8_t>( \
- literal.start(), literal.length(), hash_seed_); \
- name##_string_ = new (&zone_) AstRawString(true, literal, hash_field); \
- /* The Handle returned by the factory is located on the roots */ \
- /* array, not on the temporary HandleScope, so this is safe. */ \
- name##_string_->set_string(isolate->factory()->name##_string()); \
- base::HashMap::Entry* entry = \
- string_table_.InsertNew(name##_string_, name##_string_->Hash()); \
- DCHECK_NULL(entry->value); \
- entry->value = reinterpret_cast<void*>(1); \
- }
- STRING_CONSTANTS(F)
-#undef F
- }
+ AstStringConstants(Isolate* isolate, uint32_t hash_seed);
#define F(name, str) \
const AstRawString* name##_string() const { return name##_string_; }
- STRING_CONSTANTS(F)
+ AST_STRING_CONSTANTS(F)
#undef F
uint32_t hash_seed() const { return hash_seed_; }
@@ -402,7 +359,7 @@ class AstStringConstants final {
uint32_t hash_seed_;
#define F(name, str) AstRawString* name##_string_;
- STRING_CONSTANTS(F)
+ AST_STRING_CONSTANTS(F)
#undef F
DISALLOW_COPY_AND_ASSIGN(AstStringConstants);
@@ -464,7 +421,7 @@ class AstValueFactory {
const AstRawString* name##_string() const { \
return string_constants_->name##_string(); \
}
- STRING_CONSTANTS(F)
+ AST_STRING_CONSTANTS(F)
#undef F
const AstConsString* empty_cons_string() const { return empty_cons_string_; }
@@ -544,7 +501,6 @@ class AstValueFactory {
} // namespace internal
} // namespace v8
-#undef STRING_CONSTANTS
#undef OTHER_CONSTANTS
#endif // V8_AST_AST_VALUE_FACTORY_H_
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index 81ee8a200b..94abe81bda 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -139,11 +139,6 @@ bool Expression::IsValidReferenceExpression() const {
(IsVariableProxy() && AsVariableProxy()->IsValidReferenceExpression());
}
-bool Expression::IsValidReferenceExpressionOrThis() const {
- return IsValidReferenceExpression() ||
- (IsVariableProxy() && AsVariableProxy()->is_this());
-}
-
bool Expression::IsAnonymousFunctionDefinition() const {
return (IsFunctionLiteral() &&
AsFunctionLiteral()->IsAnonymousFunctionDefinition()) ||
@@ -249,15 +244,13 @@ void ForInStatement::AssignFeedbackSlots(FeedbackVectorSpec* spec,
FunctionKind kind,
FeedbackSlotCache* cache) {
AssignVectorSlots(each(), spec, language_mode, &each_slot_);
- for_in_feedback_slot_ = spec->AddGeneralSlot();
+ for_in_feedback_slot_ = spec->AddForInSlot();
}
Assignment::Assignment(NodeType node_type, Token::Value op, Expression* target,
Expression* value, int pos)
: Expression(pos, node_type), target_(target), value_(value) {
- bit_field_ |= IsUninitializedField::encode(false) |
- KeyTypeField::encode(ELEMENT) |
- StoreModeField::encode(STANDARD_STORE) | TokenField::encode(op);
+ bit_field_ |= TokenField::encode(op);
}
void Assignment::AssignFeedbackSlots(FeedbackVectorSpec* spec,
@@ -290,6 +283,9 @@ bool FunctionLiteral::AllowsLazyCompilation() {
return scope()->AllowsLazyCompilation();
}
+Handle<String> FunctionLiteral::name(Isolate* isolate) const {
+ return raw_name_ ? raw_name_->string() : isolate->factory()->empty_string();
+}
int FunctionLiteral::start_position() const {
return scope()->start_position();
@@ -369,10 +365,6 @@ void ClassLiteral::AssignFeedbackSlots(FeedbackVectorSpec* spec,
home_object_slot_ = spec->AddStoreICSlot(language_mode);
}
- if (NeedsProxySlot()) {
- proxy_slot_ = spec->AddStoreICSlot(language_mode);
- }
-
for (int i = 0; i < properties()->length(); i++) {
ClassLiteral::Property* property = properties()->at(i);
Expression* value = property->value();
@@ -401,6 +393,9 @@ void ObjectLiteral::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
FunctionKind kind,
FeedbackSlotCache* cache) {
+ // The empty object literal doesn't need any feedback vector slot.
+ if (this->IsEmptyObjectLiteral()) return;
+
MaterializedLiteral::AssignFeedbackSlots(spec, language_mode, kind, cache);
// This logic that computes the number of slots needed for vector store
@@ -642,7 +637,7 @@ void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
}
bool ObjectLiteral::IsFastCloningSupported() const {
- // The FastCloneShallowObject builtin doesn't copy elements, and object
+  // The CreateShallowObjectLiteral builtin doesn't copy elements, and object
// literals don't support copy-on-write (COW) elements for now.
// TODO(mvstanton): make object literals support COW elements.
return fast_elements() && is_shallow() &&
@@ -650,6 +645,12 @@ bool ObjectLiteral::IsFastCloningSupported() const {
ConstructorBuiltins::kMaximumClonedShallowObjectProperties;
}
+bool ArrayLiteral::is_empty() const {
+ DCHECK(is_initialized());
+ return values()->is_empty() &&
+ (constant_elements().is_null() || constant_elements()->is_empty());
+}
+
int ArrayLiteral::InitDepthAndFlags() {
DCHECK_LT(first_spread_index_, 0);
if (is_initialized()) return depth();
@@ -822,6 +823,48 @@ void MaterializedLiteral::BuildConstants(Isolate* isolate) {
DCHECK(IsRegExpLiteral());
}
+Handle<TemplateObjectDescription> GetTemplateObject::GetOrBuildDescription(
+ Isolate* isolate) {
+ Handle<FixedArray> raw_strings =
+ isolate->factory()->NewFixedArray(this->raw_strings()->length(), TENURED);
+ bool raw_and_cooked_match = true;
+ for (int i = 0; i < raw_strings->length(); ++i) {
+ if (*this->raw_strings()->at(i)->value() !=
+ *this->cooked_strings()->at(i)->value()) {
+ raw_and_cooked_match = false;
+ }
+ raw_strings->set(i, *this->raw_strings()->at(i)->value());
+ }
+ Handle<FixedArray> cooked_strings = raw_strings;
+ if (!raw_and_cooked_match) {
+ cooked_strings = isolate->factory()->NewFixedArray(
+ this->cooked_strings()->length(), TENURED);
+ for (int i = 0; i < cooked_strings->length(); ++i) {
+ cooked_strings->set(i, *this->cooked_strings()->at(i)->value());
+ }
+ }
+ return isolate->factory()->NewTemplateObjectDescription(
+ this->hash(), raw_strings, cooked_strings);
+}
+
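
The sharing logic above exploits that raw strings keep escape sequences verbatim while cooked strings have them processed, so the two lists only diverge when a literal contains an escape. A worked example (template literals shown with C++-style escapes):

    //   tag`x${0}y`    raw {"x", "y"}     cooked {"x", "y"}    one shared array
    //   tag`x\n${0}y`  raw {"x\\n", "y"}  cooked {"x\n", "y"}  two arrays built
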
+void UnaryOperation::AssignFeedbackSlots(FeedbackVectorSpec* spec,
+ LanguageMode language_mode,
+ FunctionKind kind,
+ FeedbackSlotCache* cache) {
+ switch (op()) {
+ // Only unary plus, minus, and bitwise-not currently collect feedback.
+ case Token::ADD:
+ case Token::SUB:
+ case Token::BIT_NOT:
+ // Note that the slot kind remains "BinaryOp", as the operation
+ // is transformed into a binary operation in the BytecodeGenerator.
+ feedback_slot_ = spec->AddInterpreterBinaryOpICSlot();
+ return;
+ default:
+ return;
+ }
+}
+
void BinaryOperation::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
FunctionKind kind,
@@ -957,61 +1000,6 @@ bool CompareOperation::IsLiteralCompareNull(Expression** expr) {
// ----------------------------------------------------------------------------
// Recording of type feedback
-Handle<Map> SmallMapList::at(int i) const { return Handle<Map>(list_.at(i)); }
-
-SmallMapList* Expression::GetReceiverTypes() {
- switch (node_type()) {
-#define NODE_LIST(V) \
- PROPERTY_NODE_LIST(V) \
- V(Call)
-#define GENERATE_CASE(Node) \
- case k##Node: \
- return static_cast<Node*>(this)->GetReceiverTypes();
- NODE_LIST(GENERATE_CASE)
-#undef NODE_LIST
-#undef GENERATE_CASE
- default:
- UNREACHABLE();
- }
-}
-
-KeyedAccessStoreMode Expression::GetStoreMode() const {
- switch (node_type()) {
-#define GENERATE_CASE(Node) \
- case k##Node: \
- return static_cast<const Node*>(this)->GetStoreMode();
- PROPERTY_NODE_LIST(GENERATE_CASE)
-#undef GENERATE_CASE
- default:
- UNREACHABLE();
- }
-}
-
-IcCheckType Expression::GetKeyType() const {
- switch (node_type()) {
-#define GENERATE_CASE(Node) \
- case k##Node: \
- return static_cast<const Node*>(this)->GetKeyType();
- PROPERTY_NODE_LIST(GENERATE_CASE)
-#undef GENERATE_CASE
- default:
- UNREACHABLE();
- }
-}
-
-bool Expression::IsMonomorphic() const {
- switch (node_type()) {
-#define GENERATE_CASE(Node) \
- case k##Node: \
- return static_cast<const Node*>(this)->IsMonomorphic();
- PROPERTY_NODE_LIST(GENERATE_CASE)
- CALL_NODE_LIST(GENERATE_CASE)
-#undef GENERATE_CASE
- default:
- UNREACHABLE();
- }
-}
-
void Call::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode, FunctionKind kind,
FeedbackSlotCache* cache) {
@@ -1045,9 +1033,8 @@ Call::CallType Call::GetCallType() const {
return OTHER_CALL;
}
-CaseClause::CaseClause(Expression* label, ZoneList<Statement*>* statements,
- int pos)
- : Expression(pos, kCaseClause), label_(label), statements_(statements) {}
+CaseClause::CaseClause(Expression* label, ZoneList<Statement*>* statements)
+ : label_(label), statements_(statements) {}
void CaseClause::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index 79c5375d10..0253e6651e 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -16,7 +16,6 @@
#include "src/objects/literal-objects.h"
#include "src/parsing/token.h"
#include "src/runtime/runtime.h"
-#include "src/small-pointer-list.h"
namespace v8 {
namespace internal {
@@ -69,44 +68,38 @@ namespace internal {
V(ObjectLiteral) \
V(ArrayLiteral)
-#define PROPERTY_NODE_LIST(V) \
- V(Assignment) \
- V(CompoundAssignment) \
- V(CountOperation) \
- V(Property)
-
-#define CALL_NODE_LIST(V) \
- V(Call) \
- V(CallNew)
-
#define EXPRESSION_NODE_LIST(V) \
LITERAL_NODE_LIST(V) \
- PROPERTY_NODE_LIST(V) \
- CALL_NODE_LIST(V) \
- V(FunctionLiteral) \
- V(ClassLiteral) \
- V(NativeFunctionLiteral) \
- V(Conditional) \
- V(VariableProxy) \
- V(Literal) \
- V(Yield) \
- V(YieldStar) \
+ V(Assignment) \
V(Await) \
- V(Throw) \
- V(CallRuntime) \
- V(UnaryOperation) \
V(BinaryOperation) \
+ V(Call) \
+ V(CallNew) \
+ V(CallRuntime) \
+ V(ClassLiteral) \
V(CompareOperation) \
- V(Spread) \
- V(ThisFunction) \
- V(SuperPropertyReference) \
- V(SuperCallReference) \
- V(CaseClause) \
+ V(CompoundAssignment) \
+ V(Conditional) \
+ V(CountOperation) \
+ V(DoExpression) \
V(EmptyParentheses) \
+ V(FunctionLiteral) \
V(GetIterator) \
- V(DoExpression) \
+ V(GetTemplateObject) \
+ V(ImportCallExpression) \
+ V(Literal) \
+ V(NativeFunctionLiteral) \
+ V(Property) \
V(RewritableExpression) \
- V(ImportCallExpression)
+ V(Spread) \
+ V(SuperCallReference) \
+ V(SuperPropertyReference) \
+ V(ThisFunction) \
+ V(Throw) \
+ V(UnaryOperation) \
+ V(VariableProxy) \
+ V(Yield) \
+ V(YieldStar)
#define AST_NODE_LIST(V) \
DECLARATION_NODE_LIST(V) \
@@ -223,35 +216,6 @@ class Statement : public AstNode {
};
-class SmallMapList final {
- public:
- SmallMapList() {}
- SmallMapList(int capacity, Zone* zone) : list_(capacity, zone) {}
-
- void Reserve(int capacity, Zone* zone) { list_.Reserve(capacity, zone); }
- void Clear() { list_.Clear(); }
- void Sort() { list_.Sort(); }
-
- bool is_empty() const { return list_.is_empty(); }
- int length() const { return list_.length(); }
-
- void Add(Handle<Map> handle, Zone* zone) {
- list_.Add(handle.location(), zone);
- }
-
- Handle<Map> at(int i) const;
-
- Handle<Map> first() const { return at(0); }
- Handle<Map> last() const { return at(length() - 1); }
-
- private:
- // The list stores pointers to Map*, that is Map**, so it's GC safe.
- SmallPointerList<Map*> list_;
-
- DISALLOW_COPY_AND_ASSIGN(SmallMapList);
-};
-
-
class Expression : public AstNode {
public:
enum Context {
@@ -304,14 +268,6 @@ class Expression : public AstNode {
// that this also checks for loads of the global "undefined" variable.
bool IsUndefinedLiteral() const;
- // True iff the expression is a valid target for an assignment.
- bool IsValidReferenceExpressionOrThis() const;
-
- SmallMapList* GetReceiverTypes();
- KeyedAccessStoreMode GetStoreMode() const;
- IcCheckType GetKeyType() const;
- bool IsMonomorphic() const;
-
protected:
Expression(int pos, NodeType type) : AstNode(pos, type) {}
@@ -520,18 +476,11 @@ class IterationStatement : public BreakableStatement {
first_suspend_id_ = first_suspend_id;
}
- void set_osr_id(int id) { osr_id_ = BailoutId(id); }
- BailoutId OsrEntryId() const {
- DCHECK(!osr_id_.IsNone());
- return osr_id_;
- }
-
protected:
IterationStatement(ZoneList<const AstRawString*>* labels, int pos,
NodeType type)
: BreakableStatement(TARGET_FOR_ANONYMOUS, pos, type),
labels_(labels),
- osr_id_(BailoutId::None()),
body_(NULL),
suspend_count_(0),
first_suspend_id_(0) {}
@@ -542,7 +491,6 @@ class IterationStatement : public BreakableStatement {
private:
ZoneList<const AstRawString*>* labels_;
- BailoutId osr_id_;
Statement* body_;
int suspend_count_;
int first_suspend_id_;
@@ -861,16 +809,14 @@ class WithStatement final : public Statement {
Statement* statement_;
};
-
-class CaseClause final : public Expression {
+class CaseClause final : public ZoneObject {
public:
bool is_default() const { return label_ == NULL; }
Expression* label() const {
- CHECK(!is_default());
+ DCHECK(!is_default());
return label_;
}
void set_label(Expression* e) { label_ = e; }
- Label* body_target() { return &body_target_; }
ZoneList<Statement*>* statements() const { return statements_; }
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
@@ -881,40 +827,36 @@ class CaseClause final : public Expression {
private:
friend class AstNodeFactory;
- CaseClause(Expression* label, ZoneList<Statement*>* statements, int pos);
+ CaseClause(Expression* label, ZoneList<Statement*>* statements);
FeedbackSlot feedback_slot_;
Expression* label_;
- Label body_target_;
ZoneList<Statement*>* statements_;
};
class SwitchStatement final : public BreakableStatement {
public:
- void Initialize(Expression* tag, ZoneList<CaseClause*>* cases) {
- tag_ = tag;
- cases_ = cases;
- }
-
ZoneList<const AstRawString*>* labels() const { return labels_; }
- Expression* tag() const { return tag_; }
- ZoneList<CaseClause*>* cases() const { return cases_; }
+ Expression* tag() const { return tag_; }
void set_tag(Expression* t) { tag_ = t; }
+ ZoneList<CaseClause*>* cases() { return &cases_; }
+
private:
friend class AstNodeFactory;
- SwitchStatement(ZoneList<const AstRawString*>* labels, int pos)
+ SwitchStatement(Zone* zone, ZoneList<const AstRawString*>* labels,
+ Expression* tag, int pos)
: BreakableStatement(TARGET_FOR_ANONYMOUS, pos, kSwitchStatement),
labels_(labels),
- tag_(NULL),
- cases_(NULL) {}
+ tag_(tag),
+ cases_(4, zone) {}
ZoneList<const AstRawString*>* labels_;
Expression* tag_;
- ZoneList<CaseClause*>* cases_;
+ ZoneList<CaseClause*> cases_;
};
@@ -1098,11 +1040,6 @@ class Literal final : public Expression {
// as array indices).
bool IsPropertyName() const { return value_->IsPropertyName(); }
- Handle<String> AsPropertyName() {
- DCHECK(IsPropertyName());
- return Handle<String>::cast(value());
- }
-
const AstRawString* AsRawPropertyName() {
DCHECK(IsPropertyName());
return value_->AsString();
@@ -1310,17 +1247,11 @@ class ObjectLiteralProperty final : public LiteralProperty {
Kind kind() const { return kind_; }
- // Type feedback information.
- bool IsMonomorphic() const { return !receiver_type_.is_null(); }
- Handle<Map> GetReceiverType() const { return receiver_type_; }
-
bool IsCompileTimeValue() const;
void set_emit_store(bool emit_store);
bool emit_store() const;
- void set_receiver_type(Handle<Map> map) { receiver_type_ = map; }
-
bool IsNullPrototype() const {
return IsPrototype() && value()->IsNullLiteral();
}
@@ -1336,7 +1267,6 @@ class ObjectLiteralProperty final : public LiteralProperty {
Kind kind_;
bool emit_store_;
- Handle<Map> receiver_type_;
};
@@ -1391,7 +1321,7 @@ class ObjectLiteral final : public AggregateLiteral {
// marked expressions, no store code is emitted.
void CalculateEmitStore(Zone* zone);
- // Determines whether the {FastCloneShallowObject} builtin can be used.
+  // Determines whether the {CreateShallowObjectLiteral} builtin can be used.
bool IsFastCloningSupported() const;
// Assemble bitfield of flags for the CreateObjectLiteral helper.
@@ -1503,11 +1433,7 @@ class ArrayLiteral final : public AggregateLiteral {
ZoneList<Expression*>* values() const { return values_; }
- bool is_empty() const {
- DCHECK(is_initialized());
- return values()->is_empty() &&
- (constant_elements().is_null() || constant_elements()->is_empty());
- }
+ bool is_empty() const;
  // Populate the depth field and flags; returns the depth.
int InitDepthAndFlags();
@@ -1523,7 +1449,7 @@ class ArrayLiteral final : public AggregateLiteral {
// Populate the constant elements fixed array.
void BuildConstantElements(Isolate* isolate);
- // Determines whether the {FastCloneShallowArray} builtin can be used.
+ // Determines whether the {CreateShallowArrayLiteral} builtin can be used.
bool IsFastCloningSupported() const;
// Assemble bitfield of flags for the CreateArrayLiteral helper.
@@ -1684,38 +1610,6 @@ class Property final : public Expression {
void set_obj(Expression* e) { obj_ = e; }
void set_key(Expression* e) { key_ = e; }
- bool IsStringAccess() const {
- return IsStringAccessField::decode(bit_field_);
- }
-
- // Type feedback information.
- bool IsMonomorphic() const { return receiver_types_.length() == 1; }
- SmallMapList* GetReceiverTypes() { return &receiver_types_; }
- KeyedAccessStoreMode GetStoreMode() const { return STANDARD_STORE; }
- IcCheckType GetKeyType() const { return KeyTypeField::decode(bit_field_); }
- bool IsUninitialized() const {
- return !is_for_call() && HasNoTypeInformation();
- }
- bool HasNoTypeInformation() const {
- return GetInlineCacheState() == UNINITIALIZED;
- }
- InlineCacheState GetInlineCacheState() const {
- return InlineCacheStateField::decode(bit_field_);
- }
- void set_is_string_access(bool b) {
- bit_field_ = IsStringAccessField::update(bit_field_, b);
- }
- void set_key_type(IcCheckType key_type) {
- bit_field_ = KeyTypeField::update(bit_field_, key_type);
- }
- void set_inline_cache_state(InlineCacheState state) {
- bit_field_ = InlineCacheStateField::update(bit_field_, state);
- }
- void mark_for_call() {
- bit_field_ = IsForCallField::update(bit_field_, true);
- }
- bool is_for_call() const { return IsForCallField::decode(bit_field_); }
-
bool IsSuperAccess() { return obj()->IsSuperPropertyReference(); }
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
@@ -1743,24 +1637,11 @@ class Property final : public Expression {
Property(Expression* obj, Expression* key, int pos)
: Expression(pos, kProperty), obj_(obj), key_(key) {
- bit_field_ |= IsForCallField::encode(false) |
- IsStringAccessField::encode(false) |
- InlineCacheStateField::encode(UNINITIALIZED);
}
- class IsForCallField
- : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
- class IsStringAccessField : public BitField<bool, IsForCallField::kNext, 1> {
- };
- class KeyTypeField
- : public BitField<IcCheckType, IsStringAccessField::kNext, 1> {};
- class InlineCacheStateField
- : public BitField<InlineCacheState, KeyTypeField::kNext, 4> {};
-
FeedbackSlot property_feedback_slot_;
Expression* obj_;
Expression* key_;
- SmallMapList receiver_types_;
};
@@ -1777,35 +1658,6 @@ class Call final : public Expression {
FeedbackSlot CallFeedbackICSlot() const { return ic_slot_; }
- SmallMapList* GetReceiverTypes() {
- if (expression()->IsProperty()) {
- return expression()->AsProperty()->GetReceiverTypes();
- }
- return nullptr;
- }
-
- bool IsMonomorphic() const {
- if (expression()->IsProperty()) {
- return expression()->AsProperty()->IsMonomorphic();
- }
- return !target_.is_null();
- }
-
- Handle<JSFunction> target() { return target_; }
-
- void SetKnownGlobalTarget(Handle<JSFunction> target) {
- target_ = target;
- set_is_uninitialized(false);
- }
- void set_target(Handle<JSFunction> target) { target_ = target; }
-
- bool is_uninitialized() const {
- return IsUninitializedField::decode(bit_field_);
- }
- void set_is_uninitialized(bool b) {
- bit_field_ = IsUninitializedField::update(bit_field_, b);
- }
-
bool is_possibly_eval() const {
return IsPossiblyEvalField::decode(bit_field_);
}
@@ -1842,23 +1694,15 @@ class Call final : public Expression {
expression_(expression),
arguments_(arguments) {
bit_field_ |=
- IsUninitializedField::encode(false) |
IsPossiblyEvalField::encode(possibly_eval == IS_POSSIBLY_EVAL);
-
- if (expression->IsProperty()) {
- expression->AsProperty()->mark_for_call();
- }
}
- class IsUninitializedField
- : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
class IsPossiblyEvalField
- : public BitField<bool, IsUninitializedField::kNext, 1> {};
+ : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
FeedbackSlot ic_slot_;
Expression* expression_;
ZoneList<Expression*>* arguments_;
- Handle<JSFunction> target_;
};
@@ -1882,18 +1726,6 @@ class CallNew final : public Expression {
return callnew_feedback_slot_;
}
- bool IsMonomorphic() const { return IsMonomorphicField::decode(bit_field_); }
- Handle<JSFunction> target() const { return target_; }
-
- void set_is_monomorphic(bool monomorphic) {
- bit_field_ = IsMonomorphicField::update(bit_field_, monomorphic);
- }
- void set_target(Handle<JSFunction> target) { target_ = target; }
- void SetKnownGlobalTarget(Handle<JSFunction> target) {
- target_ = target;
- set_is_monomorphic(true);
- }
-
bool only_last_arg_is_spread() {
return !arguments_->is_empty() && arguments_->last()->IsSpread();
}
@@ -1905,16 +1737,11 @@ class CallNew final : public Expression {
: Expression(pos, kCallNew),
expression_(expression),
arguments_(arguments) {
- bit_field_ |= IsMonomorphicField::encode(false);
}
FeedbackSlot callnew_feedback_slot_;
Expression* expression_;
ZoneList<Expression*>* arguments_;
- Handle<JSFunction> target_;
-
- class IsMonomorphicField
- : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
};
@@ -1968,6 +1795,11 @@ class UnaryOperation final : public Expression {
Expression* expression() const { return expression_; }
void set_expression(Expression* e) { expression_ = e; }
+ void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+ FunctionKind kind, FeedbackSlotCache* cache);
+
+ FeedbackSlot UnaryOperationFeedbackSlot() const { return feedback_slot_; }
+
private:
friend class AstNodeFactory;
@@ -1977,6 +1809,7 @@ class UnaryOperation final : public Expression {
DCHECK(Token::IsUnaryOp(op));
}
+ FeedbackSlot feedback_slot_;
Expression* expression_;
class OperatorField
@@ -2025,26 +1858,10 @@ class CountOperation final : public Expression {
bool is_postfix() const { return !is_prefix(); }
Token::Value op() const { return TokenField::decode(bit_field_); }
- Token::Value binary_op() {
- return (op() == Token::INC) ? Token::ADD : Token::SUB;
- }
Expression* expression() const { return expression_; }
void set_expression(Expression* e) { expression_ = e; }
- bool IsMonomorphic() const { return receiver_types_.length() == 1; }
- SmallMapList* GetReceiverTypes() { return &receiver_types_; }
- IcCheckType GetKeyType() const { return KeyTypeField::decode(bit_field_); }
- KeyedAccessStoreMode GetStoreMode() const {
- return StoreModeField::decode(bit_field_);
- }
- void set_key_type(IcCheckType type) {
- bit_field_ = KeyTypeField::update(bit_field_, type);
- }
- void set_store_mode(KeyedAccessStoreMode mode) {
- bit_field_ = StoreModeField::update(bit_field_, mode);
- }
-
// Feedback slot for binary operation is only used by ignition.
FeedbackSlot CountBinaryOpFeedbackSlot() const {
return binary_operation_slot_;
@@ -2059,22 +1876,16 @@ class CountOperation final : public Expression {
CountOperation(Token::Value op, bool is_prefix, Expression* expr, int pos)
: Expression(pos, kCountOperation), expression_(expr) {
- bit_field_ |=
- IsPrefixField::encode(is_prefix) | KeyTypeField::encode(ELEMENT) |
- StoreModeField::encode(STANDARD_STORE) | TokenField::encode(op);
+ bit_field_ |= IsPrefixField::encode(is_prefix) | TokenField::encode(op);
}
class IsPrefixField
: public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
- class KeyTypeField : public BitField<IcCheckType, IsPrefixField::kNext, 1> {};
- class StoreModeField
- : public BitField<KeyedAccessStoreMode, KeyTypeField::kNext, 3> {};
- class TokenField : public BitField<Token::Value, StoreModeField::kNext, 7> {};
+ class TokenField : public BitField<Token::Value, IsPrefixField::kNext, 7> {};
FeedbackSlot slot_;
FeedbackSlot binary_operation_slot_;
Expression* expression_;
- SmallMapList receiver_types_;
};
@@ -2170,29 +1981,6 @@ class Assignment : public Expression {
void set_target(Expression* e) { target_ = e; }
void set_value(Expression* e) { value_ = e; }
- // Type feedback information.
- bool IsUninitialized() const {
- return IsUninitializedField::decode(bit_field_);
- }
- bool HasNoTypeInformation() {
- return IsUninitializedField::decode(bit_field_);
- }
- bool IsMonomorphic() const { return receiver_types_.length() == 1; }
- SmallMapList* GetReceiverTypes() { return &receiver_types_; }
- IcCheckType GetKeyType() const { return KeyTypeField::decode(bit_field_); }
- KeyedAccessStoreMode GetStoreMode() const {
- return StoreModeField::decode(bit_field_);
- }
- void set_is_uninitialized(bool b) {
- bit_field_ = IsUninitializedField::update(bit_field_, b);
- }
- void set_key_type(IcCheckType key_type) {
- bit_field_ = KeyTypeField::update(bit_field_, key_type);
- }
- void set_store_mode(KeyedAccessStoreMode mode) {
- bit_field_ = StoreModeField::update(bit_field_, mode);
- }
-
// The assignment was generated as part of block-scoped sloppy-mode
// function hoisting, see
// ES#sec-block-level-function-declarations-web-legacy-compatibility-semantics
@@ -2216,20 +2004,14 @@ class Assignment : public Expression {
private:
friend class AstNodeFactory;
- class IsUninitializedField
- : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
- class KeyTypeField
- : public BitField<IcCheckType, IsUninitializedField::kNext, 1> {};
- class StoreModeField
- : public BitField<KeyedAccessStoreMode, KeyTypeField::kNext, 3> {};
- class TokenField : public BitField<Token::Value, StoreModeField::kNext, 7> {};
+ class TokenField
+ : public BitField<Token::Value, Expression::kNextBitFieldIndex, 7> {};
class LookupHoistingModeField : public BitField<bool, TokenField::kNext, 1> {
};
FeedbackSlot slot_;
Expression* target_;
Expression* value_;
- SmallMapList receiver_types_;
};
class CompoundAssignment final : public Assignment {
@@ -2511,9 +2293,7 @@ class FunctionLiteral final : public Expression {
MaybeHandle<String> name() const {
return raw_name_ ? raw_name_->string() : MaybeHandle<String>();
}
- Handle<String> name(Isolate* isolate) const {
- return raw_name_ ? raw_name_->string() : isolate->factory()->empty_string();
- }
+ Handle<String> name(Isolate* isolate) const;
bool has_shared_name() const { return raw_name_ != nullptr; }
const AstConsString* raw_name() const { return raw_name_; }
void set_raw_name(const AstConsString* name) { raw_name_ = name; }
@@ -2733,7 +2513,7 @@ class ClassLiteral final : public Expression {
typedef ClassLiteralProperty Property;
Scope* scope() const { return scope_; }
- VariableProxy* class_variable_proxy() const { return class_variable_proxy_; }
+ Variable* class_variable() const { return class_variable_; }
Expression* extends() const { return extends_; }
void set_extends(Expression* e) { extends_ = e; }
FunctionLiteral* constructor() const { return constructor_; }
@@ -2760,26 +2540,20 @@ class ClassLiteral final : public Expression {
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
FunctionKind kind, FeedbackSlotCache* cache);
- bool NeedsProxySlot() const {
- return class_variable_proxy() != nullptr &&
- class_variable_proxy()->var()->IsUnallocated();
- }
-
FeedbackSlot HomeObjectSlot() const { return home_object_slot_; }
- FeedbackSlot ProxySlot() const { return proxy_slot_; }
private:
friend class AstNodeFactory;
- ClassLiteral(Scope* scope, VariableProxy* class_variable_proxy,
- Expression* extends, FunctionLiteral* constructor,
- ZoneList<Property*>* properties, int start_position,
- int end_position, bool has_name_static_property,
- bool has_static_computed_names, bool is_anonymous)
+ ClassLiteral(Scope* scope, Variable* class_variable, Expression* extends,
+ FunctionLiteral* constructor, ZoneList<Property*>* properties,
+ int start_position, int end_position,
+ bool has_name_static_property, bool has_static_computed_names,
+ bool is_anonymous)
: Expression(start_position, kClassLiteral),
end_position_(end_position),
scope_(scope),
- class_variable_proxy_(class_variable_proxy),
+ class_variable_(class_variable),
extends_(extends),
constructor_(constructor),
properties_(properties) {
@@ -2790,9 +2564,8 @@ class ClassLiteral final : public Expression {
int end_position_;
FeedbackSlot home_object_slot_;
- FeedbackSlot proxy_slot_;
Scope* scope_;
- VariableProxy* class_variable_proxy_;
+ Variable* class_variable_;
Expression* extends_;
FunctionLiteral* constructor_;
ZoneList<Property*>* properties_;
@@ -2991,6 +2764,31 @@ class GetIterator final : public Expression {
FeedbackSlot async_iterator_call_feedback_slot_;
};
+// Represents the spec operation `GetTemplateObject(templateLiteral)`
+// (defined at https://tc39.github.io/ecma262/#sec-gettemplateobject).
+class GetTemplateObject final : public Expression {
+ public:
+ ZoneList<Literal*>* cooked_strings() const { return cooked_strings_; }
+ ZoneList<Literal*>* raw_strings() const { return raw_strings_; }
+ int hash() const { return hash_; }
+
+ Handle<TemplateObjectDescription> GetOrBuildDescription(Isolate* isolate);
+
+ private:
+ friend class AstNodeFactory;
+
+ GetTemplateObject(ZoneList<Literal*>* cooked_strings,
+ ZoneList<Literal*>* raw_strings, int hash, int pos)
+ : Expression(pos, kGetTemplateObject),
+ cooked_strings_(cooked_strings),
+ raw_strings_(raw_strings),
+ hash_(hash) {}
+
+ ZoneList<Literal*>* cooked_strings_;
+ ZoneList<Literal*>* raw_strings_;
+ int hash_;
+};
+
// ----------------------------------------------------------------------------
// Basic visitor
// Sub-class should parametrize AstVisitor with itself, e.g.:
@@ -3185,9 +2983,13 @@ class AstNodeFactory final BASE_EMBEDDED {
STATEMENT_WITH_LABELS(DoWhileStatement)
STATEMENT_WITH_LABELS(WhileStatement)
STATEMENT_WITH_LABELS(ForStatement)
- STATEMENT_WITH_LABELS(SwitchStatement)
#undef STATEMENT_WITH_LABELS
+ SwitchStatement* NewSwitchStatement(ZoneList<const AstRawString*>* labels,
+ Expression* tag, int pos) {
+ return new (zone_) SwitchStatement(zone_, labels, tag, pos);
+ }
+
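
Construction is now two-phase: the tag is fixed when the statement is created, and clauses are appended into the statement-owned list while the body is parsed. A minimal sketch (variable names illustrative):

    SwitchStatement* switch_stmt =
        factory->NewSwitchStatement(labels, tag_expression, pos);
    // cases() hands back the owned ZoneList, so clauses go straight in:
    switch_stmt->cases()->Add(factory->NewCaseClause(label, statements), zone);
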
ForEachStatement* NewForEachStatement(ForEachStatement::VisitMode visit_mode,
ZoneList<const AstRawString*>* labels,
int pos) {
@@ -3292,9 +3094,9 @@ class AstNodeFactory final BASE_EMBEDDED {
SloppyBlockFunctionStatement(NewEmptyStatement(kNoSourcePosition));
}
- CaseClause* NewCaseClause(Expression* label, ZoneList<Statement*>* statements,
- int pos) {
- return new (zone_) CaseClause(label, statements, pos);
+ CaseClause* NewCaseClause(Expression* label,
+ ZoneList<Statement*>* statements) {
+ return new (zone_) CaseClause(label, statements);
}
Literal* NewStringLiteral(const AstRawString* string, int pos) {
@@ -3541,7 +3343,7 @@ class AstNodeFactory final BASE_EMBEDDED {
ClassLiteral::Property(key, value, kind, is_static, is_computed_name);
}
- ClassLiteral* NewClassLiteral(Scope* scope, VariableProxy* proxy,
+ ClassLiteral* NewClassLiteral(Scope* scope, Variable* variable,
Expression* extends,
FunctionLiteral* constructor,
ZoneList<ClassLiteral::Property*>* properties,
@@ -3550,7 +3352,7 @@ class AstNodeFactory final BASE_EMBEDDED {
bool has_static_computed_names,
bool is_anonymous) {
return new (zone_)
- ClassLiteral(scope, proxy, extends, constructor, properties,
+ ClassLiteral(scope, variable, extends, constructor, properties,
start_position, end_position, has_name_static_property,
has_static_computed_names, is_anonymous);
}
@@ -3599,6 +3401,13 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) GetIterator(iterable, hint, pos);
}
+ GetTemplateObject* NewGetTemplateObject(ZoneList<Literal*>* cooked_strings,
+ ZoneList<Literal*>* raw_strings,
+ int hash, int pos) {
+ return new (zone_)
+ GetTemplateObject(cooked_strings, raw_strings, hash, pos);
+ }
+
ImportCallExpression* NewImportCallExpression(Expression* args, int pos) {
return new (zone_) ImportCallExpression(args, pos);
}
diff --git a/deps/v8/src/ast/context-slot-cache.cc b/deps/v8/src/ast/context-slot-cache.cc
index b523330502..88d53713c2 100644
--- a/deps/v8/src/ast/context-slot-cache.cc
+++ b/deps/v8/src/ast/context-slot-cache.cc
@@ -8,13 +8,7 @@
#include "src/ast/scopes.h"
#include "src/bootstrapper.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/factory.h -> src/objects-inl.h
#include "src/objects-inl.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/feedback-vector.h ->
-// src/feedback-vector-inl.h
-#include "src/feedback-vector-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index eb9e3eb1c1..b3ab10aab9 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -126,16 +126,10 @@ void CallPrinter::VisitWithStatement(WithStatement* node) {
void CallPrinter::VisitSwitchStatement(SwitchStatement* node) {
Find(node->tag());
- ZoneList<CaseClause*>* cases = node->cases();
- for (int i = 0; i < cases->length(); i++) Find(cases->at(i));
-}
-
-
-void CallPrinter::VisitCaseClause(CaseClause* clause) {
- if (!clause->is_default()) {
- Find(clause->label());
+ for (CaseClause* clause : *node->cases()) {
+ if (!clause->is_default()) Find(clause->label());
+ FindStatements(clause->statements());
}
- FindStatements(clause->statements());
}
@@ -423,6 +417,8 @@ void CallPrinter::VisitGetIterator(GetIterator* node) {
}
}
+void CallPrinter::VisitGetTemplateObject(GetTemplateObject* node) {}
+
void CallPrinter::VisitImportCallExpression(ImportCallExpression* node) {
Print("ImportCall(");
Find(node->argument(), true);
@@ -837,20 +833,15 @@ void AstPrinter::VisitSwitchStatement(SwitchStatement* node) {
IndentedScope indent(this, "SWITCH", node->position());
PrintLabelsIndented(node->labels());
PrintIndentedVisit("TAG", node->tag());
- for (int i = 0; i < node->cases()->length(); i++) {
- Visit(node->cases()->at(i));
- }
-}
-
-
-void AstPrinter::VisitCaseClause(CaseClause* clause) {
- if (clause->is_default()) {
- IndentedScope indent(this, "DEFAULT", clause->position());
- PrintStatements(clause->statements());
- } else {
- IndentedScope indent(this, "CASE", clause->position());
- Visit(clause->label());
- PrintStatements(clause->statements());
+ for (CaseClause* clause : *node->cases()) {
+ if (clause->is_default()) {
+ IndentedScope indent(this, "DEFAULT");
+ PrintStatements(clause->statements());
+ } else {
+ IndentedScope indent(this, "CASE");
+ Visit(clause->label());
+ PrintStatements(clause->statements());
+ }
}
}
@@ -1261,6 +1252,10 @@ void AstPrinter::VisitGetIterator(GetIterator* node) {
Visit(node->iterable());
}
+void AstPrinter::VisitGetTemplateObject(GetTemplateObject* node) {
+ IndentedScope indent(this, "GET-TEMPLATE-OBJECT", node->position());
+}
+
void AstPrinter::VisitImportCallExpression(ImportCallExpression* node) {
IndentedScope indent(this, "IMPORT-CALL", node->position());
Visit(node->argument());
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index 5eaa198022..07eacd3fe9 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -668,11 +668,9 @@ void DeclarationScope::Analyze(ParseInfo* info) {
// 1) top-level code,
// 2) a function/eval/module on the top-level
// 3) a function/eval in a scope that was already resolved.
- // 4) an asm.js function
DCHECK(scope->scope_type() == SCRIPT_SCOPE ||
scope->outer_scope()->scope_type() == SCRIPT_SCOPE ||
- scope->outer_scope()->already_resolved_ ||
- (info->asm_function_scope() && scope->is_function_scope()));
+ scope->outer_scope()->already_resolved_);
// The outer scope is never lazy.
scope->set_should_eager_compile();
@@ -1209,12 +1207,7 @@ Variable* Scope::DeclareVariableName(const AstRawString* name,
}
DCHECK(!is_with_scope());
DCHECK(!is_eval_scope());
- // Unlike DeclareVariable, DeclareVariableName allows declaring variables in
- // catch scopes: Parser::RewriteCatchPattern bypasses DeclareVariable by
- // calling DeclareLocal directly, and it doesn't make sense to add a similar
- // bypass mechanism for PreParser.
- DCHECK(is_declaration_scope() || (IsLexicalVariableMode(mode) &&
- (is_block_scope() || is_catch_scope())));
+ DCHECK(is_declaration_scope() || IsLexicalVariableMode(mode));
DCHECK(scope_info_.is_null());
// Declare the variable in the declaration scope.
@@ -1240,6 +1233,19 @@ Variable* Scope::DeclareVariableName(const AstRawString* name,
}
}
+void Scope::DeclareCatchVariableName(const AstRawString* name) {
+ DCHECK(!already_resolved_);
+ DCHECK(GetDeclarationScope()->is_being_lazily_parsed());
+ DCHECK(is_catch_scope());
+ DCHECK(scope_info_.is_null());
+
+ if (FLAG_preparser_scope_analysis) {
+ Declare(zone(), name, VAR);
+ } else {
+ variables_.DeclareName(zone(), name, VAR);
+ }
+}
+
void Scope::AddUnresolved(VariableProxy* proxy) {
DCHECK(!already_resolved_);
DCHECK(!proxy->is_resolved());
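For context, the new DeclareCatchVariableName entry point replaces the catch-scope special case that DeclareVariableName used to allow. A minimal sketch of a call site follows; the helper name RecordSimpleCatchParameter is invented for illustration and not part of the patch.

// Hypothetical helper: catch parameters now go through the dedicated entry
// point instead of the general DeclareVariableName path.
void RecordSimpleCatchParameter(Scope* catch_scope, const AstRawString* name) {
  DCHECK(catch_scope->is_catch_scope());
  // Declares |name| with VAR mode in the catch scope; with
  // FLAG_preparser_scope_analysis a full Variable is created, otherwise only
  // the name is recorded.
  catch_scope->DeclareCatchVariableName(name);
}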
@@ -1735,8 +1741,8 @@ void Scope::Print(int n) {
if (is_declaration_scope() && AsDeclarationScope()->calls_sloppy_eval()) {
Indent(n1, "// scope calls sloppy 'eval'\n");
}
- if (is_declaration_scope() && AsDeclarationScope()->uses_super_property()) {
- Indent(n1, "// scope uses 'super' property\n");
+ if (is_declaration_scope() && AsDeclarationScope()->NeedsHomeObject()) {
+ Indent(n1, "// scope needs home object\n");
}
if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n");
if (is_declaration_scope()) {
@@ -1795,8 +1801,8 @@ void Scope::Print(int n) {
void Scope::CheckScopePositions() {
// Visible leaf scopes must have real positions.
if (!is_hidden() && inner_scope_ == nullptr) {
- CHECK_NE(kNoSourcePosition, start_position());
- CHECK_NE(kNoSourcePosition, end_position());
+ DCHECK_NE(kNoSourcePosition, start_position());
+ DCHECK_NE(kNoSourcePosition, end_position());
}
for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
scope->CheckScopePositions();
@@ -1992,7 +1998,7 @@ void Scope::ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var) {
if (info->is_native()) {
// To avoid polluting the global object in native scripts
// - Variables must not be allocated to the global scope.
- CHECK_NOT_NULL(outer_scope());
+ DCHECK_NOT_NULL(outer_scope());
// - Variables must be bound locally or unallocated.
if (var->IsGlobalObjectProperty()) {
// The following variable name may be minified. If so, disable
@@ -2002,10 +2008,10 @@ void Scope::ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var) {
name->ToCString().get());
}
VariableLocation location = var->location();
- CHECK(location == VariableLocation::LOCAL ||
- location == VariableLocation::CONTEXT ||
- location == VariableLocation::PARAMETER ||
- location == VariableLocation::UNALLOCATED);
+ DCHECK(location == VariableLocation::LOCAL ||
+ location == VariableLocation::CONTEXT ||
+ location == VariableLocation::PARAMETER ||
+ location == VariableLocation::UNALLOCATED);
}
#endif
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index 655ce8eda4..fe15508027 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -201,6 +201,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// The return value is meaningful only if FLAG_preparser_scope_analysis is on.
Variable* DeclareVariableName(const AstRawString* name, VariableMode mode);
+ void DeclareCatchVariableName(const AstRawString* name);
// Declarations list.
ThreadedList<Declaration>* declarations() { return &decls_; }
@@ -665,14 +666,13 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// Inform the scope that the corresponding code uses "super".
void RecordSuperPropertyUsage() {
- DCHECK((IsConciseMethod(function_kind()) ||
- IsAccessorFunction(function_kind()) ||
- IsClassConstructor(function_kind())));
+ DCHECK(IsConciseMethod(function_kind()) ||
+ IsAccessorFunction(function_kind()) ||
+ IsClassConstructor(function_kind()));
scope_uses_super_property_ = true;
}
- // Does this scope access "super" property (super.foo).
- bool uses_super_property() const { return scope_uses_super_property_; }
+ // Does this scope access the "super" property (super.foo)?
bool NeedsHomeObject() const {
return scope_uses_super_property_ ||
(inner_scope_calls_eval_ && (IsConciseMethod(function_kind()) ||
diff --git a/deps/v8/src/background-parsing-task.cc b/deps/v8/src/background-parsing-task.cc
index fe9a26fe92..9d155d5af8 100644
--- a/deps/v8/src/background-parsing-task.cc
+++ b/deps/v8/src/background-parsing-task.cc
@@ -26,6 +26,7 @@ BackgroundParsingTask::BackgroundParsingTask(
// on the foreground thread.
DCHECK(options == ScriptCompiler::kProduceParserCache ||
options == ScriptCompiler::kProduceCodeCache ||
+ options == ScriptCompiler::kProduceFullCodeCache ||
options == ScriptCompiler::kNoCompileOptions);
VMState<PARSER> state(isolate);
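The DCHECK now also admits kProduceFullCodeCache, the compile option added to ScriptCompiler in this update. A hedged sketch of an embedder requesting it when starting a streaming compile; |isolate| and |stream| are assumed to be a live v8::Isolate* and a caller-provided ScriptCompiler::ExternalSourceStream*.

// Only the compile option is the point of this fragment; everything else is
// standard streaming-compile setup per the v8.h of this era.
v8::ScriptCompiler::StreamedSource source(
    stream, v8::ScriptCompiler::StreamedSource::UTF8);
v8::ScriptCompiler::ScriptStreamingTask* task =
    v8::ScriptCompiler::StartStreamingScript(
        isolate, &source, v8::ScriptCompiler::kProduceFullCodeCache);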
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
index 21d973ef7e..593b4972e1 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/bailout-reason.h
@@ -18,8 +18,6 @@ namespace internal {
V(kAllocationIsNotDoubleAligned, "Allocation is not double aligned") \
V(kAPICallReturnedInvalidObject, "API call returned invalid object") \
V(kBailedOutDueToDependencyChange, "Bailed out due to dependency change") \
- V(kBothRegistersWereSmisInSelectNonSmi, \
- "Both registers were smis in SelectNonSmi") \
V(kClassConstructorFunction, "Class constructor function") \
V(kClassLiteral, "Class literal") \
V(kCodeGenerationFailed, "Code generation failed") \
@@ -51,8 +49,6 @@ namespace internal {
"The function_data field should be a BytecodeArray on interpreter entry") \
V(kGenerator, "Generator") \
V(kGetIterator, "GetIterator") \
- V(kGlobalFunctionsMustHaveInitialMap, \
- "Global functions must have initial map") \
V(kGraphBuildingFailed, "Optimized graph construction failed") \
V(kHeapNumberMapRegisterClobbered, "HeapNumberMap register clobbered") \
V(kIndexIsNegative, "Index is negative") \
@@ -117,7 +113,6 @@ namespace internal {
V(kTryFinallyStatement, "TryFinallyStatement") \
V(kUnalignedAllocationInNewSpace, "Unaligned allocation in new space") \
V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier") \
- V(kUnexpectedAllocationTop, "Unexpected allocation top") \
V(kUnexpectedColorFound, "Unexpected color bit pattern found") \
V(kUnexpectedElementsKindInArrayConstructor, \
"Unexpected ElementsKind in array constructor") \
diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h
index 1db96e6027..504be0370a 100644
--- a/deps/v8/src/base/bits.h
+++ b/deps/v8/src/base/bits.h
@@ -117,6 +117,7 @@ inline unsigned CountLeadingZeros64(uint64_t value) {
#endif
}
+DEFINE_32_64_OVERLOADS(CountLeadingZeros)
// ReverseBits(value) returns |value| in reverse bit order.
template <typename T>
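Assuming DEFINE_32_64_OVERLOADS(CountLeadingZeros) expands to unsuffixed overloads that dispatch on operand width, both calls in this sketch resolve from the argument type alone, with no explicit 32/64 suffix.

#include <cstdint>
#include "src/base/bits.h"

uint32_t w = 0x00010000u;        // bit 16 set
uint64_t d = uint64_t{1} << 32;  // bit 32 set
// The 32-bit overload yields 31 - 16 == 15; the 64-bit one 63 - 32 == 31.
unsigned lz32 = v8::base::bits::CountLeadingZeros(w);  // 15
unsigned lz64 = v8::base::bits::CountLeadingZeros(d);  // 31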
diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h
index 73488de5bd..df0d1110a5 100644
--- a/deps/v8/src/base/build_config.h
+++ b/deps/v8/src/base/build_config.h
@@ -201,8 +201,8 @@
#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK 0
#endif
-// Number of bits to represent the page size for paged spaces. The value of 20
-// gives 1Mb bytes per page.
+// Number of bits to represent the page size for paged spaces. The value of 19
+// gives 512 KB per page.
const int kPageSizeBits = 19;
#endif // V8_BASE_BUILD_CONFIG_H_
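The arithmetic behind the new constant, restated as a compile-time check; the static_assert is illustrative and not part of the patch.

#include <cstddef>

constexpr int kPageSizeBits = 19;
constexpr std::size_t kPageSize = std::size_t{1} << kPageSizeBits;  // 524288
static_assert(kPageSize == 512 * 1024, "19 bits give 512 KB pages");
// The previous value of 20 gave std::size_t{1} << 20 == 1048576 bytes (1 MB).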
diff --git a/deps/v8/src/base/logging.cc b/deps/v8/src/base/logging.cc
index 740f1fa987..13fbec0e90 100644
--- a/deps/v8/src/base/logging.cc
+++ b/deps/v8/src/base/logging.cc
@@ -17,8 +17,12 @@ namespace base {
namespace {
+void DefaultDcheckHandler(const char* file, int line, const char* message);
+
void (*g_print_stack_trace)() = nullptr;
+void (*g_dcheck_function)(const char*, int, const char*) = DefaultDcheckHandler;
+
void PrettyPrintChar(std::ostream& os, int ch) {
switch (ch) {
#define CHAR_PRINT_CASE(ch) \
@@ -48,12 +52,20 @@ void PrettyPrintChar(std::ostream& os, int ch) {
}
}
+void DefaultDcheckHandler(const char* file, int line, const char* message) {
+ V8_Fatal(file, line, "Debug check failed: %s.", message);
+}
+
} // namespace
void SetPrintStackTrace(void (*print_stack_trace)()) {
g_print_stack_trace = print_stack_trace;
}
+void SetDcheckFunction(void (*dcheck_function)(const char*, int, const char*)) {
+ g_dcheck_function = dcheck_function ? dcheck_function : &DefaultDcheckHandler;
+}
+
// Define specialization to pretty print characters (escaping non-printable
// characters) and to print c strings as pointers instead of strings.
#define DEFINE_PRINT_CHECK_OPERAND_CHAR(type) \
@@ -125,3 +137,7 @@ void V8_Fatal(const char* file, int line, const char* format, ...) {
fflush(stderr);
v8::base::OS::Abort();
}
+
+void V8_Dcheck(const char* file, int line, const char* message) {
+ v8::base::g_dcheck_function(file, line, message);
+}
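Together these changes route every DCHECK failure through a replaceable handler instead of calling V8_Fatal directly. A minimal sketch of an embedder installing a non-fatal handler; the handler itself is invented for illustration.

#include <cstdio>
#include "src/base/logging.h"

namespace {
// Reports the failed debug check but lets the process continue.
void SoftDcheckHandler(const char* file, int line, const char* message) {
  std::fprintf(stderr, "DCHECK failed at %s:%d: %s\n", file, line, message);
}
}  // namespace

void InstallSoftDchecks() {
  v8::base::SetDcheckFunction(&SoftDcheckHandler);
  // Passing nullptr would restore DefaultDcheckHandler, which calls V8_Fatal.
}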
diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h
index e89dfd3c3a..889c6885b2 100644
--- a/deps/v8/src/base/logging.h
+++ b/deps/v8/src/base/logging.h
@@ -17,6 +17,9 @@
[[noreturn]] PRINTF_FORMAT(3, 4) V8_BASE_EXPORT V8_NOINLINE
void V8_Fatal(const char* file, int line, const char* format, ...);
+V8_BASE_EXPORT V8_NOINLINE void V8_Dcheck(const char* file, int line,
+ const char* message);
+
// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
// development, but they should not be relied on in the final product.
#ifdef DEBUG
@@ -41,6 +44,10 @@ namespace base {
// Overwrite the default function that prints a stack trace.
V8_BASE_EXPORT void SetPrintStackTrace(void (*print_stack_trace_)());
+// Override the default function that handles DCHECKs.
+V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_function)(const char*, int,
+ const char*));
+
// CHECK dies with a fatal error if condition is not true. It is *not*
// controlled by DEBUG, so the check will be executed regardless of
// compilation mode.
@@ -57,36 +64,36 @@ V8_BASE_EXPORT void SetPrintStackTrace(void (*print_stack_trace_)());
#ifdef DEBUG
-#define DCHECK_WITH_MSG(condition, message) \
- do { \
- if (V8_UNLIKELY(!(condition))) { \
- V8_Fatal(__FILE__, __LINE__, "Debug check failed: %s.", message); \
- } \
+#define DCHECK_WITH_MSG(condition, message) \
+ do { \
+ if (V8_UNLIKELY(!(condition))) { \
+ V8_Dcheck(__FILE__, __LINE__, message); \
+ } \
} while (0)
#define DCHECK(condition) DCHECK_WITH_MSG(condition, #condition)
// Helper macro for binary operators.
// Don't use this macro directly in your code, use CHECK_EQ et al below.
-#define CHECK_OP(name, op, lhs, rhs) \
- do { \
- if (std::string* _msg = ::v8::base::Check##name##Impl< \
- typename v8::base::pass_value_or_ref<decltype(lhs)>::type, \
- typename v8::base::pass_value_or_ref<decltype(rhs)>::type>( \
- (lhs), (rhs), #lhs " " #op " " #rhs)) { \
- V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", _msg->c_str()); \
- delete _msg; \
- } \
+#define CHECK_OP(name, op, lhs, rhs) \
+ do { \
+ if (std::string* _msg = ::v8::base::Check##name##Impl< \
+ typename ::v8::base::pass_value_or_ref<decltype(lhs)>::type, \
+ typename ::v8::base::pass_value_or_ref<decltype(rhs)>::type>( \
+ (lhs), (rhs), #lhs " " #op " " #rhs)) { \
+ V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", _msg->c_str()); \
+ delete _msg; \
+ } \
} while (0)
-#define DCHECK_OP(name, op, lhs, rhs) \
- do { \
- if (std::string* _msg = ::v8::base::Check##name##Impl< \
- typename v8::base::pass_value_or_ref<decltype(lhs)>::type, \
- typename v8::base::pass_value_or_ref<decltype(rhs)>::type>( \
- (lhs), (rhs), #lhs " " #op " " #rhs)) { \
- V8_Fatal(__FILE__, __LINE__, "Debug check failed: %s.", _msg->c_str()); \
- delete _msg; \
- } \
+#define DCHECK_OP(name, op, lhs, rhs) \
+ do { \
+ if (std::string* _msg = ::v8::base::Check##name##Impl< \
+ typename ::v8::base::pass_value_or_ref<decltype(lhs)>::type, \
+ typename ::v8::base::pass_value_or_ref<decltype(rhs)>::type>( \
+ (lhs), (rhs), #lhs " " #op " " #rhs)) { \
+ V8_Dcheck(__FILE__, __LINE__, _msg->c_str()); \
+ delete _msg; \
+ } \
} while (0)
#else
@@ -94,22 +101,46 @@ V8_BASE_EXPORT void SetPrintStackTrace(void (*print_stack_trace_)());
// Make all CHECK functions discard their log strings to reduce code
// bloat for official release builds.
-#define CHECK_OP(name, op, lhs, rhs) \
- do { \
- bool _cmp = ::v8::base::Cmp##name##Impl< \
- typename v8::base::pass_value_or_ref<decltype(lhs)>::type, \
- typename v8::base::pass_value_or_ref<decltype(rhs)>::type>((lhs), \
- (rhs)); \
- CHECK_WITH_MSG(_cmp, #lhs " " #op " " #rhs); \
+#define CHECK_OP(name, op, lhs, rhs) \
+ do { \
+ bool _cmp = ::v8::base::Cmp##name##Impl< \
+ typename ::v8::base::pass_value_or_ref<decltype(lhs)>::type, \
+ typename ::v8::base::pass_value_or_ref<decltype(rhs)>::type>((lhs), \
+ (rhs)); \
+ CHECK_WITH_MSG(_cmp, #lhs " " #op " " #rhs); \
} while (0)
#define DCHECK_WITH_MSG(condition, msg) void(0);
#endif
-template <typename Op>
-void PrintCheckOperand(std::ostream& os, Op op) {
- os << op;
+// Define PrintCheckOperand<T> for each T which defines operator<< for ostream.
+template <typename T>
+typename std::enable_if<has_output_operator<T>::value>::type PrintCheckOperand(
+ std::ostream& os, T val) {
+ os << std::forward<T>(val);
+}
+
+// Define PrintCheckOperand<T> for enums which have no operator<<.
+template <typename T>
+typename std::enable_if<std::is_enum<T>::value &&
+ !has_output_operator<T>::value>::type
+PrintCheckOperand(std::ostream& os, T val) {
+ using underlying_t = typename std::underlying_type<T>::type;
+ // 8-bit types are not printed as numbers, so extend them to 16 bits.
+ using int_t = typename std::conditional<
+ std::is_same<underlying_t, uint8_t>::value, uint16_t,
+ typename std::conditional<std::is_same<underlying_t, int8_t>::value,
+ int16_t, underlying_t>::type>::type;
+ PrintCheckOperand(os, static_cast<int_t>(static_cast<underlying_t>(val)));
+}
+
+// Define default PrintCheckOperand<T> for non-printable types.
+template <typename T>
+typename std::enable_if<!has_output_operator<T>::value &&
+ !std::is_enum<T>::value>::type
+PrintCheckOperand(std::ostream& os, T val) {
+ os << "<unprintable>";
}
// Define specializations for character types, defined in logging.cc.
@@ -136,9 +167,9 @@ template <typename Lhs, typename Rhs>
std::string* MakeCheckOpString(Lhs lhs, Rhs rhs, char const* msg) {
std::ostringstream ss;
ss << msg << " (";
- PrintCheckOperand(ss, lhs);
+ PrintCheckOperand<Lhs>(ss, lhs);
ss << " vs. ";
- PrintCheckOperand(ss, rhs);
+ PrintCheckOperand<Rhs>(ss, rhs);
ss << ")";
return new std::string(ss.str());
}
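The effect of the new overload set, assuming an enum class with no operator<<: a failing check now prints the underlying integer values, widened past 8 bits so they appear as numbers rather than characters, and the expression compiles without a user-provided stream operator. A sketch:

#include <cstdint>
#include "src/base/logging.h"

enum class Phase : uint8_t { kMark = 1, kSweep = 2 };  // no operator<< defined

void Demo() {
  Phase a = Phase::kMark;
  Phase b = Phase::kSweep;
  // On mismatch this prints "Check failed: a == b (1 vs. 2)": the enum
  // overload converts through the uint8_t underlying type and widens it to
  // 16 bits before streaming.
  CHECK_EQ(a, b);
}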
diff --git a/deps/v8/src/base/platform/platform-aix.cc b/deps/v8/src/base/platform/platform-aix.cc
index c14a3633c5..6c1bde7b85 100644
--- a/deps/v8/src/base/platform/platform-aix.cc
+++ b/deps/v8/src/base/platform/platform-aix.cc
@@ -65,87 +65,46 @@ double AIXTimezoneCache::LocalTimeOffset() {
TimezoneCache* OS::CreateTimezoneCache() { return new AIXTimezoneCache(); }
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, getpagesize());
int prot = GetProtectionFromMemoryPermission(access);
- void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
+ kMmapFdOffset);
if (mbase == MAP_FAILED) return NULL;
*allocated = msize;
return mbase;
}
+// static
+void* OS::ReserveRegion(size_t size, void* hint) {
+ void* result = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS,
+ kMmapFd, kMmapFdOffset);
-static unsigned StringToLong(char* buffer) {
- return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
-}
-
+ if (result == MAP_FAILED) return nullptr;
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
- std::vector<SharedLibraryAddress> result;
- static const int MAP_LENGTH = 1024;
- int fd = open("/proc/self/maps", O_RDONLY);
- if (fd < 0) return result;
- while (true) {
- char addr_buffer[11];
- addr_buffer[0] = '0';
- addr_buffer[1] = 'x';
- addr_buffer[10] = 0;
- ssize_t rc = read(fd, addr_buffer + 2, 8);
- if (rc < 8) break;
- unsigned start = StringToLong(addr_buffer);
- rc = read(fd, addr_buffer + 2, 1);
- if (rc < 1) break;
- if (addr_buffer[2] != '-') break;
- rc = read(fd, addr_buffer + 2, 8);
- if (rc < 8) break;
- unsigned end = StringToLong(addr_buffer);
- char buffer[MAP_LENGTH];
- int bytes_read = -1;
- do {
- bytes_read++;
- if (bytes_read >= MAP_LENGTH - 1) break;
- rc = read(fd, buffer + bytes_read, 1);
- if (rc < 1) break;
- } while (buffer[bytes_read] != '\n');
- buffer[bytes_read] = 0;
- // Ignore mappings that are not executable.
- if (buffer[3] != 'x') continue;
- char* start_of_path = index(buffer, '/');
- // There may be no filename in this line. Skip to next.
- if (start_of_path == NULL) continue;
- buffer[bytes_read] = 0;
- result.push_back(SharedLibraryAddress(start_of_path, start, end));
- }
- close(fd);
return result;
}
-
-void OS::SignalCodeMovingGC() {}
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) {}
-
-VirtualMemory::VirtualMemory(size_t size, void* hint)
- : address_(ReserveRegion(size, hint)), size_(size) {}
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
- : address_(NULL), size_(0) {
+// static
+void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
+ size_t* allocated) {
DCHECK((alignment % OS::AllocateAlignment()) == 0);
hint = AlignedAddress(hint, alignment);
size_t request_size =
RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation = mmap(hint, request_size, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd, kMmapFdOffset);
- if (reservation == MAP_FAILED) return;
+ void* result = ReserveRegion(request_size, hint);
+ if (result == nullptr) {
+ *allocated = 0;
+ return nullptr;
+ }
- uint8_t* base = static_cast<uint8_t*>(reservation);
+ uint8_t* base = static_cast<uint8_t*>(result);
uint8_t* aligned_base = RoundUp(base, alignment);
DCHECK_LE(base, aligned_base);
@@ -167,74 +126,82 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
DCHECK(aligned_size == request_size);
- address_ = static_cast<void*>(aligned_base);
- size_ = aligned_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- bool result = ReleaseRegion(address(), size());
- DCHECK(result);
- USE(result);
- }
-}
-
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
+ *allocated = aligned_size;
+ return static_cast<void*>(aligned_base);
}
+// static
+bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return UncommitRegion(address, size);
-}
-
+ if (mprotect(address, size, prot) == -1) return false;
-bool VirtualMemory::Guard(void* address) {
- OS::Guard(address, OS::CommitPageSize());
return true;
}
-void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
- void* result = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS,
- kMmapFd, kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
+// static
+bool OS::UncommitRegion(void* address, size_t size) {
+ return mprotect(address, size, PROT_NONE) != -1;
}
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-
- if (mprotect(base, size, prot) == -1) return false;
-
- return true;
+// static
+bool OS::ReleaseRegion(void* address, size_t size) {
+ return munmap(address, size) == 0;
}
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- return mprotect(base, size, PROT_NONE) != -1;
+// static
+bool OS::ReleasePartialRegion(void* address, size_t size) {
+ return munmap(address, size) == 0;
}
-bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
- void* free_start, size_t free_size) {
- return munmap(free_start, free_size) == 0;
-}
+// static
+bool OS::HasLazyCommits() { return true; }
+static unsigned StringToLong(char* buffer) {
+ return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
+}
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
- return munmap(base, size) == 0;
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+ std::vector<SharedLibraryAddress> result;
+ static const int MAP_LENGTH = 1024;
+ int fd = open("/proc/self/maps", O_RDONLY);
+ if (fd < 0) return result;
+ while (true) {
+ char addr_buffer[11];
+ addr_buffer[0] = '0';
+ addr_buffer[1] = 'x';
+ addr_buffer[10] = 0;
+ ssize_t rc = read(fd, addr_buffer + 2, 8);
+ if (rc < 8) break;
+ unsigned start = StringToLong(addr_buffer);
+ rc = read(fd, addr_buffer + 2, 1);
+ if (rc < 1) break;
+ if (addr_buffer[2] != '-') break;
+ rc = read(fd, addr_buffer + 2, 8);
+ if (rc < 8) break;
+ unsigned end = StringToLong(addr_buffer);
+ char buffer[MAP_LENGTH];
+ int bytes_read = -1;
+ do {
+ bytes_read++;
+ if (bytes_read >= MAP_LENGTH - 1) break;
+ rc = read(fd, buffer + bytes_read, 1);
+ if (rc < 1) break;
+ } while (buffer[bytes_read] != '\n');
+ buffer[bytes_read] = 0;
+ // Ignore mappings that are not executable.
+ if (buffer[3] != 'x') continue;
+ char* start_of_path = index(buffer, '/');
+ // There may be no filename in this line. Skip to next.
+ if (start_of_path == NULL) continue;
+ buffer[bytes_read] = 0;
+ result.push_back(SharedLibraryAddress(start_of_path, start, end));
+ }
+ close(fd);
+ return result;
}
+void OS::SignalCodeMovingGC(void* hint) {}
-bool VirtualMemory::HasLazyCommits() { return true; }
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-cygwin.cc b/deps/v8/src/base/platform/platform-cygwin.cc
index 6868fd94ad..f20c530d67 100644
--- a/deps/v8/src/base/platform/platform-cygwin.cc
+++ b/deps/v8/src/base/platform/platform-cygwin.cc
@@ -26,6 +26,31 @@
namespace v8 {
namespace base {
+namespace {
+
+// The VirtualMemory implementation is taken from platform-win32.cc.
+// The mmap-based virtual memory implementation as it is used on most posix
+// platforms does not work well because Cygwin does not support MAP_FIXED.
+// This causes VirtualMemory::Commit to not always commit the memory region
+// specified.
+
+static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
+ void* hint) {
+ LPVOID base = NULL;
+
+ if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
+ // For executable pages, try to randomize the allocation address
+ base = VirtualAlloc(hint, size, action, protection);
+ }
+
+ // If that fails, give up and let the OS find an address to use.
+ if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
+
+ return base;
+}
+
+} // namespace
+
class CygwinTimezoneCache : public PosixTimezoneCache {
const char* LocalTimezone(double time) override;
@@ -65,6 +90,74 @@ void* OS::Allocate(const size_t requested, size_t* allocated,
return mbase;
}
+// static
+void* OS::ReserveRegion(size_t size, void* hint) {
+ return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
+}
+
+// static
+void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
+ size_t* allocated) {
+ hint = AlignedAddress(hint, alignment);
+ DCHECK((alignment % OS::AllocateAlignment()) == 0);
+ size_t request_size =
+ RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* address = ReserveRegion(request_size, hint);
+ if (address == NULL) {
+ *allocated = 0;
+ return nullptr;
+ }
+ uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
+ // Try reducing the size by freeing and then reallocating a specific area.
+ bool result = ReleaseRegion(address, request_size);
+ USE(result);
+ DCHECK(result);
+ address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
+ if (address != nullptr) {
+ request_size = size;
+ DCHECK(base == static_cast<uint8_t*>(address));
+ } else {
+ // Resizing failed, just go with a bigger area.
+ address = ReserveRegion(request_size, hint);
+ if (address == nullptr) {
+ *allocated = 0;
+ return nullptr;
+ }
+ }
+
+ *allocated = request_size;
+ return static_cast<void*>(address);
+}
+
+// static
+bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
+ int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+ if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
+ return false;
+ }
+ return true;
+}
+
+// static
+bool OS::UncommitRegion(void* address, size_t size) {
+ return VirtualFree(address, size, MEM_DECOMMIT) != 0;
+}
+
+// static
+bool OS::ReleaseRegion(void* address, size_t size) {
+ return VirtualFree(address, 0, MEM_RELEASE) != 0;
+}
+
+// static
+bool OS::ReleasePartialRegion(void* address, size_t size) {
+ return VirtualFree(address, size, MEM_DECOMMIT) != 0;
+}
+
+// static
+bool OS::HasLazyCommits() {
+ // TODO(alph): implement for the platform.
+ return false;
+}
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
std::vector<SharedLibraryAddress> result;
@@ -126,129 +219,9 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return result;
}
-
-void OS::SignalCodeMovingGC() {
+void OS::SignalCodeMovingGC(void* hint) {
// Nothing to do on Cygwin.
}
-
-// The VirtualMemory implementation is taken from platform-win32.cc.
-// The mmap-based virtual memory implementation as it is used on most posix
-// platforms does not work well because Cygwin does not support MAP_FIXED.
-// This causes VirtualMemory::Commit to not always commit the memory region
-// specified.
-
-static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
- void* hint) {
- LPVOID base = NULL;
-
- if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
- // For exectutable pages try and randomize the allocation address
- base = VirtualAlloc(hint, size, action, protection);
- }
-
- // After three attempts give up and let the OS find an address to use.
- if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
-
- return base;
-}
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-VirtualMemory::VirtualMemory(size_t size, void* hint)
- : address_(ReserveRegion(size, hint)), size_(size) {}
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
- : address_(NULL), size_(0) {
- hint = AlignedAddress(hint, alignment);
- DCHECK((alignment % OS::AllocateAlignment()) == 0);
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* address = ReserveRegion(request_size, hint);
- if (address == NULL) return;
- uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
- // Try reducing the size by freeing and then reallocating a specific area.
- bool result = ReleaseRegion(address, request_size);
- USE(result);
- DCHECK(result);
- address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
- if (address != NULL) {
- request_size = size;
- DCHECK(base == static_cast<uint8_t*>(address));
- } else {
- // Resizing failed, just go with a bigger area.
- address = ReserveRegion(request_size, hint);
- if (address == NULL) return;
- }
- address_ = address;
- size_ = request_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- bool result = ReleaseRegion(address_, size_);
- DCHECK(result);
- USE(result);
- }
-}
-
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- DCHECK(IsReserved());
- return UncommitRegion(address, size);
-}
-
-void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
- return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
- int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
- if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
- return false;
- }
- return true;
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- if (NULL == VirtualAlloc(address,
- OS::CommitPageSize(),
- MEM_COMMIT,
- PAGE_NOACCESS)) {
- return false;
- }
- return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- return VirtualFree(base, size, MEM_DECOMMIT) != 0;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
- return VirtualFree(base, 0, MEM_RELEASE) != 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-freebsd.cc b/deps/v8/src/base/platform/platform-freebsd.cc
index 3910c50c4b..a1eb7e8928 100644
--- a/deps/v8/src/base/platform/platform-freebsd.cc
+++ b/deps/v8/src/base/platform/platform-freebsd.cc
@@ -40,91 +40,46 @@ TimezoneCache* OS::CreateTimezoneCache() {
return new PosixDefaultTimezoneCache();
}
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, getpagesize());
int prot = GetProtectionFromMemoryPermission(access);
- void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+ void* mbase =
+ mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
if (mbase == MAP_FAILED) return NULL;
*allocated = msize;
return mbase;
}
+// static
+void* OS::ReserveRegion(size_t size, void* hint) {
+ void* result = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, kMmapFd,
+ kMmapFdOffset);
-static unsigned StringToLong(char* buffer) {
- return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
-}
-
+ if (result == MAP_FAILED) return NULL;
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
- std::vector<SharedLibraryAddress> result;
- static const int MAP_LENGTH = 1024;
- int fd = open("/proc/self/maps", O_RDONLY);
- if (fd < 0) return result;
- while (true) {
- char addr_buffer[11];
- addr_buffer[0] = '0';
- addr_buffer[1] = 'x';
- addr_buffer[10] = 0;
- ssize_t bytes_read = read(fd, addr_buffer + 2, 8);
- if (bytes_read < 8) break;
- unsigned start = StringToLong(addr_buffer);
- bytes_read = read(fd, addr_buffer + 2, 1);
- if (bytes_read < 1) break;
- if (addr_buffer[2] != '-') break;
- bytes_read = read(fd, addr_buffer + 2, 8);
- if (bytes_read < 8) break;
- unsigned end = StringToLong(addr_buffer);
- char buffer[MAP_LENGTH];
- bytes_read = -1;
- do {
- bytes_read++;
- if (bytes_read >= MAP_LENGTH - 1)
- break;
- bytes_read = read(fd, buffer + bytes_read, 1);
- if (bytes_read < 1) break;
- } while (buffer[bytes_read] != '\n');
- buffer[bytes_read] = 0;
- // Ignore mappings that are not executable.
- if (buffer[3] != 'x') continue;
- char* start_of_path = index(buffer, '/');
- // There may be no filename in this line. Skip to next.
- if (start_of_path == NULL) continue;
- buffer[bytes_read] = 0;
- result.push_back(SharedLibraryAddress(start_of_path, start, end));
- }
- close(fd);
return result;
}
-
-void OS::SignalCodeMovingGC() {
-}
-
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-VirtualMemory::VirtualMemory(size_t size, void* hint)
- : address_(ReserveRegion(size, hint)), size_(size) {}
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
- : address_(NULL), size_(0) {
+// static
+void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
+ size_t* allocated) {
hint = AlignedAddress(hint, alignment);
DCHECK((alignment % OS::AllocateAlignment()) == 0);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation = mmap(hint, request_size, PROT_NONE,
- MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
- if (reservation == MAP_FAILED) return;
+ void* result = ReserveRegion(request_size, hint);
+ if (result == nullptr) {
+ *allocated = 0;
+ return nullptr;
+ }
- uint8_t* base = static_cast<uint8_t*>(reservation);
+ uint8_t* base = static_cast<uint8_t*>(result);
uint8_t* aligned_base = RoundUp(base, alignment);
DCHECK_LE(base, aligned_base);
@@ -146,87 +101,88 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
DCHECK(aligned_size == request_size);
- address_ = static_cast<void*>(aligned_base);
- size_ = aligned_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- bool result = ReleaseRegion(address(), size());
- DCHECK(result);
- USE(result);
- }
-}
-
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
+ *allocated = aligned_size;
+ return static_cast<void*>(aligned_base);
}
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- OS::Guard(address, OS::CommitPageSize());
- return true;
-}
-
-void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
- void* result = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, kMmapFd,
- kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+// static
+bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(base,
- size,
- prot,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED,
- kMmapFd,
+ if (MAP_FAILED == mmap(address, size, prot,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, kMmapFd,
kMmapFdOffset)) {
return false;
}
return true;
}
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- return mmap(base,
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
+// static
+bool OS::UncommitRegion(void* address, size_t size) {
+ return mmap(address, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+ kMmapFd, kMmapFdOffset) != MAP_FAILED;
}
-bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
- void* free_start, size_t free_size) {
- return munmap(free_start, free_size) == 0;
+// static
+bool OS::ReleaseRegion(void* address, size_t size) {
+ return munmap(address, size) == 0;
}
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
- return munmap(base, size) == 0;
+// static
+bool OS::ReleasePartialRegion(void* address, size_t size) {
+ return munmap(address, size) == 0;
}
-
-bool VirtualMemory::HasLazyCommits() {
+// static
+bool OS::HasLazyCommits() {
// TODO(alph): implement for the platform.
return false;
}
+static unsigned StringToLong(char* buffer) {
+ return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
+}
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+ std::vector<SharedLibraryAddress> result;
+ static const int MAP_LENGTH = 1024;
+ int fd = open("/proc/self/maps", O_RDONLY);
+ if (fd < 0) return result;
+ while (true) {
+ char addr_buffer[11];
+ addr_buffer[0] = '0';
+ addr_buffer[1] = 'x';
+ addr_buffer[10] = 0;
+ ssize_t bytes_read = read(fd, addr_buffer + 2, 8);
+ if (bytes_read < 8) break;
+ unsigned start = StringToLong(addr_buffer);
+ bytes_read = read(fd, addr_buffer + 2, 1);
+ if (bytes_read < 1) break;
+ if (addr_buffer[2] != '-') break;
+ bytes_read = read(fd, addr_buffer + 2, 8);
+ if (bytes_read < 8) break;
+ unsigned end = StringToLong(addr_buffer);
+ char buffer[MAP_LENGTH];
+ bytes_read = -1;
+ do {
+ bytes_read++;
+ if (bytes_read >= MAP_LENGTH - 1) break;
+ bytes_read = read(fd, buffer + bytes_read, 1);
+ if (bytes_read < 1) break;
+ } while (buffer[bytes_read] != '\n');
+ buffer[bytes_read] = 0;
+ // Ignore mappings that are not executable.
+ if (buffer[3] != 'x') continue;
+ char* start_of_path = index(buffer, '/');
+ // There may be no filename in this line. Skip to next.
+ if (start_of_path == NULL) continue;
+ buffer[bytes_read] = 0;
+ result.push_back(SharedLibraryAddress(start_of_path, start, end));
+ }
+ close(fd);
+ return result;
+}
+
+void OS::SignalCodeMovingGC(void* hint) {}
+
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index 51f3a110b5..16e6f1d2b0 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <magenta/process.h>
-#include <magenta/syscalls.h>
+#include <zircon/process.h>
+#include <zircon/syscalls.h>
#include "src/base/macros.h"
#include "src/base/platform/platform-posix-time.h"
@@ -17,45 +17,58 @@ TimezoneCache* OS::CreateTimezoneCache() {
return new PosixDefaultTimezoneCache();
}
+// static
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) {
CHECK(false); // TODO(scottmg): Port, https://crbug.com/731217.
return nullptr;
}
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
- CHECK(false); // TODO(scottmg): Port, https://crbug.com/731217.
- return std::vector<SharedLibraryAddress>();
+// static
+void OS::Guard(void* address, size_t size) {
+ CHECK_EQ(ZX_OK, zx_vmar_protect(zx_vmar_root_self(),
+ reinterpret_cast<uintptr_t>(address), size,
+ 0 /*no permissions*/));
}
-void OS::SignalCodeMovingGC() {
- CHECK(false); // TODO(scottmg): Port, https://crbug.com/731217.
+// static
+void* OS::ReserveRegion(size_t size, void* hint) {
+ zx_handle_t vmo;
+ if (zx_vmo_create(size, 0, &vmo) != ZX_OK) return nullptr;
+ uintptr_t result;
+ zx_status_t status = zx_vmar_map(zx_vmar_root_self(), 0, vmo, 0, size,
+ 0 /*no permissions*/, &result);
+ zx_handle_close(vmo);
+ if (status != ZX_OK) return nullptr;
+ return reinterpret_cast<void*>(result);
}
-VirtualMemory::VirtualMemory() : address_(nullptr), size_(0) {}
-
-VirtualMemory::VirtualMemory(size_t size, void* hint)
- : address_(ReserveRegion(size, hint)), size_(size) {}
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
- : address_(nullptr), size_(0) {
+// static
+void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
+ size_t* allocated) {
DCHECK((alignment % OS::AllocateAlignment()) == 0);
hint = AlignedAddress(hint, alignment);
size_t request_size =
RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
- mx_handle_t vmo;
- if (mx_vmo_create(request_size, 0, &vmo) != MX_OK) return;
+ zx_handle_t vmo;
+ if (zx_vmo_create(request_size, 0, &vmo) != ZX_OK) {
+ *allocated = 0;
+ return nullptr;
+ }
static const char kVirtualMemoryName[] = "v8-virtualmem";
- mx_object_set_property(vmo, MX_PROP_NAME, kVirtualMemoryName,
+ zx_object_set_property(vmo, ZX_PROP_NAME, kVirtualMemoryName,
strlen(kVirtualMemoryName));
uintptr_t reservation;
- mx_status_t status = mx_vmar_map(mx_vmar_root_self(), 0, vmo, 0, request_size,
+ zx_status_t status = zx_vmar_map(zx_vmar_root_self(), 0, vmo, 0, request_size,
0 /*no permissions*/, &reservation);
// Either the vmo is now referenced by the vmar, or we failed and are bailing,
// so close the vmo either way.
- mx_handle_close(vmo);
- if (status != MX_OK) return;
+ zx_handle_close(vmo);
+ if (status != ZX_OK) {
+ *allocated = 0;
+ return nullptr;
+ }
uint8_t* base = reinterpret_cast<uint8_t*>(reservation);
uint8_t* aligned_base = RoundUp(base, alignment);
@@ -64,7 +77,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
size_t prefix_size = static_cast<size_t>(aligned_base - base);
- mx_vmar_unmap(mx_vmar_root_self(), reinterpret_cast<uintptr_t>(base),
+ zx_vmar_unmap(zx_vmar_root_self(), reinterpret_cast<uintptr_t>(base),
prefix_size);
request_size -= prefix_size;
}
@@ -74,7 +87,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
if (aligned_size != request_size) {
size_t suffix_size = request_size - aligned_size;
- mx_vmar_unmap(mx_vmar_root_self(),
+ zx_vmar_unmap(zx_vmar_root_self(),
reinterpret_cast<uintptr_t>(aligned_base + aligned_size),
suffix_size);
request_size -= suffix_size;
@@ -82,82 +95,51 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
DCHECK(aligned_size == request_size);
- address_ = static_cast<void*>(aligned_base);
- size_ = aligned_size;
-}
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- bool result = ReleaseRegion(address(), size());
- DCHECK(result);
- USE(result);
- }
-}
-
-void VirtualMemory::Reset() {
- address_ = nullptr;
- size_ = 0;
-}
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- CHECK(InVM(address, size));
- return CommitRegion(address, size, is_executable);
-}
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return UncommitRegion(address, size);
+ *allocated = aligned_size;
+ return static_cast<void*>(aligned_base);
}
-bool VirtualMemory::Guard(void* address) {
- return mx_vmar_protect(mx_vmar_root_self(),
- reinterpret_cast<uintptr_t>(address),
- OS::CommitPageSize(), 0 /*no permissions*/) == MX_OK;
+// static
+bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
+ uint32_t prot = ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE |
+ (is_executable ? ZX_VM_FLAG_PERM_EXECUTE : 0);
+ return zx_vmar_protect(zx_vmar_root_self(),
+ reinterpret_cast<uintptr_t>(address), size,
+ prot) == ZX_OK;
}
// static
-void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
- mx_handle_t vmo;
- if (mx_vmo_create(size, 0, &vmo) != MX_OK) return nullptr;
- uintptr_t result;
- mx_status_t status = mx_vmar_map(mx_vmar_root_self(), 0, vmo, 0, size,
- 0 /*no permissions*/, &result);
- mx_handle_close(vmo);
- if (status != MX_OK) return nullptr;
- return reinterpret_cast<void*>(result);
+bool OS::UncommitRegion(void* address, size_t size) {
+ return zx_vmar_protect(zx_vmar_root_self(),
+ reinterpret_cast<uintptr_t>(address), size,
+ 0 /*no permissions*/) == ZX_OK;
}
// static
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
- uint32_t prot = MX_VM_FLAG_PERM_READ | MX_VM_FLAG_PERM_WRITE |
- (is_executable ? MX_VM_FLAG_PERM_EXECUTE : 0);
- return mx_vmar_protect(mx_vmar_root_self(), reinterpret_cast<uintptr_t>(base),
- size, prot) == MX_OK;
+bool OS::ReleaseRegion(void* address, size_t size) {
+ return zx_vmar_unmap(zx_vmar_root_self(),
+ reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
}
// static
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- return mx_vmar_protect(mx_vmar_root_self(), reinterpret_cast<uintptr_t>(base),
- size, 0 /*no permissions*/) == MX_OK;
+bool OS::ReleasePartialRegion(void* address, size_t size) {
+ return zx_vmar_unmap(zx_vmar_root_self(),
+ reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
}
// static
-bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
- void* free_start, size_t free_size) {
- return mx_vmar_unmap(mx_vmar_root_self(),
- reinterpret_cast<uintptr_t>(free_start),
- free_size) == MX_OK;
+bool OS::HasLazyCommits() {
+ // TODO(scottmg): Port, https://crbug.com/731217.
+ return false;
}
-// static
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
- return mx_vmar_unmap(mx_vmar_root_self(), reinterpret_cast<uintptr_t>(base),
- size) == MX_OK;
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+ CHECK(false); // TODO(scottmg): Port, https://crbug.com/731217.
+ return std::vector<SharedLibraryAddress>();
}
-// static
-bool VirtualMemory::HasLazyCommits() {
- // TODO(scottmg): Port, https://crbug.com/731217.
- return false;
+void OS::SignalCodeMovingGC(void* hint) {
+ CHECK(false); // TODO(scottmg): Port, https://crbug.com/731217.
}
} // namespace base
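A sketch pairing the new Fuchsia primitives, using only the OS entry points defined above: reserve with no permissions, commit as read/write, and hand the reservation back on failure. The 1 MB size is arbitrary.

#include "src/base/platform/platform.h"

void* ReserveAndCommitScratch() {
  const size_t kSize = size_t{1} << 20;  // 1 MB, for illustration
  void* region = v8::base::OS::ReserveRegion(kSize, nullptr);
  if (region == nullptr) return nullptr;
  // Commit as read/write; on failure release the whole reservation.
  if (!v8::base::OS::CommitRegion(region, kSize, false /* is_executable */)) {
    v8::base::OS::ReleaseRegion(region, kSize);
    return nullptr;
  }
  return region;
}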
diff --git a/deps/v8/src/base/platform/platform-linux.cc b/deps/v8/src/base/platform/platform-linux.cc
index 24e3ef28a7..2299a2c3e3 100644
--- a/deps/v8/src/base/platform/platform-linux.cc
+++ b/deps/v8/src/base/platform/platform-linux.cc
@@ -35,10 +35,6 @@
#include <asm/sigcontext.h> // NOLINT
#endif
-#if defined(LEAK_SANITIZER)
-#include <sanitizer/lsan_interface.h>
-#endif
-
#include <cmath>
#undef MAP_TYPE
@@ -97,16 +93,102 @@ TimezoneCache* OS::CreateTimezoneCache() {
return new PosixDefaultTimezoneCache();
}
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, AllocateAlignment());
int prot = GetProtectionFromMemoryPermission(access);
- void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (mbase == MAP_FAILED) return NULL;
+ void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
+ kMmapFdOffset);
+ if (mbase == MAP_FAILED) return nullptr;
*allocated = msize;
return mbase;
}
+// static
+void* OS::ReserveRegion(size_t size, void* hint) {
+ void* result =
+ mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+ kMmapFd, kMmapFdOffset);
+
+ if (result == MAP_FAILED) return nullptr;
+ return result;
+}
+
+// static
+void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
+ size_t* allocated) {
+ DCHECK((alignment % OS::AllocateAlignment()) == 0);
+ hint = AlignedAddress(hint, alignment);
+ size_t request_size =
+ RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* result = ReserveRegion(request_size, hint);
+ if (result == nullptr) {
+ *allocated = 0;
+ return nullptr;
+ }
+
+ uint8_t* base = static_cast<uint8_t*>(result);
+ uint8_t* aligned_base = RoundUp(base, alignment);
+ DCHECK_LE(base, aligned_base);
+
+ // Unmap extra memory reserved before and after the desired block.
+ if (aligned_base != base) {
+ size_t prefix_size = static_cast<size_t>(aligned_base - base);
+ OS::Free(base, prefix_size);
+ request_size -= prefix_size;
+ }
+
+ size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+ DCHECK_LE(aligned_size, request_size);
+
+ if (aligned_size != request_size) {
+ size_t suffix_size = request_size - aligned_size;
+ OS::Free(aligned_base + aligned_size, suffix_size);
+ request_size -= suffix_size;
+ }
+
+ DCHECK(aligned_size == request_size);
+
+ *allocated = aligned_size;
+ return static_cast<void*>(aligned_base);
+}
+
+// static
+bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ if (MAP_FAILED == mmap(address, size, prot,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
+ kMmapFdOffset)) {
+ return false;
+ }
+
+ return true;
+}
+
+// static
+bool OS::UncommitRegion(void* address, size_t size) {
+ return mmap(address, size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, kMmapFd,
+ kMmapFdOffset) != MAP_FAILED;
+}
+
+// static
+bool OS::ReleaseRegion(void* address, size_t size) {
+ return munmap(address, size) == 0;
+}
+
+// static
+bool OS::ReleasePartialRegion(void* address, size_t size) {
+ return munmap(address, size) == 0;
+}
+
+// static
+bool OS::HasLazyCommits() { return true; }
+
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
std::vector<SharedLibraryAddress> result;
// This function assumes that the layout of the file is as follows:
@@ -168,7 +250,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return result;
}
-void OS::SignalCodeMovingGC() {
+void OS::SignalCodeMovingGC(void* hint) {
// Support for ll_prof.py.
//
// The Linux profiler built into the kernel logs all mmap's with
@@ -183,138 +265,12 @@ void OS::SignalCodeMovingGC() {
OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile());
OS::Abort();
}
- void* addr = mmap(OS::GetRandomMmapAddr(), size, PROT_READ | PROT_EXEC,
- MAP_PRIVATE, fileno(f), 0);
+ void* addr =
+ mmap(hint, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fileno(f), 0);
DCHECK_NE(MAP_FAILED, addr);
OS::Free(addr, size);
fclose(f);
}
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) {}
-
-VirtualMemory::VirtualMemory(size_t size, void* hint)
- : address_(ReserveRegion(size, hint)), size_(size) {}
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
- : address_(NULL), size_(0) {
- DCHECK((alignment % OS::AllocateAlignment()) == 0);
- hint = AlignedAddress(hint, alignment);
- size_t request_size =
- RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation =
- mmap(hint, request_size, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
- if (reservation == MAP_FAILED) return;
-
- uint8_t* base = static_cast<uint8_t*>(reservation);
- uint8_t* aligned_base = RoundUp(base, alignment);
- DCHECK_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- DCHECK_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- DCHECK(aligned_size == request_size);
-
- address_ = static_cast<void*>(aligned_base);
- size_ = aligned_size;
-#if defined(LEAK_SANITIZER)
- __lsan_register_root_region(address_, size_);
-#endif
-}
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- bool result = ReleaseRegion(address(), size());
- DCHECK(result);
- USE(result);
- }
-}
-
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
-}
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- CHECK(InVM(address, size));
- return CommitRegion(address, size, is_executable);
-}
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- CHECK(InVM(address, size));
- return UncommitRegion(address, size);
-}
-
-bool VirtualMemory::Guard(void* address) {
- CHECK(InVM(address, OS::CommitPageSize()));
- OS::Guard(address, OS::CommitPageSize());
- return true;
-}
-
-void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
- void* result =
- mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
-#if defined(LEAK_SANITIZER)
- __lsan_register_root_region(result, size);
-#endif
- return result;
-}
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(base, size, prot,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
- kMmapFdOffset)) {
- return false;
- }
-
- return true;
-}
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- return mmap(base, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
- void* free_start, size_t free_size) {
-#if defined(LEAK_SANITIZER)
- __lsan_unregister_root_region(base, size);
- __lsan_register_root_region(base, size - free_size);
-#endif
- return munmap(free_start, free_size) == 0;
-}
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-#if defined(LEAK_SANITIZER)
- __lsan_unregister_root_region(base, size);
-#endif
- return munmap(base, size) == 0;
-}
-
-bool VirtualMemory::HasLazyCommits() { return true; }
-
} // namespace base
} // namespace v8
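The prefix/suffix trimming in ReserveAlignedRegion, worked as a standalone sketch with a made-up base address; nothing is actually mapped, the point is the arithmetic on the over-reserved span.

#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t alignment = 512 * 1024;       // 512 KB
  const uint64_t size = 1024 * 1024;           // 1 MB requested
  const uint64_t request = size + alignment;   // over-reserved span
  const uint64_t base = 0x7f0000123000;        // hypothetical mmap() result
  const uint64_t aligned = (base + alignment - 1) & ~(alignment - 1);
  const uint64_t prefix = aligned - base;           // freed before the block
  const uint64_t suffix = request - prefix - size;  // freed after the block
  std::printf("prefix %" PRIu64 " bytes, suffix %" PRIu64
              " bytes, aligned block at %#" PRIx64 "\n",
              prefix, suffix, aligned);
  return 0;
}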
diff --git a/deps/v8/src/base/platform/platform-macos.cc b/deps/v8/src/base/platform/platform-macos.cc
index 076351eaed..3c19962186 100644
--- a/deps/v8/src/base/platform/platform-macos.cc
+++ b/deps/v8/src/base/platform/platform-macos.cc
@@ -51,6 +51,7 @@ namespace base {
static const int kMmapFd = VM_MAKE_TAG(255);
static const off_t kMmapFdOffset = 0;
+// static
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, getpagesize());
@@ -62,58 +63,31 @@ void* OS::Allocate(const size_t requested, size_t* allocated,
return mbase;
}
+// static
+void* OS::ReserveRegion(size_t size, void* hint) {
+ void* result =
+ mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd, kMmapFdOffset);
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
- std::vector<SharedLibraryAddress> result;
- unsigned int images_count = _dyld_image_count();
- for (unsigned int i = 0; i < images_count; ++i) {
- const mach_header* header = _dyld_get_image_header(i);
- if (header == NULL) continue;
-#if V8_HOST_ARCH_X64
- uint64_t size;
- char* code_ptr = getsectdatafromheader_64(
- reinterpret_cast<const mach_header_64*>(header),
- SEG_TEXT,
- SECT_TEXT,
- &size);
-#else
- unsigned int size;
- char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
-#endif
- if (code_ptr == NULL) continue;
- const intptr_t slide = _dyld_get_image_vmaddr_slide(i);
- const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
- result.push_back(SharedLibraryAddress(_dyld_get_image_name(i), start,
- start + size, slide));
- }
- return result;
-}
-
-
-void OS::SignalCodeMovingGC() {
-}
+ if (result == MAP_FAILED) return nullptr;
-TimezoneCache* OS::CreateTimezoneCache() {
- return new PosixDefaultTimezoneCache();
+ return result;
}
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-VirtualMemory::VirtualMemory(size_t size, void* hint)
- : address_(ReserveRegion(size, hint)), size_(size) {}
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
- : address_(NULL), size_(0) {
+// static
+void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
+ size_t* allocated) {
DCHECK((alignment % OS::AllocateAlignment()) == 0);
hint = AlignedAddress(hint, alignment);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation =
- mmap(hint, request_size, PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
- if (reservation == MAP_FAILED) return;
+ void* result = ReserveRegion(request_size, hint);
+ if (result == nullptr) {
+ *allocated = 0;
+ return nullptr;
+ }
- uint8_t* base = static_cast<uint8_t*>(reservation);
+ uint8_t* base = static_cast<uint8_t*>(result);
uint8_t* aligned_base = RoundUp(base, alignment);
DCHECK_LE(base, aligned_base);
@@ -135,54 +109,12 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
DCHECK(aligned_size == request_size);
- address_ = static_cast<void*>(aligned_base);
- size_ = aligned_size;
+ *allocated = aligned_size;
+ return static_cast<void*>(aligned_base);
}
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- bool result = ReleaseRegion(address(), size());
- DCHECK(result);
- USE(result);
- }
-}
-
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- OS::Guard(address, OS::CommitPageSize());
- return true;
-}
-
-void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
- void* result =
- mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* address,
- size_t size,
- bool is_executable) {
+// static
+bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
if (MAP_FAILED == mmap(address,
size,
@@ -195,8 +127,8 @@ bool VirtualMemory::CommitRegion(void* address,
return true;
}
-
-bool VirtualMemory::UncommitRegion(void* address, size_t size) {
+// static
+bool OS::UncommitRegion(void* address, size_t size) {
return mmap(address,
size,
PROT_NONE,
@@ -205,16 +137,48 @@ bool VirtualMemory::UncommitRegion(void* address, size_t size) {
kMmapFdOffset) != MAP_FAILED;
}
-bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
- void* free_start, size_t free_size) {
- return munmap(free_start, free_size) == 0;
+// static
+bool OS::ReleaseRegion(void* address, size_t size) {
+ return munmap(address, size) == 0;
}
-bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
+// static
+bool OS::ReleasePartialRegion(void* address, size_t size) {
return munmap(address, size) == 0;
}
-bool VirtualMemory::HasLazyCommits() { return true; }
+// static
+bool OS::HasLazyCommits() { return true; }
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+ std::vector<SharedLibraryAddress> result;
+ unsigned int images_count = _dyld_image_count();
+ for (unsigned int i = 0; i < images_count; ++i) {
+ const mach_header* header = _dyld_get_image_header(i);
+ if (header == NULL) continue;
+#if V8_HOST_ARCH_X64
+ uint64_t size;
+ char* code_ptr = getsectdatafromheader_64(
+ reinterpret_cast<const mach_header_64*>(header), SEG_TEXT, SECT_TEXT,
+ &size);
+#else
+ unsigned int size;
+ char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
+#endif
+ if (code_ptr == NULL) continue;
+ const intptr_t slide = _dyld_get_image_vmaddr_slide(i);
+ const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
+ result.push_back(SharedLibraryAddress(_dyld_get_image_name(i), start,
+ start + size, slide));
+ }
+ return result;
+}
+
+void OS::SignalCodeMovingGC(void* hint) {}
+
+TimezoneCache* OS::CreateTimezoneCache() {
+ return new PosixDefaultTimezoneCache();
+}
} // namespace base
} // namespace v8
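
The aligned-reservation pattern used above (over-reserve by size + alignment, then munmap the misaligned prefix and the unused suffix) can be exercised outside V8. A minimal sketch, assuming POSIX mmap/munmap and that size and alignment are both multiples of the page size, as the DCHECK in the patch requires; RoundUpPtr is a hypothetical stand-in for V8's RoundUp helper:

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdint>

    // Hypothetical helper: align |p| up to |alignment|, a power of two.
    static uint8_t* RoundUpPtr(uint8_t* p, size_t alignment) {
      uintptr_t v = reinterpret_cast<uintptr_t>(p);
      return reinterpret_cast<uint8_t*>((v + alignment - 1) & ~(alignment - 1));
    }

    // Reserve |size| bytes aligned to |alignment| by trimming an
    // oversized PROT_NONE reservation, mirroring ReserveAlignedRegion.
    void* ReserveAligned(size_t size, size_t alignment, size_t* allocated) {
      size_t request_size = size + alignment;
      void* reservation = mmap(nullptr, request_size, PROT_NONE,
                               MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, -1, 0);
      if (reservation == MAP_FAILED) {
        *allocated = 0;
        return nullptr;
      }
      uint8_t* base = static_cast<uint8_t*>(reservation);
      uint8_t* aligned_base = RoundUpPtr(base, alignment);
      size_t prefix = static_cast<size_t>(aligned_base - base);
      if (prefix != 0) munmap(base, prefix);  // drop the misaligned prefix
      size_t suffix = request_size - prefix - size;
      if (suffix != 0) munmap(aligned_base + size, suffix);  // drop the tail
      *allocated = size;
      return aligned_base;
    }
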
diff --git a/deps/v8/src/base/platform/platform-openbsd.cc b/deps/v8/src/base/platform/platform-openbsd.cc
index e7f4406180..910d4a8104 100644
--- a/deps/v8/src/base/platform/platform-openbsd.cc
+++ b/deps/v8/src/base/platform/platform-openbsd.cc
@@ -38,16 +38,104 @@ TimezoneCache* OS::CreateTimezoneCache() {
return new PosixDefaultTimezoneCache();
}
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, AllocateAlignment());
int prot = GetProtectionFromMemoryPermission(access);
- void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+ void* mbase =
+ mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
if (mbase == MAP_FAILED) return NULL;
*allocated = msize;
return mbase;
}
+// static
+void* OS::ReserveRegion(size_t size, void* hint) {
+ void* result =
+ mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd, kMmapFdOffset);
+
+ if (result == MAP_FAILED) return NULL;
+
+ return result;
+}
+
+// static
+void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
+ size_t* allocated) {
+ DCHECK((alignment % OS::AllocateAlignment()) == 0);
+ hint = AlignedAddress(hint, alignment);
+ size_t request_size =
+ RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* result = ReserveRegion(request_size, hint);
+ if (result == nullptr) {
+ *allocated = 0;
+ return nullptr;
+ }
+
+ uint8_t* base = static_cast<uint8_t*>(result);
+ uint8_t* aligned_base = RoundUp(base, alignment);
+ DCHECK_LE(base, aligned_base);
+
+ // Unmap extra memory reserved before and after the desired block.
+ if (aligned_base != base) {
+ size_t prefix_size = static_cast<size_t>(aligned_base - base);
+ OS::Free(base, prefix_size);
+ request_size -= prefix_size;
+ }
+
+ size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+ DCHECK_LE(aligned_size, request_size);
+
+ if (aligned_size != request_size) {
+ size_t suffix_size = request_size - aligned_size;
+ OS::Free(aligned_base + aligned_size, suffix_size);
+ request_size -= suffix_size;
+ }
+
+ DCHECK(aligned_size == request_size);
+
+ *allocated = aligned_size;
+ return static_cast<void*>(aligned_base);
+}
+
+// static
+bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ if (MAP_FAILED == mmap(address, size, prot,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, kMmapFd,
+ kMmapFdOffset)) {
+ return false;
+ }
+ return true;
+}
+
+// static
+bool OS::UncommitRegion(void* address, size_t size) {
+ return mmap(address, size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, kMmapFd,
+ kMmapFdOffset) != MAP_FAILED;
+}
+
+// static
+bool OS::ReleaseRegion(void* address, size_t size) {
+ return munmap(address, size) == 0;
+}
+
+// static
+bool OS::ReleasePartialRegion(void* address, size_t size) {
+ return munmap(address, size) == 0;
+}
+
+// static
+bool OS::HasLazyCommits() {
+ // TODO(alph): implement for the platform.
+ return false;
+}
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
std::vector<SharedLibraryAddress> result;
@@ -109,8 +197,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return result;
}
-
-void OS::SignalCodeMovingGC() {
+void OS::SignalCodeMovingGC(void* hint) {
// Support for ll_prof.py.
//
// The Linux profiler built into the kernel logs all mmap's with
@@ -125,140 +212,12 @@ void OS::SignalCodeMovingGC() {
OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile());
OS::Abort();
}
- void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
- fileno(f), 0);
- DCHECK(addr != MAP_FAILED);
+ void* addr =
+ mmap(hint, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fileno(f), 0);
+ DCHECK_NE(MAP_FAILED, addr);
OS::Free(addr, size);
fclose(f);
}
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-VirtualMemory::VirtualMemory(size_t size, void* hint)
- : address_(ReserveRegion(size, hint)), size_(size) {}
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
- : address_(NULL), size_(0) {
- DCHECK((alignment % OS::AllocateAlignment()) == 0);
- hint = AlignedAddress(hint, alignment);
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation =
- mmap(hint, request_size, PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
- if (reservation == MAP_FAILED) return;
-
- uint8_t* base = static_cast<uint8_t*>(reservation);
- uint8_t* aligned_base = RoundUp(base, alignment);
- DCHECK_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- DCHECK_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- DCHECK(aligned_size == request_size);
-
- address_ = static_cast<void*>(aligned_base);
- size_ = aligned_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- bool result = ReleaseRegion(address(), size());
- DCHECK(result);
- USE(result);
- }
-}
-
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- OS::Guard(address, OS::CommitPageSize());
- return true;
-}
-
-void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
- void* result =
- mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(base,
- size,
- prot,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset)) {
- return false;
- }
- return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- return mmap(base,
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
- void* free_start, size_t free_size) {
- return munmap(free_start, free_size) == 0;
-}
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
- return munmap(base, size) == 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
} // namespace base
} // namespace v8
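
Like the other POSIX ports touched by this change, the OpenBSD code keeps reservation and commitment distinct: ReserveRegion maps PROT_NONE address space, CommitRegion remaps it in place as read/write with MAP_FIXED, and UncommitRegion flips it back. A sketch of that lifecycle using plain POSIX calls rather than the V8 API, with an assumed 4 KB page size:

    #include <sys/mman.h>
    #include <cassert>
    #include <cstddef>
    #include <cstring>

    int main() {
      const size_t kPage = 4096;  // assumed page size for the sketch
      // Reserve: address space only; nothing is readable or writable yet.
      void* region = mmap(nullptr, 16 * kPage, PROT_NONE,
                          MAP_PRIVATE | MAP_ANON, -1, 0);
      assert(region != MAP_FAILED);
      // Commit the first page: remap it in place with real permissions.
      void* page = mmap(region, kPage, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
      assert(page == region);
      memset(page, 0xAB, kPage);  // now safe to touch
      // Uncommit: back to an inaccessible, unbacked reservation.
      mmap(region, kPage, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED,
           -1, 0);
      // Release the whole range, as OS::ReleaseRegion does with munmap.
      munmap(region, 16 * kPage);
      return 0;
    }
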
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index 450c89b005..8f658b95cb 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -129,6 +129,7 @@ void OS::ProtectCode(void* address, const size_t size) {
// Create guard pages.
+#if !V8_OS_FUCHSIA
void OS::Guard(void* address, const size_t size) {
#if V8_OS_CYGWIN
DWORD oldprotect;
@@ -137,6 +138,7 @@ void OS::Guard(void* address, const size_t size) {
mprotect(address, size, PROT_NONE);
#endif
}
+#endif // !V8_OS_FUCHSIA
// Make a region of memory readable and writable.
void OS::Unprotect(void* address, const size_t size) {
@@ -148,15 +150,7 @@ void OS::Unprotect(void* address, const size_t size) {
#endif
}
-static LazyInstance<RandomNumberGenerator>::type
- platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
-
-
-void OS::Initialize(int64_t random_seed, bool hard_abort,
- const char* const gc_fake_mmap) {
- if (random_seed) {
- platform_random_number_generator.Pointer()->SetSeed(random_seed);
- }
+void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
g_hard_abort = hard_abort;
g_gc_fake_mmap = gc_fake_mmap;
}
@@ -167,72 +161,6 @@ const char* OS::GetGCFakeMMapFile() {
}
-void* OS::GetRandomMmapAddr() {
-#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
- defined(THREAD_SANITIZER)
- // Dynamic tools do not support custom mmap addresses.
- return NULL;
-#endif
- uintptr_t raw_addr;
- platform_random_number_generator.Pointer()->NextBytes(&raw_addr,
- sizeof(raw_addr));
-#if V8_TARGET_ARCH_X64
- // Currently available CPUs have 48 bits of virtual addressing. Truncate
- // the hint address to 46 bits to give the kernel a fighting chance of
- // fulfilling our placement request.
- raw_addr &= V8_UINT64_C(0x3ffffffff000);
-#elif V8_TARGET_ARCH_PPC64
-#if V8_OS_AIX
- // AIX: 64 bits of virtual addressing, but we limit address range to:
- // a) minimize Segment Lookaside Buffer (SLB) misses and
- raw_addr &= V8_UINT64_C(0x3ffff000);
- // Use extra address space to isolate the mmap regions.
- raw_addr += V8_UINT64_C(0x400000000000);
-#elif V8_TARGET_BIG_ENDIAN
- // Big-endian Linux: 44 bits of virtual addressing.
- raw_addr &= V8_UINT64_C(0x03fffffff000);
-#else
- // Little-endian Linux: 48 bits of virtual addressing.
- raw_addr &= V8_UINT64_C(0x3ffffffff000);
-#endif
-#elif V8_TARGET_ARCH_S390X
- // Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits
- // of virtual addressing. Truncate to 40 bits to allow kernel chance to
- // fulfill request.
- raw_addr &= V8_UINT64_C(0xfffffff000);
-#elif V8_TARGET_ARCH_S390
- // 31 bits of virtual addressing. Truncate to 29 bits to allow kernel chance
- // to fulfill request.
- raw_addr &= 0x1ffff000;
-#else
- raw_addr &= 0x3ffff000;
-
-# ifdef __sun
- // For our Solaris/illumos mmap hint, we pick a random address in the bottom
- // half of the top half of the address space (that is, the third quarter).
- // Because we do not MAP_FIXED, this will be treated only as a hint -- the
- // system will not fail to mmap() because something else happens to already
- // be mapped at our random address. We deliberately set the hint high enough
- // to get well above the system's break (that is, the heap); Solaris and
- // illumos will try the hint and if that fails allocate as if there were
- // no hint at all. The high hint prevents the break from getting hemmed in
- // at low values, ceding half of the address space to the system heap.
- raw_addr += 0x80000000;
-#elif V8_OS_AIX
- // The range 0x30000000 - 0xD0000000 is available on AIX;
- // choose the upper range.
- raw_addr += 0x90000000;
-# else
- // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
- // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
- // 10.6 and 10.7.
- raw_addr += 0x20000000;
-# endif
-#endif
- return reinterpret_cast<void*>(raw_addr);
-}
-
-
size_t OS::AllocateAlignment() {
return static_cast<size_t>(sysconf(_SC_PAGESIZE));
}
@@ -292,14 +220,13 @@ class PosixMemoryMappedFile final : public OS::MemoryMappedFile {
// static
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name, void* hint) {
if (FILE* file = fopen(name, "r+")) {
if (fseek(file, 0, SEEK_END) == 0) {
long size = ftell(file); // NOLINT(runtime/int)
if (size >= 0) {
- void* const memory =
- mmap(OS::GetRandomMmapAddr(), size, PROT_READ | PROT_WRITE,
- MAP_SHARED, fileno(file), 0);
+ void* const memory = mmap(hint, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fileno(file), 0);
if (memory != MAP_FAILED) {
return new PosixMemoryMappedFile(file, memory, size);
}
@@ -312,13 +239,13 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
// static
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name,
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, void* hint,
size_t size, void* initial) {
if (FILE* file = fopen(name, "w+")) {
size_t result = fwrite(initial, 1, size, file);
if (result == size && !ferror(file)) {
- void* memory = mmap(OS::GetRandomMmapAddr(), result,
- PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ void* memory = mmap(hint, result, PROT_READ | PROT_WRITE, MAP_SHARED,
+ fileno(file), 0);
if (memory != MAP_FAILED) {
return new PosixMemoryMappedFile(file, memory, result);
}
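
After this change the POSIX MemoryMappedFile::open and create no longer pick a random address themselves; the caller supplies the placement hint. A sketch of the underlying pattern, independent of V8 (MapFile is a hypothetical name; a null hint lets the kernel choose, since a non-MAP_FIXED hint is advisory only):

    #include <sys/mman.h>
    #include <cstdio>

    // Map an existing file read/write, optionally near |hint|.
    // Returns nullptr on failure and stores the length in |out_size|.
    void* MapFile(const char* name, void* hint, long* out_size) {
      FILE* file = fopen(name, "r+");
      if (file == nullptr) return nullptr;
      if (fseek(file, 0, SEEK_END) != 0 || (*out_size = ftell(file)) <= 0) {
        fclose(file);
        return nullptr;
      }
      void* memory = mmap(hint, *out_size, PROT_READ | PROT_WRITE,
                          MAP_SHARED, fileno(file), 0);
      fclose(file);  // the mapping outlives the FILE*; POSIX keeps it valid
      return memory == MAP_FAILED ? nullptr : memory;
    }
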
diff --git a/deps/v8/src/base/platform/platform-qnx.cc b/deps/v8/src/base/platform/platform-qnx.cc
index 2e577d0b3f..68bc0efbf9 100644
--- a/deps/v8/src/base/platform/platform-qnx.cc
+++ b/deps/v8/src/base/platform/platform-qnx.cc
@@ -89,99 +89,46 @@ TimezoneCache* OS::CreateTimezoneCache() {
return new PosixDefaultTimezoneCache();
}
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, AllocateAlignment());
int prot = GetProtectionFromMemoryPermission(access);
- void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
+ kMmapFdOffset);
if (mbase == MAP_FAILED) return NULL;
*allocated = msize;
return mbase;
}
+// static
+void* OS::ReserveRegion(size_t size, void* hint) {
+ void* result =
+ mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
+ kMmapFd, kMmapFdOffset);
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
- std::vector<SharedLibraryAddress> result;
- procfs_mapinfo *mapinfos = NULL, *mapinfo;
- int proc_fd, num, i;
-
- struct {
- procfs_debuginfo info;
- char buff[PATH_MAX];
- } map;
-
- char buf[PATH_MAX + 1];
- snprintf(buf, PATH_MAX + 1, "/proc/%d/as", getpid());
-
- if ((proc_fd = open(buf, O_RDONLY)) == -1) {
- close(proc_fd);
- return result;
- }
-
- /* Get the number of map entries. */
- if (devctl(proc_fd, DCMD_PROC_MAPINFO, NULL, 0, &num) != EOK) {
- close(proc_fd);
- return result;
- }
-
- mapinfos = reinterpret_cast<procfs_mapinfo *>(
- malloc(num * sizeof(procfs_mapinfo)));
- if (mapinfos == NULL) {
- close(proc_fd);
- return result;
- }
-
- /* Fill the map entries. */
- if (devctl(proc_fd, DCMD_PROC_PAGEDATA,
- mapinfos, num * sizeof(procfs_mapinfo), &num) != EOK) {
- free(mapinfos);
- close(proc_fd);
- return result;
- }
+ if (result == MAP_FAILED) return NULL;
- for (i = 0; i < num; i++) {
- mapinfo = mapinfos + i;
- if (mapinfo->flags & MAP_ELF) {
- map.info.vaddr = mapinfo->vaddr;
- if (devctl(proc_fd, DCMD_PROC_MAPDEBUG, &map, sizeof(map), 0) != EOK) {
- continue;
- }
- result.push_back(SharedLibraryAddress(
- map.info.path, mapinfo->vaddr, mapinfo->vaddr + mapinfo->size));
- }
- }
- free(mapinfos);
- close(proc_fd);
return result;
}
-
-void OS::SignalCodeMovingGC() {
-}
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-VirtualMemory::VirtualMemory(size_t size, void* hint)
- : address_(ReserveRegion(size, hint)), size_(size) {}
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
- : address_(NULL), size_(0) {
+// static
+void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
+ size_t* allocated) {
DCHECK((alignment % OS::AllocateAlignment()) == 0);
hint = AlignedAddress(hint, alignment);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation =
- mmap(hint, request_size, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY, kMmapFd, kMmapFdOffset);
- if (reservation == MAP_FAILED) return;
+ void* result = ReserveRegion(request_size, hint);
+ if (result == nullptr) {
+ *allocated = 0;
+ return nullptr;
+ }
- uint8_t* base = static_cast<uint8_t*>(reservation);
+ uint8_t* base = static_cast<uint8_t*>(result);
uint8_t* aligned_base = RoundUp(base, alignment);
DCHECK_LE(base, aligned_base);
@@ -203,84 +150,98 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
DCHECK(aligned_size == request_size);
- address_ = static_cast<void*>(aligned_base);
- size_ = aligned_size;
+ *allocated = aligned_size;
+ return static_cast<void*>(aligned_base);
}
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- bool result = ReleaseRegion(address(), size());
- DCHECK(result);
- USE(result);
+// static
+bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ if (MAP_FAILED == mmap(address, size, prot,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
+ kMmapFdOffset)) {
+ return false;
}
-}
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
+ return true;
}
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
+// static
+bool OS::UncommitRegion(void* address, size_t size) {
+ return mmap(address, size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_LAZY, kMmapFd,
+ kMmapFdOffset) != MAP_FAILED;
}
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return UncommitRegion(address, size);
+// static
+bool OS::ReleaseRegion(void* address, size_t size) {
+ return munmap(address, size) == 0;
}
-
-bool VirtualMemory::Guard(void* address) {
- OS::Guard(address, OS::CommitPageSize());
- return true;
+// static
+bool OS::ReleasePartialRegion(void* address, size_t size) {
+ return munmap(address, size) == 0;
}
-void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
- void* result =
- mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
- kMmapFd, kMmapFdOffset);
+// static
+bool OS::HasLazyCommits() { return false; }
- if (result == MAP_FAILED) return NULL;
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+ std::vector<SharedLibraryAddress> result;
+ procfs_mapinfo *mapinfos = NULL, *mapinfo;
+ int proc_fd, num, i;
- return result;
-}
+ struct {
+ procfs_debuginfo info;
+ char buff[PATH_MAX];
+ } map;
+ char buf[PATH_MAX + 1];
+ snprintf(buf, PATH_MAX + 1, "/proc/%d/as", getpid());
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(base,
- size,
- prot,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset)) {
- return false;
+ if ((proc_fd = open(buf, O_RDONLY)) == -1) {
+ close(proc_fd);
+ return result;
}
- return true;
-}
-
+ /* Get the number of map entries. */
+ if (devctl(proc_fd, DCMD_PROC_MAPINFO, NULL, 0, &num) != EOK) {
+ close(proc_fd);
+ return result;
+ }
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- return mmap(base,
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_LAZY,
- kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
+ mapinfos =
+ reinterpret_cast<procfs_mapinfo*>(malloc(num * sizeof(procfs_mapinfo)));
+ if (mapinfos == NULL) {
+ close(proc_fd);
+ return result;
+ }
+ /* Fill the map entries. */
+ if (devctl(proc_fd, DCMD_PROC_PAGEDATA, mapinfos,
+ num * sizeof(procfs_mapinfo), &num) != EOK) {
+ free(mapinfos);
+ close(proc_fd);
+ return result;
+ }
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
- return munmap(base, size) == 0;
+ for (i = 0; i < num; i++) {
+ mapinfo = mapinfos + i;
+ if (mapinfo->flags & MAP_ELF) {
+ map.info.vaddr = mapinfo->vaddr;
+ if (devctl(proc_fd, DCMD_PROC_MAPDEBUG, &map, sizeof(map), 0) != EOK) {
+ continue;
+ }
+ result.push_back(SharedLibraryAddress(map.info.path, mapinfo->vaddr,
+ mapinfo->vaddr + mapinfo->size));
+ }
+ }
+ free(mapinfos);
+ close(proc_fd);
+ return result;
}
-
-bool VirtualMemory::HasLazyCommits() {
- return false;
-}
+void OS::SignalCodeMovingGC(void* hint) {}
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-solaris.cc b/deps/v8/src/base/platform/platform-solaris.cc
index 4b80b7823f..2ea6ef4a6c 100644
--- a/deps/v8/src/base/platform/platform-solaris.cc
+++ b/deps/v8/src/base/platform/platform-solaris.cc
@@ -58,49 +58,47 @@ double SolarisTimezoneCache::LocalTimeOffset() {
TimezoneCache* OS::CreateTimezoneCache() { return new SolarisTimezoneCache(); }
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, getpagesize());
int prot = GetProtectionFromMemoryPermission(access);
- void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+ void* mbase =
+ mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
if (mbase == MAP_FAILED) return NULL;
*allocated = msize;
return mbase;
}
+// static
+void* OS::ReserveRegion(size_t size, void* hint) {
+ void* result =
+ mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+ kMmapFd, kMmapFdOffset);
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
- return std::vector<SharedLibraryAddress>();
-}
-
+ if (result == MAP_FAILED) return NULL;
-void OS::SignalCodeMovingGC() {
+ return result;
}
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-VirtualMemory::VirtualMemory(size_t size, void* hint)
- : address_(ReserveRegion(size, hint)), size_(size) {}
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
- : address_(NULL), size_(0) {
+// static
+void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
+ size_t* allocated) {
DCHECK((alignment % OS::AllocateAlignment()) == 0);
hint = AlignedAddress(hint, alignment);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation =
- mmap(hint, request_size, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
- if (reservation == MAP_FAILED) return;
+ void* result = ReserveRegion(request_size, hint);
+ if (result == nullptr) {
+ *allocated = 0;
+ return nullptr;
+ }
- uint8_t* base = static_cast<uint8_t*>(reservation);
+ uint8_t* base = static_cast<uint8_t*>(result);
uint8_t* aligned_base = RoundUp(base, alignment);
DCHECK_LE(base, aligned_base);
@@ -122,88 +120,49 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
DCHECK(aligned_size == request_size);
- address_ = static_cast<void*>(aligned_base);
- size_ = aligned_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- bool result = ReleaseRegion(address(), size());
- DCHECK(result);
- USE(result);
- }
-}
-
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
+ *allocated = aligned_size;
+ return static_cast<void*>(aligned_base);
}
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- OS::Guard(address, OS::CommitPageSize());
- return true;
-}
-
-void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
- void* result =
- mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+// static
+bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(base,
- size,
- prot,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
- kMmapFd,
+ if (MAP_FAILED == mmap(address, size, prot,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
kMmapFdOffset)) {
return false;
}
return true;
}
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- return mmap(base,
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
- kMmapFd,
+// static
+bool OS::UncommitRegion(void* address, size_t size) {
+ return mmap(address, size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, kMmapFd,
kMmapFdOffset) != MAP_FAILED;
}
-bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
- void* free_start, size_t free_size) {
- return munmap(free_start, free_size) == 0;
+// static
+bool OS::ReleaseRegion(void* address, size_t size) {
+ return munmap(address, size) == 0;
}
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
- return munmap(base, size) == 0;
+// static
+bool OS::ReleasePartialRegion(void* address, size_t size) {
+ return munmap(address, size) == 0;
}
-
-bool VirtualMemory::HasLazyCommits() {
+// static
+bool OS::HasLazyCommits() {
// TODO(alph): implement for the platform.
return false;
}
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+ return std::vector<SharedLibraryAddress>();
+}
+
+void OS::SignalCodeMovingGC(void* hint) {}
+
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index 7ce0a0d552..de1a27506f 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -20,12 +20,10 @@
#include "src/base/win32-headers.h"
#include "src/base/bits.h"
-#include "src/base/lazy-instance.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
#include "src/base/timezone-cache.h"
-#include "src/base/utils/random-number-generator.h"
// Extra functions for MinGW. Most of these are the _s functions which are in
// the Microsoft Visual Studio C++ CRT.
@@ -701,41 +699,11 @@ size_t OS::AllocateAlignment() {
return allocate_alignment;
}
-
-static LazyInstance<RandomNumberGenerator>::type
- platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
-
-
-void OS::Initialize(int64_t random_seed, bool hard_abort,
- const char* const gc_fake_mmap) {
- if (random_seed) {
- platform_random_number_generator.Pointer()->SetSeed(random_seed);
- }
+void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
g_hard_abort = hard_abort;
}
-
-void* OS::GetRandomMmapAddr() {
- // The address range used to randomize RWX allocations in OS::Allocate
- // Try not to map pages into the default range that windows loads DLLs
- // Use a multiple of 64k to prevent committing unused memory.
- // Note: This does not guarantee RWX regions will be within the
- // range kAllocationRandomAddressMin to kAllocationRandomAddressMax
-#ifdef V8_HOST_ARCH_64_BIT
- static const uintptr_t kAllocationRandomAddressMin = 0x0000000080000000;
- static const uintptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
-#else
- static const uintptr_t kAllocationRandomAddressMin = 0x04000000;
- static const uintptr_t kAllocationRandomAddressMax = 0x3FFF0000;
-#endif
- uintptr_t address;
- platform_random_number_generator.Pointer()->NextBytes(&address,
- sizeof(address));
- address <<= kPageSizeBits;
- address += kAllocationRandomAddressMin;
- address &= kAllocationRandomAddressMax;
- return reinterpret_cast<void *>(address);
-}
+namespace {
static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
void* hint) {
@@ -762,6 +730,8 @@ static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
return base;
}
+} // namespace
+
void* OS::Allocate(const size_t requested, size_t* allocated,
bool is_executable, void* hint) {
return OS::Allocate(requested, allocated,
@@ -809,18 +779,15 @@ void OS::Free(void* address, const size_t size) {
USE(size);
}
-
intptr_t OS::CommitPageSize() {
return 4096;
}
-
void OS::ProtectCode(void* address, const size_t size) {
DWORD old_protect;
VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
}
-
void OS::Guard(void* address, const size_t size) {
DWORD oldprotect;
VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
@@ -828,10 +795,77 @@ void OS::Guard(void* address, const size_t size) {
void OS::Unprotect(void* address, const size_t size) {
LPVOID result = VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE);
- DCHECK_IMPLIES(result != nullptr, GetLastError() == 0);
USE(result);
}
+// static
+void* OS::ReserveRegion(size_t size, void* hint) {
+ return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
+}
+
+void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
+ size_t* allocated) {
+ DCHECK((alignment % OS::AllocateAlignment()) == 0);
+ hint = AlignedAddress(hint, alignment);
+ size_t request_size =
+ RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* address = ReserveRegion(request_size, hint);
+ if (address == nullptr) {
+ *allocated = 0;
+ return nullptr;
+ }
+ uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
+ // Try reducing the size by freeing and then reallocating a specific area.
+ bool result = ReleaseRegion(address, request_size);
+ USE(result);
+ DCHECK(result);
+ address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
+ if (address != nullptr) {
+ request_size = size;
+ DCHECK(base == static_cast<uint8_t*>(address));
+ } else {
+ // Resizing failed, just go with a bigger area.
+ address = ReserveRegion(request_size, hint);
+ if (address == nullptr) {
+ *allocated = 0;
+ return nullptr;
+ }
+ }
+
+ *allocated = request_size;
+ return static_cast<void*>(address);
+}
+
+// static
+bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
+ int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+ if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
+ return false;
+ }
+ return true;
+}
+
+// static
+bool OS::UncommitRegion(void* address, size_t size) {
+ return VirtualFree(address, size, MEM_DECOMMIT) != 0;
+}
+
+// static
+bool OS::ReleaseRegion(void* address, size_t size) {
+ return VirtualFree(address, 0, MEM_RELEASE) != 0;
+}
+
+// static
+bool OS::ReleasePartialRegion(void* address, size_t size) {
+ return VirtualFree(address, size, MEM_DECOMMIT) != 0;
+}
+
+// static
+bool OS::HasLazyCommits() {
+ // TODO(alph): implement for the platform.
+ return false;
+}
+
void OS::Sleep(TimeDelta interval) {
::Sleep(static_cast<DWORD>(interval.InMilliseconds()));
}
@@ -882,7 +916,7 @@ class Win32MemoryMappedFile final : public OS::MemoryMappedFile {
// static
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name, void* hint) {
// Open a physical file
HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL);
@@ -890,7 +924,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
DWORD size = GetFileSize(file, NULL);
- // Create a file mapping for the physical file
+  // Create a file mapping for the physical file; the placement hint is
+  // ignored on Windows.
HANDLE file_mapping =
CreateFileMapping(file, NULL, PAGE_READWRITE, 0, size, NULL);
if (file_mapping == NULL) return NULL;
@@ -902,14 +936,14 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
// static
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name,
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, void* hint,
size_t size, void* initial) {
// Open a physical file
HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
FILE_SHARE_READ | FILE_SHARE_WRITE, NULL,
OPEN_ALWAYS, 0, NULL);
if (file == NULL) return NULL;
- // Create a file mapping for the physical file
+  // Create a file mapping for the physical file; the placement hint is
+  // ignored on Windows.
HANDLE file_mapping = CreateFileMapping(file, NULL, PAGE_READWRITE, 0,
static_cast<DWORD>(size), NULL);
if (file_mapping == NULL) return NULL;
@@ -1178,20 +1212,13 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return LoadSymbols(process_handle);
}
-
-void OS::SignalCodeMovingGC() {
-}
-
-
#else // __MINGW32__
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return std::vector<OS::SharedLibraryAddress>();
}
-
-
-void OS::SignalCodeMovingGC() { }
#endif // __MINGW32__
+void OS::SignalCodeMovingGC(void* hint) {}
int OS::ActivationFrameAlignment() {
#ifdef _WIN64
@@ -1205,108 +1232,6 @@ int OS::ActivationFrameAlignment() {
#endif
}
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-VirtualMemory::VirtualMemory(size_t size, void* hint)
- : address_(ReserveRegion(size, hint)), size_(size) {}
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
- : address_(NULL), size_(0) {
- DCHECK((alignment % OS::AllocateAlignment()) == 0);
- hint = AlignedAddress(hint, alignment);
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* address = ReserveRegion(request_size, hint);
- if (address == NULL) return;
- uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
- // Try reducing the size by freeing and then reallocating a specific area.
- bool result = ReleaseRegion(address, request_size);
- USE(result);
- DCHECK(result);
- address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
- if (address != NULL) {
- request_size = size;
- DCHECK(base == static_cast<uint8_t*>(address));
- } else {
- // Resizing failed, just go with a bigger area.
- address = ReserveRegion(request_size, hint);
- if (address == NULL) return;
- }
- address_ = address;
- size_ = request_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- bool result = ReleaseRegion(address(), size());
- DCHECK(result);
- USE(result);
- }
-}
-
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- DCHECK(IsReserved());
- return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- if (NULL == VirtualAlloc(address,
- OS::CommitPageSize(),
- MEM_COMMIT,
- PAGE_NOACCESS)) {
- return false;
- }
- return true;
-}
-
-void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
- return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
- int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
- if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
- return false;
- }
- return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- return VirtualFree(base, size, MEM_DECOMMIT) != 0;
-}
-
-bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
- void* free_start, size_t free_size) {
- return VirtualFree(free_start, free_size, MEM_DECOMMIT) != 0;
-}
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
- return VirtualFree(base, 0, MEM_RELEASE) != 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
-
// ----------------------------------------------------------------------------
// Win32 thread support.
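
Windows cannot shrink a reservation with a partial VirtualFree(MEM_RELEASE), so ReserveAlignedRegion takes the reserve/release/re-reserve route shown in the hunks above. A standalone sketch of that strategy, under the same assumption that alignment is a multiple of the allocation granularity (ReserveAlignedWin is a hypothetical name, not the V8 function):

    #include <windows.h>
    #include <cstdint>

    void* ReserveAlignedWin(size_t size, size_t alignment, size_t* allocated) {
      size_t request_size = size + alignment;
      void* address =
          VirtualAlloc(nullptr, request_size, MEM_RESERVE, PAGE_NOACCESS);
      if (address == nullptr) {
        *allocated = 0;
        return nullptr;
      }
      uintptr_t base = reinterpret_cast<uintptr_t>(address);
      uintptr_t aligned = (base + alignment - 1) & ~(alignment - 1);
      // Release the oversized block, then try to take just the aligned slice.
      VirtualFree(address, 0, MEM_RELEASE);
      address = VirtualAlloc(reinterpret_cast<void*>(aligned), size,
                             MEM_RESERVE, PAGE_NOACCESS);
      if (address != nullptr) {
        *allocated = size;  // got exactly the aligned region
        return address;
      }
      // Another thread took the gap; fall back to an oversized block.
      address =
          VirtualAlloc(nullptr, request_size, MEM_RESERVE, PAGE_NOACCESS);
      *allocated = (address == nullptr) ? 0 : request_size;
      return address;
    }
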
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index 7737001d5a..0ff8599b0c 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -107,12 +107,9 @@ class TimezoneCache;
class V8_BASE_EXPORT OS {
public:
// Initialize the OS class.
- // - random_seed: Used for the GetRandomMmapAddress() if non-zero.
// - hard_abort: If true, OS::Abort() will crash instead of aborting.
// - gc_fake_mmap: Name of the file for fake gc mmap used in ll_prof.
- static void Initialize(int64_t random_seed,
- bool hard_abort,
- const char* const gc_fake_mmap);
+ static void Initialize(bool hard_abort, const char* const gc_fake_mmap);
// Returns the accumulated user time for thread. This routine
// can be used for profiling. The implementation should
@@ -191,12 +188,25 @@ class V8_BASE_EXPORT OS {
// Make a region of memory readable and writable.
static void Unprotect(void* address, const size_t size);
- // Generate a random address to be used for hinting mmap().
- static void* GetRandomMmapAddr();
-
// Get the Alignment guaranteed by Allocate().
static size_t AllocateAlignment();
+ static void* ReserveRegion(size_t size, void* hint);
+
+ static void* ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
+ size_t* allocated);
+
+ static bool CommitRegion(void* address, size_t size, bool is_executable);
+
+ static bool UncommitRegion(void* address, size_t size);
+
+ static bool ReleaseRegion(void* address, size_t size);
+
+ // Release part of a reserved address range.
+ static bool ReleasePartialRegion(void* address, size_t size);
+
+ static bool HasLazyCommits();
+
// Sleep for a specified time interval.
static void Sleep(TimeDelta interval);
@@ -221,8 +231,8 @@ class V8_BASE_EXPORT OS {
virtual void* memory() const = 0;
virtual size_t size() const = 0;
- static MemoryMappedFile* open(const char* name);
- static MemoryMappedFile* create(const char* name, size_t size,
+ static MemoryMappedFile* open(const char* name, void* hint);
+ static MemoryMappedFile* create(const char* name, void* hint, size_t size,
void* initial);
};
@@ -261,7 +271,7 @@ class V8_BASE_EXPORT OS {
// process that a code moving garbage collection starts. Can do
// nothing, in which case the code objects must not move (e.g., by
// using --never-compact) if accurate profiling is desired.
- static void SignalCodeMovingGC();
+ static void SignalCodeMovingGC(void* hint);
// Support runtime detection of whether the hard float option of the
// EABI is used.
@@ -285,141 +295,6 @@ class V8_BASE_EXPORT OS {
DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
};
-// Represents and controls an area of reserved memory.
-// Control of the reserved memory can be assigned to another VirtualMemory
-// object by calling TakeControl. This removes the reserved memory from the
-// 'from' instance.
-class V8_BASE_EXPORT VirtualMemory {
- public:
- // Empty VirtualMemory object, controlling no reserved memory.
- VirtualMemory();
-
- // Reserves virtual memory with size.
- explicit VirtualMemory(size_t size, void* hint);
-
- // Reserves virtual memory containing an area of the given size that
- // is aligned per alignment. This may not be at the position returned
- // by address().
- VirtualMemory(size_t size, size_t alignment, void* hint);
-
- // Construct a virtual memory by assigning it some already mapped address
- // and size.
- VirtualMemory(void* address, size_t size) : address_(address), size_(size) {}
-
- // Releases the reserved memory, if any, controlled by this VirtualMemory
- // object.
- ~VirtualMemory();
-
- // Returns whether the memory has been reserved.
- bool IsReserved() const { return address_ != nullptr; }
-
- // Initialize or resets an embedded VirtualMemory object.
- void Reset();
-
- // Returns the start address of the reserved memory.
- // If the memory was reserved with an alignment, this address is not
- // necessarily aligned. The user might need to round it up to a multiple of
- // the alignment to get the start of the aligned block.
- void* address() const {
- DCHECK(IsReserved());
- return address_;
- }
-
- void* end() const {
- DCHECK(IsReserved());
- return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address_) +
- size_);
- }
-
- // Returns the size of the reserved memory. The returned value is only
- // meaningful when IsReserved() returns true.
- // If the memory was reserved with an alignment, this size may be larger
- // than the requested size.
- size_t size() const { return size_; }
-
- // Commits real memory. Returns whether the operation succeeded.
- bool Commit(void* address, size_t size, bool is_executable);
-
- // Uncommit real memory. Returns whether the operation succeeded.
- bool Uncommit(void* address, size_t size);
-
- // Creates a single guard page at the given address.
- bool Guard(void* address);
-
- // Releases the memory after |free_start|. Returns the bytes released.
- size_t ReleasePartial(void* free_start) {
- DCHECK(IsReserved());
- // Notice: Order is important here. The VirtualMemory object might live
- // inside the allocated region.
- const size_t size = size_ - (reinterpret_cast<size_t>(free_start) -
- reinterpret_cast<size_t>(address_));
- CHECK(InVM(free_start, size));
- DCHECK_LT(address_, free_start);
- DCHECK_LT(free_start, reinterpret_cast<void*>(
- reinterpret_cast<size_t>(address_) + size_));
- const bool result = ReleasePartialRegion(address_, size_, free_start, size);
- USE(result);
- DCHECK(result);
- size_ -= size;
- return size;
- }
-
- void Release() {
- DCHECK(IsReserved());
- // Notice: Order is important here. The VirtualMemory object might live
- // inside the allocated region.
- void* address = address_;
- size_t size = size_;
- CHECK(InVM(address, size));
- Reset();
- bool result = ReleaseRegion(address, size);
- USE(result);
- DCHECK(result);
- }
-
- // Assign control of the reserved region to a different VirtualMemory object.
- // The old object is no longer functional (IsReserved() returns false).
- void TakeControl(VirtualMemory* from) {
- DCHECK(!IsReserved());
- address_ = from->address_;
- size_ = from->size_;
- from->Reset();
- }
-
- static void* ReserveRegion(size_t size, void* hint);
-
- static bool CommitRegion(void* base, size_t size, bool is_executable);
-
- static bool UncommitRegion(void* base, size_t size);
-
- // Must be called with a base pointer that has been returned by ReserveRegion
- // and the same size it was reserved with.
- static bool ReleaseRegion(void* base, size_t size);
-
- // Must be called with a base pointer that has been returned by ReserveRegion
- // and the same size it was reserved with.
- // [free_start, free_start + free_size] is the memory that will be released.
- static bool ReleasePartialRegion(void* base, size_t size, void* free_start,
- size_t free_size);
-
- // Returns true if OS performs lazy commits, i.e. the memory allocation call
- // defers actual physical memory allocation till the first memory access.
- // Otherwise returns false.
- static bool HasLazyCommits();
-
- private:
- bool InVM(void* address, size_t size) {
- return (reinterpret_cast<uintptr_t>(address_) <=
- reinterpret_cast<uintptr_t>(address)) &&
- ((reinterpret_cast<uintptr_t>(address_) + size_) >=
- (reinterpret_cast<uintptr_t>(address) + size));
- }
-
- void* address_; // Start address of the virtual memory.
- size_t size_; // Size of the virtual memory.
-};
-
-
// ----------------------------------------------------------------------------
// Thread
//
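
The header change moves the region primitives from VirtualMemory onto OS as statics; the stateful VirtualMemory wrapper leaves src/base entirely. A sketch of the call sequence a user of the new API goes through, assuming only the declarations above and eliding most error checks:

    #include "src/base/platform/platform.h"

    using v8::base::OS;

    void Demo() {
      const size_t kMB = 1 << 20;
      size_t actual = 0;
      // Reserve 2 MB aligned to 1 MB; |actual| reports what was kept.
      void* region = OS::ReserveAlignedRegion(2 * kMB, kMB, nullptr, &actual);
      if (region == nullptr) return;
      // Commit one page so it can be touched (non-executable here).
      if (OS::CommitRegion(region, OS::CommitPageSize(), false)) {
        static_cast<char*>(region)[0] = 1;
        OS::UncommitRegion(region, OS::CommitPageSize());
      }
      OS::ReleaseRegion(region, actual);  // give back the reservation
    }
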
diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc
index 09e3fd02dd..6695bf8e57 100644
--- a/deps/v8/src/base/platform/time.cc
+++ b/deps/v8/src/base/platform/time.cc
@@ -621,6 +621,8 @@ TimeTicks TimeTicks::HighResolutionNow() {
ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond);
#elif V8_OS_POSIX
ticks = ClockNow(CLOCK_MONOTONIC);
+#else
+#error platform does not implement TimeTicks::HighResolutionNow.
#endif // V8_OS_MACOSX
// Make sure we never return 0 here.
return TimeTicks(ticks + 1);
diff --git a/deps/v8/src/base/template-utils.h b/deps/v8/src/base/template-utils.h
index fbf9265d28..a7bb290929 100644
--- a/deps/v8/src/base/template-utils.h
+++ b/deps/v8/src/base/template-utils.h
@@ -78,6 +78,21 @@ struct pass_value_or_ref {
decay_t, const decay_t&>::type;
};
+template <typename T>
+struct has_output_operator {
+ // This template is only instantiable if U provides operator<< with ostream.
+ // Its return type is uint8_t.
+ template <typename U>
+ static auto __check_operator(U u)
+ -> decltype(*(std::ostream*)nullptr << *u, uint8_t{0});
+ // This is a fallback implementation, returning uint16_t. If the template
+  // above is instantiable, it has precedence over this varargs function.
+ static uint16_t __check_operator(...);
+
+ using ptr_t = typename std::add_pointer<T>::type;
+ static constexpr bool value = sizeof(__check_operator(ptr_t{nullptr})) == 1;
+};
+
} // namespace base
} // namespace v8
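
has_output_operator resolves by overload ranking: if the expression std::ostream << U compiles, the template overload (returning uint8_t) is viable and beats the varargs fallback (returning uint16_t); sizeof on the chosen return type yields the answer. A small usage sketch, assuming the header above:

    #include <ostream>
    #include "src/base/template-utils.h"

    struct Streamable {};
    std::ostream& operator<<(std::ostream& os, const Streamable&) { return os; }
    struct Opaque {};

    // The template overload wins only when operator<< exists.
    static_assert(v8::base::has_output_operator<Streamable>::value,
                  "operator<< found, template overload selected");
    static_assert(!v8::base::has_output_operator<Opaque>::value,
                  "no operator<<, varargs fallback selected");
    static_assert(v8::base::has_output_operator<int>::value,
                  "built-ins stream via the standard inserters");
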
diff --git a/deps/v8/src/base/tsan.h b/deps/v8/src/base/tsan.h
new file mode 100644
index 0000000000..7cf68a6a64
--- /dev/null
+++ b/deps/v8/src/base/tsan.h
@@ -0,0 +1,47 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_TSAN_H_
+#define V8_BASE_TSAN_H_
+
+namespace v8 {
+namespace base {
+
+// This file contains annotations for ThreadSanitizer (TSan), a race detector.
+// See
+// https://llvm.org/svn/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_interface_ann.cc
+
+#if THREAD_SANITIZER
+
+#define TSAN_ANNOTATE_IGNORE_READS_BEGIN \
+ v8::base::AnnotateIgnoreReadsBegin(__FILE__, __LINE__)
+#define TSAN_ANNOTATE_IGNORE_READS_END \
+ v8::base::AnnotateIgnoreReadsEnd(__FILE__, __LINE__)
+#define TSAN_ANNOTATE_IGNORE_WRITES_BEGIN \
+ v8::base::AnnotateIgnoreWritesBegin(__FILE__, __LINE__)
+#define TSAN_ANNOTATE_IGNORE_WRITES_END \
+ v8::base::AnnotateIgnoreWritesEnd(__FILE__, __LINE__)
+
+extern "C" {
+
+void AnnotateIgnoreReadsBegin(const char* file, int line);
+void AnnotateIgnoreReadsEnd(const char* file, int line);
+void AnnotateIgnoreWritesBegin(const char* file, int line);
+void AnnotateIgnoreWritesEnd(const char* file, int line);
+
+} // extern "C"
+
+#else
+
+#define TSAN_ANNOTATE_IGNORE_READS_BEGIN ((void)0)
+#define TSAN_ANNOTATE_IGNORE_READS_END ((void)0)
+#define TSAN_ANNOTATE_IGNORE_WRITES_BEGIN ((void)0)
+#define TSAN_ANNOTATE_IGNORE_WRITES_END ((void)0)
+
+#endif
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_TSAN_H_
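
The annotation macros bracket regions whose data races are deliberate, so TSan suppresses reports inside them; in non-TSan builds they expand to ((void)0) and cost nothing. A usage sketch with a hypothetical best-effort counter where a lost update is acceptable:

    #include "src/base/tsan.h"

    static int approx_hits = 0;  // hypothetical, intentionally unsynchronized

    void RecordHit() {
      TSAN_ANNOTATE_IGNORE_READS_BEGIN;
      TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
      approx_hits++;  // benign race: an occasional lost increment is fine
      TSAN_ANNOTATE_IGNORE_WRITES_END;
      TSAN_ANNOTATE_IGNORE_READS_END;
    }
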
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index f4dd2f1754..fe7d63fa95 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -6,6 +6,7 @@
#include "src/accessors.h"
#include "src/api-natives.h"
+#include "src/api.h"
#include "src/base/ieee754.h"
#include "src/code-stubs.h"
#include "src/compiler.h"
@@ -16,7 +17,6 @@
#include "src/extensions/ignition-statistics-extension.h"
#include "src/extensions/statistics-extension.h"
#include "src/extensions/trigger-failure-extension.h"
-#include "src/ffi/ffi-compiler.h"
#include "src/heap/heap.h"
#include "src/isolate-inl.h"
#include "src/snapshot/natives.h"
@@ -30,6 +30,41 @@
namespace v8 {
namespace internal {
+void SourceCodeCache::Initialize(Isolate* isolate, bool create_heap_objects) {
+ cache_ = create_heap_objects ? isolate->heap()->empty_fixed_array() : NULL;
+}
+
+bool SourceCodeCache::Lookup(Vector<const char> name,
+ Handle<SharedFunctionInfo>* handle) {
+ for (int i = 0; i < cache_->length(); i += 2) {
+ SeqOneByteString* str = SeqOneByteString::cast(cache_->get(i));
+ if (str->IsUtf8EqualTo(name)) {
+ *handle = Handle<SharedFunctionInfo>(
+ SharedFunctionInfo::cast(cache_->get(i + 1)));
+ return true;
+ }
+ }
+ return false;
+}
+
+void SourceCodeCache::Add(Vector<const char> name,
+ Handle<SharedFunctionInfo> shared) {
+ Isolate* isolate = shared->GetIsolate();
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+ int length = cache_->length();
+ Handle<FixedArray> new_array = factory->NewFixedArray(length + 2, TENURED);
+ cache_->CopyTo(0, *new_array, 0, cache_->length());
+ cache_ = *new_array;
+ Handle<String> str =
+ factory->NewStringFromOneByte(Vector<const uint8_t>::cast(name), TENURED)
+ .ToHandleChecked();
+ DCHECK(!str.is_null());
+ cache_->set(length, *str);
+ cache_->set(length + 1, *shared);
+ Script::cast(shared->script())->set_type(type_);
+}
+
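
SourceCodeCache flattens its (name, SharedFunctionInfo) pairs into a single FixedArray: slot 2k holds the key, slot 2k+1 the value, and Add grows by copying into a fresh array. The same layout in plain C++, as a sketch of the structure rather than the V8 code (std::string stands in for both key and cached value):

    #include <string>
    #include <vector>

    class FlatPairCache {
     public:
      // Linear scan in steps of two, like SourceCodeCache::Lookup.
      bool Lookup(const std::string& name, std::string* value) const {
        for (size_t i = 0; i < slots_.size(); i += 2) {
          if (slots_[i] == name) {
            *value = slots_[i + 1];
            return true;
          }
        }
        return false;
      }
      // Append a (key, value) pair; O(n) growth per Add is fine for a
      // cache that only ever holds a handful of natives entries.
      void Add(const std::string& name, const std::string& value) {
        slots_.push_back(name);
        slots_.push_back(value);
      }

     private:
      std::vector<std::string> slots_;  // even slots: keys, odd: values
    };
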
Bootstrapper::Bootstrapper(Isolate* isolate)
: isolate_(isolate),
nesting_(0),
@@ -308,26 +343,54 @@ void Bootstrapper::DetachGlobal(Handle<Context> env) {
namespace {
-void InstallFunction(Handle<JSObject> target, Handle<Name> property_name,
- Handle<JSFunction> function, Handle<String> function_name,
- PropertyAttributes attributes = DONT_ENUM) {
+// Non-construct case.
+V8_NOINLINE Handle<SharedFunctionInfo> SimpleCreateSharedFunctionInfo(
+ Isolate* isolate, Builtins::Name call, Handle<String> name, int len) {
+ Handle<Code> code = isolate->builtins()->builtin_handle(call);
+ Handle<SharedFunctionInfo> shared =
+ isolate->factory()->NewSharedFunctionInfo(name, code, false);
+ shared->set_lazy_deserialization_builtin_id(call);
+ shared->set_internal_formal_parameter_count(len);
+ shared->set_length(len);
+ return shared;
+}
+
+// Construct case.
+V8_NOINLINE Handle<SharedFunctionInfo> SimpleCreateSharedFunctionInfo(
+ Isolate* isolate, Builtins::Name call, Handle<String> name,
+ Handle<String> instance_class_name, int len) {
+ Handle<Code> code = isolate->builtins()->builtin_handle(call);
+ Handle<SharedFunctionInfo> shared =
+ isolate->factory()->NewSharedFunctionInfo(name, code, false);
+ shared->SetConstructStub(*BUILTIN_CODE(isolate, JSBuiltinsConstructStub));
+ shared->set_instance_class_name(*instance_class_name);
+ if (Builtins::IsLazy(call)) shared->set_lazy_deserialization_builtin_id(call);
+ shared->set_internal_formal_parameter_count(len);
+ shared->set_length(len);
+ return shared;
+}
+
+V8_NOINLINE void InstallFunction(Handle<JSObject> target,
+ Handle<Name> property_name,
+ Handle<JSFunction> function,
+ Handle<String> function_name,
+ PropertyAttributes attributes = DONT_ENUM) {
JSObject::AddProperty(target, property_name, function, attributes);
if (target->IsJSGlobalObject()) {
function->shared()->set_instance_class_name(*function_name);
}
}
-void InstallFunction(Handle<JSObject> target, Handle<JSFunction> function,
- Handle<Name> name,
- PropertyAttributes attributes = DONT_ENUM) {
+V8_NOINLINE void InstallFunction(Handle<JSObject> target,
+ Handle<JSFunction> function, Handle<Name> name,
+ PropertyAttributes attributes = DONT_ENUM) {
Handle<String> name_string = Name::ToFunctionName(name).ToHandleChecked();
InstallFunction(target, name, function, name_string, attributes);
}
-Handle<JSFunction> CreateFunction(Isolate* isolate, Handle<String> name,
- InstanceType type, int instance_size,
- MaybeHandle<Object> maybe_prototype,
- Builtins::Name call) {
+V8_NOINLINE Handle<JSFunction> CreateFunction(
+ Isolate* isolate, Handle<String> name, InstanceType type, int instance_size,
+ MaybeHandle<Object> maybe_prototype, Builtins::Name call) {
Factory* factory = isolate->factory();
Handle<Code> call_code(isolate->builtins()->builtin(call));
Handle<Object> prototype;
@@ -336,15 +399,17 @@ Handle<JSFunction> CreateFunction(Isolate* isolate, Handle<String> name,
? factory->NewFunction(name, call_code, prototype, type,
instance_size, STRICT, IMMUTABLE)
: factory->NewFunctionWithoutPrototype(name, call_code, STRICT);
+ if (Builtins::IsLazy(call)) {
+ result->shared()->set_lazy_deserialization_builtin_id(call);
+ }
result->shared()->set_native(true);
return result;
}
-Handle<JSFunction> InstallFunction(Handle<JSObject> target, Handle<Name> name,
- InstanceType type, int instance_size,
- MaybeHandle<Object> maybe_prototype,
- Builtins::Name call,
- PropertyAttributes attributes) {
+V8_NOINLINE Handle<JSFunction> InstallFunction(
+ Handle<JSObject> target, Handle<Name> name, InstanceType type,
+ int instance_size, MaybeHandle<Object> maybe_prototype, Builtins::Name call,
+ PropertyAttributes attributes) {
Handle<String> name_string = Name::ToFunctionName(name).ToHandleChecked();
Handle<JSFunction> function =
CreateFunction(target->GetIsolate(), name_string, type, instance_size,
@@ -353,19 +418,20 @@ Handle<JSFunction> InstallFunction(Handle<JSObject> target, Handle<Name> name,
return function;
}
-Handle<JSFunction> InstallFunction(Handle<JSObject> target, const char* name,
- InstanceType type, int instance_size,
- MaybeHandle<Object> maybe_prototype,
- Builtins::Name call) {
+V8_NOINLINE Handle<JSFunction> InstallFunction(
+ Handle<JSObject> target, const char* name, InstanceType type,
+ int instance_size, MaybeHandle<Object> maybe_prototype,
+ Builtins::Name call) {
Factory* const factory = target->GetIsolate()->factory();
PropertyAttributes attributes = DONT_ENUM;
return InstallFunction(target, factory->InternalizeUtf8String(name), type,
instance_size, maybe_prototype, call, attributes);
}
-Handle<JSFunction> SimpleCreateFunction(Isolate* isolate, Handle<String> name,
- Builtins::Name call, int len,
- bool adapt) {
+V8_NOINLINE Handle<JSFunction> SimpleCreateFunction(Isolate* isolate,
+ Handle<String> name,
+ Builtins::Name call,
+ int len, bool adapt) {
Handle<JSFunction> fun =
CreateFunction(isolate, name, JS_OBJECT_TYPE, JSObject::kHeaderSize,
MaybeHandle<JSObject>(), call);
@@ -378,7 +444,7 @@ Handle<JSFunction> SimpleCreateFunction(Isolate* isolate, Handle<String> name,
return fun;
}
-Handle<JSFunction> SimpleInstallFunction(
+V8_NOINLINE Handle<JSFunction> SimpleInstallFunction(
Handle<JSObject> base, Handle<Name> property_name,
Handle<String> function_name, Builtins::Name call, int len, bool adapt,
PropertyAttributes attrs = DONT_ENUM,
@@ -392,14 +458,14 @@ Handle<JSFunction> SimpleInstallFunction(
return fun;
}
-Handle<JSFunction> SimpleInstallFunction(
+V8_NOINLINE Handle<JSFunction> SimpleInstallFunction(
Handle<JSObject> base, Handle<String> name, Builtins::Name call, int len,
bool adapt, PropertyAttributes attrs = DONT_ENUM,
BuiltinFunctionId id = kInvalidBuiltinFunctionId) {
return SimpleInstallFunction(base, name, name, call, len, adapt, attrs, id);
}
-Handle<JSFunction> SimpleInstallFunction(
+V8_NOINLINE Handle<JSFunction> SimpleInstallFunction(
Handle<JSObject> base, Handle<Name> property_name,
const char* function_name, Builtins::Name call, int len, bool adapt,
PropertyAttributes attrs = DONT_ENUM,
@@ -411,7 +477,7 @@ Handle<JSFunction> SimpleInstallFunction(
call, len, adapt, attrs, id);
}
-Handle<JSFunction> SimpleInstallFunction(
+V8_NOINLINE Handle<JSFunction> SimpleInstallFunction(
Handle<JSObject> base, const char* name, Builtins::Name call, int len,
bool adapt, PropertyAttributes attrs = DONT_ENUM,
BuiltinFunctionId id = kInvalidBuiltinFunctionId) {
@@ -422,17 +488,19 @@ Handle<JSFunction> SimpleInstallFunction(
len, adapt, attrs, id);
}
-Handle<JSFunction> SimpleInstallFunction(Handle<JSObject> base,
- const char* name, Builtins::Name call,
- int len, bool adapt,
- BuiltinFunctionId id) {
+V8_NOINLINE Handle<JSFunction> SimpleInstallFunction(Handle<JSObject> base,
+ const char* name,
+ Builtins::Name call,
+ int len, bool adapt,
+ BuiltinFunctionId id) {
return SimpleInstallFunction(base, name, call, len, adapt, DONT_ENUM, id);
}
-void SimpleInstallGetterSetter(Handle<JSObject> base, Handle<String> name,
- Builtins::Name call_getter,
- Builtins::Name call_setter,
- PropertyAttributes attribs) {
+V8_NOINLINE void SimpleInstallGetterSetter(Handle<JSObject> base,
+ Handle<String> name,
+ Builtins::Name call_getter,
+ Builtins::Name call_setter,
+ PropertyAttributes attribs) {
Isolate* const isolate = base->GetIsolate();
Handle<String> getter_name =
@@ -450,10 +518,11 @@ void SimpleInstallGetterSetter(Handle<JSObject> base, Handle<String> name,
JSObject::DefineAccessor(base, name, getter, setter, attribs).Check();
}
-Handle<JSFunction> SimpleInstallGetter(Handle<JSObject> base,
- Handle<String> name,
- Handle<Name> property_name,
- Builtins::Name call, bool adapt) {
+V8_NOINLINE Handle<JSFunction> SimpleInstallGetter(Handle<JSObject> base,
+ Handle<Name> name,
+ Handle<Name> property_name,
+ Builtins::Name call,
+ bool adapt) {
Isolate* const isolate = base->GetIsolate();
Handle<String> getter_name =
@@ -470,28 +539,31 @@ Handle<JSFunction> SimpleInstallGetter(Handle<JSObject> base,
return getter;
}
-Handle<JSFunction> SimpleInstallGetter(Handle<JSObject> base,
- Handle<String> name, Builtins::Name call,
- bool adapt) {
+V8_NOINLINE Handle<JSFunction> SimpleInstallGetter(Handle<JSObject> base,
+ Handle<Name> name,
+ Builtins::Name call,
+ bool adapt) {
return SimpleInstallGetter(base, name, name, call, adapt);
}
-Handle<JSFunction> SimpleInstallGetter(Handle<JSObject> base,
- Handle<String> name, Builtins::Name call,
- bool adapt, BuiltinFunctionId id) {
+V8_NOINLINE Handle<JSFunction> SimpleInstallGetter(Handle<JSObject> base,
+ Handle<Name> name,
+ Builtins::Name call,
+ bool adapt,
+ BuiltinFunctionId id) {
Handle<JSFunction> fun = SimpleInstallGetter(base, name, call, adapt);
fun->shared()->set_builtin_function_id(id);
return fun;
}
-void InstallConstant(Isolate* isolate, Handle<JSObject> holder,
- const char* name, Handle<Object> value) {
+V8_NOINLINE void InstallConstant(Isolate* isolate, Handle<JSObject> holder,
+ const char* name, Handle<Object> value) {
JSObject::AddProperty(
holder, isolate->factory()->NewStringFromAsciiChecked(name), value,
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
}
-void InstallSpeciesGetter(Handle<JSFunction> constructor) {
+V8_NOINLINE void InstallSpeciesGetter(Handle<JSFunction> constructor) {
Factory* factory = constructor->GetIsolate()->factory();
// TODO(adamk): We should be able to share a SharedFunctionInfo
  // between all these JSFunctions.
@@ -659,9 +731,13 @@ void Genesis::CreateObjectFunction(Handle<JSFunction> empty_function) {
int unused = JSObject::kInitialGlobalObjectUnusedPropertiesCount;
int instance_size = JSObject::kHeaderSize + kPointerSize * unused;
- Handle<JSFunction> object_fun =
- CreateFunction(isolate_, factory->Object_string(), JS_OBJECT_TYPE,
- instance_size, factory->null_value(), Builtins::kIllegal);
+ Handle<JSFunction> object_fun = CreateFunction(
+ isolate_, factory->Object_string(), JS_OBJECT_TYPE, instance_size,
+ factory->null_value(), Builtins::kObjectConstructor);
+ object_fun->shared()->set_length(1);
+ object_fun->shared()->DontAdaptArguments();
+ object_fun->shared()->SetConstructStub(
+ *BUILTIN_CODE(isolate_, ObjectConstructor_ConstructStub));
native_context()->set_object_function(*object_fun);
{
@@ -871,6 +947,8 @@ void Genesis::CreateAsyncIteratorMaps(Handle<JSFunction> empty) {
// %AsyncGeneratorPrototype%
JSObject::ForceSetPrototype(async_generator_object_prototype,
async_iterator_prototype);
+ native_context()->set_initial_async_generator_prototype(
+ *async_generator_object_prototype);
JSObject::AddProperty(async_generator_object_prototype,
factory()->to_string_tag_symbol(),
@@ -956,8 +1034,8 @@ void Genesis::CreateJSProxyMaps() {
// Allocate maps for all Proxy types.
// Next to the default proxy, we need maps indicating callable and
// constructable proxies.
- Handle<Map> proxy_map =
- factory()->NewMap(JS_PROXY_TYPE, JSProxy::kSize, PACKED_ELEMENTS);
+ Handle<Map> proxy_map = factory()->NewMap(JS_PROXY_TYPE, JSProxy::kSize,
+ TERMINAL_FAST_ELEMENTS_KIND);
proxy_map->set_dictionary_map(true);
proxy_map->set_may_have_interesting_symbols(true);
native_context()->set_proxy_map(*proxy_map);
@@ -1204,7 +1282,7 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
factory->the_hole_value(), Builtins::kErrorConstructor, DONT_ENUM);
error_fun->shared()->set_instance_class_name(*factory->Error_string());
error_fun->shared()->DontAdaptArguments();
- error_fun->shared()->set_construct_stub(
+ error_fun->shared()->SetConstructStub(
*BUILTIN_CODE(isolate, ErrorConstructor));
error_fun->shared()->set_length(1);
@@ -1334,9 +1412,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kObjectDefineProperty, 3, true);
native_context()->set_object_define_property(*object_define_property);
- Handle<JSFunction> object_freeze = SimpleInstallFunction(
- object_function, "freeze", Builtins::kObjectFreeze, 1, false);
- native_context()->set_object_freeze(*object_freeze);
+ SimpleInstallFunction(object_function, "freeze", Builtins::kObjectFreeze, 1,
+ false);
Handle<JSFunction> object_get_prototype_of = SimpleInstallFunction(
object_function, "getPrototypeOf", Builtins::kObjectGetPrototypeOf,
@@ -1456,11 +1533,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // --- A s y n c F r o m S y n c I t e r a t o r
- Handle<Code> code(BUILTIN_CODE(isolate, AsyncIteratorValueUnwrap));
- Handle<SharedFunctionInfo> info =
- factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
- info->set_internal_formal_parameter_count(1);
- info->set_length(1);
+ Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
+ isolate, Builtins::kAsyncIteratorValueUnwrap, factory->empty_string(),
+ 1);
native_context()->set_async_iterator_value_unwrap_shared_fun(*info);
}
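The SimpleCreateSharedFunctionInfo helper that this and the following hunks
switch to is not itself shown in the diff, but its shape can be reconstructed
from the call sites it replaces. A minimal sketch, assuming the helper merely
factors out the removed boilerplate (the lookup by Builtins::Name is an
assumption; only the factory calls below appear verbatim in the removed code):

    // Sketch only -- not the actual bootstrapper.cc definition.
    Handle<SharedFunctionInfo> SimpleCreateSharedFunctionInfo(
        Isolate* isolate, Builtins::Name builtin, Handle<String> name,
        int len) {
      Factory* factory = isolate->factory();
      // Assumed equivalent of the BUILTIN_CODE lookups in the removed lines.
      Handle<Code> code(isolate->builtins()->builtin(builtin), isolate);
      Handle<SharedFunctionInfo> info =
          factory->NewSharedFunctionInfo(name, code, false);
      info->set_internal_formal_parameter_count(len);
      info->set_length(len);
      return info;
    }

The five-argument overload used later for PromiseGetCapabilitiesExecutor
presumably also sets the construct stub and instance class name, mirroring
the longer block it replaces there.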
@@ -1475,42 +1550,35 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kAsyncGeneratorAwaitUncaught, 1, false);
native_context()->set_async_generator_await_uncaught(*await_uncaught);
- Handle<Code> code(BUILTIN_CODE(isolate, AsyncGeneratorAwaitResolveClosure));
- Handle<SharedFunctionInfo> info =
- factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
- info->set_internal_formal_parameter_count(1);
- info->set_length(1);
+ Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
+ isolate, Builtins::kAsyncGeneratorAwaitResolveClosure,
+ factory->empty_string(), 1);
native_context()->set_async_generator_await_resolve_shared_fun(*info);
- code = BUILTIN_CODE(isolate, AsyncGeneratorAwaitRejectClosure);
- info = factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
- info->set_internal_formal_parameter_count(1);
- info->set_length(1);
+ info = SimpleCreateSharedFunctionInfo(
+ isolate, Builtins::kAsyncGeneratorAwaitRejectClosure,
+ factory->empty_string(), 1);
native_context()->set_async_generator_await_reject_shared_fun(*info);
- code = BUILTIN_CODE(isolate, AsyncGeneratorYieldResolveClosure);
- info = factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
- info->set_internal_formal_parameter_count(1);
- info->set_length(1);
+ info = SimpleCreateSharedFunctionInfo(
+ isolate, Builtins::kAsyncGeneratorYieldResolveClosure,
+ factory->empty_string(), 1);
native_context()->set_async_generator_yield_resolve_shared_fun(*info);
- code = BUILTIN_CODE(isolate, AsyncGeneratorReturnResolveClosure);
- info = factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
- info->set_internal_formal_parameter_count(1);
- info->set_length(1);
+ info = SimpleCreateSharedFunctionInfo(
+ isolate, Builtins::kAsyncGeneratorReturnResolveClosure,
+ factory->empty_string(), 1);
native_context()->set_async_generator_return_resolve_shared_fun(*info);
- code = BUILTIN_CODE(isolate, AsyncGeneratorReturnClosedResolveClosure);
- info = factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
- info->set_internal_formal_parameter_count(1);
- info->set_length(1);
+ info = SimpleCreateSharedFunctionInfo(
+ isolate, Builtins::kAsyncGeneratorReturnClosedResolveClosure,
+ factory->empty_string(), 1);
native_context()->set_async_generator_return_closed_resolve_shared_fun(
*info);
- code = BUILTIN_CODE(isolate, AsyncGeneratorReturnClosedRejectClosure);
- info = factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
- info->set_internal_formal_parameter_count(1);
- info->set_length(1);
+ info = SimpleCreateSharedFunctionInfo(
+ isolate, Builtins::kAsyncGeneratorReturnClosedRejectClosure,
+ factory->empty_string(), 1);
native_context()->set_async_generator_return_closed_reject_shared_fun(
*info);
}
@@ -1556,9 +1624,11 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
array_function->shared()->SetConstructStub(*code);
// Set up %ArrayPrototype%.
+  // The %ArrayPrototype% uses TERMINAL_FAST_ELEMENTS_KIND to ensure that
+  // constant functions stay constant after the prototype is switched into
+  // setup mode and back, when constant field tracking is enabled.
Handle<JSArray> proto =
- Handle<JSArray>::cast(factory->NewJSObject(array_function, TENURED));
- JSArray::Initialize(proto, 0);
+ factory->NewJSArray(0, TERMINAL_FAST_ELEMENTS_KIND, TENURED);
JSFunction::SetPrototype(array_function, proto);
native_context()->set_initial_array_prototype(*proto);
@@ -1871,6 +1941,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
DONT_ENUM);
// Install the String.prototype methods.
+ SimpleInstallFunction(prototype, "anchor", Builtins::kStringPrototypeAnchor,
+ 1, true);
+ SimpleInstallFunction(prototype, "big", Builtins::kStringPrototypeBig, 0,
+ true);
+ SimpleInstallFunction(prototype, "blink", Builtins::kStringPrototypeBlink,
+ 0, true);
+ SimpleInstallFunction(prototype, "bold", Builtins::kStringPrototypeBold, 0,
+ true);
SimpleInstallFunction(prototype, "charAt", Builtins::kStringPrototypeCharAt,
1, true);
SimpleInstallFunction(prototype, "charCodeAt",
@@ -1881,12 +1959,22 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
1, false);
SimpleInstallFunction(prototype, "endsWith",
Builtins::kStringPrototypeEndsWith, 1, false);
+ SimpleInstallFunction(prototype, "fontcolor",
+ Builtins::kStringPrototypeFontcolor, 1, true);
+ SimpleInstallFunction(prototype, "fontsize",
+ Builtins::kStringPrototypeFontsize, 1, true);
+ SimpleInstallFunction(prototype, "fixed", Builtins::kStringPrototypeFixed,
+ 0, true);
SimpleInstallFunction(prototype, "includes",
Builtins::kStringPrototypeIncludes, 1, false);
SimpleInstallFunction(prototype, "indexOf",
Builtins::kStringPrototypeIndexOf, 1, false);
+ SimpleInstallFunction(prototype, "italics",
+ Builtins::kStringPrototypeItalics, 0, true);
SimpleInstallFunction(prototype, "lastIndexOf",
Builtins::kStringPrototypeLastIndexOf, 1, false);
+ SimpleInstallFunction(prototype, "link", Builtins::kStringPrototypeLink, 1,
+ true);
SimpleInstallFunction(prototype, "localeCompare",
Builtins::kStringPrototypeLocaleCompare, 1, true);
#ifdef V8_INTL_SUPPORT
@@ -1896,16 +1984,26 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(prototype, "normalize",
Builtins::kStringPrototypeNormalize, 0, false);
#endif // V8_INTL_SUPPORT
+ SimpleInstallFunction(prototype, "repeat", Builtins::kStringPrototypeRepeat,
+ 1, true);
SimpleInstallFunction(prototype, "replace",
Builtins::kStringPrototypeReplace, 2, true);
SimpleInstallFunction(prototype, "slice", Builtins::kStringPrototypeSlice,
2, false);
+ SimpleInstallFunction(prototype, "small", Builtins::kStringPrototypeSmall,
+ 0, true);
SimpleInstallFunction(prototype, "split", Builtins::kStringPrototypeSplit,
2, false);
+ SimpleInstallFunction(prototype, "strike", Builtins::kStringPrototypeStrike,
+ 0, true);
+ SimpleInstallFunction(prototype, "sub", Builtins::kStringPrototypeSub, 0,
+ true);
SimpleInstallFunction(prototype, "substr", Builtins::kStringPrototypeSubstr,
2, false);
SimpleInstallFunction(prototype, "substring",
Builtins::kStringPrototypeSubstring, 2, false);
+ SimpleInstallFunction(prototype, "sup", Builtins::kStringPrototypeSup, 0,
+ true);
SimpleInstallFunction(prototype, "startsWith",
Builtins::kStringPrototypeStartsWith, 1, false);
SimpleInstallFunction(prototype, "toString",
@@ -2163,13 +2261,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{
- Handle<Code> code(BUILTIN_CODE(isolate, PromiseGetCapabilitiesExecutor));
- Handle<SharedFunctionInfo> info =
- factory->NewSharedFunctionInfo(factory->empty_string(), code, true);
- info->SetConstructStub(*BUILTIN_CODE(isolate, JSBuiltinsConstructStub));
- info->set_instance_class_name(isolate->heap()->Object_string());
- info->set_internal_formal_parameter_count(2);
- info->set_length(2);
+ Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
+ isolate, Builtins::kPromiseGetCapabilitiesExecutor,
+ factory->empty_string(), factory->Object_string(), 2);
native_context()->set_promise_get_capabilities_executor_shared_fun(*info);
// %new_promise_capability(C, debugEvent)
@@ -2198,8 +2292,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(promise_fun, "race", Builtins::kPromiseRace, 1, true);
- SimpleInstallFunction(promise_fun, "resolve", Builtins::kPromiseResolve, 1,
- true);
+ SimpleInstallFunction(promise_fun, "resolve",
+ Builtins::kPromiseResolveWrapper, 1, true);
SimpleInstallFunction(promise_fun, "reject", Builtins::kPromiseReject, 1,
true);
@@ -2284,27 +2378,20 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{
- Handle<Code> code(BUILTIN_CODE(isolate, PromiseResolveClosure));
- Handle<SharedFunctionInfo> info =
- factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
- info->set_internal_formal_parameter_count(1);
- info->set_length(1);
+ Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
+ isolate, Builtins::kPromiseResolveClosure, factory->empty_string(),
+ 1);
native_context()->set_promise_resolve_shared_fun(*info);
- code = BUILTIN_CODE(isolate, PromiseRejectClosure);
- info =
- factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
- info->set_internal_formal_parameter_count(1);
- info->set_length(1);
+ info = SimpleCreateSharedFunctionInfo(
+ isolate, Builtins::kPromiseRejectClosure, factory->empty_string(), 1);
native_context()->set_promise_reject_shared_fun(*info);
}
{
- Handle<Code> code(BUILTIN_CODE(isolate, PromiseAllResolveElementClosure));
- Handle<SharedFunctionInfo> info =
- factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
- info->set_internal_formal_parameter_count(1);
- info->set_length(1);
+ Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
+ isolate, Builtins::kPromiseAllResolveElementClosure,
+ factory->empty_string(), 1);
native_context()->set_promise_all_resolve_element_shared_fun(*info);
}
@@ -2853,8 +2940,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
JSObject::cast(typed_array_fun->instance_prototype()));
native_context()->set_typed_array_prototype(*prototype);
- // Install the "buffer", "byteOffset", "byteLength" and "length"
- // getters on the {prototype}.
+ // Install the "buffer", "byteOffset", "byteLength", "length"
+ // and @@toStringTag getters on the {prototype}.
SimpleInstallGetter(prototype, factory->buffer_string(),
Builtins::kTypedArrayPrototypeBuffer, false);
SimpleInstallGetter(prototype, factory->byte_length_string(),
@@ -2866,6 +2953,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallGetter(prototype, factory->length_string(),
Builtins::kTypedArrayPrototypeLength, true,
kTypedArrayLength);
+ SimpleInstallGetter(prototype, factory->to_string_tag_symbol(),
+ Builtins::kTypedArrayPrototypeToStringTag, true,
+ kTypedArrayToStringTag);
// Install "keys", "values" and "entries" methods on the {prototype}.
SimpleInstallFunction(prototype, "entries",
@@ -2904,6 +2994,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kTypedArrayPrototypeReduce, 1, false);
SimpleInstallFunction(prototype, "reduceRight",
Builtins::kTypedArrayPrototypeReduceRight, 1, false);
+ SimpleInstallFunction(prototype, "set", Builtins::kTypedArrayPrototypeSet,
+ 1, false);
SimpleInstallFunction(prototype, "slice",
Builtins::kTypedArrayPrototypeSlice, 2, false);
SimpleInstallFunction(prototype, "some", Builtins::kTypedArrayPrototypeSome,
@@ -3046,23 +3138,24 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
prototype, factory->to_string_tag_symbol(), factory->Map_string(),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- Handle<JSFunction> map_get =
- SimpleInstallFunction(prototype, "get", Builtins::kMapGet, 1, true);
+ Handle<JSFunction> map_get = SimpleInstallFunction(
+ prototype, "get", Builtins::kMapPrototypeGet, 1, true);
native_context()->set_map_get(*map_get);
- Handle<JSFunction> map_set =
- SimpleInstallFunction(prototype, "set", Builtins::kMapSet, 2, true);
+ Handle<JSFunction> map_set = SimpleInstallFunction(
+ prototype, "set", Builtins::kMapPrototypeSet, 2, true);
native_context()->set_map_set(*map_set);
- Handle<JSFunction> map_has =
- SimpleInstallFunction(prototype, "has", Builtins::kMapHas, 1, true);
+ Handle<JSFunction> map_has = SimpleInstallFunction(
+ prototype, "has", Builtins::kMapPrototypeHas, 1, true);
native_context()->set_map_has(*map_has);
Handle<JSFunction> map_delete = SimpleInstallFunction(
- prototype, "delete", Builtins::kMapDelete, 1, true);
+ prototype, "delete", Builtins::kMapPrototypeDelete, 1, true);
native_context()->set_map_delete(*map_delete);
- SimpleInstallFunction(prototype, "clear", Builtins::kMapClear, 0, true);
+ SimpleInstallFunction(prototype, "clear", Builtins::kMapPrototypeClear, 0,
+ true);
Handle<JSFunction> entries = SimpleInstallFunction(
prototype, "entries", Builtins::kMapPrototypeEntries, 0, true);
JSObject::AddProperty(prototype, factory->iterator_symbol(), entries,
@@ -3101,19 +3194,20 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
prototype, factory->to_string_tag_symbol(), factory->Set_string(),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- Handle<JSFunction> set_has =
- SimpleInstallFunction(prototype, "has", Builtins::kSetHas, 1, true);
+ Handle<JSFunction> set_has = SimpleInstallFunction(
+ prototype, "has", Builtins::kSetPrototypeHas, 1, true);
native_context()->set_set_has(*set_has);
- Handle<JSFunction> set_add =
- SimpleInstallFunction(prototype, "add", Builtins::kSetAdd, 1, true);
+ Handle<JSFunction> set_add = SimpleInstallFunction(
+ prototype, "add", Builtins::kSetPrototypeAdd, 1, true);
native_context()->set_set_add(*set_add);
Handle<JSFunction> set_delete = SimpleInstallFunction(
- prototype, "delete", Builtins::kSetDelete, 1, true);
+ prototype, "delete", Builtins::kSetPrototypeDelete, 1, true);
native_context()->set_set_delete(*set_delete);
- SimpleInstallFunction(prototype, "clear", Builtins::kSetClear, 0, true);
+ SimpleInstallFunction(prototype, "clear", Builtins::kSetPrototypeClear, 0,
+ true);
SimpleInstallFunction(prototype, "entries", Builtins::kSetPrototypeEntries,
0, true);
SimpleInstallFunction(prototype, "forEach", Builtins::kSetPrototypeForEach,
@@ -3213,7 +3307,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
CreateJSProxyMaps();
Handle<Map> proxy_function_map =
- Map::Copy(isolate->sloppy_function_without_prototype_map(), "Proxy");
+ Map::Copy(isolate->strict_function_without_prototype_map(), "Proxy");
proxy_function_map->set_is_constructor(true);
Handle<String> name = factory->Proxy_string();
@@ -3536,12 +3630,14 @@ bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
Handle<String> script_name =
isolate->factory()->NewStringFromUtf8(name).ToHandleChecked();
- Handle<SharedFunctionInfo> function_info =
+ MaybeHandle<SharedFunctionInfo> maybe_function_info =
Compiler::GetSharedFunctionInfoForScript(
- source, script_name, 0, 0, ScriptOriginOptions(), Handle<Object>(),
- context, NULL, NULL, ScriptCompiler::kNoCompileOptions, natives_flag,
- Handle<FixedArray>());
- if (function_info.is_null()) return false;
+ source, script_name, 0, 0, ScriptOriginOptions(),
+ MaybeHandle<Object>(), context, NULL, NULL,
+ ScriptCompiler::kNoCompileOptions, natives_flag,
+ MaybeHandle<FixedArray>());
+ Handle<SharedFunctionInfo> function_info;
+ if (!maybe_function_info.ToHandle(&function_info)) return false;
DCHECK(context->IsNativeContext());
@@ -3600,11 +3696,13 @@ bool Genesis::CompileExtension(Isolate* isolate, v8::Extension* extension) {
if (!cache->Lookup(name, &function_info)) {
Handle<String> script_name =
factory->NewStringFromUtf8(name).ToHandleChecked();
- function_info = Compiler::GetSharedFunctionInfoForScript(
- source, script_name, 0, 0, ScriptOriginOptions(), Handle<Object>(),
- context, extension, NULL, ScriptCompiler::kNoCompileOptions,
- EXTENSION_CODE, Handle<FixedArray>());
- if (function_info.is_null()) return false;
+ MaybeHandle<SharedFunctionInfo> maybe_function_info =
+ Compiler::GetSharedFunctionInfoForScript(
+ source, script_name, 0, 0, ScriptOriginOptions(),
+ MaybeHandle<Object>(), context, extension, NULL,
+ ScriptCompiler::kNoCompileOptions, EXTENSION_CODE,
+ MaybeHandle<FixedArray>());
+ if (!maybe_function_info.ToHandle(&function_info)) return false;
cache->Add(name, function_info);
}
@@ -4017,21 +4115,16 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
}
{
- Handle<Code> code(BUILTIN_CODE(isolate, AsyncFunctionAwaitRejectClosure));
- Handle<SharedFunctionInfo> info =
- factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
- info->set_internal_formal_parameter_count(1);
- info->set_length(1);
+ Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
+ isolate, Builtins::kAsyncFunctionAwaitRejectClosure,
+ factory->empty_string(), 1);
native_context->set_async_function_await_reject_shared_fun(*info);
}
{
- Handle<Code> code(
- BUILTIN_CODE(isolate, AsyncFunctionAwaitResolveClosure));
- Handle<SharedFunctionInfo> info =
- factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
- info->set_internal_formal_parameter_count(1);
- info->set_length(1);
+ Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
+ isolate, Builtins::kAsyncFunctionAwaitResolveClosure,
+ factory->empty_string(), 1);
native_context->set_async_function_await_resolve_shared_fun(*info);
}
@@ -4135,6 +4228,7 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_tostring)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_class_fields)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object_rest_spread)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_dynamic_import)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_meta)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_template_escapes)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrict_constructor_return)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_strict_legacy_accessor_builtins)
@@ -4217,40 +4311,31 @@ void Genesis::InitializeGlobal_harmony_promise_finally() {
native_context()->set_promise_prototype_map(*prototype_map);
{
- Handle<Code> code = BUILTIN_CODE(isolate(), PromiseThenFinally);
- Handle<SharedFunctionInfo> info = factory()->NewSharedFunctionInfo(
- factory()->empty_string(), code, false);
- info->set_internal_formal_parameter_count(1);
- info->set_length(1);
+ Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
+ isolate(), Builtins::kPromiseThenFinally, factory()->empty_string(), 1);
info->set_native(true);
native_context()->set_promise_then_finally_shared_fun(*info);
}
{
- Handle<Code> code = BUILTIN_CODE(isolate(), PromiseCatchFinally);
- Handle<SharedFunctionInfo> info = factory()->NewSharedFunctionInfo(
- factory()->empty_string(), code, false);
- info->set_internal_formal_parameter_count(1);
- info->set_length(1);
+ Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
+ isolate(), Builtins::kPromiseCatchFinally, factory()->empty_string(),
+ 1);
info->set_native(true);
native_context()->set_promise_catch_finally_shared_fun(*info);
}
{
- Handle<Code> code = BUILTIN_CODE(isolate(), PromiseValueThunkFinally);
- Handle<SharedFunctionInfo> info = factory()->NewSharedFunctionInfo(
- factory()->empty_string(), code, false);
- info->set_internal_formal_parameter_count(0);
- info->set_length(0);
+ Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
+ isolate(), Builtins::kPromiseValueThunkFinally,
+ factory()->empty_string(), 0);
native_context()->set_promise_value_thunk_finally_shared_fun(*info);
}
{
- Handle<Code> code = BUILTIN_CODE(isolate(), PromiseThrowerFinally);
- Handle<SharedFunctionInfo> info = factory()->NewSharedFunctionInfo(
- factory()->empty_string(), code, false);
- info->set_internal_formal_parameter_count(0);
- info->set_length(0);
+ Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
+ isolate(), Builtins::kPromiseThrowerFinally, factory()->empty_string(),
+ 0);
native_context()->set_promise_thrower_finally_shared_fun(*info);
}
}
@@ -4271,6 +4356,50 @@ void Genesis::InitializeGlobal_harmony_regexp_dotall() {
native_context()->set_regexp_prototype_map(*prototype_map);
}
+void Genesis::InitializeGlobal_harmony_bigint() {
+ if (!FLAG_harmony_bigint) return;
+
+ Handle<JSGlobalObject> global(native_context()->global_object());
+ Handle<JSFunction> bigint_fun = InstallFunction(
+ global, "BigInt", JS_VALUE_TYPE, JSValue::kSize,
+ isolate()->factory()->the_hole_value(), Builtins::kBigIntConstructor);
+ bigint_fun->shared()->DontAdaptArguments();
+ bigint_fun->shared()->SetConstructStub(
+ *BUILTIN_CODE(isolate(), BigIntConstructor_ConstructStub));
+ bigint_fun->shared()->set_length(1);
+ InstallWithIntrinsicDefaultProto(isolate(), bigint_fun,
+ Context::BIGINT_FUNCTION_INDEX);
+ heap()->bigint_map()->SetConstructorFunctionIndex(
+ Context::BIGINT_FUNCTION_INDEX);
+
+ // Install the properties of the BigInt constructor.
+ // parseInt(string, radix)
+ SimpleInstallFunction(bigint_fun, "parseInt", Builtins::kBigIntParseInt, 2,
+ false);
+ // asUintN(bits, bigint)
+ SimpleInstallFunction(bigint_fun, "asUintN", Builtins::kBigIntAsUintN, 2,
+ false);
+ // asIntN(bits, bigint)
+ SimpleInstallFunction(bigint_fun, "asIntN", Builtins::kBigIntAsIntN, 2,
+ false);
+
+ // Set up the %BigIntPrototype%.
+ Handle<JSObject> prototype(JSObject::cast(bigint_fun->instance_prototype()));
+ JSFunction::SetPrototype(bigint_fun, prototype);
+
+ // Install the properties of the BigInt.prototype.
+ // "constructor" is created implicitly by InstallFunction() above.
+ // toLocaleString([reserved1 [, reserved2]])
+ SimpleInstallFunction(prototype, "toLocaleString",
+ Builtins::kBigIntPrototypeToLocaleString, 0, false);
+ // toString([radix])
+ SimpleInstallFunction(prototype, "toString",
+ Builtins::kBigIntPrototypeToString, 0, false);
+ // valueOf()
+ SimpleInstallFunction(prototype, "valueOf", Builtins::kBigIntPrototypeValueOf,
+ 0, false);
+}
+
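The asUintN and asIntN builtins installed above implement plain modular
wrapping of a BigInt into an unsigned or signed bit width. For widths up to
64 bits, their numeric behavior can be illustrated with a small standalone
C++ sketch (illustrative only, not V8 code):

    #include <cstdint>

    // BigInt.asUintN(bits, x): x modulo 2^bits, as an unsigned value.
    uint64_t AsUintN(int bits, uint64_t value) {
      if (bits >= 64) return value;
      if (bits == 0) return 0;
      return value & ((uint64_t{1} << bits) - 1);
    }

    // BigInt.asIntN(bits, x): the same wrap, then sign-extended into the
    // range [-2^(bits-1), 2^(bits-1)).
    int64_t AsIntN(int bits, uint64_t value) {
      if (bits == 0) return 0;
      uint64_t masked = AsUintN(bits, value);
      uint64_t sign_bit = uint64_t{1} << (bits - 1);
      return static_cast<int64_t>((masked ^ sign_bit) - sign_bit);
    }

For example, AsIntN(8, 200) yields -56, matching BigInt.asIntN(8, 200n).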
#ifdef V8_INTL_SUPPORT
void Genesis::InitializeGlobal_harmony_number_format_to_parts() {
@@ -4329,7 +4458,8 @@ Handle<JSFunction> Genesis::CreateArrayBuffer(Handle<String> name,
array_buffer_fun, DONT_ENUM);
SimpleInstallFunction(array_buffer_fun, factory()->isView_string(),
- Builtins::kArrayBufferIsView, 1, true);
+ Builtins::kArrayBufferIsView, 1, true, DONT_ENUM,
+ kArrayBufferIsView);
// Install the "byteLength" getter on the {prototype}.
SimpleInstallGetter(prototype, factory()->byte_length_string(),
@@ -4828,12 +4958,16 @@ bool Genesis::InstallSpecialObjects(Handle<Context> native_context) {
Handle<Smi> stack_trace_limit(Smi::FromInt(FLAG_stack_trace_limit), isolate);
JSObject::AddProperty(Error, name, stack_trace_limit, NONE);
- if (FLAG_expose_wasm || FLAG_validate_asm) {
- WasmJs::Install(isolate);
+ if (FLAG_expose_wasm) {
+    // Install the internal data structures into the isolate and expose them
+    // on the global object.
+ WasmJs::Install(isolate, true);
+ } else if (FLAG_validate_asm) {
+    // Install the internal data structures only; these are needed for
+    // asm.js code translated to WASM to work correctly.
+ WasmJs::Install(isolate, false);
}
- InstallFFIMap(isolate);
-
return true;
}
@@ -5350,7 +5484,7 @@ Genesis::Genesis(Isolate* isolate,
DCHECK_EQ(global_proxy_data->embedder_field_count(),
global_proxy_template->InternalFieldCount());
Handle<Map> global_proxy_map = isolate->factory()->NewMap(
- JS_GLOBAL_PROXY_TYPE, proxy_size, HOLEY_SMI_ELEMENTS);
+ JS_GLOBAL_PROXY_TYPE, proxy_size, TERMINAL_FAST_ELEMENTS_KIND);
global_proxy_map->set_is_access_check_needed(true);
global_proxy_map->set_has_hidden_prototype(true);
global_proxy_map->set_may_have_interesting_symbols(true);
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index 05eb74f091..d49180190e 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -22,44 +22,16 @@ class SourceCodeCache final BASE_EMBEDDED {
public:
explicit SourceCodeCache(Script::Type type): type_(type), cache_(NULL) { }
- void Initialize(Isolate* isolate, bool create_heap_objects) {
- cache_ = create_heap_objects ? isolate->heap()->empty_fixed_array() : NULL;
- }
+ void Initialize(Isolate* isolate, bool create_heap_objects);
void Iterate(RootVisitor* v) {
v->VisitRootPointer(Root::kExtensions,
bit_cast<Object**, FixedArray**>(&cache_));
}
- bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle) {
- for (int i = 0; i < cache_->length(); i+=2) {
- SeqOneByteString* str = SeqOneByteString::cast(cache_->get(i));
- if (str->IsUtf8EqualTo(name)) {
- *handle = Handle<SharedFunctionInfo>(
- SharedFunctionInfo::cast(cache_->get(i + 1)));
- return true;
- }
- }
- return false;
- }
+ bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle);
- void Add(Vector<const char> name, Handle<SharedFunctionInfo> shared) {
- Isolate* isolate = shared->GetIsolate();
- Factory* factory = isolate->factory();
- HandleScope scope(isolate);
- int length = cache_->length();
- Handle<FixedArray> new_array = factory->NewFixedArray(length + 2, TENURED);
- cache_->CopyTo(0, *new_array, 0, cache_->length());
- cache_ = *new_array;
- Handle<String> str =
- factory
- ->NewStringFromOneByte(Vector<const uint8_t>::cast(name), TENURED)
- .ToHandleChecked();
- DCHECK(!str.is_null());
- cache_->set(length, *str);
- cache_->set(length + 1, *shared);
- Script::cast(shared->script())->set_type(type_);
- }
+ void Add(Vector<const char> name, Handle<SharedFunctionInfo> shared);
private:
Script::Type type_;
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 0af87f2c8a..bf359d69e9 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -156,276 +156,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-// static
-void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments
- // -- r1 : constructor function
- // -- cp : context
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // 1. Load the first argument into r0.
- Label no_arguments;
- {
- __ mov(r2, r0); // Store argc in r2.
- __ sub(r0, r0, Operand(1), SetCC);
- __ b(lo, &no_arguments);
- __ ldr(r0, MemOperand(sp, r0, LSL, kPointerSizeLog2));
- }
-
- // 2a. Convert the first argument to a number.
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(r2);
- __ EnterBuiltinFrame(cp, r1, r2);
- __ Call(BUILTIN_CODE(masm->isolate(), ToNumber), RelocInfo::CODE_TARGET);
- __ LeaveBuiltinFrame(cp, r1, r2);
- __ SmiUntag(r2);
- }
-
- {
- // Drop all arguments including the receiver.
- __ Drop(r2);
- __ Ret(1);
- }
-
- // 2b. No arguments, return +0.
- __ bind(&no_arguments);
- __ Move(r0, Smi::kZero);
- __ Ret(1);
-}
-
-// static
-void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments
- // -- r1 : constructor function
- // -- r3 : new target
- // -- cp : context
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // 1. Make sure we operate in the context of the called function.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- // 2. Load the first argument into r2.
- {
- Label no_arguments, done;
- __ mov(r6, r0); // Store argc in r6.
- __ sub(r0, r0, Operand(1), SetCC);
- __ b(lo, &no_arguments);
- __ ldr(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2));
- __ b(&done);
- __ bind(&no_arguments);
- __ Move(r2, Smi::kZero);
- __ bind(&done);
- }
-
- // 3. Make sure r2 is a number.
- {
- Label done_convert;
- __ JumpIfSmi(r2, &done_convert);
- __ CompareObjectType(r2, r4, r4, HEAP_NUMBER_TYPE);
- __ b(eq, &done_convert);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(r6);
- __ EnterBuiltinFrame(cp, r1, r6);
- __ Push(r3);
- __ Move(r0, r2);
- __ Call(BUILTIN_CODE(masm->isolate(), ToNumber), RelocInfo::CODE_TARGET);
- __ Move(r2, r0);
- __ Pop(r3);
- __ LeaveBuiltinFrame(cp, r1, r6);
- __ SmiUntag(r6);
- }
- __ bind(&done_convert);
- }
-
- // 4. Check if new target and constructor differ.
- Label drop_frame_and_ret, new_object;
- __ cmp(r1, r3);
- __ b(ne, &new_object);
-
- // 5. Allocate a JSValue wrapper for the number.
- __ AllocateJSValue(r0, r1, r2, r4, r5, &new_object);
- __ b(&drop_frame_and_ret);
-
- // 6. Fallback to the runtime to create new object.
- __ bind(&new_object);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(r6);
- __ EnterBuiltinFrame(cp, r1, r6);
- __ Push(r2); // first argument
- __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
- RelocInfo::CODE_TARGET);
- __ Pop(r2);
- __ LeaveBuiltinFrame(cp, r1, r6);
- __ SmiUntag(r6);
- }
- __ str(r2, FieldMemOperand(r0, JSValue::kValueOffset));
-
- __ bind(&drop_frame_and_ret);
- {
- __ Drop(r6);
- __ Ret(1);
- }
-}
-
-// static
-void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments
- // -- r1 : constructor function
- // -- cp : context
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // 1. Load the first argument into r0.
- Label no_arguments;
- {
- __ mov(r2, r0); // Store argc in r2.
- __ sub(r0, r0, Operand(1), SetCC);
- __ b(lo, &no_arguments);
- __ ldr(r0, MemOperand(sp, r0, LSL, kPointerSizeLog2));
- }
-
- // 2a. At least one argument, return r0 if it's a string, otherwise
- // dispatch to appropriate conversion.
- Label drop_frame_and_ret, to_string, symbol_descriptive_string;
- {
- __ JumpIfSmi(r0, &to_string);
- STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
- __ CompareObjectType(r0, r3, r3, FIRST_NONSTRING_TYPE);
- __ b(hi, &to_string);
- __ b(eq, &symbol_descriptive_string);
- __ b(&drop_frame_and_ret);
- }
-
- // 2b. No arguments, return the empty string (and pop the receiver).
- __ bind(&no_arguments);
- {
- __ LoadRoot(r0, Heap::kempty_stringRootIndex);
- __ Ret(1);
- }
-
- // 3a. Convert r0 to a string.
- __ bind(&to_string);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(r2);
- __ EnterBuiltinFrame(cp, r1, r2);
- __ Call(BUILTIN_CODE(masm->isolate(), ToString), RelocInfo::CODE_TARGET);
- __ LeaveBuiltinFrame(cp, r1, r2);
- __ SmiUntag(r2);
- }
- __ b(&drop_frame_and_ret);
-
- // 3b. Convert symbol in r0 to a string.
- __ bind(&symbol_descriptive_string);
- {
- __ Drop(r2);
- __ Drop(1);
- __ Push(r0);
- __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
- }
-
- __ bind(&drop_frame_and_ret);
- {
- __ Drop(r2);
- __ Ret(1);
- }
-}
-
-// static
-void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments
- // -- r1 : constructor function
- // -- r3 : new target
- // -- cp : context
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // 1. Make sure we operate in the context of the called function.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- // 2. Load the first argument into r2.
- {
- Label no_arguments, done;
- __ mov(r6, r0); // Store argc in r6.
- __ sub(r0, r0, Operand(1), SetCC);
- __ b(lo, &no_arguments);
- __ ldr(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2));
- __ b(&done);
- __ bind(&no_arguments);
- __ LoadRoot(r2, Heap::kempty_stringRootIndex);
- __ bind(&done);
- }
-
- // 3. Make sure r2 is a string.
- {
- Label convert, done_convert;
- __ JumpIfSmi(r2, &convert);
- __ CompareObjectType(r2, r4, r4, FIRST_NONSTRING_TYPE);
- __ b(lo, &done_convert);
- __ bind(&convert);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(r6);
- __ EnterBuiltinFrame(cp, r1, r6);
- __ Push(r3);
- __ Move(r0, r2);
- __ Call(BUILTIN_CODE(masm->isolate(), ToString), RelocInfo::CODE_TARGET);
- __ Move(r2, r0);
- __ Pop(r3);
- __ LeaveBuiltinFrame(cp, r1, r6);
- __ SmiUntag(r6);
- }
- __ bind(&done_convert);
- }
-
- // 4. Check if new target and constructor differ.
- Label drop_frame_and_ret, new_object;
- __ cmp(r1, r3);
- __ b(ne, &new_object);
-
- // 5. Allocate a JSValue wrapper for the string.
- __ AllocateJSValue(r0, r1, r2, r4, r5, &new_object);
- __ b(&drop_frame_and_ret);
-
- // 6. Fallback to the runtime to create new object.
- __ bind(&new_object);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(r6);
- __ EnterBuiltinFrame(cp, r1, r6);
- __ Push(r2); // first argument
- __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
- RelocInfo::CODE_TARGET);
- __ Pop(r2);
- __ LeaveBuiltinFrame(cp, r1, r6);
- __ SmiUntag(r6);
- }
- __ str(r2, FieldMemOperand(r0, JSValue::kValueOffset));
-
- __ bind(&drop_frame_and_ret);
- {
- __ Drop(r6);
- __ Ret(1);
- }
-}
-
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
@@ -972,33 +702,12 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
static void ReplaceClosureCodeWithOptimizedCode(
MacroAssembler* masm, Register optimized_code, Register closure,
Register scratch1, Register scratch2, Register scratch3) {
- Register native_context = scratch1;
-
// Store code entry in the closure.
__ str(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
__ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
-
- // Link the closure into the optimized function list.
- __ ldr(native_context, NativeContextMemOperand());
- __ ldr(scratch2,
- ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- __ str(scratch2,
- FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
- __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch2,
- scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- const int function_list_offset =
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
- __ str(closure,
- ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- // Save closure before the write barrier.
- __ mov(scratch2, closure);
- __ RecordWriteContextSlot(native_context, function_list_offset, closure,
- scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ mov(closure, scratch2);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
@@ -1011,7 +720,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
// Leave the frame (also dropping the register file).
- __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ __ LeaveFrame(StackFrame::INTERPRETED);
// Drop receiver + arguments.
__ add(sp, sp, args_count, LeaveCC);
@@ -1576,6 +1285,18 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
GenerateTailCallToSharedCode(masm);
}
+void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
+ // Set the code slot inside the JSFunction to the trampoline to the
+ // interpreter entry.
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
+ __ str(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
+ __ RecordWriteField(r1, JSFunction::kCodeOffset, r2, r4, kLRHasNotBeenSaved,
+ kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ // Jump to compile lazy.
+ Generate_CompileLazy(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argument count (preserved for callee)
@@ -1622,6 +1343,95 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
+// Lazy deserialization design doc: http://goo.gl/dxkYDZ.
+void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : argument count (preserved for callee)
+ // -- r3 : new target (preserved for callee)
+ // -- r1 : target function (preserved for callee)
+ // -----------------------------------
+
+ Label deserialize_in_runtime;
+
+ Register target = r1; // Must be preserved
+ Register scratch0 = r2;
+ Register scratch1 = r4;
+
+ CHECK(scratch0 != r0 && scratch0 != r3 && scratch0 != r1);
+ CHECK(scratch1 != r0 && scratch1 != r3 && scratch1 != r1);
+ CHECK(scratch0 != scratch1);
+
+ // Load the builtin id for lazy deserialization from SharedFunctionInfo.
+
+ __ AssertFunction(target);
+ __ ldr(scratch0,
+ FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
+
+ __ ldr(scratch1,
+ FieldMemOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
+ __ AssertSmi(scratch1);
+
+ // The builtin may already have been deserialized. If that is the case, it is
+  // stored in the builtins table, and we can copy the correct code object to
+  // both the shared function info and the function without calling into the
+  // runtime.
+ //
+ // Otherwise, we need to call into runtime to deserialize.
+
+ {
+ // Load the code object at builtins_table[builtin_id] into scratch1.
+
+ __ SmiUntag(scratch1);
+ __ Move(scratch0,
+ Operand(ExternalReference::builtins_address(masm->isolate())));
+ __ ldr(scratch1, MemOperand(scratch0, scratch1, LSL, kPointerSizeLog2));
+
+ // Check if the loaded code object has already been deserialized. This is
+ // the case iff it does not equal DeserializeLazy.
+
+ __ Move(scratch0, masm->CodeObject());
+ __ cmp(scratch1, scratch0);
+ __ b(eq, &deserialize_in_runtime);
+ }
+
+ {
+ // If we've reached this spot, the target builtin has been deserialized and
+ // we simply need to copy it over. First to the shared function info.
+
+ Register target_builtin = scratch1;
+ Register shared = scratch0;
+
+ __ ldr(shared,
+ FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
+
+ CHECK(r5 != target && r5 != scratch0 && r5 != scratch1);
+ CHECK(r9 != target && r9 != scratch0 && r9 != scratch1);
+
+ __ str(target_builtin,
+ FieldMemOperand(shared, SharedFunctionInfo::kCodeOffset));
+ __ mov(r9, target_builtin); // Write barrier clobbers r9 below.
+ __ RecordWriteField(shared, SharedFunctionInfo::kCodeOffset, r9, r5,
+ kLRHasNotBeenSaved, kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // And second to the target function.
+
+ __ str(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset));
+ __ mov(r9, target_builtin); // Write barrier clobbers r9 below.
+ __ RecordWriteField(target, JSFunction::kCodeOffset, r9, r5,
+ kLRHasNotBeenSaved, kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // All copying is done. Jump to the deserialized code object.
+
+ __ add(target_builtin, target_builtin,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(target_builtin);
+ }
+
+ __ bind(&deserialize_in_runtime);
+ GenerateTailCallToReturnedCode(masm, Runtime::kDeserializeLazy);
+}
+
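Setting aside the write barriers and register choices, the fast path emitted
above reduces to the following hedged C++-style sketch (the accessor names
are assumptions inferred from the field offsets used in the assembly):

    // Sketch only. Returns nullptr when the builtin still has to be
    // deserialized in the runtime; otherwise copies the already-deserialized
    // code object into both the SharedFunctionInfo and the JSFunction.
    Code* TryDeserializeLazyFastPath(Isolate* isolate, JSFunction* target) {
      SharedFunctionInfo* shared = target->shared();
      // kFunctionDataOffset holds the builtin id as a Smi (AssertSmi above).
      int builtin_id = Smi::cast(shared->function_data())->value();
      Code* code = isolate->builtins()->builtin(
          static_cast<Builtins::Name>(builtin_id));  // builtins table lookup
      if (code == isolate->builtins()->builtin(Builtins::kDeserializeLazy)) {
        return nullptr;  // still the lazy stub: fall back to the runtime
      }
      shared->set_code(code);
      target->set_code(code);
      return code;  // the generated code then jumps past the Code header
    }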
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argument count (preserved for callee)
@@ -1703,7 +1513,6 @@ void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
__ pop(r0);
}
- __ add(sp, sp, Operand(kPointerSize)); // Ignore state
__ mov(pc, lr); // Jump to ContinueToBuiltin stub
}
@@ -1759,50 +1568,15 @@ void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
Generate_ContinueToBuiltinHelper(masm, true, true);
}
-static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
- Deoptimizer::BailoutType type) {
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Pass the function and deoptimization type to the runtime system.
- __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
- __ push(r0);
__ CallRuntime(Runtime::kNotifyDeoptimized);
}
- // Get the full codegen state from the stack and untag it -> r6.
- __ ldr(r6, MemOperand(sp, 0 * kPointerSize));
- __ SmiUntag(r6);
- // Switch on the state.
- Label with_tos_register, unknown_state;
- __ cmp(r6,
- Operand(static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS)));
- __ b(ne, &with_tos_register);
- __ add(sp, sp, Operand(1 * kPointerSize)); // Remove state.
- __ Ret();
-
- __ bind(&with_tos_register);
DCHECK_EQ(kInterpreterAccumulatorRegister.code(), r0.code());
- __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
- __ cmp(r6,
- Operand(static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER)));
- __ b(ne, &unknown_state);
- __ add(sp, sp, Operand(2 * kPointerSize)); // Remove state.
+ __ pop(r0);
__ Ret();
-
- __ bind(&unknown_state);
- __ stop("no cases left");
-}
-
-void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index 03a4995f75..b1d5d32b9a 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -150,281 +150,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-// static
-void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : number of arguments
- // -- x1 : constructor function
- // -- cp : context
- // -- lr : return address
- // -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
- // -- sp[argc * 8] : receiver
- // -----------------------------------
- ASM_LOCATION("Builtins::Generate_NumberConstructor");
-
- // 1. Load the first argument into x0.
- Label no_arguments;
- {
- __ Cbz(x0, &no_arguments);
- __ Mov(x2, x0); // Store argc in x2.
- __ Sub(x0, x0, 1);
- __ Ldr(x0, MemOperand(jssp, x0, LSL, kPointerSizeLog2));
- }
-
- // 2a. Convert first argument to number.
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(x2);
- __ EnterBuiltinFrame(cp, x1, x2);
- __ Call(BUILTIN_CODE(masm->isolate(), ToNumber), RelocInfo::CODE_TARGET);
- __ LeaveBuiltinFrame(cp, x1, x2);
- __ SmiUntag(x2);
- }
-
- {
- // Drop all arguments.
- __ Drop(x2);
- }
-
- // 2b. No arguments, return +0 (already in x0).
- __ Bind(&no_arguments);
- __ Drop(1);
- __ Ret();
-}
-
-// static
-void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : number of arguments
- // -- x1 : constructor function
- // -- x3 : new target
- // -- cp : context
- // -- lr : return address
- // -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
- // -- sp[argc * 8] : receiver
- // -----------------------------------
- ASM_LOCATION("Builtins::Generate_NumberConstructor_ConstructStub");
-
- // 1. Make sure we operate in the context of the called function.
- __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
-
- // 2. Load the first argument into x2.
- {
- Label no_arguments, done;
- __ Move(x6, x0); // Store argc in x6.
- __ Cbz(x0, &no_arguments);
- __ Sub(x0, x0, 1);
- __ Ldr(x2, MemOperand(jssp, x0, LSL, kPointerSizeLog2));
- __ B(&done);
- __ Bind(&no_arguments);
- __ Mov(x2, Smi::kZero);
- __ Bind(&done);
- }
-
- // 3. Make sure x2 is a number.
- {
- Label done_convert;
- __ JumpIfSmi(x2, &done_convert);
- __ JumpIfObjectType(x2, x4, x4, HEAP_NUMBER_TYPE, &done_convert, eq);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(x6);
- __ EnterBuiltinFrame(cp, x1, x6);
- __ Push(x3);
- __ Move(x0, x2);
- __ Call(BUILTIN_CODE(masm->isolate(), ToNumber), RelocInfo::CODE_TARGET);
- __ Move(x2, x0);
- __ Pop(x3);
- __ LeaveBuiltinFrame(cp, x1, x6);
- __ SmiUntag(x6);
- }
- __ Bind(&done_convert);
- }
-
- // 4. Check if new target and constructor differ.
- Label drop_frame_and_ret, new_object;
- __ Cmp(x1, x3);
- __ B(ne, &new_object);
-
- // 5. Allocate a JSValue wrapper for the number.
- __ AllocateJSValue(x0, x1, x2, x4, x5, &new_object);
- __ B(&drop_frame_and_ret);
-
- // 6. Fallback to the runtime to create new object.
- __ bind(&new_object);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(x6);
- __ EnterBuiltinFrame(cp, x1, x6);
- __ Push(x2); // first argument
- __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
- RelocInfo::CODE_TARGET);
- __ Pop(x2);
- __ LeaveBuiltinFrame(cp, x1, x6);
- __ SmiUntag(x6);
- }
- __ Str(x2, FieldMemOperand(x0, JSValue::kValueOffset));
-
- __ bind(&drop_frame_and_ret);
- {
- __ Drop(x6);
- __ Drop(1);
- __ Ret();
- }
-}
-
-// static
-void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : number of arguments
- // -- x1 : constructor function
- // -- cp : context
- // -- lr : return address
- // -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
- // -- sp[argc * 8] : receiver
- // -----------------------------------
- ASM_LOCATION("Builtins::Generate_StringConstructor");
-
- // 1. Load the first argument into x0.
- Label no_arguments;
- {
- __ Cbz(x0, &no_arguments);
- __ Mov(x2, x0); // Store argc in x2.
- __ Sub(x0, x0, 1);
- __ Ldr(x0, MemOperand(jssp, x0, LSL, kPointerSizeLog2));
- }
-
- // 2a. At least one argument, return x0 if it's a string, otherwise
- // dispatch to appropriate conversion.
- Label drop_frame_and_ret, to_string, symbol_descriptive_string;
- {
- __ JumpIfSmi(x0, &to_string);
- STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
- __ CompareObjectType(x0, x3, x3, FIRST_NONSTRING_TYPE);
- __ B(hi, &to_string);
- __ B(eq, &symbol_descriptive_string);
- __ b(&drop_frame_and_ret);
- }
-
- // 2b. No arguments, return the empty string (and pop the receiver).
- __ Bind(&no_arguments);
- {
- __ LoadRoot(x0, Heap::kempty_stringRootIndex);
- __ Drop(1);
- __ Ret();
- }
-
- // 3a. Convert x0 to a string.
- __ Bind(&to_string);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(x2);
- __ EnterBuiltinFrame(cp, x1, x2);
- __ Call(BUILTIN_CODE(masm->isolate(), ToString), RelocInfo::CODE_TARGET);
- __ LeaveBuiltinFrame(cp, x1, x2);
- __ SmiUntag(x2);
- }
- __ b(&drop_frame_and_ret);
-
- // 3b. Convert symbol in x0 to a string.
- __ Bind(&symbol_descriptive_string);
- {
- __ Drop(x2);
- __ Drop(1);
- __ Push(x0);
- __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
- }
-
- __ bind(&drop_frame_and_ret);
- {
- __ Drop(x2);
- __ Drop(1);
- __ Ret();
- }
-}
-
-// static
-void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : number of arguments
- // -- x1 : constructor function
- // -- x3 : new target
- // -- cp : context
- // -- lr : return address
- // -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
- // -- sp[argc * 8] : receiver
- // -----------------------------------
- ASM_LOCATION("Builtins::Generate_StringConstructor_ConstructStub");
-
- // 1. Make sure we operate in the context of the called function.
- __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
-
- // 2. Load the first argument into x2.
- {
- Label no_arguments, done;
- __ mov(x6, x0); // Store argc in x6.
- __ Cbz(x0, &no_arguments);
- __ Sub(x0, x0, 1);
- __ Ldr(x2, MemOperand(jssp, x0, LSL, kPointerSizeLog2));
- __ B(&done);
- __ Bind(&no_arguments);
- __ LoadRoot(x2, Heap::kempty_stringRootIndex);
- __ Bind(&done);
- }
-
- // 3. Make sure x2 is a string.
- {
- Label convert, done_convert;
- __ JumpIfSmi(x2, &convert);
- __ JumpIfObjectType(x2, x4, x4, FIRST_NONSTRING_TYPE, &done_convert, lo);
- __ Bind(&convert);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(x6);
- __ EnterBuiltinFrame(cp, x1, x6);
- __ Push(x3);
- __ Move(x0, x2);
- __ Call(BUILTIN_CODE(masm->isolate(), ToString), RelocInfo::CODE_TARGET);
- __ Move(x2, x0);
- __ Pop(x3);
- __ LeaveBuiltinFrame(cp, x1, x6);
- __ SmiUntag(x6);
- }
- __ Bind(&done_convert);
- }
-
- // 4. Check if new target and constructor differ.
- Label drop_frame_and_ret, new_object;
- __ Cmp(x1, x3);
- __ B(ne, &new_object);
-
- // 5. Allocate a JSValue wrapper for the string.
- __ AllocateJSValue(x0, x1, x2, x4, x5, &new_object);
- __ B(&drop_frame_and_ret);
-
- // 6. Fallback to the runtime to create new object.
- __ bind(&new_object);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(x6);
- __ EnterBuiltinFrame(cp, x1, x6);
- __ Push(x2); // first argument
- __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
- RelocInfo::CODE_TARGET);
- __ Pop(x2);
- __ LeaveBuiltinFrame(cp, x1, x6);
- __ SmiUntag(x6);
- }
- __ Str(x2, FieldMemOperand(x0, JSValue::kValueOffset));
-
- __ bind(&drop_frame_and_ret);
- {
- __ Drop(x6);
- __ Drop(1);
- __ Ret();
- }
-}
-
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
@@ -478,12 +203,10 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
+ __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
// Preserve the incoming parameters on the stack.
- __ SmiTag(x0);
- __ Push(cp, x0);
- __ SmiUntag(x0);
-
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ SmiTag(x11, x0);
+ __ Push(cp, x11, x10);
// Set up pointer to last argument.
__ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
@@ -986,31 +709,12 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
static void ReplaceClosureCodeWithOptimizedCode(
MacroAssembler* masm, Register optimized_code, Register closure,
Register scratch1, Register scratch2, Register scratch3) {
- Register native_context = scratch1;
-
// Store code entry in the closure.
__ Str(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
__ Mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
-
- // Link the closure into the optimized function list.
- __ Ldr(native_context, NativeContextMemOperand());
- __ Ldr(scratch2,
- ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- __ Str(scratch2,
- FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
- __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch2,
- scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- const int function_list_offset =
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
- __ Str(closure,
- ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- __ Mov(scratch2, closure);
- __ RecordWriteContextSlot(native_context, function_list_offset, scratch2,
- scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
@@ -1023,7 +727,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
// Leave the frame (also dropping the register file).
- __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ __ LeaveFrame(StackFrame::INTERPRETED);
// Drop receiver + arguments.
__ Drop(args_count, 1);
@@ -1277,9 +981,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// register in the register file.
Label loop_header;
__ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
- // TODO(rmcilroy): Ensure we always have an even number of registers to
- // allow stack to be 16 bit aligned (and remove need for jssp).
__ Lsr(x11, x11, kPointerSizeLog2);
+ // Round up the number of registers to a multiple of 2, to align the stack
+ // to 16 bytes.
+ __ Add(x11, x11, 1);
+ __ Bic(x11, x11, 1);
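+ // E.g. x11 == 5: Add -> 6, Bic -> 6; x11 == 6: Add -> 7, Bic -> 6.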
__ PushMultipleTimes(x10, x11);
__ Bind(&loop_header);
}
@@ -1405,14 +1111,15 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
Label stack_overflow;
// Add one for the receiver.
- __ add(x3, x0, Operand(1));
+ __ Add(x3, x0, 1);
// Add a stack check before pushing arguments.
Generate_StackOverflowCheck(masm, x3, x6, &stack_overflow);
// Push "undefined" as the receiver arg if we need to.
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+ __ Push(x10);
__ Mov(x3, x0); // Argument count is correct.
}
@@ -1593,6 +1300,18 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
GenerateTailCallToSharedCode(masm);
}
+void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
+ // Set the code slot inside the JSFunction to the interpreter entry
+ // trampoline.
+ __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
+ __ Str(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
+ __ RecordWriteField(x1, JSFunction::kCodeOffset, x2, x5, kLRHasNotBeenSaved,
+ kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ // Jump to compile lazy.
+ Generate_CompileLazy(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argument count (preserved for callee)
@@ -1639,68 +1358,182 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
+// Lazy deserialization design doc: http://goo.gl/dxkYDZ.
+void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : argument count (preserved for callee)
+ // -- x3 : new target (preserved for callee)
+ // -- x1 : target function (preserved for callee)
+ // -----------------------------------
+
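+ // In rough pseudo-code (a sketch of the flow below, not the emitted code):
+ //   builtin_id = target.shared.function_data   // a Smi
+ //   code = builtins_table[builtin_id]
+ //   if (code == DeserializeLazy) goto runtime  // not deserialized yet
+ //   target.shared.code = code; target.code = code
+ //   jump code
+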
+ Label deserialize_in_runtime;
+
+ Register target = x1; // Must be preserved
+ Register scratch0 = x2;
+ Register scratch1 = x4;
+
+ CHECK(!scratch0.is(x0) && !scratch0.is(x3) && !scratch0.is(x1));
+ CHECK(!scratch1.is(x0) && !scratch1.is(x3) && !scratch1.is(x1));
+ CHECK(!scratch0.is(scratch1));
+
+ // Load the builtin id for lazy deserialization from SharedFunctionInfo.
+
+ __ AssertFunction(target);
+ __ Ldr(scratch0,
+ FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
+
+ __ Ldr(scratch1,
+ FieldMemOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
+ __ AssertSmi(scratch1);
+
+ // The builtin may already have been deserialized. If that is the case, it is
+ // stored in the builtins table, and we can copy the correct code object to
+ // both the shared function info and the function without calling into runtime.
+ //
+ // Otherwise, we need to call into runtime to deserialize.
+
+ {
+ // Load the code object at builtins_table[builtin_id] into scratch1.
+
+ __ SmiUntag(scratch1);
+ __ Mov(scratch0, ExternalReference::builtins_address(masm->isolate()));
+ __ Ldr(scratch1, MemOperand(scratch0, scratch1, LSL, kPointerSizeLog2));
+
+ // Check if the loaded code object has already been deserialized. This is
+ // the case iff it does not equal DeserializeLazy.
+
+ __ Move(scratch0, masm->CodeObject());
+ __ Cmp(scratch1, scratch0);
+ __ B(eq, &deserialize_in_runtime);
+ }
+
+ {
+ // If we've reached this spot, the target builtin has been deserialized and
+ // we simply need to copy it over. First to the shared function info.
+
+ Register target_builtin = scratch1;
+ Register shared = scratch0;
+
+ __ Ldr(shared,
+ FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
+
+ CHECK(!x5.is(target) && !x5.is(scratch0) && !x5.is(scratch1));
+ CHECK(!x9.is(target) && !x9.is(scratch0) && !x9.is(scratch1));
+
+ __ Str(target_builtin,
+ FieldMemOperand(shared, SharedFunctionInfo::kCodeOffset));
+ __ Mov(x9, target_builtin); // Write barrier clobbers x9 below.
+ __ RecordWriteField(shared, SharedFunctionInfo::kCodeOffset, x9, x5,
+ kLRHasNotBeenSaved, kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // And second to the target function.
+
+ __ Str(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset));
+ __ Mov(x9, target_builtin); // Write barrier clobbers x9 below.
+ __ RecordWriteField(target, JSFunction::kCodeOffset, x9, x5,
+ kLRHasNotBeenSaved, kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // All copying is done. Jump to the deserialized code object.
+
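+ // The first instruction sits Code::kHeaderSize bytes into the Code object;
+ // subtracting kHeapObjectTag removes the heap object tag from the pointer.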
+ __ Add(target_builtin, target_builtin,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(target_builtin);
+ }
+
+ __ bind(&deserialize_in_runtime);
+ GenerateTailCallToReturnedCode(masm, Runtime::kDeserializeLazy);
+}
+
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argument count (preserved for callee)
// -- x1 : new target (preserved for callee)
// -- x3 : target function (preserved for callee)
// -----------------------------------
+ Register argc = x0;
+ Register new_target = x1;
+ Register target = x3;
+
Label failed;
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Preserve argument count for later compare.
- __ Move(x4, x0);
- // Push a copy of the target function and the new target.
- __ SmiTag(x0);
- // Push another copy as a parameter to the runtime call.
- __ Push(x0, x1, x3, x1);
- // Copy arguments from caller (stdlib, foreign, heap).
+ // Push argument count, a copy of the target function and the new target,
+ // together with some padding to maintain 16-byte alignment.
+ __ SmiTag(argc);
+ __ Push(argc, new_target, target, padreg);
+
+ // Push another copy of new target as a parameter to the runtime call and
+ // copy the rest of the arguments from caller (stdlib, foreign, heap).
Label args_done;
- for (int j = 0; j < 4; ++j) {
- Label over;
- if (j < 3) {
- __ cmp(x4, Operand(j));
- __ B(ne, &over);
- }
- for (int i = j - 1; i >= 0; --i) {
- __ ldr(x4, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
- i * kPointerSize));
- __ push(x4);
- }
- for (int i = 0; i < 3 - j; ++i) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- }
- if (j < 3) {
- __ jmp(&args_done);
- __ bind(&over);
- }
- }
- __ bind(&args_done);
+ Register undef = x10;
+ Register scratch1 = x12;
+ Register scratch2 = x13;
+ Register scratch3 = x14;
+ __ LoadRoot(undef, Heap::kUndefinedValueRootIndex);
+
+ Label at_least_one_arg;
+ Label three_args;
+ DCHECK(Smi::kZero == 0);
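+ // argc is still Smi-tagged here; since Smi::kZero == 0, the zero test
+ // gives the same result on the tagged value.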
+ __ Cbnz(argc, &at_least_one_arg);
+
+ // No arguments.
+ __ Push(new_target, undef, undef, undef);
+ __ B(&args_done);
+
+ __ Bind(&at_least_one_arg);
+ // Load two arguments, though we may only use one (for the one arg case).
+ __ Ldp(scratch2, scratch1,
+ MemOperand(fp, StandardFrameConstants::kCallerSPOffset));
+
+ // Set flags for determining the value of smi-tagged argc.
+ // lt => 1, eq => 2, gt => 3.
+ __ Cmp(argc, Smi::FromInt(2));
+ __ B(gt, &three_args);
+
+ // One or two arguments.
+ // If there is one argument (flags are lt), scratch2 holds that argument and
+ // scratch1 holds garbage; the cmovs below move the argument into scratch1
+ // and set scratch2 to undefined.
+ __ CmovX(scratch1, scratch2, lt);
+ __ CmovX(scratch2, undef, lt);
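+ // E.g. for a single argument {a}, the Push below yields
+ // new_target, a, undefined, undefined.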
+ __ Push(new_target, scratch1, scratch2, undef);
+ __ B(&args_done);
+
+ // Three arguments.
+ __ Bind(&three_args);
+ __ Ldr(scratch3, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
+ 2 * kPointerSize));
+ __ Push(new_target, scratch3, scratch1, scratch2);
+
+ __ Bind(&args_done);
// Call runtime, on success unwind frame, and parent frame.
__ CallRuntime(Runtime::kInstantiateAsmJs, 4);
+
// A smi 0 is returned on failure, an object on success.
__ JumpIfSmi(x0, &failed);
- __ Drop(2);
- __ pop(x4);
- __ SmiUntag(x4);
+ // Peek the argument count from the stack, untagging at the same time.
+ __ Ldr(w4, UntagSmiMemOperand(__ StackPointer(), 3 * kPointerSize));
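+ // The four slots are argc, new_target, target and padreg, pushed on entry.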
+ __ Drop(4);
scope.GenerateLeaveFrame();
- __ add(x4, x4, Operand(1));
- __ Drop(x4);
+ // Drop arguments and receiver.
+ __ Add(x4, x4, 1);
+ __ DropArguments(x4);
__ Ret();
- __ bind(&failed);
+ __ Bind(&failed);
// Restore target function and new target.
- __ Pop(x3, x1, x0);
- __ SmiUntag(x0);
+ __ Pop(padreg, target, new_target, argc);
+ __ SmiUntag(argc);
}
// On failure, tail call back to regular JS by re-calling the function
// which has been reset to the compile lazy builtin.
- __ Ldr(x4, FieldMemOperand(x1, JSFunction::kCodeOffset));
- __ Add(x4, x4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Ldr(x4, FieldMemOperand(new_target, JSFunction::kCodeOffset));
+ __ Add(x4, x4, Code::kHeaderSize - kHeapObjectTag);
__ Jump(x4);
}
@@ -1714,9 +1547,6 @@ void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
__ Pop(x0);
}
- // Ignore state (pushed by Deoptimizer::EntryGenerator::Generate).
- __ Drop(1);
-
// Jump to the ContinueToBuiltin stub. Deoptimizer::EntryGenerator::Generate
// loads this into lr before it jumps here.
__ Br(lr);
@@ -1773,53 +1603,16 @@ void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
Generate_ContinueToBuiltinHelper(masm, true, true);
}
-static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
- Deoptimizer::BailoutType type) {
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Pass the deoptimization type to the runtime system.
- __ Mov(x0, Smi::FromInt(static_cast<int>(type)));
- __ Push(x0);
__ CallRuntime(Runtime::kNotifyDeoptimized);
}
- // Get the full codegen state from the stack and untag it.
- Register state = x6;
- __ Peek(state, 0);
- __ SmiUntag(state);
-
- // Switch on the state.
- Label with_tos_register, unknown_state;
- __ CompareAndBranch(state,
- static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS),
- ne, &with_tos_register);
- __ Drop(1); // Remove state.
- __ Ret();
-
- __ Bind(&with_tos_register);
- // Reload TOS register.
+ // Pop TOS register and padding.
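+ // The padding slot keeps the stack 16-byte aligned around the runtime call.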
DCHECK_EQ(kInterpreterAccumulatorRegister.code(), x0.code());
- __ Peek(x0, kPointerSize);
- __ CompareAndBranch(state,
- static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER),
- ne, &unknown_state);
- __ Drop(2); // Remove state and TOS.
+ __ Pop(x0, padreg);
__ Ret();
-
- __ Bind(&unknown_state);
- __ Abort(kInvalidFullCodegenState);
-}
-
-void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
}
static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
@@ -2358,14 +2151,12 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// in the fast case? (fall back to AllocateInNewSpace?)
FrameScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(x0);
- __ Push(x0, x1);
+ __ Push(padreg, x0, x1, cp);
__ Mov(x0, x3);
- __ Push(cp);
__ Call(BUILTIN_CODE(masm->isolate(), ToObject),
RelocInfo::CODE_TARGET);
- __ Pop(cp);
__ Mov(x3, x0);
- __ Pop(x1, x0);
+ __ Pop(cp, x1, x0, padreg);
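+ // The pops mirror the pushes above in reverse order, with padreg
+ // preserving 16-byte stack alignment.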
__ SmiUntag(x0);
}
__ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
@@ -2389,10 +2180,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ InvokeFunctionCode(x1, no_reg, expected, actual, JUMP_FUNCTION);
// The function is a "classConstructor", need to raise an exception.
- __ bind(&class_constructor);
+ __ Bind(&class_constructor);
{
FrameScope frame(masm, StackFrame::INTERNAL);
- __ Push(x1);
+ __ Push(padreg, x1);
__ CallRuntime(Runtime::kThrowConstructorNonCallableError);
}
}
diff --git a/deps/v8/src/builtins/builtins-arguments-gen.cc b/deps/v8/src/builtins/builtins-arguments-gen.cc
index c9a0cda878..96d52e6db2 100644
--- a/deps/v8/src/builtins/builtins-arguments-gen.cc
+++ b/deps/v8/src/builtins/builtins-arguments-gen.cc
@@ -410,11 +410,5 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
return result.value();
}
-TF_BUILTIN(FastNewSloppyArguments, ArgumentsBuiltinsAssembler) {
- Node* function = Parameter(FastNewArgumentsDescriptor::kFunction);
- Node* context = Parameter(FastNewArgumentsDescriptor::kContext);
- Return(EmitFastNewSloppyArguments(context, function));
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index 675225533f..46d20e57eb 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -6,6 +6,7 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
+#include "src/factory-inl.h"
#include "src/frame-constants.h"
namespace v8 {
@@ -111,8 +112,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
Label fast(this);
Label runtime(this);
Label object_push_pre(this), object_push(this), double_push(this);
- BranchIfFastJSArray(a(), context(), FastJSArrayAccessMode::ANY_ACCESS,
- &fast, &runtime);
+ BranchIfFastJSArray(a(), context(), &fast, &runtime);
BIND(&fast);
{
@@ -534,30 +534,23 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
{
if (direction == ForEachDirection::kForward) {
// 8. Repeat, while k < len
- GotoUnlessNumberLessThan(k(), len_, &after_loop);
+ GotoIfNumberGreaterThanOrEqual(k(), len_, &after_loop);
} else {
// OR
// 10. Repeat, while k >= 0
- GotoUnlessNumberLessThan(SmiConstant(-1), k(), &after_loop);
+ GotoIfNumberGreaterThanOrEqual(SmiConstant(-1), k(), &after_loop);
}
Label done_element(this, &to_);
// a. Let Pk be ToString(k).
- // We never have to perform a ToString conversion for Smi keys because
- // they are guaranteed to be stored as elements. We easily hit this case
- // when using any iteration builtin on a dictionary elements Array.
- VARIABLE(p_k, MachineRepresentation::kTagged, k());
- {
- Label continue_with_key(this);
- GotoIf(TaggedIsSmi(p_k.value()), &continue_with_key);
- p_k.Bind(ToString(context(), p_k.value()));
- Goto(&continue_with_key);
- BIND(&continue_with_key);
- }
+ // We never have to perform a ToString conversion, as the above guards
+ // guarantee that we have a positive {k} which is also a valid array
+ // index in the range [0, 2^32-1).
+ CSA_ASSERT(this, IsNumberArrayIndex(k()));
// b. Let kPresent be HasProperty(O, Pk).
// c. ReturnIfAbrupt(kPresent).
- Node* k_present = HasProperty(o(), p_k.value(), context(), kHasProperty);
+ Node* k_present = HasProperty(o(), k(), context(), kHasProperty);
// d. If kPresent is true, then
GotoIf(WordNotEqual(k_present, TrueConstant()), &done_element);
@@ -711,7 +704,6 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
GotoIf(TaggedIsNotSmi(len()), slow);
BranchIfFastJSArray(o(), context(),
- CodeStubAssembler::FastJSArrayAccessMode::INBOUNDS_READ,
&switch_on_elements_kind, slow);
BIND(&switch_on_elements_kind);
@@ -759,11 +751,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
Int32Constant(JS_ARRAY_TYPE)),
&runtime);
- Node* const native_context = LoadNativeContext(context());
- Node* const initial_array_prototype = LoadContextElement(
- native_context, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
- Node* proto = LoadMapPrototype(original_map);
- GotoIf(WordNotEqual(proto, initial_array_prototype), &runtime);
+ GotoIfNot(IsPrototypeInitialArrayPrototype(context(), original_map),
+ &runtime);
Node* species_protector = SpeciesProtectorConstant();
Node* value =
@@ -780,6 +769,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
// element in the input array (maybe the callback deletes an element).
const ElementsKind elements_kind =
GetHoleyElementsKind(GetInitialFastElementsKind());
+ Node* const native_context = LoadNativeContext(context());
Node* array_map = LoadJSArrayElementsMap(elements_kind, native_context);
a_.Bind(AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, len, len, nullptr,
CodeStubAssembler::SMI_PARAMETERS));
@@ -835,8 +825,7 @@ TF_BUILTIN(FastArrayPop, CodeStubAssembler) {
// 4) we aren't supposed to shrink the backing store.
// 1) Check that the array has fast elements.
- BranchIfFastJSArray(receiver, context, FastJSArrayAccessMode::INBOUNDS_READ,
- &fast, &runtime);
+ BranchIfFastJSArray(receiver, context, &fast, &runtime);
BIND(&fast);
{
@@ -939,8 +928,7 @@ TF_BUILTIN(FastArrayPush, CodeStubAssembler) {
Node* kind = nullptr;
Label fast(this);
- BranchIfFastJSArray(receiver, context, FastJSArrayAccessMode::ANY_ACCESS,
- &fast, &runtime);
+ BranchIfFastJSArray(receiver, context, &fast, &runtime);
BIND(&fast);
{
@@ -1068,8 +1056,7 @@ TF_BUILTIN(FastArrayShift, CodeStubAssembler) {
// 5) we aren't supposed to left-trim the backing store.
// 1) Check that the array has fast elements.
- BranchIfFastJSArray(receiver, context, FastJSArrayAccessMode::INBOUNDS_READ,
- &fast, &runtime);
+ BranchIfFastJSArray(receiver, context, &fast, &runtime);
BIND(&fast);
{
@@ -1749,8 +1736,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
// Take slow path if not a JSArray, if retrieving elements requires
// traversing prototype, or if access checks are required.
- BranchIfFastJSArray(receiver, context, FastJSArrayAccessMode::INBOUNDS_READ,
- &init_index, &call_runtime);
+ BranchIfFastJSArray(receiver, context, &init_index, &call_runtime);
BIND(&init_index);
VARIABLE(index_var, MachineType::PointerRepresentation(), intptr_zero);
@@ -1819,8 +1805,8 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
{
VARIABLE(search_num, MachineRepresentation::kFloat64);
Label ident_loop(this, &index_var), heap_num_loop(this, &search_num),
- string_loop(this), undef_loop(this, &index_var), not_smi(this),
- not_heap_num(this);
+ string_loop(this), bigint_loop(this, &index_var),
+ undef_loop(this, &index_var), not_smi(this), not_heap_num(this);
GotoIfNot(TaggedIsSmi(search_element), &not_smi);
search_num.Bind(SmiToFloat64(CAST(search_element)));
@@ -1838,6 +1824,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
BIND(&not_heap_num);
Node* search_type = LoadMapInstanceType(map);
GotoIf(IsStringInstanceType(search_type), &string_loop);
+ GotoIf(IsBigIntInstanceType(search_type), &bigint_loop);
Goto(&ident_loop);
BIND(&ident_loop);
@@ -1942,6 +1929,18 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
Increment(&index_var);
Goto(&next_iteration);
}
+
+ BIND(&bigint_loop);
+ {
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
+ &return_not_found);
+ Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+ TNode<Object> result = CallRuntime(Runtime::kBigIntEqual, context,
+ search_element, element_k);
+ GotoIf(WordEqual(result, TrueConstant()), &return_found);
+ Increment(&index_var);
+ Goto(&bigint_loop);
+ }
}
BIND(&if_packed_doubles);
@@ -2350,7 +2349,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
length = var_length.value();
}
- GotoUnlessNumberLessThan(index, length, &set_done);
+ GotoIfNumberGreaterThanOrEqual(index, length, &set_done);
StoreObjectField(iterator, JSArrayIterator::kNextIndexOffset,
NumberInc(index));
diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc
index 5cff179c63..cbb2d7b3e5 100644
--- a/deps/v8/src/builtins/builtins-async-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-function-gen.cc
@@ -128,8 +128,8 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
// Called by the parser from the desugaring of 'await' when catch
// prediction indicates that there is a locally surrounding catch block.
TF_BUILTIN(AsyncFunctionAwaitCaught, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 2);
- Node* const generator = Parameter(Descriptor::kReceiver);
+ CSA_ASSERT_JS_ARGC_EQ(this, 3);
+ Node* const generator = Parameter(Descriptor::kGenerator);
Node* const awaited = Parameter(Descriptor::kAwaited);
Node* const outer_promise = Parameter(Descriptor::kOuterPromise);
Node* const context = Parameter(Descriptor::kContext);
@@ -143,8 +143,8 @@ TF_BUILTIN(AsyncFunctionAwaitCaught, AsyncFunctionBuiltinsAssembler) {
// Called by the parser from the desugaring of 'await' when catch
// prediction indicates no locally surrounding catch block.
TF_BUILTIN(AsyncFunctionAwaitUncaught, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 2);
- Node* const generator = Parameter(Descriptor::kReceiver);
+ CSA_ASSERT_JS_ARGC_EQ(this, 3);
+ Node* const generator = Parameter(Descriptor::kGenerator);
Node* const awaited = Parameter(Descriptor::kAwaited);
Node* const outer_promise = Parameter(Descriptor::kOuterPromise);
Node* const context = Parameter(Descriptor::kContext);
diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc
index c0e3f2d24d..a42bade80f 100644
--- a/deps/v8/src/builtins/builtins-async-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-gen.cc
@@ -3,7 +3,10 @@
// found in the LICENSE file.
#include "src/builtins/builtins-async-gen.h"
+
#include "src/builtins/builtins-utils-gen.h"
+#include "src/factory-inl.h"
+#include "src/objects/shared-function-info.h"
namespace v8 {
namespace internal {
@@ -195,8 +198,6 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(Node* context,
Node* const code =
LoadObjectField(shared_info, SharedFunctionInfo::kCodeOffset);
StoreObjectFieldNoWriteBarrier(function, JSFunction::kCodeOffset, code);
- StoreObjectFieldRoot(function, JSFunction::kNextFunctionLinkOffset,
- Heap::kUndefinedValueRootIndex);
}
Node* AsyncBuiltinsAssembler::CreateUnwrapClosure(Node* native_context,
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index 76001f735d..230da6bbe7 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -245,7 +245,7 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwaitResumeClosure(
template <typename Descriptor>
void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwait(bool is_catchable) {
- Node* generator = Parameter(Descriptor::kReceiver);
+ Node* generator = Parameter(Descriptor::kGenerator);
Node* value = Parameter(Descriptor::kAwaited);
Node* context = Parameter(Descriptor::kContext);
diff --git a/deps/v8/src/builtins/builtins-bigint.cc b/deps/v8/src/builtins/builtins-bigint.cc
new file mode 100644
index 0000000000..a2526795c3
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-bigint.cc
@@ -0,0 +1,164 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/conversions.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+BUILTIN(BigIntConstructor) {
+ HandleScope scope(isolate);
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+
+ // TODO(jkummerow): Implement properly.
+
+ // Dummy implementation only takes Smi args.
+ if (!value->IsSmi()) return isolate->heap()->undefined_value();
+ int num = Smi::ToInt(*value);
+ return *isolate->factory()->NewBigIntFromInt(num);
+}
+
+BUILTIN(BigIntConstructor_ConstructStub) {
+ HandleScope scope(isolate);
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+ Handle<JSFunction> target = args.target();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+ DCHECK(*target == target->native_context()->bigint_function());
+ Handle<JSObject> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSObject::New(target, new_target));
+
+ // TODO(jkummerow): Implement.
+ USE(value);
+ USE(result);
+
+ UNIMPLEMENTED();
+}
+
+BUILTIN(BigIntParseInt) {
+ HandleScope scope(isolate);
+ Handle<Object> string = args.atOrUndefined(isolate, 1);
+ Handle<Object> radix = args.atOrUndefined(isolate, 2);
+
+ // Fast path: avoid back-and-forth conversion for Smi inputs.
+ if (string->IsSmi() && radix->IsUndefined(isolate)) {
+ int num = Smi::ToInt(*string);
+ return *isolate->factory()->NewBigIntFromInt(num);
+ }
+ // Convert {string} to a String and flatten it.
+ Handle<String> subject;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, subject,
+ Object::ToString(isolate, string));
+ subject = String::Flatten(subject);
+
+ // Convert {radix} to Int32.
+ if (!radix->IsNumber()) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, radix, Object::ToNumber(radix));
+ }
+ int radix32 = DoubleToInt32(radix->Number());
+ if (radix32 != 0 && (radix32 < 2 || radix32 > 36)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewSyntaxError(MessageTemplate::kToRadixFormatRange));
+ }
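+ // A radix of 0 means the base is inferred from the string (e.g. a 0x
+ // prefix selecting base 16).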
+ RETURN_RESULT_OR_FAILURE(isolate, StringToBigInt(isolate, subject, radix32));
+}
+
+BUILTIN(BigIntAsUintN) {
+ HandleScope scope(isolate);
+ Handle<Object> bits_obj = args.atOrUndefined(isolate, 1);
+ Handle<Object> bigint_obj = args.atOrUndefined(isolate, 2);
+
+ // TODO(jkummerow): Implement.
+ USE(bits_obj);
+ USE(bigint_obj);
+
+ UNIMPLEMENTED();
+}
+
+BUILTIN(BigIntAsIntN) {
+ HandleScope scope(isolate);
+ Handle<Object> bits_obj = args.atOrUndefined(isolate, 1);
+ Handle<Object> bigint_obj = args.atOrUndefined(isolate, 2);
+
+ // TODO(jkummerow): Implement.
+ USE(bits_obj);
+ USE(bigint_obj);
+
+ UNIMPLEMENTED();
+}
+
+BUILTIN(BigIntPrototypeToLocaleString) {
+ HandleScope scope(isolate);
+
+ // TODO(jkummerow): Implement.
+
+ UNIMPLEMENTED();
+}
+
+namespace {
+
+MaybeHandle<BigInt> ThisBigIntValue(Isolate* isolate, Handle<Object> value,
+ const char* caller) {
+ // 1. If Type(value) is BigInt, return value.
+ if (value->IsBigInt()) return Handle<BigInt>::cast(value);
+ // 2. If Type(value) is Object and value has a [[BigIntData]] internal slot:
+ if (value->IsJSValue()) {
+ // 2a. Assert: value.[[BigIntData]] is a BigInt value.
+ // 2b. Return value.[[BigIntData]].
+ Object* data = JSValue::cast(*value)->value();
+ if (data->IsBigInt()) return handle(BigInt::cast(data), isolate);
+ }
+ // 3. Throw a TypeError exception.
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kNotGeneric,
+ isolate->factory()->NewStringFromAsciiChecked(caller),
+ isolate->factory()->NewStringFromStaticChars("BigInt")),
+ BigInt);
+}
+
+} // namespace
+
+BUILTIN(BigIntPrototypeToString) {
+ HandleScope scope(isolate);
+ // 1. Let x be ? thisBigIntValue(this value).
+ Handle<BigInt> x;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, x,
+ ThisBigIntValue(isolate, args.receiver(), "BigInt.prototype.toString"));
+ // 2. If radix is not present, let radixNumber be 10.
+ // 3. Else if radix is undefined, let radixNumber be 10.
+ Handle<Object> radix = args.atOrUndefined(isolate, 1);
+ int radix_number;
+ if (radix->IsUndefined(isolate)) {
+ radix_number = 10;
+ } else {
+ // 4. Else, let radixNumber be ? ToInteger(radix).
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, radix,
+ Object::ToInteger(isolate, radix));
+ radix_number = static_cast<int>(radix->Number());
+ }
+ // 5. If radixNumber < 2 or radixNumber > 36, throw a RangeError exception.
+ if (radix_number < 2 || radix_number > 36) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kToRadixFormatRange));
+ }
+ // Return the String representation of this BigInt value using the radix
+ // specified by radixNumber.
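+ // E.g. a BigInt holding 255 stringifies to "ff" at radix 16.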
+ RETURN_RESULT_OR_FAILURE(isolate, BigInt::ToString(x, radix_number));
+}
+
+BUILTIN(BigIntPrototypeValueOf) {
+ HandleScope scope(isolate);
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
+ ThisBigIntValue(isolate, args.receiver(), "BigInt.prototype.valueOf"));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index 90cd1d4e65..ab428e8caa 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -7,8 +7,10 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/globals.h"
+#include "src/heap/heap-inl.h"
#include "src/isolate.h"
#include "src/macro-assembler.h"
+#include "src/objects/arguments.h"
namespace v8 {
namespace internal {
@@ -151,17 +153,9 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
{
// For holey JSArrays we need to check that the array prototype chain
// protector is intact and our prototype is the Array.prototype actually.
- Node* arguments_list_prototype = LoadMapPrototype(arguments_list_map);
- Node* initial_array_prototype = LoadContextElement(
- native_context, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
- GotoIfNot(WordEqual(arguments_list_prototype, initial_array_prototype),
+ GotoIfNot(IsPrototypeInitialArrayPrototype(context, arguments_list_map),
&if_runtime);
- Node* protector_cell = LoadRoot(Heap::kArrayProtectorRootIndex);
- DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
- Branch(
- WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
- SmiConstant(Isolate::kProtectorValid)),
- &if_done, &if_runtime);
+ Branch(IsArrayProtectorCellInvalid(), &if_runtime, &if_done);
}
BIND(&if_arguments);
@@ -283,13 +277,8 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
Node* spread_map = LoadMap(spread);
GotoIfNot(IsJSArrayMap(spread_map), &if_runtime);
- Node* native_context = LoadNativeContext(context);
-
// Check that we have the original ArrayPrototype.
- Node* prototype = LoadMapPrototype(spread_map);
- Node* array_prototype = LoadContextElement(
- native_context, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
- GotoIfNot(WordEqual(prototype, array_prototype), &if_runtime);
+ GotoIfNot(IsPrototypeInitialArrayPrototype(context, spread_map), &if_runtime);
// Check that the ArrayPrototype hasn't been modified in a way that would
// affect iteration.
@@ -301,6 +290,7 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
&if_runtime);
// Check that the map of the initial array iterator hasn't changed.
+ Node* native_context = LoadNativeContext(context);
Node* arr_it_proto_map = LoadMap(LoadContextElement(
native_context, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
Node* initial_map = LoadContextElement(
@@ -323,14 +313,7 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
// Check the ArrayProtector cell for holey arrays.
BIND(&if_holey);
- {
- Node* protector_cell = LoadRoot(Heap::kArrayProtectorRootIndex);
- DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
- Branch(
- WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
- SmiConstant(Isolate::kProtectorValid)),
- &if_done, &if_runtime);
- }
+ { Branch(IsArrayProtectorCellInvalid(), &if_runtime, &if_done); }
BIND(&if_runtime);
{
diff --git a/deps/v8/src/builtins/builtins-callsite.cc b/deps/v8/src/builtins/builtins-callsite.cc
index 24dc946a24..5ce0aa0155 100644
--- a/deps/v8/src/builtins/builtins-callsite.cc
+++ b/deps/v8/src/builtins/builtins-callsite.cc
@@ -9,7 +9,6 @@
#include "src/objects-inl.h"
#include "src/objects/frame-array-inl.h"
#include "src/string-builder.h"
-#include "src/wasm/wasm-module.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index acb4c949ae..4aa7fa310b 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -6,6 +6,7 @@
#include "src/builtins/builtins-iterator-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/code-stub-assembler.h"
+#include "src/factory-inl.h"
#include "src/objects/hash-table.h"
namespace v8 {
@@ -84,6 +85,17 @@ class CollectionsBuiltinsAssembler : public CodeStubAssembler {
Label* entry_found,
Label* not_found);
+ // Specialization for bigints.
+ // The {result} variable will contain the entry index if the key was found,
+ // or the hash code otherwise.
+ void SameValueZeroBigInt(Node* key, Node* candidate_key, Label* if_same,
+ Label* if_not_same);
+ template <typename CollectionType>
+ void FindOrderedHashTableEntryForBigIntKey(Node* context, Node* table,
+ Node* key, Variable* result,
+ Label* entry_found,
+ Label* not_found);
+
// Specialization for string.
// The {result} variable will contain the entry index if the key was found,
// or the hash code otherwise.
@@ -560,6 +572,21 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForHeapNumberKey(
}
template <typename CollectionType>
+void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForBigIntKey(
+ Node* context, Node* table, Node* key, Variable* result, Label* entry_found,
+ Label* not_found) {
+ Node* hash = CallGetHashRaw(key);
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
+ result->Bind(hash);
+ FindOrderedHashTableEntry<CollectionType>(
+ table, hash,
+ [&](Node* other_key, Label* if_same, Label* if_not_same) {
+ SameValueZeroBigInt(key, other_key, if_same, if_not_same);
+ },
+ result, entry_found, not_found);
+}
+
+template <typename CollectionType>
void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForOtherKey(
Node* context, Node* table, Node* key, Variable* result, Label* entry_found,
Label* not_found) {
@@ -606,13 +633,26 @@ void CollectionsBuiltinsAssembler::SameValueZeroString(Node* context,
if_same, if_not_same);
}
+void CollectionsBuiltinsAssembler::SameValueZeroBigInt(Node* key,
+ Node* candidate_key,
+ Label* if_same,
+ Label* if_not_same) {
+ CSA_ASSERT(this, IsBigInt(key));
+ GotoIf(TaggedIsSmi(candidate_key), if_not_same);
+ GotoIfNot(IsBigInt(candidate_key), if_not_same);
+
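+ // Both keys are BigInts here; value equality is delegated to the runtime.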
+ Branch(WordEqual(CallRuntime(Runtime::kBigIntEqual, NoContextConstant(), key,
+ candidate_key),
+ TrueConstant()),
+ if_same, if_not_same);
+}
+
void CollectionsBuiltinsAssembler::SameValueZeroHeapNumber(Node* key_float,
Node* candidate_key,
Label* if_same,
Label* if_not_same) {
Label if_smi(this), if_keyisnan(this);
- // If the candidate is not a string, the keys are not equal.
GotoIf(TaggedIsSmi(candidate_key), &if_smi);
GotoIfNot(IsHeapNumber(candidate_key), if_not_same);
@@ -837,7 +877,7 @@ std::tuple<Node*, Node*, Node*> CollectionsBuiltinsAssembler::NextSkipHoles(
var_index.value());
}
-TF_BUILTIN(MapGet, CollectionsBuiltinsAssembler) {
+TF_BUILTIN(MapPrototypeGet, CollectionsBuiltinsAssembler) {
Node* const receiver = Parameter(Descriptor::kReceiver);
Node* const key = Parameter(Descriptor::kKey);
Node* const context = Parameter(Descriptor::kContext);
@@ -845,20 +885,24 @@ TF_BUILTIN(MapGet, CollectionsBuiltinsAssembler) {
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.get");
Node* const table = LoadObjectField(receiver, JSMap::kTableOffset);
- Node* index = CallBuiltin(Builtins::kMapLookupHashIndex, context, table, key);
+ Node* index =
+ CallBuiltin(Builtins::kFindOrderedHashMapEntry, context, table, key);
Label if_found(this), if_not_found(this);
Branch(SmiGreaterThanOrEqual(index, SmiConstant(0)), &if_found,
&if_not_found);
BIND(&if_found);
- Return(LoadFixedArrayElement(table, SmiUntag(index)));
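+ // FindOrderedHashMapEntry returns the entry's index; offset past the hash
+ // table header to reach the entry's value slot.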
+ Return(LoadFixedArrayElement(
+ table, SmiUntag(index),
+ (OrderedHashMap::kHashTableStartIndex + OrderedHashMap::kValueOffset) *
+ kPointerSize));
BIND(&if_not_found);
Return(UndefinedConstant());
}
-TF_BUILTIN(MapHas, CollectionsBuiltinsAssembler) {
+TF_BUILTIN(MapPrototypeHas, CollectionsBuiltinsAssembler) {
Node* const receiver = Parameter(Descriptor::kReceiver);
Node* const key = Parameter(Descriptor::kKey);
Node* const context = Parameter(Descriptor::kContext);
@@ -866,7 +910,8 @@ TF_BUILTIN(MapHas, CollectionsBuiltinsAssembler) {
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.has");
Node* const table = LoadObjectField(receiver, JSMap::kTableOffset);
- Node* index = CallBuiltin(Builtins::kMapLookupHashIndex, context, table, key);
+ Node* index =
+ CallBuiltin(Builtins::kFindOrderedHashMapEntry, context, table, key);
Label if_found(this), if_not_found(this);
Branch(SmiGreaterThanOrEqual(index, SmiConstant(0)), &if_found,
@@ -896,7 +941,7 @@ Node* CollectionsBuiltinsAssembler::NormalizeNumberKey(Node* const key) {
return result.value();
}
-TF_BUILTIN(MapSet, CollectionsBuiltinsAssembler) {
+TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) {
Node* const receiver = Parameter(Descriptor::kReceiver);
Node* key = Parameter(Descriptor::kKey);
Node* const value = Parameter(Descriptor::kValue);
@@ -1008,7 +1053,7 @@ void CollectionsBuiltinsAssembler::StoreOrderedHashMapNewEntry(
SmiAdd(number_of_elements, SmiConstant(1)));
}
-TF_BUILTIN(MapDelete, CollectionsBuiltinsAssembler) {
+TF_BUILTIN(MapPrototypeDelete, CollectionsBuiltinsAssembler) {
Node* const receiver = Parameter(Descriptor::kReceiver);
Node* key = Parameter(Descriptor::kKey);
Node* const context = Parameter(Descriptor::kContext);
@@ -1067,7 +1112,7 @@ TF_BUILTIN(MapDelete, CollectionsBuiltinsAssembler) {
Return(TrueConstant());
}
-TF_BUILTIN(SetAdd, CollectionsBuiltinsAssembler) {
+TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
Node* const receiver = Parameter(Descriptor::kReceiver);
Node* key = Parameter(Descriptor::kKey);
Node* const context = Parameter(Descriptor::kContext);
@@ -1171,7 +1216,7 @@ void CollectionsBuiltinsAssembler::StoreOrderedHashSetNewEntry(
SmiAdd(number_of_elements, SmiConstant(1)));
}
-TF_BUILTIN(SetDelete, CollectionsBuiltinsAssembler) {
+TF_BUILTIN(SetPrototypeDelete, CollectionsBuiltinsAssembler) {
Node* const receiver = Parameter(Descriptor::kReceiver);
Node* key = Parameter(Descriptor::kKey);
Node* const context = Parameter(Descriptor::kContext);
@@ -1398,7 +1443,7 @@ TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
}
}
-TF_BUILTIN(SetHas, CollectionsBuiltinsAssembler) {
+TF_BUILTIN(SetPrototypeHas, CollectionsBuiltinsAssembler) {
Node* const receiver = Parameter(Descriptor::kReceiver);
Node* const key = Parameter(Descriptor::kKey);
Node* const context = Parameter(Descriptor::kContext);
@@ -1411,11 +1456,16 @@ TF_BUILTIN(SetHas, CollectionsBuiltinsAssembler) {
IntPtrConstant(0));
VARIABLE(result, MachineRepresentation::kTaggedSigned, IntPtrConstant(0));
Label if_key_smi(this), if_key_string(this), if_key_heap_number(this),
- entry_found(this), not_found(this), done(this);
+ if_key_bigint(this), entry_found(this), not_found(this), done(this);
GotoIf(TaggedIsSmi(key), &if_key_smi);
- GotoIf(IsString(key), &if_key_string);
- GotoIf(IsHeapNumber(key), &if_key_heap_number);
+
+ Node* key_map = LoadMap(key);
+ Node* key_instance_type = LoadMapInstanceType(key_map);
+
+ GotoIf(IsStringInstanceType(key_instance_type), &if_key_string);
+ GotoIf(IsHeapNumberMap(key_map), &if_key_heap_number);
+ GotoIf(IsBigIntInstanceType(key_instance_type), &if_key_bigint);
FindOrderedHashTableEntryForOtherKey<OrderedHashSet>(
context, table, key, &entry_start_position, &entry_found, &not_found);
@@ -1438,6 +1488,12 @@ TF_BUILTIN(SetHas, CollectionsBuiltinsAssembler) {
context, table, key, &entry_start_position, &entry_found, &not_found);
}
+ BIND(&if_key_bigint);
+ {
+ FindOrderedHashTableEntryForBigIntKey<OrderedHashSet>(
+ context, table, key, &entry_start_position, &entry_found, &not_found);
+ }
+
BIND(&entry_found);
Return(TrueConstant());
@@ -1598,11 +1654,17 @@ template <typename CollectionType>
void CollectionsBuiltinsAssembler::TryLookupOrderedHashTableIndex(
Node* const table, Node* const key, Node* const context, Variable* result,
Label* if_entry_found, Label* if_not_found) {
- Label if_key_smi(this), if_key_string(this), if_key_heap_number(this);
+ Label if_key_smi(this), if_key_string(this), if_key_heap_number(this),
+ if_key_bigint(this);
GotoIf(TaggedIsSmi(key), &if_key_smi);
- GotoIf(IsString(key), &if_key_string);
- GotoIf(IsHeapNumber(key), &if_key_heap_number);
+
+ Node* key_map = LoadMap(key);
+ Node* key_instance_type = LoadMapInstanceType(key_map);
+
+ GotoIf(IsStringInstanceType(key_instance_type), &if_key_string);
+ GotoIf(IsHeapNumberMap(key_map), &if_key_heap_number);
+ GotoIf(IsBigIntInstanceType(key_instance_type), &if_key_bigint);
FindOrderedHashTableEntryForOtherKey<CollectionType>(
context, table, key, result, if_entry_found, if_not_found);
@@ -1624,9 +1686,15 @@ void CollectionsBuiltinsAssembler::TryLookupOrderedHashTableIndex(
FindOrderedHashTableEntryForHeapNumberKey<CollectionType>(
context, table, key, result, if_entry_found, if_not_found);
}
+
+ BIND(&if_key_bigint);
+ {
+ FindOrderedHashTableEntryForBigIntKey<CollectionType>(
+ context, table, key, result, if_entry_found, if_not_found);
+ }
}
-TF_BUILTIN(MapLookupHashIndex, CollectionsBuiltinsAssembler) {
+TF_BUILTIN(FindOrderedHashMapEntry, CollectionsBuiltinsAssembler) {
Node* const table = Parameter(Descriptor::kTable);
Node* const key = Parameter(Descriptor::kKey);
Node* const context = Parameter(Descriptor::kContext);
@@ -1639,10 +1707,7 @@ TF_BUILTIN(MapLookupHashIndex, CollectionsBuiltinsAssembler) {
table, key, context, &entry_start_position, &entry_found, &not_found);
BIND(&entry_found);
- Node* index = IntPtrAdd(entry_start_position.value(),
- IntPtrConstant(OrderedHashMap::kHashTableStartIndex +
- OrderedHashMap::kValueOffset));
- Return(SmiTag(index));
+ Return(SmiTag(entry_start_position.value()));
BIND(&not_found);
Return(SmiConstant(-1));
diff --git a/deps/v8/src/builtins/builtins-collections.cc b/deps/v8/src/builtins/builtins-collections.cc
index 0497eaaac1..e3c97d3841 100644
--- a/deps/v8/src/builtins/builtins-collections.cc
+++ b/deps/v8/src/builtins/builtins-collections.cc
@@ -9,7 +9,7 @@
namespace v8 {
namespace internal {
-BUILTIN(MapClear) {
+BUILTIN(MapPrototypeClear) {
HandleScope scope(isolate);
const char* const kMethodName = "Map.prototype.clear";
CHECK_RECEIVER(JSMap, map, kMethodName);
@@ -17,7 +17,7 @@ BUILTIN(MapClear) {
return isolate->heap()->undefined_value();
}
-BUILTIN(SetClear) {
+BUILTIN(SetPrototypeClear) {
HandleScope scope(isolate);
const char* const kMethodName = "Set.prototype.clear";
CHECK_RECEIVER(JSSet, set, kMethodName);
diff --git a/deps/v8/src/builtins/builtins-console.cc b/deps/v8/src/builtins/builtins-console.cc
index dccb774e4a..d55f26163c 100644
--- a/deps/v8/src/builtins/builtins-console.cc
+++ b/deps/v8/src/builtins/builtins-console.cc
@@ -5,6 +5,7 @@
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
+#include "src/api.h"
#include "src/debug/interface-types.h"
#include "src/objects-inl.h"
@@ -61,14 +62,13 @@ void ConsoleCall(
(isolate->console_delegate()->*func)(
wrapper,
v8::debug::ConsoleContext(context_id, Utils::ToLocal(context_name)));
- CHECK(!isolate->has_pending_exception());
- CHECK(!isolate->has_scheduled_exception());
}
} // namespace
#define CONSOLE_BUILTIN_IMPLEMENTATION(call, name) \
BUILTIN(Console##call) { \
ConsoleCall(isolate, args, &debug::ConsoleDelegate::call); \
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate); \
return isolate->heap()->undefined_value(); \
}
CONSOLE_METHOD_LIST(CONSOLE_BUILTIN_IMPLEMENTATION)
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index 5021365cd4..67a87271c2 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -151,33 +151,6 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
BIND(&cell_done);
}
- {
- // If the feedback vector has optimized code, check whether it is marked
- // for deopt and, if so, clear the slot.
- Label optimized_code_ok(this), clear_optimized_code(this);
- Node* literals = LoadObjectField(literals_cell, Cell::kValueOffset);
- GotoIfNot(IsFeedbackVector(literals), &optimized_code_ok);
- Node* optimized_code_cell_slot =
- LoadObjectField(literals, FeedbackVector::kOptimizedCodeOffset);
- GotoIf(TaggedIsSmi(optimized_code_cell_slot), &optimized_code_ok);
-
- Node* optimized_code =
- LoadWeakCellValue(optimized_code_cell_slot, &clear_optimized_code);
- Node* code_flags = LoadObjectField(
- optimized_code, Code::kKindSpecificFlags1Offset, MachineType::Uint32());
- Node* marked_for_deopt =
- DecodeWord32<Code::MarkedForDeoptimizationField>(code_flags);
- Branch(Word32Equal(marked_for_deopt, Int32Constant(0)), &optimized_code_ok,
- &clear_optimized_code);
-
- // Cell is empty or code is marked for deopt, clear the optimized code slot.
- BIND(&clear_optimized_code);
- StoreObjectFieldNoWriteBarrier(
- literals, FeedbackVector::kOptimizedCodeOffset, SmiConstant(0));
- Goto(&optimized_code_ok);
-
- BIND(&optimized_code_ok);
- }
StoreObjectFieldNoWriteBarrier(result, JSFunction::kFeedbackVectorOffset,
literals_cell);
StoreObjectFieldNoWriteBarrier(
@@ -189,9 +162,6 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
isolate->builtins()->builtin(Builtins::kCompileLazy));
Node* lazy_builtin = HeapConstant(lazy_builtin_handle);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kCodeOffset, lazy_builtin);
- StoreObjectFieldNoWriteBarrier(result, JSFunction::kNextFunctionLinkOffset,
- UndefinedConstant());
-
return result;
}
@@ -364,17 +334,14 @@ TF_BUILTIN(FastNewFunctionContextFunction, ConstructorBuiltinsAssembler) {
ScopeType::FUNCTION_SCOPE));
}
-Node* ConstructorBuiltinsAssembler::EmitFastCloneRegExp(Node* closure,
- Node* literal_index,
- Node* pattern,
- Node* flags,
- Node* context) {
+Node* ConstructorBuiltinsAssembler::EmitCreateRegExpLiteral(
+ Node* feedback_vector, Node* slot, Node* pattern, Node* flags,
+ Node* context) {
Label call_runtime(this, Label::kDeferred), end(this);
VARIABLE(result, MachineRepresentation::kTagged);
- Node* feedback_vector = LoadFeedbackVector(closure);
Node* literal_site =
- LoadFeedbackVectorSlot(feedback_vector, literal_index, 0, SMI_PARAMETERS);
+ LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS);
GotoIf(NotHasBoilerplate(literal_site), &call_runtime);
{
Node* boilerplate = literal_site;
@@ -391,8 +358,8 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneRegExp(Node* closure,
BIND(&call_runtime);
{
- result.Bind(CallRuntime(Runtime::kCreateRegExpLiteral, context, closure,
- literal_index, pattern, flags));
+ result.Bind(CallRuntime(Runtime::kCreateRegExpLiteral, context,
+ feedback_vector, SmiTag(slot), pattern, flags));
Goto(&end);
}
@@ -400,14 +367,15 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneRegExp(Node* closure,
return result.value();
}
-TF_BUILTIN(FastCloneRegExp, ConstructorBuiltinsAssembler) {
- Node* closure = Parameter(FastCloneRegExpDescriptor::kClosure);
- Node* literal_index = Parameter(FastCloneRegExpDescriptor::kLiteralIndex);
- Node* pattern = Parameter(FastCloneRegExpDescriptor::kPattern);
- Node* flags = Parameter(FastCloneRegExpDescriptor::kFlags);
- Node* context = Parameter(FastCloneRegExpDescriptor::kContext);
-
- Return(EmitFastCloneRegExp(closure, literal_index, pattern, flags, context));
+TF_BUILTIN(CreateRegExpLiteral, ConstructorBuiltinsAssembler) {
+ Node* feedback_vector = Parameter(Descriptor::kFeedbackVector);
+ Node* slot = SmiUntag(Parameter(Descriptor::kSlot));
+ Node* pattern = Parameter(Descriptor::kPattern);
+ Node* flags = Parameter(Descriptor::kFlags);
+ Node* context = Parameter(Descriptor::kContext);
+ Node* result =
+ EmitCreateRegExpLiteral(feedback_vector, slot, pattern, flags, context);
+ Return(result);
}
Node* ConstructorBuiltinsAssembler::NonEmptyShallowClone(
@@ -432,16 +400,15 @@ Node* ConstructorBuiltinsAssembler::NonEmptyShallowClone(
return array;
}
-Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowArray(
- Node* closure, Node* literal_index, Node* context, Label* call_runtime,
+Node* ConstructorBuiltinsAssembler::EmitCreateShallowArrayLiteral(
+ Node* feedback_vector, Node* slot, Node* context, Label* call_runtime,
AllocationSiteMode allocation_site_mode) {
Label zero_capacity(this), cow_elements(this), fast_elements(this),
return_result(this);
VARIABLE(result, MachineRepresentation::kTagged);
- Node* feedback_vector = LoadFeedbackVector(closure);
Node* allocation_site =
- LoadFeedbackVectorSlot(feedback_vector, literal_index, 0, SMI_PARAMETERS);
+ LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS);
GotoIf(NotHasBoilerplate(allocation_site), call_runtime);
Node* boilerplate = LoadAllocationSiteBoilerplate(allocation_site);
@@ -504,7 +471,7 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowArray(
BIND(&allocate_without_elements);
{
Node* array = AllocateUninitializedJSArrayWithoutElements(
- PACKED_ELEMENTS, boilerplate_map, length.value(), allocation_site);
+ boilerplate_map, length.value(), allocation_site);
StoreObjectField(array, JSObject::kElementsOffset, elements.value());
result.Bind(array);
Goto(&return_result);
@@ -514,49 +481,32 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowArray(
return result.value();
}
-void ConstructorBuiltinsAssembler::CreateFastCloneShallowArrayBuiltin(
- AllocationSiteMode allocation_site_mode) {
- Node* closure = Parameter(FastCloneShallowArrayDescriptor::kClosure);
- Node* literal_index =
- Parameter(FastCloneShallowArrayDescriptor::kLiteralIndex);
- Node* constant_elements =
- Parameter(FastCloneShallowArrayDescriptor::kConstantElements);
- Node* context = Parameter(FastCloneShallowArrayDescriptor::kContext);
+TF_BUILTIN(CreateShallowArrayLiteral, ConstructorBuiltinsAssembler) {
+ Node* feedback_vector = Parameter(Descriptor::kFeedbackVector);
+ Node* slot = SmiUntag(Parameter(Descriptor::kSlot));
+ Node* constant_elements = Parameter(Descriptor::kConstantElements);
+ Node* context = Parameter(Descriptor::kContext);
Label call_runtime(this, Label::kDeferred);
- Return(EmitFastCloneShallowArray(closure, literal_index, context,
- &call_runtime, allocation_site_mode));
+ Return(EmitCreateShallowArrayLiteral(feedback_vector, slot, context,
+ &call_runtime,
+ DONT_TRACK_ALLOCATION_SITE));
BIND(&call_runtime);
{
Comment("call runtime");
- int flags = AggregateLiteral::kIsShallow;
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- // Force initial allocation sites on the initial literal setup step.
- flags |= AggregateLiteral::kNeedsInitialAllocationSite;
- } else {
- flags |= AggregateLiteral::kDisableMementos;
- }
- Return(CallRuntime(Runtime::kCreateArrayLiteral, context, closure,
- literal_index, constant_elements, SmiConstant(flags)));
+ int const flags =
+ AggregateLiteral::kDisableMementos | AggregateLiteral::kIsShallow;
+ Return(CallRuntime(Runtime::kCreateArrayLiteral, context, feedback_vector,
+ SmiTag(slot), constant_elements, SmiConstant(flags)));
}
}
-TF_BUILTIN(FastCloneShallowArrayTrack, ConstructorBuiltinsAssembler) {
- CreateFastCloneShallowArrayBuiltin(TRACK_ALLOCATION_SITE);
-}
-
-TF_BUILTIN(FastCloneShallowArrayDontTrack, ConstructorBuiltinsAssembler) {
- CreateFastCloneShallowArrayBuiltin(DONT_TRACK_ALLOCATION_SITE);
-}
-
Node* ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral(
- Node* closure, Node* literal_index, Node* context) {
+ Node* feedback_vector, Node* slot, Node* context) {
// Array literals always have a valid AllocationSite to properly track
// elements transitions.
- Node* feedback_vector = LoadFeedbackVector(closure);
VARIABLE(allocation_site, MachineRepresentation::kTagged,
- LoadFeedbackVectorSlot(feedback_vector, literal_index, 0,
- SMI_PARAMETERS));
+ LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS));
Label create_empty_array(this),
initialize_allocation_site(this, Label::kDeferred), done(this);
@@ -567,7 +517,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral(
BIND(&initialize_allocation_site);
{
allocation_site.Bind(
- CreateAllocationSiteInFeedbackVector(feedback_vector, literal_index));
+ CreateAllocationSiteInFeedbackVector(feedback_vector, SmiTag(slot)));
Goto(&create_empty_array);
}
@@ -593,18 +543,17 @@ Node* ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral(
}
TF_BUILTIN(CreateEmptyArrayLiteral, ConstructorBuiltinsAssembler) {
- Node* closure = Parameter(Descriptor::kClosure);
- Node* literal_index = Parameter(Descriptor::kLiteralIndex);
+ Node* feedback_vector = Parameter(Descriptor::kFeedbackVector);
+ Node* slot = SmiUntag(Parameter(Descriptor::kSlot));
Node* context = Parameter(Descriptor::kContext);
- Node* result = EmitCreateEmptyArrayLiteral(closure, literal_index, context);
+ Node* result = EmitCreateEmptyArrayLiteral(feedback_vector, slot, context);
Return(result);
}
-Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowObject(
- Label* call_runtime, Node* closure, Node* literals_index) {
- Node* feedback_vector = LoadFeedbackVector(closure);
- Node* allocation_site = LoadFeedbackVectorSlot(
- feedback_vector, literals_index, 0, SMI_PARAMETERS);
+Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
+ Node* feedback_vector, Node* slot, Label* call_runtime) {
+ Node* allocation_site =
+ LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS);
GotoIf(NotHasBoilerplate(allocation_site), call_runtime);
Node* boilerplate = LoadAllocationSiteBoilerplate(allocation_site);
@@ -760,12 +709,12 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowObject(
return copy;
}
-TF_BUILTIN(FastCloneShallowObject, ConstructorBuiltinsAssembler) {
+TF_BUILTIN(CreateShallowObjectLiteral, ConstructorBuiltinsAssembler) {
Label call_runtime(this);
- Node* closure = Parameter(Descriptor::kClosure);
- Node* literals_index = Parameter(Descriptor::kLiteralIndex);
+ Node* feedback_vector = Parameter(Descriptor::kFeedbackVector);
+ Node* slot = SmiUntag(Parameter(Descriptor::kSlot));
Node* copy =
- EmitFastCloneShallowObject(&call_runtime, closure, literals_index);
+ EmitCreateShallowObjectLiteral(feedback_vector, slot, &call_runtime);
Return(copy);
BIND(&call_runtime);
@@ -773,11 +722,11 @@ TF_BUILTIN(FastCloneShallowObject, ConstructorBuiltinsAssembler) {
Parameter(Descriptor::kBoilerplateDescription);
Node* flags = Parameter(Descriptor::kFlags);
Node* context = Parameter(Descriptor::kContext);
- TailCallRuntime(Runtime::kCreateObjectLiteral, context, closure,
- literals_index, boilerplate_description, flags);
+ TailCallRuntime(Runtime::kCreateObjectLiteral, context, feedback_vector,
+ SmiTag(slot), boilerplate_description, flags);
}
-// Used by the CreateEmptyObjectLiteral stub and bytecode.
+// Used by the CreateEmptyObjectLiteral bytecode and the Object constructor.
Node* ConstructorBuiltinsAssembler::EmitCreateEmptyObjectLiteral(
Node* context) {
Node* native_context = LoadNativeContext(context);
@@ -786,17 +735,185 @@ Node* ConstructorBuiltinsAssembler::EmitCreateEmptyObjectLiteral(
Node* map = LoadObjectField(object_function,
JSFunction::kPrototypeOrInitialMapOffset);
CSA_ASSERT(this, IsMap(map));
+ // Ensure that slack tracking is disabled for the map.
+ STATIC_ASSERT(Map::kNoSlackTracking == 0);
+ CSA_ASSERT(this,
+ IsClearWord32<Map::ConstructionCounter>(LoadMapBitField3(map)));
Node* empty_fixed_array = EmptyFixedArrayConstant();
Node* result =
AllocateJSObjectFromMap(map, empty_fixed_array, empty_fixed_array);
- HandleSlackTracking(context, result, map, JSObject::kHeaderSize);
return result;
}
-TF_BUILTIN(CreateEmptyObjectLiteral, ConstructorBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* result = EmitCreateEmptyObjectLiteral(context);
- Return(result);
+TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) {
+ int const kValueArg = 0;
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* value = args.GetOptionalArgumentValue(kValueArg);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+
+ CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));
+
+ Label return_to_object(this);
+
+ GotoIf(Word32And(IsNotUndefined(value), IsNotNull(value)), &return_to_object);
+
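+ // E.g. Object() and Object(undefined) return a fresh empty object, while
+ // Object(42) falls through to ToObject below.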
+ args.PopAndReturn(EmitCreateEmptyObjectLiteral(context));
+
+ BIND(&return_to_object);
+ args.PopAndReturn(CallBuiltin(Builtins::kToObject, context, value));
}
+
+TF_BUILTIN(ObjectConstructor_ConstructStub, ConstructorBuiltinsAssembler) {
+ int const kValueArg = 0;
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* value = args.GetOptionalArgumentValue(kValueArg);
+
+ Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
+ MachineType::TaggedPointer());
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+
+ CSA_ASSERT(this, IsNotUndefined(new_target));
+
+ Label return_to_object(this);
+
+ GotoIf(Word32And(WordEqual(target, new_target),
+ Word32And(IsNotUndefined(value), IsNotNull(value))),
+ &return_to_object);
+ args.PopAndReturn(EmitFastNewObject(context, target, new_target));
+
+ BIND(&return_to_object);
+ args.PopAndReturn(CallBuiltin(Builtins::kToObject, context, value));
+}
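
The construct stub adds one condition: the ToObject shortcut only fires for a direct `new Object(value)`, where target and new.target coincide. When new.target differs (subclassing or Reflect.construct), the stub always allocates through EmitFastNewObject so the derived map is used. A sketch of the predicate:

    #include <cassert>
    #include <string>

    struct ConstructInputs {
      bool target_is_new_target;  // WordEqual(target, new_target)
      bool value_is_nullish;      // value is undefined or null
    };

    std::string ObjectConstruct(const ConstructInputs& in) {
      if (in.target_is_new_target && !in.value_is_nullish)
        return "ToObject(value)";
      return "EmitFastNewObject(context, target, new_target)";
    }

    int main() {
      assert(ObjectConstruct({true, false}) == "ToObject(value)");
      assert(ObjectConstruct({false, false}) ==
             "EmitFastNewObject(context, target, new_target)");
    }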
+
+TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) {
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Label return_zero(this);
+
+ GotoIf(IntPtrEqual(IntPtrConstant(0), argc), &return_zero);
+
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ args.PopAndReturn(ToNumber(context, args.AtIndex(0)));
+
+ BIND(&return_zero);
+ args.PopAndReturn(SmiConstant(0));
+}
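
This is the whole of `Number(x)` called as a function: zero arguments yield +0, one argument goes through ToNumber. A tiny model, with std::stod standing in for ToNumber on the assumption of numeric-string input:

    #include <cassert>
    #include <optional>
    #include <string>

    double NumberCall(const std::optional<std::string>& arg) {
      if (!arg) return 0.0;     // argc == 0 -> SmiConstant(0)
      return std::stod(*arg);   // stand-in for ToNumber, sketch only
    }

    int main() {
      assert(NumberCall(std::nullopt) == 0.0);
      assert(NumberCall(std::string("42")) == 42.0);
    }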
+
+TF_BUILTIN(NumberConstructor_ConstructStub, ConstructorBuiltinsAssembler) {
+ Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
+ MachineType::TaggedPointer());
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Label return_zero(this), wrap(this);
+
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+
+ GotoIf(IntPtrEqual(IntPtrConstant(0), argc), &return_zero);
+ {
+ var_result.Bind(ToNumber(context, args.AtIndex(0)));
+ Goto(&wrap);
+ }
+
+ BIND(&return_zero);
+ {
+ var_result.Bind(SmiConstant(0));
+ Goto(&wrap);
+ }
+
+ BIND(&wrap);
+ {
+ Node* result = EmitFastNewObject(context, target, new_target);
+ StoreObjectField(result, JSValue::kValueOffset, var_result.value());
+ args.PopAndReturn(result);
+ }
+}
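
The construct stub computes the same primitive and then boxes it: EmitFastNewObject allocates the wrapper, and the primitive is stored at JSValue::kValueOffset, which is why `new Number(7)` is an object rather than a number. A sketch of that ordering:

    #include <cassert>

    struct NumberWrapper { double value; };  // stands in for a JSValue

    NumberWrapper NumberConstruct(bool has_arg, double to_number_result) {
      double primitive = has_arg ? to_number_result : 0.0;  // same as [[Call]]
      return NumberWrapper{primitive};  // allocate wrapper, store value slot
    }

    int main() {
      assert(NumberConstruct(false, 0.0).value == 0.0);
      assert(NumberConstruct(true, 7.0).value == 7.0);
    }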
+
+Node* ConstructorBuiltinsAssembler::EmitConstructString(Node* argc,
+ CodeStubArguments& args,
+ Node* context,
+ bool convert_symbol) {
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+
+ Label return_empty_string(this), to_string(this),
+ check_symbol(this, Label::kDeferred), done(this);
+
+ GotoIf(IntPtrEqual(IntPtrConstant(0), argc), &return_empty_string);
+
+ Node* argument = args.AtIndex(0);
+
+ GotoIf(TaggedIsSmi(argument), &to_string);
+
+ Node* instance_type = LoadInstanceType(argument);
+
+ Label* non_string = convert_symbol ? &check_symbol : &to_string;
+ GotoIfNot(IsStringInstanceType(instance_type), non_string);
+ {
+ var_result.Bind(argument);
+ Goto(&done);
+ }
+
+ if (convert_symbol) {
+ BIND(&check_symbol);
+ GotoIfNot(IsSymbolInstanceType(instance_type), &to_string);
+ {
+ var_result.Bind(
+ CallRuntime(Runtime::kSymbolDescriptiveString, context, argument));
+ Goto(&done);
+ }
+ }
+
+ BIND(&to_string);
+ {
+ var_result.Bind(ToString(context, argument));
+ Goto(&done);
+ }
+
+ BIND(&return_empty_string);
+ {
+ var_result.Bind(EmptyStringConstant());
+ Goto(&done);
+ }
+
+ BIND(&done);
+ return var_result.value();
+}
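
The convert_symbol flag is the one behavioral difference between the two String entry points: the [[Call]] path converts a symbol to its descriptive string, while the construct path leaves symbols to ToString, which throws. A hedged model of the dispatch:

    #include <cassert>
    #include <stdexcept>
    #include <string>

    enum class Kind { None, String, Symbol, Other };

    // With convert_symbol set (the String [[Call]] path), symbols become a
    // descriptive string; without it (the [[Construct]] path), they fall
    // through to ToString, which throws for symbols.
    std::string ConstructString(Kind arg, bool convert_symbol) {
      if (arg == Kind::None) return "";               // argc == 0
      if (arg == Kind::String) return "<unchanged>";  // already a string
      if (arg == Kind::Symbol) {
        if (convert_symbol) return "Symbol(desc)";    // SymbolDescriptiveString
        throw std::runtime_error("TypeError: Cannot convert a Symbol");
      }
      return "<ToString(arg)>";
    }

    int main() {
      assert(ConstructString(Kind::None, true).empty());
      assert(ConstructString(Kind::Symbol, true) == "Symbol(desc)");
      try { ConstructString(Kind::Symbol, false); assert(false); }
      catch (const std::runtime_error&) {}
    }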
+
+TF_BUILTIN(StringConstructor, ConstructorBuiltinsAssembler) {
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ args.PopAndReturn(EmitConstructString(argc, args, context, true));
+}
+
+TF_BUILTIN(StringConstructor_ConstructStub, ConstructorBuiltinsAssembler) {
+ Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
+ MachineType::TaggedPointer());
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* string = EmitConstructString(argc, args, context, false);
+ Node* result = EmitFastNewObject(context, target, new_target);
+ StoreObjectField(result, JSValue::kValueOffset, string);
+ args.PopAndReturn(result);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.h b/deps/v8/src/builtins/builtins-constructor-gen.h
index e65c61e849..b889100148 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.h
+++ b/deps/v8/src/builtins/builtins-constructor-gen.h
@@ -20,19 +20,17 @@ class ConstructorBuiltinsAssembler : public CodeStubAssembler {
Node* EmitFastNewFunctionContext(Node* closure, Node* slots, Node* context,
ScopeType scope_type);
- Node* EmitFastCloneRegExp(Node* closure, Node* literal_index, Node* pattern,
- Node* flags, Node* context);
- Node* EmitFastCloneShallowArray(Node* closure, Node* literal_index,
- Node* context, Label* call_runtime,
- AllocationSiteMode allocation_site_mode);
+ Node* EmitCreateRegExpLiteral(Node* feedback_vector, Node* slot,
+ Node* pattern, Node* flags, Node* context);
+ Node* EmitCreateShallowArrayLiteral(Node* feedback_vector, Node* slot,
+ Node* context, Label* call_runtime,
+ AllocationSiteMode allocation_site_mode);
- Node* EmitCreateEmptyArrayLiteral(Node* closure, Node* iteral_index,
+ Node* EmitCreateEmptyArrayLiteral(Node* feedback_vector, Node* slot,
Node* context);
- void CreateFastCloneShallowArrayBuiltin(
- AllocationSiteMode allocation_site_mode);
- Node* EmitFastCloneShallowObject(Label* call_runtime, Node* closure,
- Node* literals_index);
+ Node* EmitCreateShallowObjectLiteral(Node* feedback_vector, Node* slot,
+ Label* call_runtime);
Node* EmitCreateEmptyObjectLiteral(Node* context);
Node* EmitFastNewObject(Node* context, Node* target, Node* new_target);
@@ -40,6 +38,9 @@ class ConstructorBuiltinsAssembler : public CodeStubAssembler {
Node* EmitFastNewObject(Node* context, Node* target, Node* new_target,
Label* call_runtime);
+ Node* EmitConstructString(Node* argc, CodeStubArguments& args, Node* context,
+ bool convert_symbol);
+
private:
Node* NonEmptyShallowClone(Node* boilerplate, Node* boilerplate_map,
Node* boilerplate_elements, Node* allocation_site,
diff --git a/deps/v8/src/builtins/builtins-constructor.h b/deps/v8/src/builtins/builtins-constructor.h
index e783e11f77..bb6d13e4b4 100644
--- a/deps/v8/src/builtins/builtins-constructor.h
+++ b/deps/v8/src/builtins/builtins-constructor.h
@@ -22,7 +22,7 @@ class ConstructorBuiltins {
// Maximum number of elements in copied array (chosen so that even an array
// backed by a double backing store will fit into new-space).
static const int kMaximumClonedShallowArrayElements =
- JSArray::kInitialMaxFastElementArray * kPointerSize / kDoubleSize;
+ JSArray::kInitialMaxFastElementArray;
// Maximum number of properties in copied object so that the properties store
// will fit into new-space. This constant is based on the assumption that
// NameDictionaries are 50% over-allocated.
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc
index b897c4dc63..c61ea70cf4 100644
--- a/deps/v8/src/builtins/builtins-conversion-gen.cc
+++ b/deps/v8/src/builtins/builtins-conversion-gen.cc
@@ -312,7 +312,7 @@ TF_BUILTIN(ToInteger, CodeStubAssembler) {
// ES6 section 7.1.13 ToObject (argument)
TF_BUILTIN(ToObject, CodeStubAssembler) {
- Label if_number(this, Label::kDeferred), if_notsmi(this), if_jsreceiver(this),
+ Label if_smi(this, Label::kDeferred), if_jsreceiver(this),
if_noconstructor(this, Label::kDeferred), if_wrapjsvalue(this);
Node* context = Parameter(Descriptor::kContext);
@@ -321,13 +321,9 @@ TF_BUILTIN(ToObject, CodeStubAssembler) {
VARIABLE(constructor_function_index_var,
MachineType::PointerRepresentation());
- Branch(TaggedIsSmi(object), &if_number, &if_notsmi);
+ GotoIf(TaggedIsSmi(object), &if_smi);
- BIND(&if_notsmi);
Node* map = LoadMap(object);
-
- GotoIf(IsHeapNumberMap(map), &if_number);
-
Node* instance_type = LoadMapInstanceType(map);
GotoIf(IsJSReceiverInstanceType(instance_type), &if_jsreceiver);
@@ -338,7 +334,7 @@ TF_BUILTIN(ToObject, CodeStubAssembler) {
constructor_function_index_var.Bind(constructor_function_index);
Goto(&if_wrapjsvalue);
- BIND(&if_number);
+ BIND(&if_smi);
constructor_function_index_var.Bind(
IntPtrConstant(Context::NUMBER_FUNCTION_INDEX));
Goto(&if_wrapjsvalue);
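
The net effect of this ToObject cleanup: only Smis keep a dedicated label (they carry no map to inspect), and heap numbers now take the generic wrapper path that reads the constructor-function index off the map. A schematic of the resulting dispatch, not the real lookup:

    #include <cassert>
    #include <string>

    enum class Input { Smi, HeapNumber, JSReceiver, StringPrim };

    std::string ToObjectPath(Input in) {
      if (in == Input::Smi) return "wrap with Number constructor";  // if_smi
      if (in == Input::JSReceiver) return "return as-is";   // if_jsreceiver
      return "wrap via map's constructor function index";   // if_wrapjsvalue
    }

    int main() {
      assert(ToObjectPath(Input::Smi) == "wrap with Number constructor");
      assert(ToObjectPath(Input::HeapNumber) ==
             "wrap via map's constructor function index");
      assert(ToObjectPath(Input::StringPrim) ==
             "wrap via map's constructor function index");
    }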
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index 9b0118ee6a..cc89c4e365 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -19,7 +19,7 @@ namespace internal {
// TFC: Builtin in Turbofan, with CodeStub linkage and custom descriptor.
// Args: name, interface descriptor, return_size
// TFH: Handlers in Turbofan, with CodeStub linkage.
-// Args: name, code kind, extra IC state, interface descriptor
+// Args: name, interface descriptor
// ASM: Builtin in platform-dependent assembly.
// Args: name
@@ -73,13 +73,11 @@ namespace internal {
TFC(FastNewClosure, FastNewClosure, 1) \
TFC(FastNewFunctionContextEval, FastNewFunctionContext, 1) \
TFC(FastNewFunctionContextFunction, FastNewFunctionContext, 1) \
- TFC(FastNewSloppyArguments, FastNewArguments, 1) \
- TFC(FastCloneRegExp, FastCloneRegExp, 1) \
- TFC(FastCloneShallowArrayTrack, FastCloneShallowArray, 1) \
- TFC(FastCloneShallowArrayDontTrack, FastCloneShallowArray, 1) \
- TFS(CreateEmptyArrayLiteral, kClosure, kLiteralIndex) \
- TFS(CreateEmptyObjectLiteral, kClosure) \
- TFC(FastCloneShallowObject, FastCloneShallowObject, 1) \
+ TFS(CreateRegExpLiteral, kFeedbackVector, kSlot, kPattern, kFlags) \
+ TFS(CreateEmptyArrayLiteral, kFeedbackVector, kSlot) \
+ TFS(CreateShallowArrayLiteral, kFeedbackVector, kSlot, kConstantElements) \
+ TFS(CreateShallowObjectLiteral, kFeedbackVector, kSlot, \
+ kBoilerplateDescription, kFlags) \
/* ES6 section 9.5.14 [[Construct]] ( argumentsList, newTarget) */ \
TFC(ConstructProxy, ConstructTrampoline, 1) \
\
@@ -121,11 +119,11 @@ namespace internal {
\
/* Code life-cycle */ \
ASM(CompileLazy) \
+ ASM(CompileLazyDeoptimizedCode) \
ASM(CheckOptimizationMarker) \
+ ASM(DeserializeLazy) \
ASM(InstantiateAsmJs) \
ASM(NotifyDeoptimized) \
- ASM(NotifySoftDeoptimized) \
- ASM(NotifyLazyDeoptimized) \
ASM(NotifyBuiltinContinuation) \
\
/* Trampolines called when returning from a deoptimization that expects */ \
@@ -168,7 +166,7 @@ namespace internal {
TFS(CopyFastSmiOrObjectElements, kObject) \
TFC(GrowFastDoubleElements, GrowArrayElements, 1) \
TFC(GrowFastSmiOrObjectElements, GrowArrayElements, 1) \
- TFC(NewUnmappedArgumentsElements, NewArgumentsElements, 1) \
+ TFC(NewArgumentsElements, NewArgumentsElements, 1) \
\
/* Debugger */ \
ASM(FrameDropperTrampoline) \
@@ -197,30 +195,27 @@ namespace internal {
TFC(ToBooleanLazyDeoptContinuation, TypeConversionStackParameter, 1) \
\
/* Handlers */ \
- TFH(LoadICProtoArray, BUILTIN, kNoExtraICState, LoadICProtoArray) \
- TFH(LoadICProtoArrayThrowIfNonexistent, BUILTIN, kNoExtraICState, \
- LoadICProtoArray) \
- TFH(KeyedLoadIC_Megamorphic, BUILTIN, kNoExtraICState, LoadWithVector) \
- TFH(KeyedLoadIC_Miss, BUILTIN, kNoExtraICState, LoadWithVector) \
- TFH(KeyedLoadIC_Slow, HANDLER, Code::LOAD_IC, LoadWithVector) \
- TFH(KeyedLoadIC_IndexedString, HANDLER, Code::LOAD_IC, LoadWithVector) \
- TFH(KeyedStoreIC_Megamorphic, BUILTIN, kNoExtraICState, StoreWithVector) \
- TFH(KeyedStoreIC_Megamorphic_Strict, BUILTIN, kNoExtraICState, \
- StoreWithVector) \
- TFH(KeyedStoreIC_Miss, BUILTIN, kNoExtraICState, StoreWithVector) \
- TFH(KeyedStoreIC_Slow, HANDLER, Code::STORE_IC, StoreWithVector) \
- TFH(LoadGlobalIC_Miss, BUILTIN, kNoExtraICState, LoadGlobalWithVector) \
- TFH(LoadGlobalIC_Slow, HANDLER, Code::LOAD_GLOBAL_IC, LoadGlobalWithVector) \
- TFH(LoadField, BUILTIN, kNoExtraICState, LoadField) \
- TFH(LoadIC_FunctionPrototype, HANDLER, Code::LOAD_IC, LoadWithVector) \
+ TFH(LoadICProtoArray, LoadICProtoArray) \
+ TFH(LoadICProtoArrayThrowIfNonexistent, LoadICProtoArray) \
+ TFH(KeyedLoadIC_Megamorphic, LoadWithVector) \
+ TFH(KeyedLoadIC_Miss, LoadWithVector) \
+ TFH(KeyedLoadIC_Slow, LoadWithVector) \
+ TFH(KeyedLoadIC_IndexedString, LoadWithVector) \
+ TFH(KeyedStoreIC_Megamorphic, StoreWithVector) \
+ TFH(KeyedStoreIC_Miss, StoreWithVector) \
+ TFH(KeyedStoreIC_Slow, StoreWithVector) \
+ TFH(LoadGlobalIC_Miss, LoadGlobalWithVector) \
+ TFH(LoadGlobalIC_Slow, LoadGlobalWithVector) \
+ TFH(LoadField, LoadField) \
+ TFH(LoadIC_FunctionPrototype, LoadWithVector) \
ASM(LoadIC_Getter_ForDeopt) \
- TFH(LoadIC_Miss, BUILTIN, kNoExtraICState, LoadWithVector) \
- TFH(LoadIC_Slow, HANDLER, Code::LOAD_IC, LoadWithVector) \
- TFH(LoadIC_Uninitialized, BUILTIN, kNoExtraICState, LoadWithVector) \
- TFH(StoreIC_Miss, BUILTIN, kNoExtraICState, StoreWithVector) \
+ TFH(LoadIC_Miss, LoadWithVector) \
+ TFH(LoadIC_Slow, LoadWithVector) \
+ TFH(LoadIC_StringLength, LoadWithVector) \
+ TFH(LoadIC_Uninitialized, LoadWithVector) \
+ TFH(StoreIC_Miss, StoreWithVector) \
ASM(StoreIC_Setter_ForDeopt) \
- TFH(StoreIC_Uninitialized, BUILTIN, kNoExtraICState, StoreWithVector) \
- TFH(StoreICStrict_Uninitialized, BUILTIN, kNoExtraICState, StoreWithVector) \
+ TFH(StoreIC_Uninitialized, StoreWithVector) \
\
/* Promise helpers */ \
TFS(ResolveNativePromise, kPromise, kValue) \
@@ -323,13 +318,23 @@ namespace internal {
CPP(ArrayBufferPrototypeSlice) \
\
/* AsyncFunction */ \
- TFJ(AsyncFunctionAwaitCaught, 2, kAwaited, kOuterPromise) \
- TFJ(AsyncFunctionAwaitUncaught, 2, kAwaited, kOuterPromise) \
+ TFJ(AsyncFunctionAwaitCaught, 3, kGenerator, kAwaited, kOuterPromise) \
+ TFJ(AsyncFunctionAwaitUncaught, 3, kGenerator, kAwaited, kOuterPromise) \
TFJ(AsyncFunctionAwaitRejectClosure, 1, kSentError) \
TFJ(AsyncFunctionAwaitResolveClosure, 1, kSentValue) \
TFJ(AsyncFunctionPromiseCreate, 0) \
TFJ(AsyncFunctionPromiseRelease, 1, kPromise) \
\
+ /* BigInt */ \
+ CPP(BigIntConstructor) \
+ CPP(BigIntConstructor_ConstructStub) \
+ CPP(BigIntParseInt) \
+ CPP(BigIntAsUintN) \
+ CPP(BigIntAsIntN) \
+ CPP(BigIntPrototypeToLocaleString) \
+ CPP(BigIntPrototypeToString) \
+ CPP(BigIntPrototypeValueOf) \
+ \
/* Boolean */ \
CPP(BooleanConstructor) \
CPP(BooleanConstructor_ConstructStub) \
@@ -533,34 +538,28 @@ namespace internal {
CPP(JsonStringify) \
\
/* ICs */ \
- TFH(LoadIC, LOAD_IC, kNoExtraICState, LoadWithVector) \
- TFH(LoadIC_Noninlined, BUILTIN, kNoExtraICState, LoadWithVector) \
- TFH(LoadICTrampoline, LOAD_IC, kNoExtraICState, Load) \
- TFH(KeyedLoadIC, KEYED_LOAD_IC, kNoExtraICState, LoadWithVector) \
- TFH(KeyedLoadICTrampoline, KEYED_LOAD_IC, kNoExtraICState, Load) \
- TFH(StoreIC, STORE_IC, kNoExtraICState, StoreWithVector) \
- TFH(StoreICTrampoline, STORE_IC, kNoExtraICState, Store) \
- TFH(StoreICStrict, STORE_IC, kNoExtraICState, StoreWithVector) \
- TFH(StoreICStrictTrampoline, STORE_IC, kNoExtraICState, Store) \
- TFH(KeyedStoreIC, KEYED_STORE_IC, kNoExtraICState, StoreWithVector) \
- TFH(KeyedStoreICTrampoline, KEYED_STORE_IC, kNoExtraICState, Store) \
- TFH(KeyedStoreICStrict, KEYED_STORE_IC, kNoExtraICState, StoreWithVector) \
- TFH(KeyedStoreICStrictTrampoline, KEYED_STORE_IC, kNoExtraICState, Store) \
- TFH(LoadGlobalIC, LOAD_GLOBAL_IC, kNoExtraICState, LoadGlobalWithVector) \
- TFH(LoadGlobalICInsideTypeof, LOAD_GLOBAL_IC, kNoExtraICState, \
- LoadGlobalWithVector) \
- TFH(LoadGlobalICTrampoline, LOAD_GLOBAL_IC, kNoExtraICState, LoadGlobal) \
- TFH(LoadGlobalICInsideTypeofTrampoline, LOAD_GLOBAL_IC, kNoExtraICState, \
- LoadGlobal) \
+ TFH(LoadIC, LoadWithVector) \
+ TFH(LoadIC_Noninlined, LoadWithVector) \
+ TFH(LoadICTrampoline, Load) \
+ TFH(KeyedLoadIC, LoadWithVector) \
+ TFH(KeyedLoadICTrampoline, Load) \
+ TFH(StoreIC, StoreWithVector) \
+ TFH(StoreICTrampoline, Store) \
+ TFH(KeyedStoreIC, StoreWithVector) \
+ TFH(KeyedStoreICTrampoline, Store) \
+ TFH(LoadGlobalIC, LoadGlobalWithVector) \
+ TFH(LoadGlobalICInsideTypeof, LoadGlobalWithVector) \
+ TFH(LoadGlobalICTrampoline, LoadGlobal) \
+ TFH(LoadGlobalICInsideTypeofTrampoline, LoadGlobal) \
\
/* Map */ \
- TFS(MapLookupHashIndex, kTable, kKey) \
+ TFS(FindOrderedHashMapEntry, kTable, kKey) \
TFJ(MapConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFJ(MapSet, 2, kKey, kValue) \
- TFJ(MapDelete, 1, kKey) \
- TFJ(MapGet, 1, kKey) \
- TFJ(MapHas, 1, kKey) \
- CPP(MapClear) \
+ TFJ(MapPrototypeSet, 2, kKey, kValue) \
+ TFJ(MapPrototypeDelete, 1, kKey) \
+ TFJ(MapPrototypeGet, 1, kKey) \
+ TFJ(MapPrototypeHas, 1, kKey) \
+ CPP(MapPrototypeClear) \
/* ES #sec-map.prototype.entries */ \
TFJ(MapPrototypeEntries, 0) \
/* ES #sec-get-map.prototype.size */ \
@@ -647,10 +646,12 @@ namespace internal {
TFJ(MathTrunc, 1, kX) \
\
/* Number */ \
+ TFC(AllocateHeapNumber, AllocateHeapNumber, 1) \
/* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Call]] case */ \
- ASM(NumberConstructor) \
+ TFJ(NumberConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Construct]] case */ \
- ASM(NumberConstructor_ConstructStub) \
+ TFJ(NumberConstructor_ConstructStub, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-number.isfinite */ \
TFJ(NumberIsFinite, 1, kNumber) \
/* ES6 #sec-number.isinteger */ \
@@ -689,6 +690,9 @@ namespace internal {
TFC(StrictEqual, Compare, 1) \
\
/* Object */ \
+ TFJ(ObjectConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ObjectConstructor_ConstructStub, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
CPP(ObjectAssign) \
/* ES #sec-object.create */ \
TFJ(ObjectCreate, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
@@ -698,13 +702,14 @@ namespace internal {
CPP(ObjectDefineSetter) \
CPP(ObjectEntries) \
CPP(ObjectFreeze) \
- CPP(ObjectGetOwnPropertyDescriptor) \
+ TFJ(ObjectGetOwnPropertyDescriptor, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
CPP(ObjectGetOwnPropertyDescriptors) \
CPP(ObjectGetOwnPropertyNames) \
CPP(ObjectGetOwnPropertySymbols) \
CPP(ObjectGetPrototypeOf) \
CPP(ObjectSetPrototypeOf) \
- CPP(ObjectIs) \
+ TFJ(ObjectIs, 2, kLeft, kRight) \
CPP(ObjectIsExtensible) \
CPP(ObjectIsFrozen) \
CPP(ObjectIsSealed) \
@@ -730,9 +735,8 @@ namespace internal {
TFC(InstanceOf, Compare, 1) \
\
/* for-in */ \
+ TFS(ForInEnumerate, kReceiver) \
TFS(ForInFilter, kKey, kObject) \
- TFS(ForInNext, kObject, kCacheArray, kCacheType, kIndex) \
- TFC(ForInPrepare, ForInPrepare, 3) \
\
/* Promise */ \
/* ES6 #sec-getcapabilitiesexecutor-functions */ \
@@ -758,7 +762,8 @@ namespace internal {
TFJ(PromiseHandle, 5, kValue, kHandler, kDeferredPromise, \
kDeferredOnResolve, kDeferredOnReject) \
/* ES #sec-promise.resolve */ \
- TFJ(PromiseResolve, 1, kValue) \
+ TFJ(PromiseResolveWrapper, 1, kValue) \
+ TFS(PromiseResolve, kConstructor, kValue) \
/* ES #sec-promise.reject */ \
TFJ(PromiseReject, 1, kReason) \
TFJ(InternalPromiseReject, 3, kPromise, kReason, kDebugEvent) \
@@ -778,6 +783,7 @@ namespace internal {
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
TFS(ProxyGetProperty, kProxy, kName, kReceiverValue) \
TFS(ProxyHasProperty, kProxy, kName) \
+ TFS(ProxySetProperty, kProxy, kName, kValue, kReceiverValue, kLanguageMode) \
\
/* Reflect */ \
ASM(ReflectApply) \
@@ -853,10 +859,10 @@ namespace internal {
\
/* Set */ \
TFJ(SetConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFJ(SetHas, 1, kKey) \
- TFJ(SetAdd, 1, kKey) \
- TFJ(SetDelete, 1, kKey) \
- CPP(SetClear) \
+ TFJ(SetPrototypeHas, 1, kKey) \
+ TFJ(SetPrototypeAdd, 1, kKey) \
+ TFJ(SetPrototypeDelete, 1, kKey) \
+ CPP(SetPrototypeClear) \
/* ES #sec-set.prototype.entries */ \
TFJ(SetPrototypeEntries, 0) \
/* ES #sec-get-set.prototype.size */ \
@@ -885,11 +891,20 @@ namespace internal {
CPP(AtomicsWake) \
\
/* String */ \
- ASM(StringConstructor) \
- ASM(StringConstructor_ConstructStub) \
+ TFJ(StringConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(StringConstructor_ConstructStub, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
CPP(StringFromCodePoint) \
/* ES6 #sec-string.fromcharcode */ \
TFJ(StringFromCharCode, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 #sec-string.prototype.anchor */ \
+ TFJ(StringPrototypeAnchor, 1, kValue) \
+ /* ES6 #sec-string.prototype.big */ \
+ TFJ(StringPrototypeBig, 0) \
+ /* ES6 #sec-string.prototype.blink */ \
+ TFJ(StringPrototypeBlink, 0) \
+ /* ES6 #sec-string.prototype.bold */ \
+ TFJ(StringPrototypeBold, 0) \
/* ES6 #sec-string.prototype.charat */ \
TFJ(StringPrototypeCharAt, 1, kPosition) \
/* ES6 #sec-string.prototype.charcodeat */ \
@@ -900,33 +915,55 @@ namespace internal {
TFJ(StringPrototypeConcat, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.endswith */ \
CPP(StringPrototypeEndsWith) \
+ /* ES6 #sec-string.prototype.fontcolor */ \
+ TFJ(StringPrototypeFontcolor, 1, kValue) \
+ /* ES6 #sec-string.prototype.fontsize */ \
+ TFJ(StringPrototypeFontsize, 1, kValue) \
+ /* ES6 #sec-string.prototype.fixed */ \
+ TFJ(StringPrototypeFixed, 0) \
/* ES6 #sec-string.prototype.includes */ \
TFJ(StringPrototypeIncludes, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.indexof */ \
TFJ(StringPrototypeIndexOf, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 #sec-string.prototype.italics */ \
+ TFJ(StringPrototypeItalics, 0) \
/* ES6 #sec-string.prototype.lastindexof */ \
CPP(StringPrototypeLastIndexOf) \
+ /* ES6 #sec-string.prototype.link */ \
+ TFJ(StringPrototypeLink, 1, kValue) \
/* ES6 #sec-string.prototype.localecompare */ \
CPP(StringPrototypeLocaleCompare) \
+ /* ES6 #sec-string.prototype.repeat */ \
+ TFJ(StringPrototypeRepeat, 1, kCount) \
/* ES6 #sec-string.prototype.replace */ \
TFJ(StringPrototypeReplace, 2, kSearch, kReplace) \
/* ES6 #sec-string.prototype.slice */ \
TFJ(StringPrototypeSlice, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 #sec-string.prototype.small */ \
+ TFJ(StringPrototypeSmall, 0) \
/* ES6 #sec-string.prototype.split */ \
TFJ(StringPrototypeSplit, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 #sec-string.prototype.strike */ \
+ TFJ(StringPrototypeStrike, 0) \
+ /* ES6 #sec-string.prototype.sub */ \
+ TFJ(StringPrototypeSub, 0) \
/* ES6 #sec-string.prototype.substr */ \
TFJ(StringPrototypeSubstr, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.substring */ \
TFJ(StringPrototypeSubstring, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 #sec-string.prototype.sup */ \
+ TFJ(StringPrototypeSup, 0) \
/* ES6 #sec-string.prototype.startswith */ \
CPP(StringPrototypeStartsWith) \
/* ES6 #sec-string.prototype.tostring */ \
TFJ(StringPrototypeToString, 0) \
- CPP(StringPrototypeTrim) \
- CPP(StringPrototypeTrimLeft) \
- CPP(StringPrototypeTrimRight) \
+ TFJ(StringPrototypeTrim, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(StringPrototypeTrimLeft, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(StringPrototypeTrimRight, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.valueof */ \
TFJ(StringPrototypeValueOf, 0) \
/* ES6 #sec-string.prototype-@@iterator */ \
@@ -986,8 +1023,12 @@ namespace internal {
CPP(TypedArrayPrototypeLastIndexOf) \
/* ES6 #sec-%typedarray%.prototype.reverse */ \
CPP(TypedArrayPrototypeReverse) \
+ /* ES6 %TypedArray%.prototype.set */ \
+ TFJ(TypedArrayPrototypeSet, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-%typedarray%.prototype.slice */ \
CPP(TypedArrayPrototypeSlice) \
+ /* ES6 #sec-get-%typedarray%.prototype-@@tostringtag */ \
+ TFJ(TypedArrayPrototypeToStringTag, 0) \
/* ES6 %TypedArray%.prototype.every */ \
TFJ(TypedArrayPrototypeEvery, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
@@ -1052,8 +1093,8 @@ namespace internal {
\
/* Await (proposal-async-iteration/#await), with resume behaviour */ \
/* specific to Async Generators. Internal / Not exposed to JS code. */ \
- TFJ(AsyncGeneratorAwaitCaught, 1, kAwaited) \
- TFJ(AsyncGeneratorAwaitUncaught, 1, kAwaited) \
+ TFJ(AsyncGeneratorAwaitCaught, 2, kGenerator, kAwaited) \
+ TFJ(AsyncGeneratorAwaitUncaught, 2, kGenerator, kAwaited) \
TFJ(AsyncGeneratorAwaitResolveClosure, 1, kValue) \
TFJ(AsyncGeneratorAwaitRejectClosure, 1, kValue) \
TFJ(AsyncGeneratorYieldResolveClosure, 1, kValue) \
diff --git a/deps/v8/src/builtins/builtins-descriptors.h b/deps/v8/src/builtins/builtins-descriptors.h
index 92fc0816bd..6026b9f721 100644
--- a/deps/v8/src/builtins/builtins-descriptors.h
+++ b/deps/v8/src/builtins/builtins-descriptors.h
@@ -33,8 +33,7 @@ namespace internal {
typedef Name##Descriptor Builtin_##Name##_InterfaceDescriptor;
// Define interface descriptors for IC handlers/dispatchers.
-#define DEFINE_TFH_INTERFACE_DESCRIPTOR(Name, Kind, Extra, \
- InterfaceDescriptor) \
+#define DEFINE_TFH_INTERFACE_DESCRIPTOR(Name, InterfaceDescriptor) \
typedef InterfaceDescriptor##Descriptor Builtin_##Name##_InterfaceDescriptor;
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, DEFINE_TFJ_INTERFACE_DESCRIPTOR,
diff --git a/deps/v8/src/builtins/builtins-forin-gen.cc b/deps/v8/src/builtins/builtins-forin-gen.cc
deleted file mode 100644
index 8dbe998564..0000000000
--- a/deps/v8/src/builtins/builtins-forin-gen.cc
+++ /dev/null
@@ -1,209 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/builtins/builtins-forin-gen.h"
-
-#include "src/builtins/builtins-utils-gen.h"
-#include "src/builtins/builtins.h"
-#include "src/code-factory.h"
-#include "src/code-stub-assembler.h"
-#include "src/counters.h"
-#include "src/keys.h"
-#include "src/lookup.h"
-#include "src/objects-inl.h"
-#include "src/property-descriptor.h"
-
-namespace v8 {
-namespace internal {
-
-typedef compiler::Node Node;
-
-Node* ForInBuiltinsAssembler::ForInFilter(Node* key, Node* object,
- Node* context) {
- CSA_ASSERT(this, IsName(key));
-
- VARIABLE(var_result, MachineRepresentation::kTagged, key);
-
- Node* has_property = HasProperty(object, key, context, kForInHasProperty);
-
- Label end(this);
- GotoIf(WordEqual(has_property, BooleanConstant(true)), &end);
-
- var_result.Bind(UndefinedConstant());
- Goto(&end);
-
- BIND(&end);
- return var_result.value();
-}
-
-std::tuple<Node*, Node*, Node*> ForInBuiltinsAssembler::EmitForInPrepare(
- Node* object, Node* context, Label* call_runtime,
- Label* nothing_to_iterate) {
- Label use_cache(this);
- CSA_ASSERT(this, IsJSReceiver(object));
-
- CheckEnumCache(object, &use_cache, nothing_to_iterate, call_runtime);
-
- BIND(&use_cache);
- Node* map = LoadMap(object);
- Node* enum_length = EnumLength(map);
- GotoIf(WordEqual(enum_length, SmiConstant(0)), nothing_to_iterate);
- Node* descriptors = LoadMapDescriptors(map);
- Node* cache_offset =
- LoadObjectField(descriptors, DescriptorArray::kEnumCacheBridgeOffset);
- Node* enum_cache = LoadObjectField(
- cache_offset, DescriptorArray::kEnumCacheBridgeCacheOffset);
-
- return std::make_tuple(map, enum_cache, enum_length);
-}
-
-Node* ForInBuiltinsAssembler::EnumLength(Node* map) {
- CSA_ASSERT(this, IsMap(map));
- Node* bitfield_3 = LoadMapBitField3(map);
- Node* enum_length = DecodeWordFromWord32<Map::EnumLengthBits>(bitfield_3);
- return SmiTag(enum_length);
-}
-
-void ForInBuiltinsAssembler::CheckPrototypeEnumCache(Node* receiver, Node* map,
- Label* use_cache,
- Label* use_runtime) {
- VARIABLE(current_js_object, MachineRepresentation::kTagged, receiver);
- VARIABLE(current_map, MachineRepresentation::kTagged, map);
-
- // These variables are updated in the loop below.
- Variable* loop_vars[2] = {&current_js_object, &current_map};
- Label loop(this, 2, loop_vars), next(this);
-
- Goto(&loop);
- // Check that there are no elements. |current_js_object| contains
- // the current JS object we've reached through the prototype chain.
- BIND(&loop);
- {
- Label if_elements(this), if_no_elements(this);
- TNode<JSReceiver> receiver = CAST(current_js_object.value());
- // The following relies on the elements only aliasing with JSProxy::target,
- // which is a Javascript value and hence cannot be confused with an elements
- // backing store.
- STATIC_ASSERT(JSObject::kElementsOffset == JSProxy::kTargetOffset);
- Node* elements = LoadObjectField(receiver, JSObject::kElementsOffset);
- Node* empty_fixed_array = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
- // Check that there are no elements.
- Branch(WordEqual(elements, empty_fixed_array), &if_no_elements,
- &if_elements);
- BIND(&if_elements);
- {
- // Second chance, the object may be using the empty slow element
- // dictionary.
- Node* slow_empty_dictionary =
- LoadRoot(Heap::kEmptySlowElementDictionaryRootIndex);
- Branch(WordNotEqual(elements, slow_empty_dictionary), use_runtime,
- &if_no_elements);
- }
-
- BIND(&if_no_elements);
- {
- // Update map prototype.
- current_js_object.Bind(LoadMapPrototype(current_map.value()));
- Branch(WordEqual(current_js_object.value(), NullConstant()), use_cache,
- &next);
- }
- }
-
- BIND(&next);
- {
- // For all objects but the receiver, check that the cache is empty.
- current_map.Bind(LoadMap(current_js_object.value()));
- Node* enum_length = EnumLength(current_map.value());
- Node* zero_constant = SmiConstant(0);
- Branch(WordEqual(enum_length, zero_constant), &loop, use_runtime);
- }
-}
-
-void ForInBuiltinsAssembler::CheckEnumCache(Node* receiver, Label* use_cache,
- Label* nothing_to_iterate,
- Label* use_runtime) {
- Node* map = LoadMap(receiver);
-
- Label check_empty_prototype(this),
- check_dict_receiver(this, Label::kDeferred);
-
- // Check if the enum length field is properly initialized, indicating that
- // there is an enum cache.
- {
- Node* invalid_enum_cache_sentinel = SmiConstant(kInvalidEnumCacheSentinel);
- Node* enum_length = EnumLength(map);
- Branch(WordEqual(enum_length, invalid_enum_cache_sentinel),
- &check_dict_receiver, &check_empty_prototype);
- }
-
- // Check that there are no elements on the fast |receiver| and its prototype
- // chain.
- BIND(&check_empty_prototype);
- CheckPrototypeEnumCache(receiver, map, use_cache, use_runtime);
-
- Label dict_loop(this);
- BIND(&check_dict_receiver);
- {
- // Avoid runtime-call for empty dictionary receivers.
- GotoIfNot(IsDictionaryMap(map), use_runtime);
- Node* properties = LoadSlowProperties(receiver);
- Node* length = LoadFixedArrayElement(
- properties, NameDictionary::kNumberOfElementsIndex);
- GotoIfNot(WordEqual(length, SmiConstant(0)), use_runtime);
- // Check that there are no elements on the |receiver| and its prototype
- // chain. Given that we do not create an EnumCache for dict-mode objects,
- // directly jump to |nothing_to_iterate| if there are no elements and no
- // properties on the |receiver|.
- CheckPrototypeEnumCache(receiver, map, nothing_to_iterate, use_runtime);
- }
-}
-
-TF_BUILTIN(ForInFilter, ForInBuiltinsAssembler) {
- Node* key = Parameter(Descriptor::kKey);
- Node* object = Parameter(Descriptor::kObject);
- Node* context = Parameter(Descriptor::kContext);
-
- Return(ForInFilter(key, object, context));
-}
-
-TF_BUILTIN(ForInNext, ForInBuiltinsAssembler) {
- Label filter(this);
- Node* object = Parameter(Descriptor::kObject);
- Node* cache_array = Parameter(Descriptor::kCacheArray);
- Node* cache_type = Parameter(Descriptor::kCacheType);
- Node* index = Parameter(Descriptor::kIndex);
- Node* context = Parameter(Descriptor::kContext);
-
- Node* key = LoadFixedArrayElement(cache_array, SmiUntag(index));
- Node* map = LoadMap(object);
- GotoIfNot(WordEqual(map, cache_type), &filter);
- Return(key);
- BIND(&filter);
- Return(ForInFilter(key, object, context));
-}
-
-TF_BUILTIN(ForInPrepare, ForInBuiltinsAssembler) {
- Label call_runtime(this), nothing_to_iterate(this);
- Node* object = Parameter(Descriptor::kObject);
- Node* context = Parameter(Descriptor::kContext);
-
- Node* cache_type;
- Node* cache_array;
- Node* cache_length;
- std::tie(cache_type, cache_array, cache_length) =
- EmitForInPrepare(object, context, &call_runtime, &nothing_to_iterate);
-
- Return(cache_type, cache_array, cache_length);
-
- BIND(&call_runtime);
- TailCallRuntime(Runtime::kForInPrepare, context, object);
-
- BIND(&nothing_to_iterate);
- {
- Node* zero = SmiConstant(0);
- Return(zero, zero, zero);
- }
-}
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-forin-gen.h b/deps/v8/src/builtins/builtins-forin-gen.h
deleted file mode 100644
index 1740ba32f3..0000000000
--- a/deps/v8/src/builtins/builtins-forin-gen.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_BUILTINS_BUILTINS_FORIN_GEN_H_
-#define V8_BUILTINS_BUILTINS_FORIN_GEN_H_
-
-#include "src/code-stub-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-class ForInBuiltinsAssembler : public CodeStubAssembler {
- public:
- explicit ForInBuiltinsAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
-
- std::tuple<Node*, Node*, Node*> EmitForInPrepare(Node* object, Node* context,
- Label* call_runtime,
- Label* nothing_to_iterate);
-
- Node* ForInFilter(Node* key, Node* object, Node* context);
-
- private:
- // Get the enumerable length from |map| and return the result as a Smi.
- Node* EnumLength(Node* map);
- void CheckPrototypeEnumCache(Node* receiver, Node* map, Label* use_cache,
- Label* use_runtime);
- // Check the cache validity for |receiver|. Branch to |use_cache| if
- // the cache is valid, otherwise branch to |use_runtime|.
- void CheckEnumCache(Node* receiver, Label* use_cache,
- Label* nothing_to_iterate, Label* use_runtime);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_BUILTINS_BUILTINS_FORIN_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-function-gen.cc b/deps/v8/src/builtins/builtins-function-gen.cc
index 61d102ef88..f5a173c71e 100644
--- a/deps/v8/src/builtins/builtins-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-function-gen.cc
@@ -6,6 +6,7 @@
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
#include "src/frame-constants.h"
+#include "src/zone/zone-list-inl.h" // TODO(mstarzinger): Temporary cycle breaker.
namespace v8 {
namespace internal {
@@ -26,8 +27,15 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
GotoIf(TaggedIsSmi(receiver), &slow);
Node* receiver_map = LoadMap(receiver);
- Node* instance_type = LoadMapInstanceType(receiver_map);
- GotoIf(Word32NotEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE)), &slow);
+ {
+ Label fast(this);
+ Node* instance_type = LoadMapInstanceType(receiver_map);
+ GotoIf(Word32Equal(instance_type, Int32Constant(JS_FUNCTION_TYPE)), &fast);
+ GotoIf(Word32Equal(instance_type, Int32Constant(JS_BOUND_FUNCTION_TYPE)),
+ &fast);
+ Goto(&slow);
+ BIND(&fast);
+ }
// Disallow binding of slow-mode functions. We need to figure out whether the
// length and name property are in the original state.
@@ -46,51 +54,55 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
// AccessorInfo objects. In that case, their value can be recomputed even if
// the actual value on the object changes.
Comment("Check name and length properties");
- const int length_index = JSFunction::kLengthDescriptorIndex;
- Node* maybe_length = LoadFixedArrayElement(
- descriptors, DescriptorArray::ToKeyIndex(length_index));
- GotoIf(WordNotEqual(maybe_length, LoadRoot(Heap::klength_stringRootIndex)),
- &slow);
-
- Node* maybe_length_accessor = LoadFixedArrayElement(
- descriptors, DescriptorArray::ToValueIndex(length_index));
- GotoIf(TaggedIsSmi(maybe_length_accessor), &slow);
- Node* length_value_map = LoadMap(maybe_length_accessor);
- GotoIfNot(IsAccessorInfoMap(length_value_map), &slow);
-
- const int name_index = JSFunction::kNameDescriptorIndex;
- Node* maybe_name = LoadFixedArrayElement(
- descriptors, DescriptorArray::ToKeyIndex(name_index));
- GotoIf(WordNotEqual(maybe_name, LoadRoot(Heap::kname_stringRootIndex)),
- &slow);
-
- Node* maybe_name_accessor = LoadFixedArrayElement(
- descriptors, DescriptorArray::ToValueIndex(name_index));
- GotoIf(TaggedIsSmi(maybe_name_accessor), &slow);
- Node* name_value_map = LoadMap(maybe_name_accessor);
- GotoIfNot(IsAccessorInfoMap(name_value_map), &slow);
+ {
+ const int length_index = JSFunction::kLengthDescriptorIndex;
+ Node* maybe_length = LoadFixedArrayElement(
+ descriptors, DescriptorArray::ToKeyIndex(length_index));
+ GotoIf(WordNotEqual(maybe_length, LoadRoot(Heap::klength_stringRootIndex)),
+ &slow);
+
+ Node* maybe_length_accessor = LoadFixedArrayElement(
+ descriptors, DescriptorArray::ToValueIndex(length_index));
+ GotoIf(TaggedIsSmi(maybe_length_accessor), &slow);
+ Node* length_value_map = LoadMap(maybe_length_accessor);
+ GotoIfNot(IsAccessorInfoMap(length_value_map), &slow);
+
+ const int name_index = JSFunction::kNameDescriptorIndex;
+ Node* maybe_name = LoadFixedArrayElement(
+ descriptors, DescriptorArray::ToKeyIndex(name_index));
+ GotoIf(WordNotEqual(maybe_name, LoadRoot(Heap::kname_stringRootIndex)),
+ &slow);
+
+ Node* maybe_name_accessor = LoadFixedArrayElement(
+ descriptors, DescriptorArray::ToValueIndex(name_index));
+ GotoIf(TaggedIsSmi(maybe_name_accessor), &slow);
+ Node* name_value_map = LoadMap(maybe_name_accessor);
+ GotoIfNot(IsAccessorInfoMap(name_value_map), &slow);
+ }
// Choose the right bound function map based on whether the target is
// constructable.
Comment("Choose the right bound function map");
VARIABLE(bound_function_map, MachineRepresentation::kTagged);
- Label with_constructor(this);
- VariableList vars({&bound_function_map}, zone());
- Node* native_context = LoadNativeContext(context);
+ {
+ Label with_constructor(this);
+ VariableList vars({&bound_function_map}, zone());
+ Node* native_context = LoadNativeContext(context);
- Label map_done(this, vars);
- GotoIf(IsConstructorMap(receiver_map), &with_constructor);
+ Label map_done(this, vars);
+ GotoIf(IsConstructorMap(receiver_map), &with_constructor);
- bound_function_map.Bind(LoadContextElement(
- native_context, Context::BOUND_FUNCTION_WITHOUT_CONSTRUCTOR_MAP_INDEX));
- Goto(&map_done);
+ bound_function_map.Bind(LoadContextElement(
+ native_context, Context::BOUND_FUNCTION_WITHOUT_CONSTRUCTOR_MAP_INDEX));
+ Goto(&map_done);
- BIND(&with_constructor);
- bound_function_map.Bind(LoadContextElement(
- native_context, Context::BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX));
- Goto(&map_done);
+ BIND(&with_constructor);
+ bound_function_map.Bind(LoadContextElement(
+ native_context, Context::BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX));
+ Goto(&map_done);
- BIND(&map_done);
+ BIND(&map_done);
+ }
  // Verify that __proto__ matches that of the target bound function.
Comment("Verify that __proto__ matches target bound function");
@@ -101,68 +113,74 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
// Allocate the arguments array.
Comment("Allocate the arguments array");
VARIABLE(argument_array, MachineRepresentation::kTagged);
- Label empty_arguments(this);
- Label arguments_done(this, &argument_array);
- GotoIf(Uint32LessThanOrEqual(argc, Int32Constant(1)), &empty_arguments);
- Node* elements_length =
- ChangeUint32ToWord(Unsigned(Int32Sub(argc, Int32Constant(1))));
- Node* elements =
- AllocateFixedArray(PACKED_ELEMENTS, elements_length, INTPTR_PARAMETERS,
- kAllowLargeObjectAllocation);
- VARIABLE(index, MachineType::PointerRepresentation());
- index.Bind(IntPtrConstant(0));
- VariableList foreach_vars({&index}, zone());
- args.ForEach(foreach_vars,
- [this, elements, &index](Node* arg) {
- StoreFixedArrayElement(elements, index.value(), arg);
- Increment(&index);
- },
- IntPtrConstant(1));
- argument_array.Bind(elements);
- Goto(&arguments_done);
-
- BIND(&empty_arguments);
- argument_array.Bind(EmptyFixedArrayConstant());
- Goto(&arguments_done);
-
- BIND(&arguments_done);
+ {
+ Label empty_arguments(this);
+ Label arguments_done(this, &argument_array);
+ GotoIf(Uint32LessThanOrEqual(argc, Int32Constant(1)), &empty_arguments);
+ Node* elements_length =
+ ChangeUint32ToWord(Unsigned(Int32Sub(argc, Int32Constant(1))));
+ Node* elements =
+ AllocateFixedArray(PACKED_ELEMENTS, elements_length, INTPTR_PARAMETERS,
+ kAllowLargeObjectAllocation);
+ VARIABLE(index, MachineType::PointerRepresentation());
+ index.Bind(IntPtrConstant(0));
+ VariableList foreach_vars({&index}, zone());
+ args.ForEach(foreach_vars,
+ [this, elements, &index](Node* arg) {
+ StoreFixedArrayElement(elements, index.value(), arg);
+ Increment(&index);
+ },
+ IntPtrConstant(1));
+ argument_array.Bind(elements);
+ Goto(&arguments_done);
+
+ BIND(&empty_arguments);
+ argument_array.Bind(EmptyFixedArrayConstant());
+ Goto(&arguments_done);
+
+ BIND(&arguments_done);
+ }
// Determine bound receiver.
Comment("Determine bound receiver");
VARIABLE(bound_receiver, MachineRepresentation::kTagged);
- Label has_receiver(this);
- Label receiver_done(this, &bound_receiver);
- GotoIf(Word32NotEqual(argc, Int32Constant(0)), &has_receiver);
- bound_receiver.Bind(UndefinedConstant());
- Goto(&receiver_done);
+ {
+ Label has_receiver(this);
+ Label receiver_done(this, &bound_receiver);
+ GotoIf(Word32NotEqual(argc, Int32Constant(0)), &has_receiver);
+ bound_receiver.Bind(UndefinedConstant());
+ Goto(&receiver_done);
- BIND(&has_receiver);
- bound_receiver.Bind(args.AtIndex(0));
- Goto(&receiver_done);
+ BIND(&has_receiver);
+ bound_receiver.Bind(args.AtIndex(0));
+ Goto(&receiver_done);
- BIND(&receiver_done);
+ BIND(&receiver_done);
+ }
// Allocate the resulting bound function.
Comment("Allocate the resulting bound function");
- Node* bound_function = Allocate(JSBoundFunction::kSize);
- StoreMapNoWriteBarrier(bound_function, bound_function_map.value());
- StoreObjectFieldNoWriteBarrier(
- bound_function, JSBoundFunction::kBoundTargetFunctionOffset, receiver);
- StoreObjectFieldNoWriteBarrier(bound_function,
- JSBoundFunction::kBoundThisOffset,
- bound_receiver.value());
- StoreObjectFieldNoWriteBarrier(bound_function,
- JSBoundFunction::kBoundArgumentsOffset,
- argument_array.value());
- Node* empty_fixed_array = EmptyFixedArrayConstant();
- StoreObjectFieldNoWriteBarrier(
- bound_function, JSObject::kPropertiesOrHashOffset, empty_fixed_array);
- StoreObjectFieldNoWriteBarrier(bound_function, JSObject::kElementsOffset,
- empty_fixed_array);
-
- args.PopAndReturn(bound_function);
- BIND(&slow);
+ {
+ Node* bound_function = Allocate(JSBoundFunction::kSize);
+ StoreMapNoWriteBarrier(bound_function, bound_function_map.value());
+ StoreObjectFieldNoWriteBarrier(
+ bound_function, JSBoundFunction::kBoundTargetFunctionOffset, receiver);
+ StoreObjectFieldNoWriteBarrier(bound_function,
+ JSBoundFunction::kBoundThisOffset,
+ bound_receiver.value());
+ StoreObjectFieldNoWriteBarrier(bound_function,
+ JSBoundFunction::kBoundArgumentsOffset,
+ argument_array.value());
+ Node* empty_fixed_array = EmptyFixedArrayConstant();
+ StoreObjectFieldNoWriteBarrier(
+ bound_function, JSObject::kPropertiesOrHashOffset, empty_fixed_array);
+ StoreObjectFieldNoWriteBarrier(bound_function, JSObject::kElementsOffset,
+ empty_fixed_array);
+
+ args.PopAndReturn(bound_function);
+ }
+ BIND(&slow);
Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
MachineType::TaggedPointer());
TailCallStub(CodeFactory::FunctionPrototypeBind(isolate()), context, target,
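
Summarizing the fast path's guards after this restructuring: the receiver must now be a plain or bound function, and its length and name descriptors must still be the original AccessorInfo entries (so correct values can be recomputed lazily for the new bound function); any failed check jumps to &slow and the generic stub. A model of that predicate:

    #include <cassert>

    struct Receiver {
      bool is_function_or_bound_function;
      bool length_is_original_accessor;  // descriptor still AccessorInfo
      bool name_is_original_accessor;
    };

    // Any failed check corresponds to a GotoIf(..., &slow) above.
    bool TakesFastPath(const Receiver& r) {
      return r.is_function_or_bound_function &&
             r.length_is_original_accessor && r.name_is_original_accessor;
    }

    int main() {
      assert(TakesFastPath({true, true, true}));
      assert(!TakesFastPath({true, false, true}));  // e.g. .length redefined
    }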
diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc
index 8f5ab699ac..1f16d81fe3 100644
--- a/deps/v8/src/builtins/builtins-handler-gen.cc
+++ b/deps/v8/src/builtins/builtins-handler-gen.cc
@@ -13,6 +13,13 @@
namespace v8 {
namespace internal {
+TF_BUILTIN(LoadIC_StringLength, CodeStubAssembler) {
+ Node* value = Parameter(Descriptor::kReceiver);
+ Node* string = LoadJSValueValue(value);
+ Node* result = LoadStringLength(string);
+ Return(result);
+}
+
TF_BUILTIN(KeyedLoadIC_IndexedString, CodeStubAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* index = Parameter(Descriptor::kName);
@@ -56,22 +63,12 @@ TF_BUILTIN(KeyedLoadIC_Slow, CodeStubAssembler) {
void Builtins::Generate_KeyedStoreIC_Megamorphic(
compiler::CodeAssemblerState* state) {
- KeyedStoreGenericGenerator::Generate(state, SLOPPY);
-}
-
-void Builtins::Generate_KeyedStoreIC_Megamorphic_Strict(
- compiler::CodeAssemblerState* state) {
- KeyedStoreGenericGenerator::Generate(state, STRICT);
+ KeyedStoreGenericGenerator::Generate(state);
}
void Builtins::Generate_StoreIC_Uninitialized(
compiler::CodeAssemblerState* state) {
- StoreICUninitializedGenerator::Generate(state, SLOPPY);
-}
-
-void Builtins::Generate_StoreICStrict_Uninitialized(
- compiler::CodeAssemblerState* state) {
- StoreICUninitializedGenerator::Generate(state, STRICT);
+ StoreICUninitializedGenerator::Generate(state);
}
TF_BUILTIN(KeyedStoreIC_Miss, CodeStubAssembler) {
diff --git a/deps/v8/src/builtins/builtins-ic-gen.cc b/deps/v8/src/builtins/builtins-ic-gen.cc
index 29136627e1..70dc34e302 100644
--- a/deps/v8/src/builtins/builtins-ic-gen.cc
+++ b/deps/v8/src/builtins/builtins-ic-gen.cc
@@ -28,15 +28,11 @@ IC_BUILTIN(LoadICTrampoline)
IC_BUILTIN(LoadField)
IC_BUILTIN(KeyedLoadICTrampoline)
IC_BUILTIN(KeyedLoadIC_Megamorphic)
+IC_BUILTIN(StoreIC)
+IC_BUILTIN(StoreICTrampoline)
+IC_BUILTIN(KeyedStoreIC)
+IC_BUILTIN(KeyedStoreICTrampoline)
-IC_BUILTIN_PARAM(StoreIC, StoreIC, SLOPPY)
-IC_BUILTIN_PARAM(StoreICTrampoline, StoreICTrampoline, SLOPPY)
-IC_BUILTIN_PARAM(StoreICStrict, StoreIC, STRICT)
-IC_BUILTIN_PARAM(StoreICStrictTrampoline, StoreICTrampoline, STRICT)
-IC_BUILTIN_PARAM(KeyedStoreIC, KeyedStoreIC, SLOPPY)
-IC_BUILTIN_PARAM(KeyedStoreICTrampoline, KeyedStoreICTrampoline, SLOPPY)
-IC_BUILTIN_PARAM(KeyedStoreICStrict, KeyedStoreIC, STRICT)
-IC_BUILTIN_PARAM(KeyedStoreICStrictTrampoline, KeyedStoreICTrampoline, STRICT)
IC_BUILTIN_PARAM(LoadGlobalIC, LoadGlobalIC, NOT_INSIDE_TYPEOF)
IC_BUILTIN_PARAM(LoadGlobalICInsideTypeof, LoadGlobalIC, INSIDE_TYPEOF)
IC_BUILTIN_PARAM(LoadGlobalICTrampoline, LoadGlobalICTrampoline,
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index aaa16332fd..00e7422e59 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -5,7 +5,9 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
+#include "src/heap/heap-inl.h"
#include "src/macro-assembler.h"
+#include "src/objects/shared-function-info.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -92,9 +94,10 @@ TF_BUILTIN(GrowFastSmiOrObjectElements, CodeStubAssembler) {
TailCallRuntime(Runtime::kGrowArrayElements, context, object, key);
}
-TF_BUILTIN(NewUnmappedArgumentsElements, CodeStubAssembler) {
+TF_BUILTIN(NewArgumentsElements, CodeStubAssembler) {
Node* frame = Parameter(Descriptor::kFrame);
Node* length = SmiToWord(Parameter(Descriptor::kLength));
+ Node* mapped_count = SmiToWord(Parameter(Descriptor::kMappedCount));
// Check if we can allocate in new space.
ElementsKind kind = PACKED_ELEMENTS;
@@ -119,21 +122,49 @@ TF_BUILTIN(NewUnmappedArgumentsElements, CodeStubAssembler) {
// Allocate a FixedArray in new space.
Node* result = AllocateFixedArray(kind, length);
+ // The elements might be used to back mapped arguments. In that case fill
+ // the mapped elements (i.e. the first {mapped_count}) with the hole, but
+ // make sure not to overshoot the {length} if some arguments are missing.
+ Node* number_of_holes =
+ SelectConstant(IntPtrLessThan(mapped_count, length), mapped_count,
+ length, MachineType::PointerRepresentation());
+ Node* the_hole = TheHoleConstant();
+
+ // Fill the first elements up to {number_of_holes} with the hole.
+ VARIABLE(var_index, MachineType::PointerRepresentation());
+ Label loop1(this, &var_index), done_loop1(this);
+ var_index.Bind(IntPtrConstant(0));
+ Goto(&loop1);
+ BIND(&loop1);
+ {
+ // Load the current {index}.
+ Node* index = var_index.value();
+
+ // Check if we are done.
+ GotoIf(WordEqual(index, number_of_holes), &done_loop1);
+
+ // Store the hole into the {result}.
+ StoreFixedArrayElement(result, index, the_hole, SKIP_WRITE_BARRIER);
+
+ // Continue with next {index}.
+ var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
+ Goto(&loop1);
+ }
+ BIND(&done_loop1);
+
// Compute the effective {offset} into the {frame}.
Node* offset = IntPtrAdd(length, IntPtrConstant(1));
// Copy the parameters from {frame} (starting at {offset}) to {result}.
- VARIABLE(var_index, MachineType::PointerRepresentation());
- Label loop(this, &var_index), done_loop(this);
- var_index.Bind(IntPtrConstant(0));
- Goto(&loop);
- BIND(&loop);
+ Label loop2(this, &var_index), done_loop2(this);
+ Goto(&loop2);
+ BIND(&loop2);
{
// Load the current {index}.
Node* index = var_index.value();
// Check if we are done.
- GotoIf(WordEqual(index, length), &done_loop);
+ GotoIf(WordEqual(index, length), &done_loop2);
// Load the parameter at the given {index}.
Node* value = Load(MachineType::AnyTagged(), frame,
@@ -144,10 +175,10 @@ TF_BUILTIN(NewUnmappedArgumentsElements, CodeStubAssembler) {
// Continue with next {index}.
var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
- Goto(&loop);
+ Goto(&loop2);
}
+ BIND(&done_loop2);
- BIND(&done_loop);
Return(result);
}
}
@@ -156,7 +187,8 @@ TF_BUILTIN(NewUnmappedArgumentsElements, CodeStubAssembler) {
{
// Allocate in old space (or large object space).
TailCallRuntime(Runtime::kNewArgumentsElements, NoContextConstant(),
- BitcastWordToTagged(frame), SmiFromWord(length));
+ BitcastWordToTagged(frame), SmiFromWord(length),
+ SmiFromWord(mapped_count));
}
}
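
In plain terms the builtin now produces: holes in the first min(mapped_count, length) slots, which are shadowed by the mapped-parameter map, followed by the remaining actual arguments copied from the frame. A compact model of the two loops:

    #include <algorithm>
    #include <cassert>
    #include <string>
    #include <vector>

    // Sketch of loop1/loop2 above: pre-fill the mapped slots with the hole,
    // never overshooting length, then copy the rest from the frame.
    std::vector<std::string> NewArgumentsElements(
        const std::vector<std::string>& frame_params, int mapped_count) {
      int length = static_cast<int>(frame_params.size());
      int number_of_holes = std::min(mapped_count, length);
      std::vector<std::string> result(length);
      for (int i = 0; i < number_of_holes; ++i) result[i] = "<the_hole>";
      for (int i = number_of_holes; i < length; ++i) result[i] = frame_params[i];
      return result;
    }

    int main() {
      auto r = NewArgumentsElements({"a", "b", "c"}, 2);
      assert(r[0] == "<the_hole>" && r[1] == "<the_hole>" && r[2] == "c");
    }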
@@ -262,7 +294,60 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
}
}
- void InsertToStoreBufferAndGoto(Node* isolate, Node* slot, Label* next) {
+ Node* ShouldSkipFPRegs(Node* mode) {
+ return WordEqual(mode, SmiConstant(kDontSaveFPRegs));
+ }
+
+ Node* ShouldEmitRememberSet(Node* remembered_set) {
+ return WordEqual(remembered_set, SmiConstant(EMIT_REMEMBERED_SET));
+ }
+
+ void CallCFunction1WithCallerSavedRegistersMode(MachineType return_type,
+ MachineType arg0_type,
+ Node* function, Node* arg0,
+ Node* mode, Label* next) {
+ Label dont_save_fp(this), save_fp(this);
+ Branch(ShouldSkipFPRegs(mode), &dont_save_fp, &save_fp);
+ BIND(&dont_save_fp);
+ {
+ CallCFunction1WithCallerSavedRegisters(return_type, arg0_type, function,
+ arg0, kDontSaveFPRegs);
+ Goto(next);
+ }
+
+ BIND(&save_fp);
+ {
+ CallCFunction1WithCallerSavedRegisters(return_type, arg0_type, function,
+ arg0, kSaveFPRegs);
+ Goto(next);
+ }
+ }
+
+ void CallCFunction3WithCallerSavedRegistersMode(
+ MachineType return_type, MachineType arg0_type, MachineType arg1_type,
+ MachineType arg2_type, Node* function, Node* arg0, Node* arg1, Node* arg2,
+ Node* mode, Label* next) {
+ Label dont_save_fp(this), save_fp(this);
+ Branch(ShouldSkipFPRegs(mode), &dont_save_fp, &save_fp);
+ BIND(&dont_save_fp);
+ {
+ CallCFunction3WithCallerSavedRegisters(return_type, arg0_type, arg1_type,
+ arg2_type, function, arg0, arg1,
+ arg2, kDontSaveFPRegs);
+ Goto(next);
+ }
+
+ BIND(&save_fp);
+ {
+ CallCFunction3WithCallerSavedRegisters(return_type, arg0_type, arg1_type,
+ arg2_type, function, arg0, arg1,
+ arg2, kSaveFPRegs);
+ Goto(next);
+ }
+ }
+
+ void InsertToStoreBufferAndGoto(Node* isolate, Node* slot, Node* mode,
+ Label* next) {
Node* store_buffer_top_addr =
ExternalConstant(ExternalReference::store_buffer_top(this->isolate()));
Node* store_buffer_top =
@@ -284,9 +369,9 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
{
Node* function = ExternalConstant(
ExternalReference::store_buffer_overflow_function(this->isolate()));
- CallCFunction1WithCallerSavedRegisters(
- MachineType::Int32(), MachineType::Pointer(), function, isolate);
- Goto(next);
+ CallCFunction1WithCallerSavedRegistersMode(MachineType::Int32(),
+ MachineType::Pointer(),
+ function, isolate, mode, next);
}
}
};
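
The two *Mode helpers are a small runtime-to-compile-time dispatch: fp_mode arrives as a Smi flag, so each helper branches once and calls the variant whose register-saving behavior was fixed when the stub was generated. The shape, in ordinary C++:

    #include <cassert>

    enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };

    // Branch once on the runtime flag; each arm invokes a callee whose
    // save/restore policy is a compile-time property, as in the helpers above.
    template <typename Fn>
    void CallWithMode(SaveFPRegsMode mode, Fn fn) {
      if (mode == kDontSaveFPRegs) {
        fn(kDontSaveFPRegs);  // variant compiled without FP spills
      } else {
        fn(kSaveFPRegs);      // variant that saves/restores FP registers
      }
    }

    int main() {
      int calls = 0;
      CallWithMode(kSaveFPRegs, [&](SaveFPRegsMode m) {
        ++calls;
        assert(m == kSaveFPRegs);
      });
      assert(calls == 1);
    }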
@@ -295,42 +380,51 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
Node* object = BitcastTaggedToWord(Parameter(Descriptor::kObject));
Node* slot = Parameter(Descriptor::kSlot);
Node* isolate = Parameter(Descriptor::kIsolate);
- Node* value;
+ Node* remembered_set = Parameter(Descriptor::kRememberedSet);
+ Node* fp_mode = Parameter(Descriptor::kFPMode);
- Label test_old_to_new_flags(this);
- Label store_buffer_exit(this), store_buffer_incremental_wb(this);
+ Node* value = Load(MachineType::Pointer(), slot);
+
+ Label generational_wb(this);
Label incremental_wb(this);
Label exit(this);
- // When incremental marking is not on, we skip cross generation pointer
- // checking here, because there are checks for
- // `kPointersFromHereAreInterestingMask` and
- // `kPointersToHereAreInterestingMask` in
- // `src/compiler/<arch>/code-generator-<arch>.cc` before calling this stub,
- // which serves as the cross generation checking.
- Branch(IsMarking(), &test_old_to_new_flags, &store_buffer_exit);
+ Branch(ShouldEmitRememberSet(remembered_set), &generational_wb,
+ &incremental_wb);
- BIND(&test_old_to_new_flags);
+ BIND(&generational_wb);
{
- value = Load(MachineType::Pointer(), slot);
- // TODO(albertnetymk): Try to cache the page flag for value and object,
- // instead of calling IsPageFlagSet each time.
- Node* value_in_new_space =
- IsPageFlagSet(value, MemoryChunk::kIsInNewSpaceMask);
- GotoIfNot(value_in_new_space, &incremental_wb);
-
- Node* object_in_new_space =
- IsPageFlagSet(object, MemoryChunk::kIsInNewSpaceMask);
- GotoIf(object_in_new_space, &incremental_wb);
-
- Goto(&store_buffer_incremental_wb);
- }
+ Label test_old_to_new_flags(this);
+ Label store_buffer_exit(this), store_buffer_incremental_wb(this);
+ // When incremental marking is not on, we skip cross generation pointer
+ // checking here, because there are checks for
+ // `kPointersFromHereAreInterestingMask` and
+ // `kPointersToHereAreInterestingMask` in
+ // `src/compiler/<arch>/code-generator-<arch>.cc` before calling this stub,
+ // which serves as the cross generation checking.
+ Branch(IsMarking(), &test_old_to_new_flags, &store_buffer_exit);
+
+ BIND(&test_old_to_new_flags);
+ {
+ // TODO(albertnetymk): Try to cache the page flag for value and object,
+ // instead of calling IsPageFlagSet each time.
+ Node* value_in_new_space =
+ IsPageFlagSet(value, MemoryChunk::kIsInNewSpaceMask);
+ GotoIfNot(value_in_new_space, &incremental_wb);
- BIND(&store_buffer_exit);
- { InsertToStoreBufferAndGoto(isolate, slot, &exit); }
+ Node* object_in_new_space =
+ IsPageFlagSet(object, MemoryChunk::kIsInNewSpaceMask);
+ GotoIf(object_in_new_space, &incremental_wb);
- BIND(&store_buffer_incremental_wb);
- { InsertToStoreBufferAndGoto(isolate, slot, &incremental_wb); }
+ Goto(&store_buffer_incremental_wb);
+ }
+
+ BIND(&store_buffer_exit);
+ { InsertToStoreBufferAndGoto(isolate, slot, fp_mode, &exit); }
+
+ BIND(&store_buffer_incremental_wb);
+ { InsertToStoreBufferAndGoto(isolate, slot, fp_mode, &incremental_wb); }
+ }
BIND(&incremental_wb);
{
@@ -359,10 +453,10 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
Node* function = ExternalConstant(
ExternalReference::incremental_marking_record_write_function(
this->isolate()));
- CallCFunction3WithCallerSavedRegisters(
+ CallCFunction3WithCallerSavedRegistersMode(
MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(),
- MachineType::Pointer(), function, object, slot, isolate);
- Goto(&exit);
+ MachineType::Pointer(), function, object, slot, isolate, fp_mode,
+ &exit);
}
}
@@ -491,5 +585,38 @@ TF_BUILTIN(DeleteProperty, DeletePropertyBaseAssembler) {
}
}
+TF_BUILTIN(ForInEnumerate, CodeStubAssembler) {
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* context = Parameter(Descriptor::kContext);
+
+ Label if_empty(this), if_runtime(this, Label::kDeferred);
+ Node* receiver_map = CheckEnumCache(receiver, &if_empty, &if_runtime);
+ Return(receiver_map);
+
+ BIND(&if_empty);
+ Return(EmptyFixedArrayConstant());
+
+ BIND(&if_runtime);
+ TailCallRuntime(Runtime::kForInEnumerate, context, receiver);
+}
+
+TF_BUILTIN(ForInFilter, CodeStubAssembler) {
+ Node* key = Parameter(Descriptor::kKey);
+ Node* object = Parameter(Descriptor::kObject);
+ Node* context = Parameter(Descriptor::kContext);
+
+ CSA_ASSERT(this, IsString(key));
+
+ Label if_true(this), if_false(this);
+ Node* result = HasProperty(object, key, context, kForInHasProperty);
+ Branch(IsTrue(result), &if_true, &if_false);
+
+ BIND(&if_true);
+ Return(key);
+
+ BIND(&if_false);
+ Return(UndefinedConstant());
+}
+
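
ForInFilter's contract, restated: a key enumerated from the cache is yielded only if HasProperty still confirms it (properties can be deleted mid-loop); otherwise undefined is returned and the iteration skips the slot. A flat-set model:

    #include <cassert>
    #include <optional>
    #include <set>
    #include <string>

    // The set stands in for the object plus its prototype chain.
    std::optional<std::string> ForInFilter(const std::string& key,
                                           const std::set<std::string>& props) {
      if (props.count(key)) return key;
      return std::nullopt;  // UndefinedConstant()
    }

    int main() {
      std::set<std::string> props{"x"};
      assert(ForInFilter("x", props).has_value());
      assert(!ForInFilter("deleted", props).has_value());
    }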
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-intl-gen.cc b/deps/v8/src/builtins/builtins-intl-gen.cc
index 15db8f6326..e9c90f5b31 100644
--- a/deps/v8/src/builtins/builtins-intl-gen.cc
+++ b/deps/v8/src/builtins/builtins-intl-gen.cc
@@ -8,6 +8,7 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/code-stub-assembler.h"
+#include "src/zone/zone-list-inl.h" // TODO(mstarzinger): Temporary cycle breaker.
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc
index b3ad156158..79dc039b8b 100644
--- a/deps/v8/src/builtins/builtins-intl.cc
+++ b/deps/v8/src/builtins/builtins-intl.cc
@@ -351,7 +351,7 @@ BUILTIN(NumberFormatPrototypeFormatToParts) {
}
Handle<Object> x;
- if (args.length() >= 1) {
+ if (args.length() >= 2) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x,
Object::ToNumber(args.at(1)));
} else {
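
The off-by-one fix matters because BuiltinArguments counts the receiver, so args.length() is always at least 1; a user-supplied value exists only when it is at least 2. Observably (assuming the else branch handles the no-argument case):

    const nf = new Intl.NumberFormat('en');
    nf.formatToParts(12345.6); // two builtin args: receiver plus the number
    nf.formatToParts();        // one builtin arg: now takes the else branch
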
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
index d60cfb7128..344aee3786 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -4,6 +4,8 @@
#include "src/builtins/builtins-iterator-gen.h"
+#include "src/factory-inl.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-math-gen.cc b/deps/v8/src/builtins/builtins-math-gen.cc
index a82d3917c8..3e22a138eb 100644
--- a/deps/v8/src/builtins/builtins-math-gen.cc
+++ b/deps/v8/src/builtins/builtins-math-gen.cc
@@ -6,6 +6,7 @@
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
+#include "src/zone/zone-list-inl.h" // TODO(mstarzinger): Temporary cycle breaker.
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-number-gen.cc b/deps/v8/src/builtins/builtins-number-gen.cc
index 9a1484708f..3988204936 100644
--- a/deps/v8/src/builtins/builtins-number-gen.cc
+++ b/deps/v8/src/builtins/builtins-number-gen.cc
@@ -57,7 +57,7 @@ class NumberBuiltinsAssembler : public CodeStubAssembler {
template <typename Descriptor>
void BinaryOp(Label* smis, Variable* var_left, Variable* var_right,
Label* doubles, Variable* var_left_double,
- Variable* var_right_double);
+ Variable* var_right_double, Label* bigints);
};
// ES6 #sec-number.isfinite
@@ -84,6 +84,11 @@ TF_BUILTIN(NumberIsFinite, CodeStubAssembler) {
Return(BooleanConstant(false));
}
+TF_BUILTIN(AllocateHeapNumber, CodeStubAssembler) {
+ Node* result = AllocateHeapNumber();
+ Return(result);
+}
+
// ES6 #sec-number.isinteger
TF_BUILTIN(NumberIsInteger, CodeStubAssembler) {
Node* number = Parameter(Descriptor::kNumber);
@@ -401,7 +406,8 @@ TF_BUILTIN(Add, AddStubAssembler) {
Variable* loop_vars[2] = {&var_left, &var_right};
Label loop(this, 2, loop_vars),
string_add_convert_left(this, Label::kDeferred),
- string_add_convert_right(this, Label::kDeferred);
+ string_add_convert_right(this, Label::kDeferred),
+ do_bigint_add(this, Label::kDeferred);
Goto(&loop);
BIND(&loop);
{
@@ -451,6 +457,7 @@ TF_BUILTIN(Add, AddStubAssembler) {
Node* right_instance_type = LoadMapInstanceType(right_map);
GotoIf(IsStringInstanceType(right_instance_type),
&string_add_convert_left);
+ GotoIf(IsBigIntInstanceType(right_instance_type), &do_bigint_add);
ConvertAndLoop(&var_right, right_instance_type, &loop, context);
}
} // if_right_heapobject
@@ -477,6 +484,7 @@ TF_BUILTIN(Add, AddStubAssembler) {
Node* left_instance_type = LoadMapInstanceType(left_map);
GotoIf(IsStringInstanceType(left_instance_type),
&string_add_convert_right);
+ GotoIf(IsBigIntInstanceType(left_instance_type), &do_bigint_add);
// {left} is neither a Number nor a String, and {right} is a Smi.
ConvertAndLoop(&var_left, left_instance_type, &loop, context);
}
@@ -504,6 +512,7 @@ TF_BUILTIN(Add, AddStubAssembler) {
Node* right_instance_type = LoadMapInstanceType(right_map);
GotoIf(IsStringInstanceType(right_instance_type),
&string_add_convert_left);
+ GotoIf(IsBigIntInstanceType(right_instance_type), &do_bigint_add);
// {left} is a HeapNumber, {right} is neither Number nor String.
ConvertAndLoop(&var_right, right_instance_type, &loop, context);
}
@@ -517,6 +526,8 @@ TF_BUILTIN(Add, AddStubAssembler) {
Node* right_instance_type = LoadMapInstanceType(right_map);
GotoIf(IsStringInstanceType(right_instance_type),
&string_add_convert_left);
+ GotoIf(IsBigIntInstanceType(left_instance_type), &do_bigint_add);
+ GotoIf(IsBigIntInstanceType(right_instance_type), &do_bigint_add);
Label if_left_not_receiver(this, Label::kDeferred);
Label if_right_not_receiver(this, Label::kDeferred);
GotoIfNot(IsJSReceiverInstanceType(left_instance_type),
@@ -554,6 +565,12 @@ TF_BUILTIN(Add, AddStubAssembler) {
Return(CallStub(callable, context, var_left.value(), var_right.value()));
}
+ BIND(&do_bigint_add);
+ {
+ Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(),
+ var_right.value(), SmiConstant(Token::ADD)));
+ }
+
BIND(&do_double_add);
{
Node* value = Float64Add(var_left_double.value(), var_right_double.value());
@@ -565,7 +582,8 @@ template <typename Descriptor>
void NumberBuiltinsAssembler::BinaryOp(Label* smis, Variable* var_left,
Variable* var_right, Label* doubles,
Variable* var_left_double,
- Variable* var_right_double) {
+ Variable* var_right_double,
+ Label* bigints) {
DCHECK(var_left->rep() == MachineRepresentation::kTagged);
DCHECK(var_right->rep() == MachineRepresentation::kTagged);
@@ -610,6 +628,8 @@ void NumberBuiltinsAssembler::BinaryOp(Label* smis, Variable* var_left,
BIND(&left_not_number);
{
+ GotoIf(IsBigInt(var_left->value()), bigints);
+ // TODO(jkummerow): Here and below, this should call NonNumericToNumeric.
var_left->Bind(
CallBuiltin(Builtins::kNonNumberToNumber, context, var_left->value()));
Goto(&loop);
@@ -617,6 +637,7 @@ void NumberBuiltinsAssembler::BinaryOp(Label* smis, Variable* var_left,
BIND(&right_not_number);
{
+ GotoIf(IsBigInt(var_right->value()), bigints);
var_right->Bind(
CallBuiltin(Builtins::kNonNumberToNumber, context, var_right->value()));
Goto(&loop);
@@ -628,10 +649,10 @@ TF_BUILTIN(Subtract, NumberBuiltinsAssembler) {
VARIABLE(var_right, MachineRepresentation::kTagged);
VARIABLE(var_left_double, MachineRepresentation::kFloat64);
VARIABLE(var_right_double, MachineRepresentation::kFloat64);
- Label do_smi_sub(this), do_double_sub(this);
+ Label do_smi_sub(this), do_double_sub(this), do_bigint_sub(this);
BinaryOp<Descriptor>(&do_smi_sub, &var_left, &var_right, &do_double_sub,
- &var_left_double, &var_right_double);
+ &var_left_double, &var_right_double, &do_bigint_sub);
BIND(&do_smi_sub);
{
@@ -658,6 +679,13 @@ TF_BUILTIN(Subtract, NumberBuiltinsAssembler) {
Node* value = Float64Sub(var_left_double.value(), var_right_double.value());
Return(AllocateHeapNumberWithValue(value));
}
+
+ BIND(&do_bigint_sub);
+ {
+ Node* context = Parameter(Descriptor::kContext);
+ Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(),
+ var_right.value(), SmiConstant(Token::SUB)));
+ }
}
TF_BUILTIN(Multiply, NumberBuiltinsAssembler) {
@@ -665,10 +693,10 @@ TF_BUILTIN(Multiply, NumberBuiltinsAssembler) {
VARIABLE(var_right, MachineRepresentation::kTagged);
VARIABLE(var_left_double, MachineRepresentation::kFloat64);
VARIABLE(var_right_double, MachineRepresentation::kFloat64);
- Label do_smi_mul(this), do_double_mul(this);
+ Label do_smi_mul(this), do_double_mul(this), do_bigint_mul(this);
BinaryOp<Descriptor>(&do_smi_mul, &var_left, &var_right, &do_double_mul,
- &var_left_double, &var_right_double);
+ &var_left_double, &var_right_double, &do_bigint_mul);
BIND(&do_smi_mul);
// The result is not necessarily a smi, in case of overflow.
@@ -677,6 +705,13 @@ TF_BUILTIN(Multiply, NumberBuiltinsAssembler) {
BIND(&do_double_mul);
Node* value = Float64Mul(var_left_double.value(), var_right_double.value());
Return(AllocateHeapNumberWithValue(value));
+
+ BIND(&do_bigint_mul);
+ {
+ Node* context = Parameter(Descriptor::kContext);
+ Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(),
+ var_right.value(), SmiConstant(Token::MUL)));
+ }
}
TF_BUILTIN(Divide, NumberBuiltinsAssembler) {
@@ -684,10 +719,10 @@ TF_BUILTIN(Divide, NumberBuiltinsAssembler) {
VARIABLE(var_right, MachineRepresentation::kTagged);
VARIABLE(var_left_double, MachineRepresentation::kFloat64);
VARIABLE(var_right_double, MachineRepresentation::kFloat64);
- Label do_smi_div(this), do_double_div(this);
+ Label do_smi_div(this), do_double_div(this), do_bigint_div(this);
BinaryOp<Descriptor>(&do_smi_div, &var_left, &var_right, &do_double_div,
- &var_left_double, &var_right_double);
+ &var_left_double, &var_right_double, &do_bigint_div);
BIND(&do_smi_div);
{
@@ -754,6 +789,13 @@ TF_BUILTIN(Divide, NumberBuiltinsAssembler) {
Node* value = Float64Div(var_left_double.value(), var_right_double.value());
Return(AllocateHeapNumberWithValue(value));
}
+
+ BIND(&do_bigint_div);
+ {
+ Node* context = Parameter(Descriptor::kContext);
+ Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(),
+ var_right.value(), SmiConstant(Token::DIV)));
+ }
}
TF_BUILTIN(Modulus, NumberBuiltinsAssembler) {
@@ -761,10 +803,10 @@ TF_BUILTIN(Modulus, NumberBuiltinsAssembler) {
VARIABLE(var_right, MachineRepresentation::kTagged);
VARIABLE(var_left_double, MachineRepresentation::kFloat64);
VARIABLE(var_right_double, MachineRepresentation::kFloat64);
- Label do_smi_mod(this), do_double_mod(this);
+ Label do_smi_mod(this), do_double_mod(this), do_bigint_mod(this);
BinaryOp<Descriptor>(&do_smi_mod, &var_left, &var_right, &do_double_mod,
- &var_left_double, &var_right_double);
+ &var_left_double, &var_right_double, &do_bigint_mod);
BIND(&do_smi_mod);
Return(SmiMod(var_left.value(), var_right.value()));
@@ -772,6 +814,13 @@ TF_BUILTIN(Modulus, NumberBuiltinsAssembler) {
BIND(&do_double_mod);
Node* value = Float64Mod(var_left_double.value(), var_right_double.value());
Return(AllocateHeapNumberWithValue(value));
+
+ BIND(&do_bigint_mod);
+ {
+ Node* context = Parameter(Descriptor::kContext);
+ Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(),
+ var_right.value(), SmiConstant(Token::MOD)));
+ }
}
TF_BUILTIN(ShiftLeft, NumberBuiltinsAssembler) {
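
Each arithmetic builtin above gains a deferred do_bigint_* label that funnels BigInt operands into Runtime::kBigIntBinaryOp with the matching token. At the language level:

    1n + 2n; // 3n   -> Token::ADD
    7n - 3n; // 4n   -> Token::SUB
    6n * 7n; // 42n  -> Token::MUL
    7n / 2n; // 3n   -> Token::DIV (BigInt division truncates)
    7n % 3n; // 1n   -> Token::MOD
    // 1n + 1 throws a TypeError: BigInt and Number operands cannot mix.
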
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index 18d2434b88..b1af0cf8ab 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -5,6 +5,9 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
+#include "src/factory-inl.h"
+#include "src/objects/property-descriptor-object.h"
+#include "src/objects/shared-function-info.h"
namespace v8 {
namespace internal {
@@ -21,6 +24,9 @@ class ObjectBuiltinsAssembler : public CodeStubAssembler {
protected:
void ReturnToStringFormat(Node* context, Node* string);
+ void AddToDictionaryIf(Node* condition, Node* name_dictionary,
+ Handle<Name> name, Node* value, Label* bailout);
+ Node* FromPropertyDescriptor(Node* context, Node* desc);
};
void ObjectBuiltinsAssembler::ReturnToStringFormat(Node* context,
@@ -111,8 +117,8 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
VARIABLE(var_length, MachineRepresentation::kTagged);
VARIABLE(var_elements, MachineRepresentation::kTagged);
- Label if_empty(this, Label::kDeferred), if_fast(this),
- if_slow(this, Label::kDeferred), if_join(this);
+ Label if_empty(this, Label::kDeferred), if_empty_elements(this),
+ if_fast(this), if_slow(this, Label::kDeferred), if_join(this);
// Check if the {object} has a usable enum cache.
GotoIf(TaggedIsSmi(object), &if_slow);
@@ -127,19 +133,24 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
// Ensure that the {object} doesn't have any elements.
CSA_ASSERT(this, IsJSObjectMap(object_map));
Node* object_elements = LoadObjectField(object, JSObject::kElementsOffset);
- GotoIfNot(IsEmptyFixedArray(object_elements), &if_slow);
+ GotoIf(IsEmptyFixedArray(object_elements), &if_empty_elements);
+ Branch(IsEmptySlowElementDictionary(object_elements), &if_empty_elements,
+ &if_slow);
+
+ // Check whether there are enumerable properties.
+ BIND(&if_empty_elements);
Branch(WordEqual(object_enum_length, IntPtrConstant(0)), &if_empty, &if_fast);
BIND(&if_fast);
{
// The {object} has a usable enum cache, use that.
Node* object_descriptors = LoadMapDescriptors(object_map);
- Node* object_enum_cache_bridge = LoadObjectField(
- object_descriptors, DescriptorArray::kEnumCacheBridgeOffset);
- Node* object_enum_cache = LoadObjectField(
- object_enum_cache_bridge, DescriptorArray::kEnumCacheBridgeCacheOffset);
+ Node* object_enum_cache =
+ LoadObjectField(object_descriptors, DescriptorArray::kEnumCacheOffset);
+ Node* object_enum_keys =
+ LoadObjectField(object_enum_cache, EnumCache::kKeysOffset);
- // Allocate a JSArray and copy the elements from the {object_enum_cache}.
+ // Allocate a JSArray and copy the elements from the {object_enum_keys}.
Node* array = nullptr;
Node* elements = nullptr;
Node* native_context = LoadNativeContext(context);
@@ -148,7 +159,7 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
PACKED_ELEMENTS, array_map, array_length, nullptr, object_enum_length,
INTPTR_PARAMETERS);
- CopyFixedArrayElements(PACKED_ELEMENTS, object_enum_cache, elements,
+ CopyFixedArrayElements(PACKED_ELEMENTS, object_enum_keys, elements,
object_enum_length, SKIP_WRITE_BARRIER);
Return(array);
}
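
The extra if_empty_elements check lets objects whose elements backing store is the (empty) slow element dictionary keep the enum-cache fast path instead of falling into if_slow. A plausible trigger, assuming element deletion switches the backing store to dictionary mode:

    const o: Record<string | number, number> = { a: 1, b: 2 };
    o[0] = 0;
    delete o[0];      // elements may now be an empty dictionary
    Object.keys(o);   // ['a', 'b'], still served from the enum cache
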
@@ -176,7 +187,7 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
Node* native_context = LoadNativeContext(context);
Node* array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
Node* array = AllocateUninitializedJSArrayWithoutElements(
- PACKED_ELEMENTS, array_map, var_length.value(), nullptr);
+ array_map, var_length.value(), nullptr);
StoreObjectFieldNoWriteBarrier(array, JSArray::kElementsOffset,
var_elements.value());
Return(array);
@@ -587,6 +598,21 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
}
}
+// ES #sec-object.is
+TF_BUILTIN(ObjectIs, ObjectBuiltinsAssembler) {
+ Node* const left = Parameter(Descriptor::kLeft);
+ Node* const right = Parameter(Descriptor::kRight);
+
+ Label return_true(this), return_false(this);
+ BranchIfSameValue(left, right, &return_true, &return_false);
+
+ BIND(&return_true);
+ Return(TrueConstant());
+
+ BIND(&return_false);
+ Return(FalseConstant());
+}
+
TF_BUILTIN(CreateIterResultObject, ObjectBuiltinsAssembler) {
Node* const value = Parameter(Descriptor::kValue);
Node* const done = Parameter(Descriptor::kDone);
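
Object.is moves from a C++ builtin (deleted further below in builtins-object.cc) to a CSA builtin built on BranchIfSameValue. Its semantics differ from === only at NaN and signed zero:

    Object.is(NaN, NaN); // true  (NaN === NaN is false)
    Object.is(0, -0);    // false (0 === -0 is true)
    Object.is('x', 'x'); // true, same as ===
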
@@ -682,5 +708,181 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
}
}
+// ES6 section 19.1.2.7 Object.getOwnPropertyDescriptor ( O, P )
+TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
+ Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ CSA_ASSERT(this, WordEqual(Parameter(BuiltinDescriptor::kNewTarget),
+ UndefinedConstant()));
+
+ CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
+ Node* obj = args.GetOptionalArgumentValue(0);
+ Node* key = args.GetOptionalArgumentValue(1);
+
+ // 1. Let obj be ? ToObject(O).
+ Node* object = CallBuiltin(Builtins::kToObject, context, obj);
+
+ // 2. Let key be ? ToPropertyKey(P).
+ Node* name = ToName(context, key);
+
+ // 3. Let desc be ? obj.[[GetOwnProperty]](key).
+ Node* desc =
+ CallRuntime(Runtime::kGetOwnPropertyDescriptor, context, object, name);
+
+ Label return_undefined(this, Label::kDeferred);
+ GotoIf(IsUndefined(desc), &return_undefined);
+
+ CSA_ASSERT(this, IsFixedArray(desc));
+
+ // 4. Return FromPropertyDescriptor(desc).
+ args.PopAndReturn(FromPropertyDescriptor(context, desc));
+
+ BIND(&return_undefined);
+ args.PopAndReturn(UndefinedConstant());
+}
+
+void ObjectBuiltinsAssembler::AddToDictionaryIf(Node* condition,
+ Node* name_dictionary,
+ Handle<Name> name, Node* value,
+ Label* bailout) {
+ Label done(this);
+ GotoIfNot(condition, &done);
+
+ Add<NameDictionary>(name_dictionary, HeapConstant(name), value, bailout);
+ Goto(&done);
+
+ BIND(&done);
+}
+
+Node* ObjectBuiltinsAssembler::FromPropertyDescriptor(Node* context,
+ Node* desc) {
+ VARIABLE(js_descriptor, MachineRepresentation::kTagged);
+
+ Node* flags = LoadAndUntagToWord32ObjectField(
+ desc, PropertyDescriptorObject::kFlagsOffset);
+
+ Node* has_flags =
+ Word32And(flags, Int32Constant(PropertyDescriptorObject::kHasMask));
+
+ Label if_accessor_desc(this), if_data_desc(this), if_generic_desc(this),
+ return_desc(this);
+ GotoIf(
+ Word32Equal(has_flags,
+ Int32Constant(
+ PropertyDescriptorObject::kRegularAccessorPropertyBits)),
+ &if_accessor_desc);
+ GotoIf(Word32Equal(
+ has_flags,
+ Int32Constant(PropertyDescriptorObject::kRegularDataPropertyBits)),
+ &if_data_desc);
+ Goto(&if_generic_desc);
+
+ BIND(&if_accessor_desc);
+ {
+ Node* native_context = LoadNativeContext(context);
+ Node* map = LoadContextElement(
+ native_context, Context::ACCESSOR_PROPERTY_DESCRIPTOR_MAP_INDEX);
+ Node* js_desc = AllocateJSObjectFromMap(map);
+
+ StoreObjectFieldNoWriteBarrier(
+ js_desc, JSAccessorPropertyDescriptor::kGetOffset,
+ LoadObjectField(desc, PropertyDescriptorObject::kGetOffset));
+ StoreObjectFieldNoWriteBarrier(
+ js_desc, JSAccessorPropertyDescriptor::kSetOffset,
+ LoadObjectField(desc, PropertyDescriptorObject::kSetOffset));
+ StoreObjectFieldNoWriteBarrier(
+ js_desc, JSAccessorPropertyDescriptor::kEnumerableOffset,
+ SelectBooleanConstant(
+ IsSetWord32<PropertyDescriptorObject::IsEnumerableBit>(flags)));
+ StoreObjectFieldNoWriteBarrier(
+ js_desc, JSAccessorPropertyDescriptor::kConfigurableOffset,
+ SelectBooleanConstant(
+ IsSetWord32<PropertyDescriptorObject::IsConfigurableBit>(flags)));
+
+ js_descriptor.Bind(js_desc);
+ Goto(&return_desc);
+ }
+
+ BIND(&if_data_desc);
+ {
+ Node* native_context = LoadNativeContext(context);
+ Node* map = LoadContextElement(native_context,
+ Context::DATA_PROPERTY_DESCRIPTOR_MAP_INDEX);
+ Node* js_desc = AllocateJSObjectFromMap(map);
+
+ StoreObjectFieldNoWriteBarrier(
+ js_desc, JSDataPropertyDescriptor::kValueOffset,
+ LoadObjectField(desc, PropertyDescriptorObject::kValueOffset));
+ StoreObjectFieldNoWriteBarrier(
+ js_desc, JSDataPropertyDescriptor::kWritableOffset,
+ SelectBooleanConstant(
+ IsSetWord32<PropertyDescriptorObject::IsWritableBit>(flags)));
+ StoreObjectFieldNoWriteBarrier(
+ js_desc, JSDataPropertyDescriptor::kEnumerableOffset,
+ SelectBooleanConstant(
+ IsSetWord32<PropertyDescriptorObject::IsEnumerableBit>(flags)));
+ StoreObjectFieldNoWriteBarrier(
+ js_desc, JSDataPropertyDescriptor::kConfigurableOffset,
+ SelectBooleanConstant(
+ IsSetWord32<PropertyDescriptorObject::IsConfigurableBit>(flags)));
+
+ js_descriptor.Bind(js_desc);
+ Goto(&return_desc);
+ }
+
+ BIND(&if_generic_desc);
+ {
+ Node* native_context = LoadNativeContext(context);
+ Node* map = LoadContextElement(
+ native_context, Context::SLOW_OBJECT_WITH_OBJECT_PROTOTYPE_MAP);
+ // We want to preallocate the slots for value, writable, get, set,
+ // enumerable and configurable - a total of six.
+ Node* properties = AllocateNameDictionary(6);
+ Node* js_desc = AllocateJSObjectFromMap(map, properties);
+
+ Label bailout(this, Label::kDeferred);
+
+ Factory* factory = isolate()->factory();
+ Node* value = LoadObjectField(desc, PropertyDescriptorObject::kValueOffset);
+ AddToDictionaryIf(IsNotTheHole(value), properties, factory->value_string(),
+ value, &bailout);
+ AddToDictionaryIf(
+ IsSetWord32<PropertyDescriptorObject::HasWritableBit>(flags),
+ properties, factory->writable_string(),
+ SelectBooleanConstant(
+ IsSetWord32<PropertyDescriptorObject::IsWritableBit>(flags)),
+ &bailout);
+
+ Node* get = LoadObjectField(desc, PropertyDescriptorObject::kGetOffset);
+ AddToDictionaryIf(IsNotTheHole(get), properties, factory->get_string(), get,
+ &bailout);
+ Node* set = LoadObjectField(desc, PropertyDescriptorObject::kSetOffset);
+ AddToDictionaryIf(IsNotTheHole(set), properties, factory->set_string(), set,
+ &bailout);
+
+ AddToDictionaryIf(
+ IsSetWord32<PropertyDescriptorObject::HasEnumerableBit>(flags),
+ properties, factory->enumerable_string(),
+ SelectBooleanConstant(
+ IsSetWord32<PropertyDescriptorObject::IsEnumerableBit>(flags)),
+ &bailout);
+ AddToDictionaryIf(
+ IsSetWord32<PropertyDescriptorObject::HasConfigurableBit>(flags),
+ properties, factory->configurable_string(),
+ SelectBooleanConstant(
+ IsSetWord32<PropertyDescriptorObject::IsConfigurableBit>(flags)),
+ &bailout);
+
+ js_descriptor.Bind(js_desc);
+ Goto(&return_desc);
+
+ BIND(&bailout);
+ CSA_ASSERT(this, Int32Constant(0));
+ Unreachable();
+ }
+
+ BIND(&return_desc);
+ return js_descriptor.value();
+}
} // namespace internal
} // namespace v8
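
FromPropertyDescriptor above picks one of three shapes: a preallocated accessor-descriptor map, a preallocated data-descriptor map, or a generic dictionary-mode object for partially populated descriptors. The two common shapes, observably:

    Object.getOwnPropertyDescriptor({ x: 1 }, 'x');
    // { value: 1, writable: true, enumerable: true, configurable: true }

    const o = {};
    Object.defineProperty(o, 'y', { get() { return 2; } });
    Object.getOwnPropertyDescriptor(o, 'y');
    // { get: [Function: get], set: undefined,
    //   enumerable: false, configurable: false }
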
diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc
index 95d2149f31..3b34834e3d 100644
--- a/deps/v8/src/builtins/builtins-object.cc
+++ b/deps/v8/src/builtins/builtins-object.cc
@@ -340,29 +340,6 @@ BUILTIN(ObjectPrototypeSetProto) {
return isolate->heap()->undefined_value();
}
-// ES6 section 19.1.2.6 Object.getOwnPropertyDescriptor ( O, P )
-BUILTIN(ObjectGetOwnPropertyDescriptor) {
- HandleScope scope(isolate);
- // 1. Let obj be ? ToObject(O).
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- Handle<JSReceiver> receiver;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
- Object::ToObject(isolate, object));
- // 2. Let key be ? ToPropertyKey(P).
- Handle<Object> property = args.atOrUndefined(isolate, 2);
- Handle<Name> key;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key,
- Object::ToName(isolate, property));
- // 3. Let desc be ? obj.[[GetOwnProperty]](key).
- PropertyDescriptor desc;
- Maybe<bool> found =
- JSReceiver::GetOwnPropertyDescriptor(isolate, receiver, key, &desc);
- MAYBE_RETURN(found, isolate->heap()->exception());
- // 4. Return FromPropertyDescriptor(desc).
- if (!found.FromJust()) return isolate->heap()->undefined_value();
- return *desc.ToObject(isolate);
-}
-
namespace {
Object* GetOwnPropertyKeys(Isolate* isolate, BuiltinArguments args,
@@ -392,15 +369,6 @@ BUILTIN(ObjectGetOwnPropertySymbols) {
return GetOwnPropertyKeys(isolate, args, SKIP_STRINGS);
}
-// ES#sec-object.is Object.is ( value1, value2 )
-BUILTIN(ObjectIs) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(3, args.length());
- Handle<Object> value1 = args.at(1);
- Handle<Object> value2 = args.at(2);
- return isolate->heap()->ToBoolean(value1->SameValue(*value2));
-}
-
// ES6 section 19.1.2.11 Object.isExtensible ( O )
BUILTIN(ObjectIsExtensible) {
HandleScope scope(isolate);
diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc
index e10cc6e8d6..0d00c8bc27 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.cc
+++ b/deps/v8/src/builtins/builtins-promise-gen.cc
@@ -109,15 +109,8 @@ Node* PromiseBuiltinsAssembler::NewPromiseCapability(Node* context,
Node* native_context = LoadNativeContext(context);
- Node* map = LoadRoot(Heap::kJSPromiseCapabilityMapRootIndex);
- Node* capability = AllocateJSObjectFromMap(map);
-
- StoreObjectFieldNoWriteBarrier(
- capability, JSPromiseCapability::kPromiseOffset, UndefinedConstant());
- StoreObjectFieldNoWriteBarrier(
- capability, JSPromiseCapability::kResolveOffset, UndefinedConstant());
- StoreObjectFieldNoWriteBarrier(capability, JSPromiseCapability::kRejectOffset,
- UndefinedConstant());
+ Node* map = LoadRoot(Heap::kPromiseCapabilityMapRootIndex);
+ Node* capability = AllocateStruct(map);
VARIABLE(var_result, MachineRepresentation::kTagged);
var_result.Bind(capability);
@@ -133,15 +126,15 @@ Node* PromiseBuiltinsAssembler::NewPromiseCapability(Node* context,
{
Node* promise = AllocateJSPromise(context);
PromiseInit(promise);
- StoreObjectField(capability, JSPromiseCapability::kPromiseOffset, promise);
+ StoreObjectField(capability, PromiseCapability::kPromiseOffset, promise);
Node* resolve = nullptr;
Node* reject = nullptr;
std::tie(resolve, reject) =
CreatePromiseResolvingFunctions(promise, debug_event, native_context);
- StoreObjectField(capability, JSPromiseCapability::kResolveOffset, resolve);
- StoreObjectField(capability, JSPromiseCapability::kRejectOffset, reject);
+ StoreObjectField(capability, PromiseCapability::kResolveOffset, resolve);
+ StoreObjectField(capability, PromiseCapability::kRejectOffset, reject);
GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &out);
CallRuntime(Runtime::kPromiseHookInit, context, promise,
@@ -165,25 +158,25 @@ Node* PromiseBuiltinsAssembler::NewPromiseCapability(Node* context,
constructor, executor);
Node* resolve =
- LoadObjectField(capability, JSPromiseCapability::kResolveOffset);
+ LoadObjectField(capability, PromiseCapability::kResolveOffset);
GotoIf(TaggedIsSmi(resolve), &if_notcallable);
GotoIfNot(IsCallableMap(LoadMap(resolve)), &if_notcallable);
Node* reject =
- LoadObjectField(capability, JSPromiseCapability::kRejectOffset);
+ LoadObjectField(capability, PromiseCapability::kRejectOffset);
GotoIf(TaggedIsSmi(reject), &if_notcallable);
GotoIfNot(IsCallableMap(LoadMap(reject)), &if_notcallable);
- StoreObjectField(capability, JSPromiseCapability::kPromiseOffset, promise);
+ StoreObjectField(capability, PromiseCapability::kPromiseOffset, promise);
Goto(&out);
BIND(&if_notcallable);
- StoreObjectField(capability, JSPromiseCapability::kPromiseOffset,
+ StoreObjectField(capability, PromiseCapability::kPromiseOffset,
UndefinedConstant());
- StoreObjectField(capability, JSPromiseCapability::kResolveOffset,
+ StoreObjectField(capability, PromiseCapability::kResolveOffset,
UndefinedConstant());
- StoreObjectField(capability, JSPromiseCapability::kRejectOffset,
+ StoreObjectField(capability, PromiseCapability::kRejectOffset,
UndefinedConstant());
ThrowTypeError(context, MessageTemplate::kPromiseNonCallable);
}
@@ -415,11 +408,11 @@ Node* PromiseBuiltinsAssembler::InternalPromiseThen(Node* context,
{
Node* const capability = NewPromiseCapability(context, constructor);
var_deferred_promise.Bind(
- LoadObjectField(capability, JSPromiseCapability::kPromiseOffset));
+ LoadObjectField(capability, PromiseCapability::kPromiseOffset));
var_deferred_on_resolve.Bind(
- LoadObjectField(capability, JSPromiseCapability::kResolveOffset));
+ LoadObjectField(capability, PromiseCapability::kResolveOffset));
var_deferred_on_reject.Bind(
- LoadObjectField(capability, JSPromiseCapability::kRejectOffset));
+ LoadObjectField(capability, PromiseCapability::kRejectOffset));
Goto(&perform_promise_then);
}
@@ -690,7 +683,8 @@ void PromiseBuiltinsAssembler::InternalResolvePromise(Node* context,
VARIABLE(var_reason, MachineRepresentation::kTagged);
VARIABLE(var_then, MachineRepresentation::kTagged);
- Label do_enqueue(this), fulfill(this), if_cycle(this, Label::kDeferred),
+ Label do_enqueue(this), fulfill(this), if_nocycle(this),
+ if_cycle(this, Label::kDeferred),
if_rejectpromise(this, Label::kDeferred), out(this);
Label cycle_check(this);
@@ -700,7 +694,8 @@ void PromiseBuiltinsAssembler::InternalResolvePromise(Node* context,
BIND(&cycle_check);
// 6. If SameValue(resolution, promise) is true, then
- GotoIf(SameValue(promise, result), &if_cycle);
+ BranchIfSameValue(promise, result, &if_cycle, &if_nocycle);
+ BIND(&if_nocycle);
// 7. If Type(resolution) is not Object, then
GotoIf(TaggedIsSmi(result), &fulfill);
@@ -964,32 +959,23 @@ void PromiseBuiltinsAssembler::InternalPromiseReject(Node* context,
void PromiseBuiltinsAssembler::InternalPromiseReject(Node* context,
Node* promise, Node* value,
bool debug_event) {
- Label fulfill(this), report_unhandledpromise(this), run_promise_hook(this);
+ Label fulfill(this), exit(this);
+ GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &fulfill);
if (debug_event) {
- GotoIfNot(IsDebugActive(), &run_promise_hook);
CallRuntime(Runtime::kDebugPromiseReject, context, promise, value);
- Goto(&run_promise_hook);
- } else {
- Goto(&run_promise_hook);
- }
-
- BIND(&run_promise_hook);
- {
- GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &report_unhandledpromise);
- CallRuntime(Runtime::kPromiseHookResolve, context, promise);
- Goto(&report_unhandledpromise);
- }
-
- BIND(&report_unhandledpromise);
- {
- GotoIf(PromiseHasHandler(promise), &fulfill);
- CallRuntime(Runtime::kReportPromiseReject, context, promise, value);
- Goto(&fulfill);
}
+ CallRuntime(Runtime::kPromiseHookResolve, context, promise);
+ Goto(&fulfill);
BIND(&fulfill);
PromiseFulfill(context, promise, value, v8::Promise::kRejected);
+
+ GotoIf(PromiseHasHandler(promise), &exit);
+ CallRuntime(Runtime::kReportPromiseReject, context, promise, value);
+ Goto(&exit);
+
+ BIND(&exit);
}
void PromiseBuiltinsAssembler::SetForwardingHandlerIfTrue(
@@ -1392,60 +1378,70 @@ TF_BUILTIN(PromiseCatch, PromiseBuiltinsAssembler) {
}
}
-TF_BUILTIN(PromiseResolve, PromiseBuiltinsAssembler) {
+TF_BUILTIN(PromiseResolveWrapper, PromiseBuiltinsAssembler) {
// 1. Let C be the this value.
Node* receiver = Parameter(Descriptor::kReceiver);
Node* value = Parameter(Descriptor::kValue);
Node* context = Parameter(Descriptor::kContext);
- Isolate* isolate = this->isolate();
// 2. If Type(C) is not Object, throw a TypeError exception.
ThrowIfNotJSReceiver(context, receiver, MessageTemplate::kCalledOnNonObject,
"PromiseResolve");
+ // 3. Return ? PromiseResolve(C, x).
+ Return(CallBuiltin(Builtins::kPromiseResolve, context, receiver, value));
+}
+
+TF_BUILTIN(PromiseResolve, PromiseBuiltinsAssembler) {
+ Node* constructor = Parameter(Descriptor::kConstructor);
+ Node* value = Parameter(Descriptor::kValue);
+ Node* context = Parameter(Descriptor::kContext);
+ Isolate* isolate = this->isolate();
+
Node* const native_context = LoadNativeContext(context);
Node* const promise_fun =
LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
- Label if_valueisnativepromise(this), if_valueisnotnativepromise(this),
- if_valueisnotpromise(this);
+ Label if_value_is_native_promise(this),
+ if_value_or_constructor_are_not_native_promise(this),
+ if_need_to_allocate(this);
- // 3.If IsPromise(x) is true, then
- GotoIf(TaggedIsSmi(value), &if_valueisnotpromise);
+ GotoIf(TaggedIsSmi(value), &if_need_to_allocate);
 // This short-circuits the constructor lookups.
- GotoIfNot(HasInstanceType(value, JS_PROMISE_TYPE), &if_valueisnotpromise);
+ GotoIfNot(HasInstanceType(value, JS_PROMISE_TYPE), &if_need_to_allocate);
// This adds a fast path as non-subclassed native promises don't have
// an observable constructor lookup.
- BranchIfFastPath(native_context, promise_fun, value, &if_valueisnativepromise,
- &if_valueisnotnativepromise);
+ BranchIfFastPath(native_context, promise_fun, value,
+ &if_value_is_native_promise,
+ &if_value_or_constructor_are_not_native_promise);
- BIND(&if_valueisnativepromise);
+ BIND(&if_value_is_native_promise);
{
- GotoIfNot(WordEqual(promise_fun, receiver), &if_valueisnotnativepromise);
+ GotoIfNot(WordEqual(promise_fun, constructor),
+ &if_value_or_constructor_are_not_native_promise);
Return(value);
}
- // At this point, value or/and receiver are not native promises, but
+ // At this point, value and/or constructor are not native promises, but
// they could be of the same subclass.
- BIND(&if_valueisnotnativepromise);
+ BIND(&if_value_or_constructor_are_not_native_promise);
{
- // 3.a Let xConstructor be ? Get(x, "constructor").
- // The constructor lookup is observable.
- Node* const constructor =
+ Label if_return(this);
+ Node* const xConstructor =
GetProperty(context, value, isolate->factory()->constructor_string());
+ BranchIfSameValue(xConstructor, constructor, &if_return,
+ &if_need_to_allocate);
- // 3.b If SameValue(xConstructor, C) is true, return x.
- GotoIfNot(SameValue(constructor, receiver), &if_valueisnotpromise);
-
+ BIND(&if_return);
Return(value);
}
- BIND(&if_valueisnotpromise);
+ BIND(&if_need_to_allocate);
{
Label if_nativepromise(this), if_notnativepromise(this);
- Branch(WordEqual(promise_fun, receiver), &if_nativepromise,
+ Branch(WordEqual(promise_fun, constructor), &if_nativepromise,
&if_notnativepromise);
// This adds a fast path for native promises that don't need to
@@ -1459,18 +1455,15 @@ TF_BUILTIN(PromiseResolve, PromiseBuiltinsAssembler) {
BIND(&if_notnativepromise);
{
- // 4. Let promiseCapability be ? NewPromiseCapability(C).
- Node* const capability = NewPromiseCapability(context, receiver);
+ Node* const capability = NewPromiseCapability(context, constructor);
- // 5. Perform ? Call(promiseCapability.[[Resolve]], undefined, « x »).
Callable call_callable = CodeFactory::Call(isolate);
Node* const resolve =
- LoadObjectField(capability, JSPromiseCapability::kResolveOffset);
+ LoadObjectField(capability, PromiseCapability::kResolveOffset);
CallJS(call_callable, context, resolve, UndefinedConstant(), value);
- // 6. Return promiseCapability.[[Promise]].
Node* const result =
- LoadObjectField(capability, JSPromiseCapability::kPromiseOffset);
+ LoadObjectField(capability, PromiseCapability::kPromiseOffset);
Return(result);
}
}
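
Splitting PromiseResolve out of the prototype wrapper makes the constructor identity check reusable from other builtins; when the value is already a promise whose constructor matches C, it is returned unchanged:

    const p = Promise.resolve(42);
    Promise.resolve(p) === p; // true: the native fast path returns x as-is

    class MyPromise<T> extends Promise<T> {}
    MyPromise.resolve(p) === p; // false: constructor mismatch, so
                                // a new promise is allocated
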
@@ -1486,16 +1479,16 @@ TF_BUILTIN(PromiseGetCapabilitiesExecutor, PromiseBuiltinsAssembler) {
Label if_alreadyinvoked(this, Label::kDeferred);
GotoIf(WordNotEqual(
- LoadObjectField(capability, JSPromiseCapability::kResolveOffset),
+ LoadObjectField(capability, PromiseCapability::kResolveOffset),
UndefinedConstant()),
&if_alreadyinvoked);
GotoIf(WordNotEqual(
- LoadObjectField(capability, JSPromiseCapability::kRejectOffset),
+ LoadObjectField(capability, PromiseCapability::kRejectOffset),
UndefinedConstant()),
&if_alreadyinvoked);
- StoreObjectField(capability, JSPromiseCapability::kResolveOffset, resolve);
- StoreObjectField(capability, JSPromiseCapability::kRejectOffset, reject);
+ StoreObjectField(capability, PromiseCapability::kResolveOffset, resolve);
+ StoreObjectField(capability, PromiseCapability::kRejectOffset, reject);
Return(UndefinedConstant());
@@ -1547,13 +1540,13 @@ TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) {
// 4. Perform ? Call(promiseCapability.[[Reject]], undefined, « r »).
Node* const reject =
- LoadObjectField(capability, JSPromiseCapability::kRejectOffset);
+ LoadObjectField(capability, PromiseCapability::kRejectOffset);
Callable call_callable = CodeFactory::Call(isolate());
CallJS(call_callable, context, reject, UndefinedConstant(), reason);
// 5. Return promiseCapability.[[Promise]].
Node* const promise =
- LoadObjectField(capability, JSPromiseCapability::kPromiseOffset);
+ LoadObjectField(capability, PromiseCapability::kPromiseOffset);
Return(promise);
}
}
@@ -1569,11 +1562,13 @@ TF_BUILTIN(InternalPromiseReject, PromiseBuiltinsAssembler) {
}
std::pair<Node*, Node*> PromiseBuiltinsAssembler::CreatePromiseFinallyFunctions(
- Node* on_finally, Node* native_context) {
+ Node* on_finally, Node* constructor, Node* native_context) {
Node* const promise_context =
CreatePromiseContext(native_context, kPromiseFinallyContextLength);
StoreContextElementNoWriteBarrier(promise_context, kOnFinallySlot,
on_finally);
+ StoreContextElementNoWriteBarrier(promise_context, kConstructorSlot,
+ constructor);
Node* const map = LoadContextElement(
native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
Node* const then_finally_info = LoadContextElement(
@@ -1614,30 +1609,37 @@ TF_BUILTIN(PromiseThenFinally, PromiseBuiltinsAssembler) {
Node* const value = Parameter(Descriptor::kValue);
Node* const context = Parameter(Descriptor::kContext);
+ // 1. Let onFinally be F.[[OnFinally]].
Node* const on_finally = LoadContextElement(context, kOnFinallySlot);
- // 2.a Let result be ? Call(onFinally, undefined).
+ // 2. Assert: IsCallable(onFinally) is true.
+ CSA_ASSERT(this, IsCallable(on_finally));
+
+ // 3. Let result be ? Call(onFinally).
Callable call_callable = CodeFactory::Call(isolate());
- Node* result =
+ Node* const result =
CallJS(call_callable, context, on_finally, UndefinedConstant());
- // 2.b Let promise be ! PromiseResolve( %Promise%, result).
- Node* const promise = AllocateAndInitJSPromise(context);
- InternalResolvePromise(context, promise, result);
+ // 4. Let C be F.[[Constructor]].
+ Node* const constructor = LoadContextElement(context, kConstructorSlot);
+
+ // 5. Assert: IsConstructor(C) is true.
+ CSA_ASSERT(this, IsConstructor(constructor));
- // 2.c Let valueThunk be equivalent to a function that returns value.
+ // 6. Let promise be ? PromiseResolve(C, result).
+ Node* const promise =
+ CallBuiltin(Builtins::kPromiseResolve, context, constructor, result);
+
+ // 7. Let valueThunk be equivalent to a function that returns value.
Node* native_context = LoadNativeContext(context);
Node* const value_thunk = CreateValueThunkFunction(value, native_context);
- // 2.d Let promiseCapability be ! NewPromiseCapability( %Promise%).
- Node* const promise_capability = AllocateAndInitJSPromise(context, promise);
-
- // 2.e Return PerformPromiseThen(promise, valueThunk, undefined,
- // promiseCapability).
- InternalPerformPromiseThen(context, promise, value_thunk, UndefinedConstant(),
- promise_capability, UndefinedConstant(),
- UndefinedConstant());
- Return(promise_capability);
+ // 8. Return ? Invoke(promise, "then", « valueThunk »).
+ Node* const promise_then =
+ GetProperty(context, promise, factory()->then_string());
+ Node* const result_promise = CallJS(call_callable, context,
+ promise_then, promise, value_thunk);
+ Return(result_promise);
}
TF_BUILTIN(PromiseThrowerFinally, PromiseBuiltinsAssembler) {
@@ -1668,30 +1670,37 @@ TF_BUILTIN(PromiseCatchFinally, PromiseBuiltinsAssembler) {
Node* const reason = Parameter(Descriptor::kReason);
Node* const context = Parameter(Descriptor::kContext);
+ // 1. Let onFinally be F.[[OnFinally]].
Node* const on_finally = LoadContextElement(context, kOnFinallySlot);
- // 2.a Let result be ? Call(onFinally, undefined).
+ // 2. Assert: IsCallable(onFinally) is true.
+ CSA_ASSERT(this, IsCallable(on_finally));
+
+ // 3. Let result be ? Call(onFinally).
Callable call_callable = CodeFactory::Call(isolate());
Node* result =
- CallJS(call_callable, context, on_finally, UndefinedConstant());
+ CallJS(call_callable, context, on_finally, UndefinedConstant());
- // 2.b Let promise be ! PromiseResolve( %Promise%, result).
- Node* const promise = AllocateAndInitJSPromise(context);
- InternalResolvePromise(context, promise, result);
+ // 4. Let C be F.[[Constructor]].
+ Node* const constructor = LoadContextElement(context, kConstructorSlot);
+
+ // 5. Assert: IsConstructor(C) is true.
+ CSA_ASSERT(this, IsConstructor(constructor));
- // 2.c Let thrower be equivalent to a function that throws reason.
+ // 6. Let promise be ? PromiseResolve(C, result).
+ Node* const promise =
+ CallBuiltin(Builtins::kPromiseResolve, context, constructor, result);
+
+ // 7. Let thrower be equivalent to a function that throws reason.
Node* native_context = LoadNativeContext(context);
Node* const thrower = CreateThrowerFunction(reason, native_context);
- // 2.d Let promiseCapability be ! NewPromiseCapability( %Promise%).
- Node* const promise_capability = AllocateAndInitJSPromise(context, promise);
-
- // 2.e Return PerformPromiseThen(promise, thrower, undefined,
- // promiseCapability).
- InternalPerformPromiseThen(context, promise, thrower, UndefinedConstant(),
- promise_capability, UndefinedConstant(),
- UndefinedConstant());
- Return(promise_capability);
+ // 8. Return ? Invoke(promise, "then", « thrower »).
+ Node* const promise_then =
+ GetProperty(context, promise, factory()->then_string());
+ Node* const result_promise = CallJS(call_callable, context,
+ promise_then, promise, thrower);
+ Return(result_promise);
}
TF_BUILTIN(PromiseFinally, PromiseBuiltinsAssembler) {
@@ -1706,26 +1715,43 @@ TF_BUILTIN(PromiseFinally, PromiseBuiltinsAssembler) {
ThrowIfNotInstanceType(context, promise, JS_PROMISE_TYPE,
"Promise.prototype.finally");
+ // 3. Let C be ? SpeciesConstructor(promise, %Promise%).
+ Node* const native_context = LoadNativeContext(context);
+ Node* const promise_fun =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ Node* const constructor = SpeciesConstructor(context, promise, promise_fun);
+
+ // 4. Assert: IsConstructor(C) is true.
+ CSA_ASSERT(this, IsConstructor(constructor));
+
VARIABLE(var_then_finally, MachineRepresentation::kTagged);
VARIABLE(var_catch_finally, MachineRepresentation::kTagged);
Label if_notcallable(this, Label::kDeferred), perform_finally(this);
- // 3. Let thenFinally be ! CreateThenFinally(onFinally).
- // 4. Let catchFinally be ! CreateCatchFinally(onFinally).
GotoIf(TaggedIsSmi(on_finally), &if_notcallable);
- Node* const on_finally_map = LoadMap(on_finally);
- GotoIfNot(IsCallableMap(on_finally_map), &if_notcallable);
-
- Node* const native_context = LoadNativeContext(context);
+ GotoIfNot(IsCallable(on_finally), &if_notcallable);
+
+ // 6. Else,
+ // a. Let thenFinally be a new built-in function object as defined
+ // in ThenFinally Function.
+ // b. Let catchFinally be a new built-in function object as
+ // defined in CatchFinally Function.
+ // c. Set thenFinally and catchFinally's [[Constructor]] internal
+ // slots to C.
+ // d. Set thenFinally and catchFinally's [[OnFinally]] internal
+ // slots to onFinally.
Node* then_finally = nullptr;
Node* catch_finally = nullptr;
std::tie(then_finally, catch_finally) =
- CreatePromiseFinallyFunctions(on_finally, native_context);
+ CreatePromiseFinallyFunctions(on_finally, constructor, native_context);
var_then_finally.Bind(then_finally);
var_catch_finally.Bind(catch_finally);
Goto(&perform_finally);
+ // 5. If IsCallable(onFinally) is not true,
+ // a. Let thenFinally be onFinally.
+ // b. Let catchFinally be onFinally.
BIND(&if_notcallable);
{
var_then_finally.Bind(on_finally);
@@ -1733,32 +1759,14 @@ TF_BUILTIN(PromiseFinally, PromiseBuiltinsAssembler) {
Goto(&perform_finally);
}
- // 5. Return PerformPromiseThen(promise, valueThunk, undefined,
- // promiseCapability).
+ // 7. Return ? Invoke(promise, "then", « thenFinally, catchFinally »).
BIND(&perform_finally);
- Label if_nativepromise(this), if_custompromise(this, Label::kDeferred);
- BranchIfFastPath(context, promise, &if_nativepromise, &if_custompromise);
-
- BIND(&if_nativepromise);
- {
- Node* deferred_promise = AllocateAndInitJSPromise(context, promise);
- InternalPerformPromiseThen(context, promise, var_then_finally.value(),
- var_catch_finally.value(), deferred_promise,
- UndefinedConstant(), UndefinedConstant());
- Return(deferred_promise);
- }
-
- BIND(&if_custompromise);
- {
- Node* const then =
- GetProperty(context, promise, isolate()->factory()->then_string());
- Callable call_callable = CodeFactory::Call(isolate());
- // 5. Return ? Invoke(promise, "then", « thenFinally, catchFinally »).
- Node* const result =
- CallJS(call_callable, context, then, promise, var_then_finally.value(),
- var_catch_finally.value());
- Return(result);
- }
+ Node* const promise_then =
+ GetProperty(context, promise, factory()->then_string());
+ Node* const result_promise =
+ CallJS(CodeFactory::Call(isolate()), context, promise_then, promise,
+ var_then_finally.value(), var_catch_finally.value());
+ Return(result_promise);
}
TF_BUILTIN(ResolveNativePromise, PromiseBuiltinsAssembler) {
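
The rewritten finally builtins follow the spec text step by step: look up the species constructor C once, stash it in the closure context (the new kConstructorSlot), and route the result through PromiseResolve(C, result) and Invoke(promise, "then", ...). Subclasses therefore survive finally:

    class MyPromise<T> extends Promise<T> {}
    const r = MyPromise.resolve(1).finally(() => { /* cleanup */ });
    r instanceof MyPromise; // true: built via the species constructor C
    // The resolved value 1 still passes through the valueThunk unchanged.
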
@@ -1810,7 +1818,7 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
// instead, recurse outwards.
SetForwardingHandlerIfTrue(
context, instrumenting,
- LoadObjectField(capability, JSPromiseCapability::kRejectOffset));
+ LoadObjectField(capability, PromiseCapability::kRejectOffset));
Node* const native_context = LoadNativeContext(context);
Node* const array_map = LoadContextElement(
@@ -1899,14 +1907,14 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
Node* const then_call = CallJS(
CodeFactory::Call(isolate()), context, then, next_promise, resolve,
- LoadObjectField(capability, JSPromiseCapability::kRejectOffset));
+ LoadObjectField(capability, PromiseCapability::kRejectOffset));
GotoIfException(then_call, &close_iterator, var_exception);
// For catch prediction, mark that rejections here are semantically
// handled by the combined Promise.
SetPromiseHandledByIfTrue(context, instrumenting, then_call, [=]() {
// Load promiseCapability.[[Promise]]
- return LoadObjectField(capability, JSPromiseCapability::kPromiseOffset);
+ return LoadObjectField(capability, PromiseCapability::kPromiseOffset);
});
// Set index to index + 1
@@ -1939,7 +1947,7 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
BIND(&resolve_promise);
Node* const resolve =
- LoadObjectField(capability, JSPromiseCapability::kResolveOffset);
+ LoadObjectField(capability, PromiseCapability::kResolveOffset);
Node* const resolve_call =
CallJS(CodeFactory::Call(isolate()), context, resolve,
UndefinedConstant(), values_array);
@@ -1951,7 +1959,7 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
}
Node* const promise =
- LoadObjectField(capability, JSPromiseCapability::kPromiseOffset);
+ LoadObjectField(capability, PromiseCapability::kPromiseOffset);
return promise;
}
@@ -2022,13 +2030,13 @@ TF_BUILTIN(PromiseAll, PromiseBuiltinsAssembler) {
// Exception must be bound to a JS value.
CSA_SLOW_ASSERT(this, IsNotTheHole(var_exception.value()));
Node* const reject =
- LoadObjectField(capability, JSPromiseCapability::kRejectOffset);
+ LoadObjectField(capability, PromiseCapability::kRejectOffset);
Callable callable = CodeFactory::Call(isolate());
CallJS(callable, context, reject, UndefinedConstant(),
var_exception.value());
Node* const promise =
- LoadObjectField(capability, JSPromiseCapability::kPromiseOffset);
+ LoadObjectField(capability, PromiseCapability::kPromiseOffset);
Return(promise);
}
}
@@ -2099,7 +2107,7 @@ TF_BUILTIN(PromiseAllResolveElementClosure, PromiseBuiltinsAssembler) {
Node* const capability =
LoadContextElement(context, kPromiseAllResolveElementCapabilitySlot);
Node* const resolve =
- LoadObjectField(capability, JSPromiseCapability::kResolveOffset);
+ LoadObjectField(capability, PromiseCapability::kResolveOffset);
CallJS(CodeFactory::Call(isolate()), context, resolve, UndefinedConstant(),
values_array);
Return(UndefinedConstant());
@@ -2126,9 +2134,9 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
Node* const capability = NewPromiseCapability(context, receiver, debug_event);
Node* const resolve =
- LoadObjectField(capability, JSPromiseCapability::kResolveOffset);
+ LoadObjectField(capability, PromiseCapability::kResolveOffset);
Node* const reject =
- LoadObjectField(capability, JSPromiseCapability::kRejectOffset);
+ LoadObjectField(capability, PromiseCapability::kRejectOffset);
Node* const instrumenting = IsDebugActive();
@@ -2193,13 +2201,13 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
// handled by the combined Promise.
SetPromiseHandledByIfTrue(context, instrumenting, then_call, [=]() {
// Load promiseCapability.[[Promise]]
- return LoadObjectField(capability, JSPromiseCapability::kPromiseOffset);
+ return LoadObjectField(capability, PromiseCapability::kPromiseOffset);
});
Goto(&loop);
}
BIND(&break_loop);
- Return(LoadObjectField(capability, JSPromiseCapability::kPromiseOffset));
+ Return(LoadObjectField(capability, PromiseCapability::kPromiseOffset));
}
BIND(&close_iterator);
@@ -2212,13 +2220,13 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
BIND(&reject_promise);
{
Node* const reject =
- LoadObjectField(capability, JSPromiseCapability::kRejectOffset);
+ LoadObjectField(capability, PromiseCapability::kRejectOffset);
Callable callable = CodeFactory::Call(isolate());
CallJS(callable, context, reject, UndefinedConstant(),
var_exception.value());
Node* const promise =
- LoadObjectField(capability, JSPromiseCapability::kPromiseOffset);
+ LoadObjectField(capability, PromiseCapability::kPromiseOffset);
Return(promise);
}
}
diff --git a/deps/v8/src/builtins/builtins-promise-gen.h b/deps/v8/src/builtins/builtins-promise-gen.h
index 5240da182d..c2cadecfd2 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.h
+++ b/deps/v8/src/builtins/builtins-promise-gen.h
@@ -57,11 +57,11 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
// This is used by the Promise.prototype.finally builtin to store
// onFinally callback and the Promise constructor.
- // TODO(gsathya): Add extra slot for Promise constructor.
// TODO(gsathya): For native promises we can create a variant of
// this without extra space for the constructor to save memory.
enum PromiseFinallyContextSlot {
kOnFinallySlot = Context::MIN_CONTEXT_SLOTS,
+ kConstructorSlot,
kPromiseFinallyContextLength,
};
@@ -156,6 +156,7 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
void InternalPromiseReject(Node* context, Node* promise, Node* value,
Node* debug_event);
std::pair<Node*, Node*> CreatePromiseFinallyFunctions(Node* on_finally,
+ Node* constructor,
Node* native_context);
Node* CreateValueThunkFunction(Node* value, Node* native_context);
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index d6f5697b38..29c5a4eaeb 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -75,18 +75,18 @@ Node* ProxiesCodeStubAssembler::AllocateProxy(Node* target, Node* handler,
Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments(
Node* context, CodeStubArguments& args, Node* argc, ParameterMode mode) {
- Node* array = nullptr;
- Node* elements = nullptr;
Node* native_context = LoadNativeContext(context);
Node* array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
Node* argc_smi = ParameterToTagged(argc, mode);
- std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
- PACKED_ELEMENTS, array_map, argc_smi, nullptr, argc, INTPTR_PARAMETERS);
- VARIABLE(index, MachineType::PointerRepresentation());
- index.Bind(IntPtrConstant(FixedArrayBase::kHeaderSize - kHeapObjectTag));
+ Node* array = AllocateJSArray(PACKED_ELEMENTS, array_map, argc, argc_smi,
+ nullptr, mode);
+ Node* elements = LoadElements(array);
+
+ VARIABLE(index, MachineType::PointerRepresentation(),
+ IntPtrConstant(FixedArrayBase::kHeaderSize - kHeapObjectTag));
VariableList list({&index}, zone());
- args.ForEach(list, [this, elements, &index](Node* arg) {
+ args.ForEach(list, [=, &index](Node* arg) {
StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, index.value(),
arg);
Increment(&index, kPointerSize);
@@ -322,6 +322,256 @@ TF_BUILTIN(ProxyHasProperty, ProxiesCodeStubAssembler) {
StringConstant("has"), proxy);
}
+TF_BUILTIN(ProxyGetProperty, ProxiesCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* proxy = Parameter(Descriptor::kProxy);
+ Node* name = Parameter(Descriptor::kName);
+ Node* receiver = Parameter(Descriptor::kReceiverValue);
+
+ CSA_ASSERT(this, IsJSProxy(proxy));
+
+ // 1. Assert: IsPropertyKey(P) is true.
+ CSA_ASSERT(this, TaggedIsNotSmi(name));
+ CSA_ASSERT(this, IsName(name));
+ CSA_ASSERT(this, Word32Equal(IsPrivateSymbol(name), Int32Constant(0)));
+
+ Label throw_proxy_handler_revoked(this, Label::kDeferred),
+ trap_undefined(this);
+
+ // 2. Let handler be O.[[ProxyHandler]].
+ Node* handler = LoadObjectField(proxy, JSProxy::kHandlerOffset);
+
+ // 3. If handler is null, throw a TypeError exception.
+ GotoIf(IsNull(handler), &throw_proxy_handler_revoked);
+
+ // 4. Assert: Type(handler) is Object.
+ CSA_ASSERT(this, IsJSReceiver(handler));
+
+ // 5. Let target be O.[[ProxyTarget]].
+ Node* target = LoadObjectField(proxy, JSProxy::kTargetOffset);
+
+ // 6. Let trap be ? GetMethod(handler, "get").
+ // 7. If trap is undefined, then (see 7.a below).
+ Handle<Name> trap_name = factory()->get_string();
+ Node* trap = GetMethod(context, handler, trap_name, &trap_undefined);
+
+ // 8. Let trapResult be ? Call(trap, handler, « target, P, Receiver »).
+ Node* trap_result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ context, trap, handler, target, name, receiver);
+
+ // 9. Let targetDesc be ? target.[[GetOwnProperty]](P).
+ Label return_result(this);
+ CheckGetSetTrapResult(context, target, proxy, name, trap_result,
+ &return_result, JSProxy::kGet);
+
+ BIND(&return_result);
+ {
+ // 11. Return trapResult.
+ Return(trap_result);
+ }
+
+ BIND(&trap_undefined);
+ {
+ // 7.a. Return ? target.[[Get]](P, Receiver).
+ // TODO(mslekova): Introduce GetPropertyWithReceiver stub
+ Return(CallRuntime(Runtime::kGetPropertyWithReceiver, context, target, name,
+ receiver));
+ }
+
+ BIND(&throw_proxy_handler_revoked);
+ ThrowTypeError(context, MessageTemplate::kProxyRevoked, "get");
+}
+
+TF_BUILTIN(ProxySetProperty, ProxiesCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* proxy = Parameter(Descriptor::kProxy);
+ Node* name = Parameter(Descriptor::kName);
+ Node* value = Parameter(Descriptor::kValue);
+ Node* receiver = Parameter(Descriptor::kReceiverValue);
+ Node* language_mode = Parameter(Descriptor::kLanguageMode);
+
+ CSA_ASSERT(this, IsJSProxy(proxy));
+
+ // 1. Assert: IsPropertyKey(P) is true.
+ CSA_ASSERT(this, TaggedIsNotSmi(name));
+ CSA_ASSERT(this, IsName(name));
+
+ Label throw_proxy_handler_revoked(this, Label::kDeferred),
+ trap_undefined(this), failure(this, Label::kDeferred),
+ continue_checks(this), success(this),
+ private_symbol(this, Label::kDeferred);
+
+ GotoIf(IsPrivateSymbol(name), &private_symbol);
+
+ // 2. Let handler be O.[[ProxyHandler]].
+ Node* handler = LoadObjectField(proxy, JSProxy::kHandlerOffset);
+
+ // 3. If handler is null, throw a TypeError exception.
+ GotoIfNot(IsJSReceiver(handler), &throw_proxy_handler_revoked);
+
+ // 4. Assert: Type(handler) is Object.
+ CSA_ASSERT(this, IsJSReceiver(handler));
+
+ // 5. Let target be O.[[ProxyTarget]].
+ Node* target = LoadObjectField(proxy, JSProxy::kTargetOffset);
+
+ // 6. Let trap be ? GetMethod(handler, "set").
+ // 7. If trap is undefined, then (see 7.a below).
+ Handle<Name> set_string = factory()->set_string();
+ Node* trap = GetMethod(context, handler, set_string, &trap_undefined);
+
+ // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler,
+ // « target, P, V, Receiver »)).
+ // 9. If booleanTrapResult is false, return false.
+ BranchIfToBooleanIsTrue(
+ CallJS(CodeFactory::Call(isolate(),
+ ConvertReceiverMode::kNotNullOrUndefined),
+ context, trap, handler, target, name, value, receiver),
+ &continue_checks, &failure);
+
+ BIND(&continue_checks);
+ {
+ // 9. Let targetDesc be ? target.[[GetOwnProperty]](P).
+ Label return_result(this);
+ CheckGetSetTrapResult(context, target, proxy, name, value, &success,
+ JSProxy::kSet);
+ }
+
+ BIND(&failure);
+ {
+ Label if_throw(this, Label::kDeferred);
+ Branch(SmiEqual(language_mode, SmiConstant(STRICT)), &if_throw, &success);
+
+ BIND(&if_throw);
+ ThrowTypeError(context, MessageTemplate::kProxyTrapReturnedFalsishFor,
+ HeapConstant(set_string), name);
+ }
+
+ // 12. Return true.
+ BIND(&success);
+ Return(value);
+
+ BIND(&private_symbol);
+ {
+ Label failure(this), throw_error(this, Label::kDeferred);
+
+ Branch(SmiEqual(language_mode, SmiConstant(STRICT)), &throw_error,
+ &failure);
+
+ BIND(&failure);
+ Return(UndefinedConstant());
+
+ BIND(&throw_error);
+ ThrowTypeError(context, MessageTemplate::kProxyPrivate);
+ }
+
+ BIND(&trap_undefined);
+ {
+ // 7.a. Return ? target.[[Set]](P, V, Receiver).
+ CallRuntime(Runtime::kSetPropertyWithReceiver, context, target, name, value,
+ receiver, language_mode);
+ Return(value);
+ }
+
+ BIND(&throw_proxy_handler_revoked);
+ ThrowTypeError(context, MessageTemplate::kProxyRevoked, "set");
+}
+
+void ProxiesCodeStubAssembler::CheckGetSetTrapResult(
+ Node* context, Node* target, Node* proxy, Node* name, Node* trap_result,
+ Label* check_passed, JSProxy::AccessKind access_kind) {
+ Node* map = LoadMap(target);
+ VARIABLE(var_value, MachineRepresentation::kTagged);
+ VARIABLE(var_details, MachineRepresentation::kWord32);
+ VARIABLE(var_raw_value, MachineRepresentation::kTagged);
+
+ Label if_found_value(this), check_in_runtime(this, Label::kDeferred);
+
+ Node* instance_type = LoadInstanceType(target);
+ TryGetOwnProperty(context, target, target, map, instance_type, name,
+ &if_found_value, &var_value, &var_details, &var_raw_value,
+ check_passed, &check_in_runtime, kReturnAccessorPair);
+
+ BIND(&if_found_value);
+ {
+ Label throw_non_configurable_data(this, Label::kDeferred),
+ throw_non_configurable_accessor(this, Label::kDeferred),
+ check_accessor(this), check_data(this);
+
+ // If targetDesc is not undefined and targetDesc.[[Configurable]] is
+ // false, then:
+ GotoIfNot(IsSetWord32(var_details.value(),
+ PropertyDetails::kAttributesDontDeleteMask),
+ check_passed);
+
+ // If IsDataDescriptor(targetDesc) is true and
+ // targetDesc.[[Writable]] is false, then:
+ BranchIfAccessorPair(var_raw_value.value(), &check_accessor, &check_data);
+
+ BIND(&check_data);
+ {
+ Node* read_only = IsSetWord32(var_details.value(),
+ PropertyDetails::kAttributesReadOnlyMask);
+ GotoIfNot(read_only, check_passed);
+
+ // If SameValue(trapResult, targetDesc.[[Value]]) is false,
+ // throw a TypeError exception.
+ BranchIfSameValue(trap_result, var_value.value(), check_passed,
+ &throw_non_configurable_data);
+ }
+
+ BIND(&check_accessor);
+ {
+ Node* accessor_pair = var_raw_value.value();
+
+ if (access_kind == JSProxy::kGet) {
+ Label continue_check(this, Label::kDeferred);
+ // 10.b. If IsAccessorDescriptor(targetDesc) is true and
+ // targetDesc.[[Get]] is undefined, then:
+ Node* getter =
+ LoadObjectField(accessor_pair, AccessorPair::kGetterOffset);
+ // Here we check for null as well because if the getter was never
+ // defined it's set as null.
+ GotoIf(IsUndefined(getter), &continue_check);
+ GotoIf(IsNull(getter), &continue_check);
+ Goto(check_passed);
+
+ // 10.b.i. If trapResult is not undefined, throw a TypeError exception.
+ BIND(&continue_check);
+ GotoIfNot(IsUndefined(trap_result), &throw_non_configurable_accessor);
+ } else {
+ // 11.b.i. If targetDesc.[[Set]] is undefined, throw a TypeError
+ // exception.
+ Node* setter =
+ LoadObjectField(accessor_pair, AccessorPair::kSetterOffset);
+ GotoIf(IsUndefined(setter), &throw_non_configurable_accessor);
+ GotoIf(IsNull(setter), &throw_non_configurable_accessor);
+ }
+ Goto(check_passed);
+ }
+
+ BIND(&check_in_runtime);
+ {
+ CallRuntime(Runtime::kCheckProxyGetSetTrapResult, context, name, target,
+ trap_result, SmiConstant(access_kind));
+ Return(trap_result);
+ }
+
+ BIND(&throw_non_configurable_data);
+ {
+ ThrowTypeError(context, MessageTemplate::kProxyGetNonConfigurableData,
+ name, var_value.value(), trap_result);
+ }
+
+ BIND(&throw_non_configurable_accessor);
+ {
+ ThrowTypeError(context, MessageTemplate::kProxyGetNonConfigurableAccessor,
+ name, trap_result);
+ }
+ }
+}
+
void ProxiesCodeStubAssembler::CheckHasTrapResult(Node* context, Node* target,
Node* proxy, Node* name,
Label* check_passed,
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.h b/deps/v8/src/builtins/builtins-proxy-gen.h
index 34ef73debf..67203ee4d9 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.h
+++ b/deps/v8/src/builtins/builtins-proxy-gen.h
@@ -16,6 +16,22 @@ class ProxiesCodeStubAssembler : public CodeStubAssembler {
explicit ProxiesCodeStubAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
+ void BranchIfAccessorPair(Node* value, Label* if_accessor_pair,
+ Label* if_not_accessor_pair) {
+ GotoIf(TaggedIsSmi(value), if_not_accessor_pair);
+ Branch(IsAccessorPair(value), if_accessor_pair, if_not_accessor_pair);
+ }
+
+ // ES6 section 9.5.8 [[Get]] ( P, Receiver )
+ // name should not be an index.
+ Node* ProxyGetProperty(Node* context, Node* proxy, Node* name,
+ Node* receiver);
+
+ // ES6 section 9.5.9 [[Set]] ( P, V, Receiver )
+ // name should not be an index.
+ Node* ProxySetProperty(Node* context, Node* proxy, Node* name, Node* value,
+ Node* receiver);
+
protected:
void GotoIfRevokedProxy(Node* object, Label* if_proxy_revoked);
Node* AllocateProxy(Node* target, Node* handler, Node* context);
@@ -24,6 +40,10 @@ class ProxiesCodeStubAssembler : public CodeStubAssembler {
ParameterMode mode);
void CheckHasTrapResult(Node* context, Node* target, Node* proxy, Node* name,
Label* check_passed, Label* if_bailout);
+
+ void CheckGetSetTrapResult(Node* context, Node* target, Node* proxy,
+ Node* name, Node* trap_result, Label* if_not_found,
+ JSProxy::AccessKind access_kind);
};
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-proxy-helpers-gen.cc b/deps/v8/src/builtins/builtins-proxy-helpers-gen.cc
deleted file mode 100644
index 06345d44f4..0000000000
--- a/deps/v8/src/builtins/builtins-proxy-helpers-gen.cc
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/builtins/builtins-proxy-helpers-gen.h"
-#include "src/builtins/builtins-utils-gen.h"
-
-namespace v8 {
-namespace internal {
-TF_BUILTIN(ProxyGetProperty, ProxyAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* proxy = Parameter(Descriptor::kProxy);
- Node* name = Parameter(Descriptor::kName);
- Node* receiver = Parameter(Descriptor::kReceiverValue);
-
- CSA_ASSERT(this, IsJSProxy(proxy));
-
- // 1. Assert: IsPropertyKey(P) is true.
- CSA_ASSERT(this, IsName(name));
-
- Label throw_proxy_handler_revoked(this, Label::kDeferred),
- trap_undefined(this), no_target_desc(this, Label::kDeferred),
- trap_not_callable(this, Label::kDeferred);
-
- // 2. Let handler be O.[[ProxyHandler]].
- Node* handler = LoadObjectField(proxy, JSProxy::kHandlerOffset);
-
- // 3. If handler is null, throw a TypeError exception.
- GotoIf(IsNull(handler), &throw_proxy_handler_revoked);
-
- // 4. Assert: Type(handler) is Object.
- CSA_ASSERT(this, IsJSReceiver(handler));
-
- // 5. Let target be O.[[ProxyTarget]].
- Node* target = LoadObjectField(proxy, JSProxy::kTargetOffset);
-
- // 6. Let trap be ? GetMethod(handler, "get").
- // 7. If trap is undefined, then (see 7.a below).
- Handle<Name> trap_name = factory()->get_string();
- Node* trap = GetMethod(context, handler, trap_name, &trap_undefined);
-
- GotoIf(TaggedIsSmi(trap), &trap_not_callable);
- GotoIfNot(IsCallable(trap), &trap_not_callable);
-
- // 8. Let trapResult be ? Call(trap, handler, « target, P, Receiver »).
- Node* trap_result = CallJS(CodeFactory::Call(isolate()), context, trap,
- handler, target, name, receiver);
- // 9. Let targetDesc be ? target.[[GetOwnProperty]](P).
- Label return_result(this);
- CheckGetTrapResult(context, target, proxy, name, trap_result, &return_result,
- &no_target_desc);
-
- BIND(&return_result);
- {
- // 11. Return trapResult.
- Return(trap_result);
- }
-
- BIND(&no_target_desc);
- {
- CSA_ASSERT(this, IsJSReceiver(target));
- CallRuntime(Runtime::kCheckProxyGetTrapResult, context, name, target,
- trap_result);
- Return(trap_result);
- }
-
- BIND(&trap_undefined);
- {
- // 7.a. Return ? target.[[Get]](P, Receiver).
- Return(CallRuntime(Runtime::kGetPropertyWithReceiver, context, target, name,
- receiver));
- }
-
- BIND(&throw_proxy_handler_revoked);
- { ThrowTypeError(context, MessageTemplate::kProxyRevoked, "get"); }
-
- BIND(&trap_not_callable);
- {
- ThrowTypeError(context, MessageTemplate::kPropertyNotFunction, trap,
- StringConstant("get"), receiver);
- }
-}
-
-void ProxyAssembler::CheckGetTrapResult(Node* context, Node* target,
- Node* proxy, Node* name,
- Node* trap_result, Label* check_passed,
- Label* if_bailout) {
- Node* map = LoadMap(target);
- VARIABLE(var_value, MachineRepresentation::kTagged);
- VARIABLE(var_details, MachineRepresentation::kWord32);
- VARIABLE(var_raw_value, MachineRepresentation::kTagged);
-
- Label if_found_value(this, Label::kDeferred);
-
- Node* instance_type = LoadInstanceType(target);
- TryGetOwnProperty(context, proxy, target, map, instance_type, name,
- &if_found_value, &var_value, &var_details, &var_raw_value,
- check_passed, if_bailout, kReturnAccessorPair);
-
- BIND(&if_found_value);
- {
- Label throw_non_configurable_data(this, Label::kDeferred),
- throw_non_configurable_accessor(this, Label::kDeferred),
- check_accessor(this), check_data(this);
-
- // 10. If targetDesc is not undefined and targetDesc.[[Configurable]] is
- // false, then:
- GotoIfNot(IsSetWord32(var_details.value(),
- PropertyDetails::kAttributesDontDeleteMask),
- check_passed);
-
- // 10.a. If IsDataDescriptor(targetDesc) is true and
- // targetDesc.[[Writable]] is false, then:
- BranchIfAccessorPair(var_raw_value.value(), &check_accessor, &check_data);
-
- BIND(&check_data);
- {
- Node* read_only = IsSetWord32(var_details.value(),
- PropertyDetails::kAttributesReadOnlyMask);
- GotoIfNot(read_only, check_passed);
-
- // 10.a.i. If SameValue(trapResult, targetDesc.[[Value]]) is false,
- // throw a TypeError exception.
- GotoIfNot(SameValue(trap_result, var_value.value()),
- &throw_non_configurable_data);
- Goto(check_passed);
- }
-
- BIND(&check_accessor);
- {
- // 10.b. If IsAccessorDescriptor(targetDesc) is true and
- // targetDesc.[[Get]] is undefined, then:
- Node* accessor_pair = var_raw_value.value();
- Node* getter =
- LoadObjectField(accessor_pair, AccessorPair::kGetterOffset);
-
- // Here we check for null as well because if the getter was never
- // defined it's set as null.
- GotoIfNot(Word32Or(IsUndefined(getter), IsNull(getter)), check_passed);
-
- // 10.b.i. If trapResult is not undefined, throw a TypeError exception.
- GotoIfNot(IsUndefined(trap_result), &throw_non_configurable_accessor);
- Goto(check_passed);
- }
-
- BIND(&throw_non_configurable_data);
- {
- ThrowTypeError(context, MessageTemplate::kProxyGetNonConfigurableData,
- name, var_value.value(), trap_result);
- }
-
- BIND(&throw_non_configurable_accessor);
- {
- ThrowTypeError(context, MessageTemplate::kProxyGetNonConfigurableAccessor,
- name, trap_result);
- }
- }
-}
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-proxy-helpers-gen.h b/deps/v8/src/builtins/builtins-proxy-helpers-gen.h
deleted file mode 100644
index a72abbdd4a..0000000000
--- a/deps/v8/src/builtins/builtins-proxy-helpers-gen.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_BUILTINS_BUILTINS_PROXY_HELPERS_GEN_H_
-#define V8_BUILTINS_BUILTINS_PROXY_HELPERS_GEN_H_
-
-#include "src/code-stub-assembler.h"
-
-namespace v8 {
-namespace internal {
-using compiler::Node;
-
-class ProxyAssembler : public CodeStubAssembler {
- public:
- explicit ProxyAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
-
- void BranchIfAccessorPair(Node* value, Label* if_accessor_pair,
- Label* if_not_accessor_pair) {
- GotoIf(TaggedIsSmi(value), if_not_accessor_pair);
- Branch(IsAccessorPair(value), if_accessor_pair, if_not_accessor_pair);
- }
-
- // ES6 section 9.5.8 [[Get]] ( P, Receiver )
- Node* ProxyGetProperty(Node* context, Node* proxy, Node* name,
- Node* receiver);
-
- protected:
- void CheckGetTrapResult(Node* context, Node* target, Node* proxy, Node* name,
- Node* trap_result, Label* if_not_found,
- Label* if_bailout);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_BUILTINS_BUILTINS_PROXY_HELPERS_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index f761cf8928..8a760d0efa 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -9,6 +9,8 @@
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
+#include "src/counters.h"
+#include "src/factory-inl.h"
#include "src/objects/regexp-match-info.h"
#include "src/regexp/regexp-macro-assembler.h"
@@ -1704,7 +1706,7 @@ Node* RegExpBuiltinsAssembler::RegExpExec(Node* context, Node* regexp,
GotoIf(WordEqual(result, NullConstant()), &out);
ThrowIfNotJSReceiver(context, result,
- MessageTemplate::kInvalidRegExpExecResult, "unused");
+ MessageTemplate::kInvalidRegExpExecResult, "");
Goto(&out);
}
@@ -1904,7 +1906,7 @@ class GrowableFixedArray {
Node* const result_length = a->SmiTag(length());
Node* const result = a->AllocateUninitializedJSArrayWithoutElements(
- kind, array_map, result_length, nullptr);
+ array_map, result_length, nullptr);
// Note: We do not currently shrink the fixed array.
@@ -2206,9 +2208,10 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodySlow(
// Ensure last index is 0.
{
- Label next(this);
- GotoIf(SameValue(previous_last_index, smi_zero), &next);
+ Label next(this), slow(this, Label::kDeferred);
+ BranchIfSameValue(previous_last_index, smi_zero, &next, &slow);
+ BIND(&slow);
SlowStoreLastIndex(context, regexp, smi_zero);
Goto(&next);
BIND(&next);
@@ -2219,14 +2222,14 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodySlow(
// Reset last index if necessary.
{
- Label next(this);
+ Label next(this), slow(this, Label::kDeferred);
Node* const current_last_index = SlowLoadLastIndex(context, regexp);
- GotoIf(SameValue(current_last_index, previous_last_index), &next);
+ BranchIfSameValue(current_last_index, previous_last_index, &next, &slow);
+ BIND(&slow);
SlowStoreLastIndex(context, regexp, previous_last_index);
Goto(&next);
-
BIND(&next);
}
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index 2f8d2f5835..8d407b35e6 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -8,6 +8,7 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
+#include "src/factory-inl.h"
#include "src/objects.h"
namespace v8 {
@@ -1187,6 +1188,116 @@ compiler::Node* StringBuiltinsAssembler::GetSubstitution(
return var_result.value();
}
+// ES6 #sec-string.prototype.repeat
+TF_BUILTIN(StringPrototypeRepeat, StringBuiltinsAssembler) {
+ Label invalid_count(this), invalid_string_length(this),
+ return_emptystring(this);
+
+ Node* const context = Parameter(Descriptor::kContext);
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const count = Parameter(Descriptor::kCount);
+ Node* const string =
+ ToThisString(context, receiver, "String.prototype.repeat");
+ Node* const is_stringempty =
+ SmiEqual(LoadStringLength(string), SmiConstant(0));
+
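+ // ToInteger with kTruncateMinusZero normalizes -0 to +0, leaving count as
+ // either a Smi or a heap number.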
+ VARIABLE(var_count, MachineRepresentation::kTagged,
+ ToInteger(context, count, CodeStubAssembler::kTruncateMinusZero));
+
+ // Verify that count is valid, taking a fast path when the result will be
+ // an empty string.
+ {
+ Label next(this), if_count_isheapnumber(this, Label::kDeferred);
+
+ GotoIfNot(TaggedIsSmi(var_count.value()), &if_count_isheapnumber);
+
+ // If count is a Smi, throw a RangeError if it is less than 0 or greater
+ // than the maximum string length.
+ {
+ GotoIf(SmiLessThan(var_count.value(), SmiConstant(0)), &invalid_count);
+ GotoIf(SmiEqual(var_count.value(), SmiConstant(0)), &return_emptystring);
+ GotoIf(is_stringempty, &return_emptystring);
+ GotoIf(SmiGreaterThan(var_count.value(), SmiConstant(String::kMaxLength)),
+ &invalid_string_length);
+ Goto(&next);
+ }
+
+ // If count is a Heap Number...
+ // 1) If count is Infinity, throw a RangeError exception
+ // 2) If receiver is an empty string, return an empty string
+ // 3) Otherwise, throw RangeError exception
+ BIND(&if_count_isheapnumber);
+ {
+ CSA_ASSERT(this, IsNumberNormalized(var_count.value()));
+ Node* const number_value = LoadHeapNumberValue(var_count.value());
+ GotoIf(Float64Equal(number_value, Float64Constant(V8_INFINITY)),
+ &invalid_count);
+ GotoIf(Float64LessThan(number_value, Float64Constant(0.0)),
+ &invalid_count);
+ Branch(is_stringempty, &return_emptystring, &invalid_string_length);
+ }
+ BIND(&next);
+ }
+
+ // The receiver is repeated with the following algorithm:
+ // let n = count;
+ // let power_of_two_repeats = receiver;
+ // let result = "";
+ // while (true) {
+ // if (n & 1) result += s;
+ // n >>= 1;
+ // if (n === 0) return result;
+ // power_of_two_repeats += power_of_two_repeats;
+ // }
+ {
+ VARIABLE(var_result, MachineRepresentation::kTagged, EmptyStringConstant());
+ VARIABLE(var_temp, MachineRepresentation::kTagged, string);
+
+ Callable stringadd_callable =
+ CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
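+ // With the doubling scheme above, the loop below performs only
+ // O(log(count)) StringAdd calls.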
+
+ Label loop(this, {&var_count, &var_result, &var_temp}), return_result(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ {
+ Label next(this);
+ GotoIfNot(SmiToWord32(SmiAnd(var_count.value(), SmiConstant(1))),
+ &next);
+ var_result.Bind(CallStub(stringadd_callable, context,
+ var_result.value(), var_temp.value()));
+ Goto(&next);
+ BIND(&next);
+ }
+
+ var_count.Bind(SmiShr(var_count.value(), 1));
+ GotoIf(SmiEqual(var_count.value(), SmiConstant(0)), &return_result);
+ var_temp.Bind(CallStub(stringadd_callable, context, var_temp.value(),
+ var_temp.value()));
+ Goto(&loop);
+ }
+
+ BIND(&return_result);
+ Return(var_result.value());
+ }
+
+ BIND(&return_emptystring);
+ Return(EmptyStringConstant());
+
+ BIND(&invalid_count);
+ {
+ CallRuntime(Runtime::kThrowRangeError, context,
+ SmiConstant(MessageTemplate::kInvalidCountValue),
+ var_count.value());
+ Unreachable();
+ }
+ BIND(&invalid_string_length);
+ {
+ CallRuntime(Runtime::kThrowInvalidStringLength, context);
+ Unreachable();
+ }
+}
+
// ES6 #sec-string.prototype.replace
TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
Label out(this);
@@ -1722,6 +1833,167 @@ TF_BUILTIN(StringPrototypeSubstring, StringBuiltinsAssembler) {
}
}
+// ES6 #sec-string.prototype.trim
+TF_BUILTIN(StringPrototypeTrim, StringTrimAssembler) {
+ Generate(String::kTrim, "String.prototype.trim");
+}
+
+// Non-standard WebKit extension
+TF_BUILTIN(StringPrototypeTrimLeft, StringTrimAssembler) {
+ Generate(String::kTrimLeft, "String.prototype.trimLeft");
+}
+
+// Non-standard WebKit extension
+TF_BUILTIN(StringPrototypeTrimRight, StringTrimAssembler) {
+ Generate(String::kTrimRight, "String.prototype.trimRight");
+}
+
+void StringTrimAssembler::Generate(String::TrimMode mode,
+ const char* method_name) {
+ Label return_emptystring(this), if_runtime(this);
+
+ Node* const argc = Parameter(BuiltinDescriptor::kArgumentsCount);
+ Node* const context = Parameter(BuiltinDescriptor::kContext);
+ CodeStubArguments arguments(this, ChangeInt32ToIntPtr(argc));
+ Node* const receiver = arguments.GetReceiver();
+
+ // Check that {receiver} is coercible to Object and convert it to a String.
+ Node* const string = ToThisString(context, receiver, method_name);
+ Node* const string_length = SmiUntag(LoadStringLength(string));
+
+ ToDirectStringAssembler to_direct(state(), string);
+ to_direct.TryToDirect(&if_runtime);
+ Node* const string_data = to_direct.PointerToData(&if_runtime);
+ Node* const instance_type = to_direct.instance_type();
+ Node* const is_stringonebyte = IsOneByteStringInstanceType(instance_type);
+ Node* const string_data_offset = to_direct.offset();
+
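+ // var_start and var_end hold the inclusive bounds of the first and last
+ // non-whitespace characters; the SubString call below uses var_end + 1 as
+ // the exclusive end index.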
+ VARIABLE(var_start, MachineType::PointerRepresentation(), IntPtrConstant(0));
+ VARIABLE(var_end, MachineType::PointerRepresentation(),
+ IntPtrSub(string_length, IntPtrConstant(1)));
+
+ if (mode == String::kTrimLeft || mode == String::kTrim) {
+ ScanForNonWhiteSpaceOrLineTerminator(string_data, string_data_offset,
+ is_stringonebyte, &var_start,
+ string_length, 1, &return_emptystring);
+ }
+ if (mode == String::kTrimRight || mode == String::kTrim) {
+ ScanForNonWhiteSpaceOrLineTerminator(
+ string_data, string_data_offset, is_stringonebyte, &var_end,
+ IntPtrConstant(-1), -1, &return_emptystring);
+ }
+
+ arguments.PopAndReturn(
+ SubString(context, string, SmiTag(var_start.value()),
+ SmiAdd(SmiTag(var_end.value()), SmiConstant(1)),
+ SubStringFlags::FROM_TO_ARE_BOUNDED));
+
+ BIND(&if_runtime);
+ arguments.PopAndReturn(CallRuntime(Runtime::kStringTrim, context, string,
+ SmiConstant(static_cast<int>(mode))));
+
+ BIND(&return_emptystring);
+ arguments.PopAndReturn(EmptyStringConstant());
+}
+
+void StringTrimAssembler::ScanForNonWhiteSpaceOrLineTerminator(
+ Node* const string_data, Node* const string_data_offset,
+ Node* const is_stringonebyte, Variable* const var_index, Node* const end,
+ int increment, Label* const if_none_found) {
+ Label if_stringisonebyte(this), out(this);
+
+ GotoIf(is_stringonebyte, &if_stringisonebyte);
+
+ // Two Byte String
+ BuildLoop(
+ var_index, end, increment, if_none_found, &out, [&](Node* const index) {
+ return Load(
+ MachineType::Uint16(), string_data,
+ WordShl(IntPtrAdd(index, string_data_offset), IntPtrConstant(1)));
+ });
+
+ BIND(&if_stringisonebyte);
+ BuildLoop(var_index, end, increment, if_none_found, &out,
+ [&](Node* const index) {
+ return Load(MachineType::Uint8(), string_data,
+ IntPtrAdd(index, string_data_offset));
+ });
+
+ BIND(&out);
+}
+
+void StringTrimAssembler::BuildLoop(Variable* const var_index, Node* const end,
+ int increment, Label* const if_none_found,
+ Label* const out,
+ std::function<Node*(Node*)> get_character) {
+ Label loop(this, var_index);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ Node* const index = var_index->value();
+ GotoIf(IntPtrEqual(index, end), if_none_found);
+ GotoIfNotWhiteSpaceOrLineTerminator(
+ UncheckedCast<Uint32T>(get_character(index)), out);
+ Increment(var_index, increment);
+ Goto(&loop);
+ }
+}
+
+void StringTrimAssembler::GotoIfNotWhiteSpaceOrLineTerminator(
+ Node* const char_code, Label* const if_not_whitespace) {
+ Label out(this);
+
+ // 0x0020 - SPACE (intentionally out of order to fast-path a common case)
+ GotoIf(Word32Equal(char_code, Int32Constant(0x0020)), &out);
+
+ // 0x0009 - HORIZONTAL TAB
+ GotoIf(Uint32LessThan(char_code, Int32Constant(0x0009)), if_not_whitespace);
+ // 0x000A - LINE FEED OR NEW LINE
+ // 0x000B - VERTICAL TAB
+ // 0x000C - FORMFEED
+ // 0x000D - CARRIAGE RETURN
+ GotoIf(Uint32LessThanOrEqual(char_code, Int32Constant(0x000D)), &out);
+
+ // Common Non-whitespace characters
+ GotoIf(Uint32LessThan(char_code, Int32Constant(0x00A0)), if_not_whitespace);
+
+ // 0x00A0 - NO-BREAK SPACE
+ GotoIf(Word32Equal(char_code, Int32Constant(0x00A0)), &out);
+
+ // 0x1680 - Ogham Space Mark
+ GotoIf(Word32Equal(char_code, Int32Constant(0x1680)), &out);
+
+ // 0x2000 - EN QUAD
+ GotoIf(Uint32LessThan(char_code, Int32Constant(0x2000)), if_not_whitespace);
+ // 0x2001 - EM QUAD
+ // 0x2002 - EN SPACE
+ // 0x2003 - EM SPACE
+ // 0x2004 - THREE-PER-EM SPACE
+ // 0x2005 - FOUR-PER-EM SPACE
+ // 0x2006 - SIX-PER-EM SPACE
+ // 0x2007 - FIGURE SPACE
+ // 0x2008 - PUNCTUATION SPACE
+ // 0x2009 - THIN SPACE
+ // 0x200A - HAIR SPACE
+ GotoIf(Uint32LessThanOrEqual(char_code, Int32Constant(0x200A)), &out);
+
+ // 0x2028 - LINE SEPARATOR
+ GotoIf(Word32Equal(char_code, Int32Constant(0x2028)), &out);
+ // 0x2029 - PARAGRAPH SEPARATOR
+ GotoIf(Word32Equal(char_code, Int32Constant(0x2029)), &out);
+ // 0x202F - NARROW NO-BREAK SPACE
+ GotoIf(Word32Equal(char_code, Int32Constant(0x202F)), &out);
+ // 0x205F - MEDIUM MATHEMATICAL SPACE
+ GotoIf(Word32Equal(char_code, Int32Constant(0x205F)), &out);
+ // 0xFEFF - BYTE ORDER MARK
+ GotoIf(Word32Equal(char_code, Int32Constant(0xFEFF)), &out);
+ // 0x3000 - IDEOGRAPHIC SPACE
+ Branch(Word32Equal(char_code, Int32Constant(0x3000)), &out,
+ if_not_whitespace);
+
+ BIND(&out);
+}
+
// ES6 #sec-string.prototype.tostring
TF_BUILTIN(StringPrototypeToString, CodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
@@ -1885,5 +2157,166 @@ TF_BUILTIN(StringIteratorPrototypeNext, StringBuiltinsAssembler) {
}
}
+// -----------------------------------------------------------------------------
+// ES6 section B.2.3 Additional Properties of the String.prototype object
+
+class StringHtmlAssembler : public StringBuiltinsAssembler {
+ public:
+ explicit StringHtmlAssembler(compiler::CodeAssemblerState* state)
+ : StringBuiltinsAssembler(state) {}
+
+ protected:
+ void Generate(Node* const context, Node* const receiver,
+ const char* method_name, const char* tag_name) {
+ Node* const string = ToThisString(context, receiver, method_name);
+ std::string open_tag = "<" + std::string(tag_name) + ">";
+ std::string close_tag = "</" + std::string(tag_name) + ">";
+
+ Node* strings[] = {StringConstant(open_tag.c_str()), string,
+ StringConstant(close_tag.c_str())};
+ Return(ConcatStrings(context, strings, arraysize(strings)));
+ }
+
+ void GenerateWithAttribute(Node* const context, Node* const receiver,
+ const char* method_name, const char* tag_name,
+ const char* attr, Node* const value) {
+ Node* const string = ToThisString(context, receiver, method_name);
+ Node* const value_string =
+ EscapeQuotes(context, ToString_Inline(context, value));
+ std::string open_tag_attr =
+ "<" + std::string(tag_name) + " " + std::string(attr) + "=\"";
+ std::string close_tag = "</" + std::string(tag_name) + ">";
+
+ Node* strings[] = {StringConstant(open_tag_attr.c_str()), value_string,
+ StringConstant("\">"), string,
+ StringConstant(close_tag.c_str())};
+ Return(ConcatStrings(context, strings, arraysize(strings)));
+ }
+
+ Node* ConcatStrings(Node* const context, Node** strings, int len) {
+ VARIABLE(var_result, MachineRepresentation::kTagged, strings[0]);
+ for (int i = 1; i < len; i++) {
+ var_result.Bind(CallStub(CodeFactory::StringAdd(isolate()), context,
+ var_result.value(), strings[i]));
+ }
+ return var_result.value();
+ }
+
+ Node* EscapeQuotes(Node* const context, Node* const string) {
+ CSA_ASSERT(this, IsString(string));
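+ // Replace every '"' in the string with "&quot;" by running a global
+ // replace through the internal RegExp machinery.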
+ Node* const regexp_function = LoadContextElement(
+ LoadNativeContext(context), Context::REGEXP_FUNCTION_INDEX);
+ Node* const initial_map = LoadObjectField(
+ regexp_function, JSFunction::kPrototypeOrInitialMapOffset);
+ // TODO(pwong): Refactor to not allocate RegExp
+ Node* const regexp =
+ CallRuntime(Runtime::kRegExpInitializeAndCompile, context,
+ AllocateJSObjectFromMap(initial_map), StringConstant("\""),
+ StringConstant("g"));
+
+ return CallRuntime(Runtime::kRegExpInternalReplace, context, regexp, string,
+ StringConstant("&quot;"));
+ }
+};
+
+// ES6 #sec-string.prototype.anchor
+TF_BUILTIN(StringPrototypeAnchor, StringHtmlAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const value = Parameter(Descriptor::kValue);
+ GenerateWithAttribute(context, receiver, "String.prototype.anchor", "a",
+ "name", value);
+}
+
+// ES6 #sec-string.prototype.big
+TF_BUILTIN(StringPrototypeBig, StringHtmlAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Generate(context, receiver, "String.prototype.big", "big");
+}
+
+// ES6 #sec-string.prototype.blink
+TF_BUILTIN(StringPrototypeBlink, StringHtmlAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Generate(context, receiver, "String.prototype.blink", "blink");
+}
+
+// ES6 #sec-string.prototype.bold
+TF_BUILTIN(StringPrototypeBold, StringHtmlAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Generate(context, receiver, "String.prototype.bold", "b");
+}
+
+// ES6 #sec-string.prototype.fontcolor
+TF_BUILTIN(StringPrototypeFontcolor, StringHtmlAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const value = Parameter(Descriptor::kValue);
+ GenerateWithAttribute(context, receiver, "String.prototype.fontcolor", "font",
+ "color", value);
+}
+
+// ES6 #sec-string.prototype.fontsize
+TF_BUILTIN(StringPrototypeFontsize, StringHtmlAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const value = Parameter(Descriptor::kValue);
+ GenerateWithAttribute(context, receiver, "String.prototype.fontsize", "font",
+ "size", value);
+}
+
+// ES6 #sec-string.prototype.fixed
+TF_BUILTIN(StringPrototypeFixed, StringHtmlAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Generate(context, receiver, "String.prototype.fixed", "tt");
+}
+
+// ES6 #sec-string.prototype.italics
+TF_BUILTIN(StringPrototypeItalics, StringHtmlAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Generate(context, receiver, "String.prototype.italics", "i");
+}
+
+// ES6 #sec-string.prototype.link
+TF_BUILTIN(StringPrototypeLink, StringHtmlAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const value = Parameter(Descriptor::kValue);
+ GenerateWithAttribute(context, receiver, "String.prototype.link", "a", "href",
+ value);
+}
+
+// ES6 #sec-string.prototype.small
+TF_BUILTIN(StringPrototypeSmall, StringHtmlAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Generate(context, receiver, "String.prototype.small", "small");
+}
+
+// ES6 #sec-string.prototype.strike
+TF_BUILTIN(StringPrototypeStrike, StringHtmlAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Generate(context, receiver, "String.prototype.strike", "strike");
+}
+
+// ES6 #sec-string.prototype.sub
+TF_BUILTIN(StringPrototypeSub, StringHtmlAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Generate(context, receiver, "String.prototype.sub", "sub");
+}
+
+// ES6 #sec-string.prototype.sup
+TF_BUILTIN(StringPrototypeSup, StringHtmlAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Generate(context, receiver, "String.prototype.sup", "sup");
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h
index 5c0b6f0eaa..c9af380270 100644
--- a/deps/v8/src/builtins/builtins-string-gen.h
+++ b/deps/v8/src/builtins/builtins-string-gen.h
@@ -104,6 +104,29 @@ class StringIncludesIndexOfAssembler : public StringBuiltinsAssembler {
void Generate(SearchVariant variant);
};
+class StringTrimAssembler : public StringBuiltinsAssembler {
+ public:
+ explicit StringTrimAssembler(compiler::CodeAssemblerState* state)
+ : StringBuiltinsAssembler(state) {}
+
+ void GotoIfNotWhiteSpaceOrLineTerminator(Node* const char_code,
+ Label* const if_not_whitespace);
+
+ protected:
+ void Generate(String::TrimMode mode, const char* method);
+
+ void ScanForNonWhiteSpaceOrLineTerminator(Node* const string_data,
+ Node* const string_data_offset,
+ Node* const is_stringonebyte,
+ Variable* const var_index,
+ Node* const end, int increment,
+ Label* const if_none_found);
+
+ void BuildLoop(Variable* const var_index, Node* const end, int increment,
+ Label* const if_none_found, Label* const out,
+ std::function<Node*(Node*)> get_character);
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc
index 47874aba96..4e3058c220 100644
--- a/deps/v8/src/builtins/builtins-string.cc
+++ b/deps/v8/src/builtins/builtins-string.cc
@@ -317,27 +317,6 @@ BUILTIN(StringPrototypeStartsWith) {
return isolate->heap()->true_value();
}
-// ES6 section 21.1.3.27 String.prototype.trim ()
-BUILTIN(StringPrototypeTrim) {
- HandleScope scope(isolate);
- TO_THIS_STRING(string, "String.prototype.trim");
- return *String::Trim(string, String::kTrim);
-}
-
-// Non-standard WebKit extension
-BUILTIN(StringPrototypeTrimLeft) {
- HandleScope scope(isolate);
- TO_THIS_STRING(string, "String.prototype.trimLeft");
- return *String::Trim(string, String::kTrimLeft);
-}
-
-// Non-standard WebKit extension
-BUILTIN(StringPrototypeTrimRight) {
- HandleScope scope(isolate);
- TO_THIS_STRING(string, "String.prototype.trimRight");
- return *String::Trim(string, String::kTrimRight);
-}
-
#ifndef V8_INTL_SUPPORT
namespace {
diff --git a/deps/v8/src/builtins/builtins-typedarray-gen.cc b/deps/v8/src/builtins/builtins-typedarray-gen.cc
index ca3e4fdc39..07f122b909 100644
--- a/deps/v8/src/builtins/builtins-typedarray-gen.cc
+++ b/deps/v8/src/builtins/builtins-typedarray-gen.cc
@@ -5,10 +5,21 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
+#include "src/handles-inl.h"
namespace v8 {
namespace internal {
+using compiler::Node;
+template <class T>
+using TNode = compiler::TNode<T>;
+
+// This is needed for gc_mole which will compile this file without the full set
+// of GN defined macros.
+#ifndef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP
+#define V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP 64
+#endif
+
// -----------------------------------------------------------------------------
// ES6 section 22.2 TypedArray Objects
@@ -34,9 +45,37 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
Node* CalculateExternalPointer(Node* backing_store, Node* byte_offset);
Node* LoadDataPtr(Node* typed_array);
Node* ByteLengthIsValid(Node* byte_length);
+
+ // Loads the elements kind of a TypedArray instance.
+ TNode<Word32T> LoadElementsKind(TNode<Object> typed_array);
+
+ // Returns the byte size of an element for a TypedArray elements kind.
+ TNode<IntPtrT> GetTypedArrayElementSize(TNode<Word32T> elements_kind);
+
+ // Fast path for setting a TypedArray (source) onto another TypedArray
+ // (target) at an element offset.
+ void SetTypedArraySource(TNode<Context> context, TNode<JSTypedArray> source,
+ TNode<JSTypedArray> target, TNode<IntPtrT> offset,
+ Label* call_runtime, Label* if_source_too_large);
+
+ void SetJSArraySource(TNode<Context> context, TNode<JSArray> source,
+ TNode<JSTypedArray> target, TNode<IntPtrT> offset,
+ Label* call_runtime, Label* if_source_too_large);
+
+ void CallCMemmove(TNode<IntPtrT> dest_ptr, TNode<IntPtrT> src_ptr,
+ TNode<IntPtrT> byte_length);
+
+ void CallCCopyFastNumberJSArrayElementsToTypedArray(
+ TNode<Context> context, TNode<JSArray> source, TNode<JSTypedArray> dest,
+ TNode<IntPtrT> source_length, TNode<IntPtrT> offset);
+
+ void CallCCopyTypedArrayElementsToTypedArray(TNode<JSTypedArray> source,
+ TNode<JSTypedArray> dest,
+ TNode<IntPtrT> source_length,
+ TNode<IntPtrT> offset);
};
-compiler::Node* TypedArrayBuiltinsAssembler::LoadMapForType(Node* array) {
+Node* TypedArrayBuiltinsAssembler::LoadMapForType(Node* array) {
CSA_ASSERT(this, IsJSTypedArray(array));
Label unreachable(this), done(this);
@@ -89,8 +128,8 @@ compiler::Node* TypedArrayBuiltinsAssembler::LoadMapForType(Node* array) {
// can't allocate an array bigger than our 32-bit arithmetic range anyway. 64
// bit platforms could theoretically have an offset up to 2^35 - 1, so we may
// need to convert the float heap number to an intptr.
-compiler::Node* TypedArrayBuiltinsAssembler::CalculateExternalPointer(
- Node* backing_store, Node* byte_offset) {
+Node* TypedArrayBuiltinsAssembler::CalculateExternalPointer(Node* backing_store,
+ Node* byte_offset) {
return IntPtrAdd(backing_store, ChangeNumberToIntPtr(byte_offset));
}
@@ -199,9 +238,9 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
Node* fixed_typed_map = LoadMapForType(holder);
GotoIf(TaggedIsNotSmi(byte_length), &allocate_off_heap);
- GotoIf(SmiGreaterThan(byte_length,
- SmiConstant(FLAG_typed_array_max_size_in_heap)),
- &allocate_off_heap);
+ GotoIf(
+ SmiGreaterThan(byte_length, SmiConstant(V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP)),
+ &allocate_off_heap);
Goto(&allocate_on_heap);
BIND(&allocate_on_heap);
@@ -511,7 +550,7 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
}
}
-compiler::Node* TypedArrayBuiltinsAssembler::LoadDataPtr(Node* typed_array) {
+Node* TypedArrayBuiltinsAssembler::LoadDataPtr(Node* typed_array) {
CSA_ASSERT(this, IsJSTypedArray(typed_array));
Node* elements = LoadElements(typed_array);
CSA_ASSERT(this, IsFixedTypedArray(elements));
@@ -522,8 +561,7 @@ compiler::Node* TypedArrayBuiltinsAssembler::LoadDataPtr(Node* typed_array) {
return IntPtrAdd(base_pointer, external_pointer);
}
-compiler::Node* TypedArrayBuiltinsAssembler::ByteLengthIsValid(
- Node* byte_length) {
+Node* TypedArrayBuiltinsAssembler::ByteLengthIsValid(Node* byte_length) {
Label smi(this), done(this);
VARIABLE(is_valid, MachineRepresentation::kWord32);
GotoIf(TaggedIsSmi(byte_length), &smi);
@@ -667,6 +705,352 @@ TF_BUILTIN(TypedArrayPrototypeLength, TypedArrayBuiltinsAssembler) {
JSTypedArray::kLengthOffset);
}
+TNode<Word32T> TypedArrayBuiltinsAssembler::LoadElementsKind(
+ TNode<Object> typed_array) {
+ CSA_ASSERT(this, IsJSTypedArray(typed_array));
+ return Int32Sub(LoadMapElementsKind(LoadMap(CAST(typed_array))),
+ Int32Constant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND));
+}
+
+TNode<IntPtrT> TypedArrayBuiltinsAssembler::GetTypedArrayElementSize(
+ TNode<Word32T> elements_kind) {
+ TVARIABLE(IntPtrT, element_size);
+ Label next(this), if_unknown_type(this, Label::kDeferred);
+
+ size_t const kTypedElementsKindCount = LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
+ FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND +
+ 1;
+
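+ // Build parallel arrays of case values and labels from the TYPED_ARRAYS
+ // macro so the Switch below can be lowered to a non-sparse table switch.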
+ int32_t elements_kinds[kTypedElementsKindCount] = {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ TYPE##_ELEMENTS - FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND,
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ };
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ Label if_##type##array(this);
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ Label* elements_kind_labels[kTypedElementsKindCount] = {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) &if_##type##array,
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ };
+
+ Switch(elements_kind, &if_unknown_type, elements_kinds, elements_kind_labels,
+ kTypedElementsKindCount);
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ BIND(&if_##type##array); \
+ { \
+ element_size = IntPtrConstant(size); \
+ Goto(&next); \
+ }
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ BIND(&if_unknown_type);
+ {
+ element_size = IntPtrConstant(0);
+ Goto(&next);
+ }
+ BIND(&next);
+ return element_size;
+}
+
+void TypedArrayBuiltinsAssembler::SetTypedArraySource(
+ TNode<Context> context, TNode<JSTypedArray> source,
+ TNode<JSTypedArray> target, TNode<IntPtrT> offset, Label* call_runtime,
+ Label* if_source_too_large) {
+ CSA_ASSERT(this, Word32BinaryNot(IsDetachedBuffer(
+ LoadObjectField(source, JSTypedArray::kBufferOffset))));
+ CSA_ASSERT(this, Word32BinaryNot(IsDetachedBuffer(
+ LoadObjectField(target, JSTypedArray::kBufferOffset))));
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(offset, IntPtrConstant(0)));
+ CSA_ASSERT(this,
+ IntPtrLessThanOrEqual(offset, IntPtrConstant(Smi::kMaxValue)));
+
+ // Check for possible range errors.
+
+ TNode<IntPtrT> source_length =
+ LoadAndUntagObjectField(source, JSTypedArray::kLengthOffset);
+ TNode<IntPtrT> target_length =
+ LoadAndUntagObjectField(target, JSTypedArray::kLengthOffset);
+ TNode<IntPtrT> required_target_length = IntPtrAdd(source_length, offset);
+
+ GotoIf(IntPtrGreaterThan(required_target_length, target_length),
+ if_source_too_large);
+
+ // Grab pointers and byte lengths we need later on.
+
+ TNode<IntPtrT> target_data_ptr = UncheckedCast<IntPtrT>(LoadDataPtr(target));
+ TNode<IntPtrT> source_data_ptr = UncheckedCast<IntPtrT>(LoadDataPtr(source));
+
+ TNode<Word32T> source_el_kind = LoadElementsKind(source);
+ TNode<Word32T> target_el_kind = LoadElementsKind(target);
+
+ TNode<IntPtrT> source_el_size = GetTypedArrayElementSize(source_el_kind);
+ TNode<IntPtrT> target_el_size = GetTypedArrayElementSize(target_el_kind);
+
+ // A note on byte lengths: both the source and target byte lengths must be
+ // valid, i.e. it must be possible to allocate an array of the given
+ // length. That means we're safe from overflows in the following
+ // multiplication.
+ TNode<IntPtrT> source_byte_length = IntPtrMul(source_length, source_el_size);
+ CSA_ASSERT(this,
+ IntPtrGreaterThanOrEqual(source_byte_length, IntPtrConstant(0)));
+
+ Label call_memmove(this), fast_c_call(this), out(this);
+ Branch(Word32Equal(source_el_kind, target_el_kind), &call_memmove,
+ &fast_c_call);
+
+ BIND(&call_memmove);
+ {
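+ // Same element kind: the source bytes can be copied verbatim into the
+ // target at the requested element offset.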
+ TNode<IntPtrT> target_start =
+ IntPtrAdd(target_data_ptr, IntPtrMul(offset, target_el_size));
+ CallCMemmove(target_start, source_data_ptr, source_byte_length);
+ Goto(&out);
+ }
+
+ BIND(&fast_c_call);
+ {
+ // Overlapping backing stores of different element kinds are handled in
+ // the runtime. We're a bit conservative here and bail out to the runtime
+ // if the ranges overlap and the element kinds differ.
+
+ TNode<IntPtrT> target_byte_length =
+ IntPtrMul(target_length, target_el_size);
+ CSA_ASSERT(this,
+ IntPtrGreaterThanOrEqual(target_byte_length, IntPtrConstant(0)));
+
+ TNode<IntPtrT> target_data_end_ptr =
+ IntPtrAdd(target_data_ptr, target_byte_length);
+ TNode<IntPtrT> source_data_end_ptr =
+ IntPtrAdd(source_data_ptr, source_byte_length);
+
+ GotoIfNot(
+ Word32Or(IntPtrLessThanOrEqual(target_data_end_ptr, source_data_ptr),
+ IntPtrLessThanOrEqual(source_data_end_ptr, target_data_ptr)),
+ call_runtime);
+
+ TNode<IntPtrT> source_length =
+ LoadAndUntagObjectField(source, JSTypedArray::kLengthOffset);
+ CallCCopyTypedArrayElementsToTypedArray(source, target, source_length,
+ offset);
+ Goto(&out);
+ }
+
+ BIND(&out);
+}
+
+void TypedArrayBuiltinsAssembler::SetJSArraySource(
+ TNode<Context> context, TNode<JSArray> source, TNode<JSTypedArray> target,
+ TNode<IntPtrT> offset, Label* call_runtime, Label* if_source_too_large) {
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(offset, IntPtrConstant(0)));
+ CSA_ASSERT(this,
+ IntPtrLessThanOrEqual(offset, IntPtrConstant(Smi::kMaxValue)));
+
+ TNode<IntPtrT> source_length = SmiUntag(LoadFastJSArrayLength(source));
+ TNode<IntPtrT> target_length =
+ LoadAndUntagObjectField(target, JSTypedArray::kLengthOffset);
+
+ // Maybe out of bounds?
+ GotoIf(IntPtrGreaterThan(IntPtrAdd(source_length, offset), target_length),
+ if_source_too_large);
+
+ // Nothing to do if {source} is empty.
+ Label out(this), fast_c_call(this);
+ GotoIf(IntPtrEqual(source_length, IntPtrConstant(0)), &out);
+
+ // Dispatch based on the source elements kind.
+ {
+ // These are the supported elements kinds in TryCopyElementsFastNumber.
+ int32_t values[] = {
+ PACKED_SMI_ELEMENTS, HOLEY_SMI_ELEMENTS, PACKED_DOUBLE_ELEMENTS,
+ HOLEY_DOUBLE_ELEMENTS,
+ };
+ Label* labels[] = {
+ &fast_c_call, &fast_c_call, &fast_c_call, &fast_c_call,
+ };
+ STATIC_ASSERT(arraysize(values) == arraysize(labels));
+
+ TNode<Int32T> source_elements_kind = LoadMapElementsKind(LoadMap(source));
+ Switch(source_elements_kind, call_runtime, values, labels,
+ arraysize(values));
+ }
+
+ BIND(&fast_c_call);
+ CallCCopyFastNumberJSArrayElementsToTypedArray(context, source, target,
+ source_length, offset);
+ Goto(&out);
+ BIND(&out);
+}
+
+void TypedArrayBuiltinsAssembler::CallCMemmove(TNode<IntPtrT> dest_ptr,
+ TNode<IntPtrT> src_ptr,
+ TNode<IntPtrT> byte_length) {
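+ // memmove (rather than memcpy) is used since source and target may be
+ // overlapping views of the same backing store.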
+ TNode<ExternalReference> memmove =
+ ExternalConstant(ExternalReference::libc_memmove_function(isolate()));
+ CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
+ MachineType::Pointer(), MachineType::UintPtr(), memmove,
+ dest_ptr, src_ptr, byte_length);
+}
+
+void TypedArrayBuiltinsAssembler::
+ CallCCopyFastNumberJSArrayElementsToTypedArray(TNode<Context> context,
+ TNode<JSArray> source,
+ TNode<JSTypedArray> dest,
+ TNode<IntPtrT> source_length,
+ TNode<IntPtrT> offset) {
+ TNode<ExternalReference> f = ExternalConstant(
+ ExternalReference::copy_fast_number_jsarray_elements_to_typed_array(
+ isolate()));
+ CallCFunction5(MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::UintPtr(), MachineType::UintPtr(), f, context,
+ source, dest, source_length, offset);
+}
+
+void TypedArrayBuiltinsAssembler::CallCCopyTypedArrayElementsToTypedArray(
+ TNode<JSTypedArray> source, TNode<JSTypedArray> dest,
+ TNode<IntPtrT> source_length, TNode<IntPtrT> offset) {
+ TNode<ExternalReference> f = ExternalConstant(
+ ExternalReference::copy_typed_array_elements_to_typed_array(isolate()));
+ CallCFunction4(MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::UintPtr(),
+ MachineType::UintPtr(), f, source, dest, source_length,
+ offset);
+}
+
+// ES #sec-get-%typedarray%.prototype.set
+TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ CodeStubArguments args(
+ this, ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount)));
+
+ Label if_source_is_typed_array(this), if_source_is_fast_jsarray(this),
+ if_offset_is_out_of_bounds(this, Label::kDeferred),
+ if_source_too_large(this, Label::kDeferred),
+ if_typed_array_is_neutered(this, Label::kDeferred),
+ if_receiver_is_not_typedarray(this, Label::kDeferred);
+
+ // Check the receiver is a typed array.
+ TNode<Object> receiver = args.GetReceiver();
+ GotoIf(TaggedIsSmi(receiver), &if_receiver_is_not_typedarray);
+ GotoIfNot(IsJSTypedArray(receiver), &if_receiver_is_not_typedarray);
+
+ // Normalize offset argument (using ToInteger) and handle heap number cases.
+ TNode<Object> offset = args.GetOptionalArgumentValue(1, SmiConstant(0));
+ TNode<Object> offset_num = ToInteger(context, offset, kTruncateMinusZero);
+ CSA_ASSERT(this, IsNumberNormalized(offset_num));
+
+ // Since ToInteger always returns a Smi if the given value is within Smi
+ // range, and the only corner case of -0.0 has already been truncated to 0.0,
+ // we can simply throw unless the offset is a non-negative Smi.
+ // TODO(jgruber): It's an observable spec violation to throw here if
+ // {offset_num} is a positive number outside the Smi range. Per spec, we need
+ // to check for detached buffers and call the observable ToObject/ToLength
+ // operations first.
+ GotoIfNot(TaggedIsPositiveSmi(offset_num), &if_offset_is_out_of_bounds);
+ TNode<Smi> offset_smi = CAST(offset_num);
+
+ // Check the receiver is not neutered.
+ TNode<Object> receiver_buffer =
+ LoadObjectField(CAST(receiver), JSTypedArray::kBufferOffset);
+ GotoIf(IsDetachedBuffer(receiver_buffer), &if_typed_array_is_neutered);
+
+ // Check the source argument is valid and whether a fast path can be taken.
+ Label call_runtime(this);
+ TNode<Object> source = args.GetOptionalArgumentValue(0);
+ GotoIf(TaggedIsSmi(source), &call_runtime);
+ GotoIf(IsJSTypedArray(source), &if_source_is_typed_array);
+ BranchIfFastJSArray(source, context, &if_source_is_fast_jsarray,
+ &call_runtime);
+
+ // Fast path for a typed array source argument.
+ BIND(&if_source_is_typed_array);
+ {
+ // Check the source argument is not neutered.
+ TNode<Object> source_buffer =
+ LoadObjectField(CAST(source), JSTypedArray::kBufferOffset);
+ GotoIf(IsDetachedBuffer(source_buffer), &if_typed_array_is_neutered);
+
+ SetTypedArraySource(context, CAST(source), CAST(receiver),
+ SmiUntag(offset_smi), &call_runtime,
+ &if_source_too_large);
+ args.PopAndReturn(UndefinedConstant());
+ }
+
+ // Fast path for a fast JSArray source argument.
+ BIND(&if_source_is_fast_jsarray);
+ {
+ SetJSArraySource(context, CAST(source), CAST(receiver),
+ SmiUntag(offset_smi), &call_runtime, &if_source_too_large);
+ args.PopAndReturn(UndefinedConstant());
+ }
+
+ BIND(&call_runtime);
+ args.PopAndReturn(CallRuntime(Runtime::kTypedArraySet, context, receiver,
+ source, offset_smi));
+
+ BIND(&if_offset_is_out_of_bounds);
+ ThrowRangeError(context, MessageTemplate::kTypedArraySetOffsetOutOfBounds);
+
+ BIND(&if_source_too_large);
+ ThrowRangeError(context, MessageTemplate::kTypedArraySetSourceTooLarge);
+
+ BIND(&if_typed_array_is_neutered);
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation,
+ "%TypedArray%.prototype.set");
+
+ BIND(&if_receiver_is_not_typedarray);
+ ThrowTypeError(context, MessageTemplate::kNotTypedArray);
+}
+
+// ES #sec-get-%typedarray%.prototype-@@tostringtag
+TF_BUILTIN(TypedArrayPrototypeToStringTag, TypedArrayBuiltinsAssembler) {
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Label if_receiverisheapobject(this), return_undefined(this);
+ Branch(TaggedIsSmi(receiver), &return_undefined, &if_receiverisheapobject);
+
+ // Dispatch on the elements kind, offset by
+ // FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND.
+ size_t const kTypedElementsKindCount = LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
+ FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND +
+ 1;
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ Label return_##type##array(this); \
+ BIND(&return_##type##array); \
+ Return(StringConstant(#Type "Array"));
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ Label* elements_kind_labels[kTypedElementsKindCount] = {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) &return_##type##array,
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ };
+ int32_t elements_kinds[kTypedElementsKindCount] = {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ TYPE##_ELEMENTS - FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND,
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ };
+
+ // We offset the dispatch by FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND, so
+ // that this can be turned into a non-sparse table switch for ideal
+ // performance.
+ BIND(&if_receiverisheapobject);
+ Node* elements_kind =
+ Int32Sub(LoadMapElementsKind(LoadMap(receiver)),
+ Int32Constant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND));
+ Switch(elements_kind, &return_undefined, elements_kinds, elements_kind_labels,
+ kTypedElementsKindCount);
+
+ BIND(&return_undefined);
+ Return(UndefinedConstant());
+}
+
void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeIterationMethod(
Node* context, Node* receiver, const char* method_name,
IterationKind iteration_kind) {
@@ -733,5 +1117,7 @@ TF_BUILTIN(TypedArrayPrototypeKeys, TypedArrayBuiltinsAssembler) {
context, receiver, "%TypedArray%.prototype.keys()", IterationKind::kKeys);
}
+#undef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index 0aac844e12..9cd6821907 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -19,6 +19,7 @@ namespace internal {
#define FORWARD_DECLARE(Name) \
Object* Builtin_##Name(int argc, Object** args, Isolate* isolate);
BUILTIN_LIST_C(FORWARD_DECLARE)
+#undef FORWARD_DECLARE
namespace {
@@ -112,19 +113,6 @@ Handle<Code> Builtins::NewFunctionContext(ScopeType scope_type) {
return Handle<Code>::null();
}
-Handle<Code> Builtins::NewCloneShallowArray(
- AllocationSiteMode allocation_mode) {
- switch (allocation_mode) {
- case TRACK_ALLOCATION_SITE:
- return builtin_handle(kFastCloneShallowArrayTrack);
- case DONT_TRACK_ALLOCATION_SITE:
- return builtin_handle(kFastCloneShallowArrayDontTrack);
- default:
- UNREACHABLE();
- }
- return Handle<Code>::null();
-}
-
Handle<Code> Builtins::NonPrimitiveToPrimitive(ToPrimitiveHint hint) {
switch (hint) {
case ToPrimitiveHint::kDefault:
@@ -147,8 +135,17 @@ Handle<Code> Builtins::OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint) {
UNREACHABLE();
}
-Handle<Code> Builtins::builtin_handle(Name name) {
- return Handle<Code>(reinterpret_cast<Code**>(builtin_address(name)));
+void Builtins::set_builtin(int index, HeapObject* builtin) {
+ DCHECK(Builtins::IsBuiltinId(index));
+ DCHECK(Internals::HasHeapObjectTag(builtin));
+ // The given builtin may be completely uninitialized, so we cannot check
+ // its type here.
+ builtins_[index] = builtin;
+}
+
+Handle<Code> Builtins::builtin_handle(int index) {
+ DCHECK(IsBuiltinId(index));
+ return Handle<Code>(reinterpret_cast<Code**>(builtin_address(index)));
}
// static
@@ -209,7 +206,7 @@ Callable Builtins::CallableFor(Isolate* isolate, Name name) {
// static
const char* Builtins::name(int index) {
- DCHECK(0 <= index && index < builtin_count);
+ DCHECK(IsBuiltinId(index));
return builtin_metadata[index].name;
}
@@ -220,8 +217,61 @@ Address Builtins::CppEntryOf(int index) {
}
// static
+bool Builtins::IsLazy(int index) {
+ DCHECK(IsBuiltinId(index));
+ // There are a couple of reasons that builtins can require eager-loading,
+ // i.e. deserialization at isolate creation instead of on-demand. For
+ // instance:
+ // * DeserializeLazy implements lazy loading.
+ // * Immovability requirement. This can only conveniently be guaranteed at
+ // isolate creation (at runtime, we'd have to allocate in LO space).
+ // * To avoid conflicts in SharedFunctionInfo::function_data (Illegal,
+ // HandleApiCall, interpreter entry trampolines).
+ // * Frequent use makes lazy loading unnecessary (CompileLazy).
+ // TODO(wasm): Remove wasm builtins once immovability is no longer required.
+ switch (index) {
+ case kAbort: // Required by wasm.
+ case kArrayForEachLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArrayForEachLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArrayMapLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArrayMapLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
+ case kCheckOptimizationMarker:
+ case kCompileLazy:
+ case kDeserializeLazy:
+ case kFunctionPrototypeHasInstance: // https://crbug.com/v8/6786.
+ case kHandleApiCall:
+ case kIllegal:
+ case kInterpreterEnterBytecodeAdvance:
+ case kInterpreterEnterBytecodeDispatch:
+ case kInterpreterEntryTrampoline:
+ case kObjectConstructor_ConstructStub: // https://crbug.com/v8/6787.
+ case kProxyConstructor_ConstructStub: // https://crbug.com/v8/6787.
+ case kNumberConstructor_ConstructStub: // https://crbug.com/v8/6787.
+ case kStringConstructor_ConstructStub: // https://crbug.com/v8/6787.
+ case kProxyConstructor: // https://crbug.com/v8/6787.
+ case kRecordWrite: // https://crbug.com/chromium/765301.
+ case kThrowWasmTrapDivByZero: // Required by wasm.
+ case kThrowWasmTrapDivUnrepresentable: // Required by wasm.
+ case kThrowWasmTrapFloatUnrepresentable: // Required by wasm.
+ case kThrowWasmTrapFuncInvalid: // Required by wasm.
+ case kThrowWasmTrapFuncSigMismatch: // Required by wasm.
+ case kThrowWasmTrapMemOutOfBounds: // Required by wasm.
+ case kThrowWasmTrapRemByZero: // Required by wasm.
+ case kThrowWasmTrapUnreachable: // Required by wasm.
+ case kToNumber: // Required by wasm.
+ case kWasmCompileLazy: // Required by wasm.
+ case kWasmStackGuard: // Required by wasm.
+ return false;
+ default:
+ // TODO(6624): Extend to other kinds.
+ return KindOf(index) == TFJ;
+ }
+ UNREACHABLE();
+}
+
+// static
Builtins::Kind Builtins::KindOf(int index) {
- DCHECK(0 <= index && index < builtin_count);
+ DCHECK(IsBuiltinId(index));
return builtin_metadata[index].kind;
}
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index 4925b2c5b1..e28feb7efe 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -48,6 +48,10 @@ class Builtins {
builtin_count
};
+ static bool IsBuiltinId(int maybe_id) {
+ return 0 <= maybe_id && maybe_id < builtin_count;
+ }
+
// The different builtin kinds are documented in builtins-definitions.h.
enum Kind { CPP, API, TFJ, TFC, TFS, TFH, ASM };
@@ -64,20 +68,28 @@ class Builtins {
InterpreterPushArgsMode mode);
Handle<Code> InterpreterPushArgsThenConstruct(InterpreterPushArgsMode mode);
Handle<Code> NewFunctionContext(ScopeType scope_type);
- Handle<Code> NewCloneShallowArray(AllocationSiteMode allocation_mode);
Handle<Code> JSConstructStubGeneric();
- Code* builtin(Name name) {
+ // Used by BuiltinDeserializer.
+ void set_builtin(int index, HeapObject* builtin);
+
+ Code* builtin(int index) {
+ DCHECK(IsBuiltinId(index));
// Code::cast cannot be used here since we access builtins
// during the marking phase of mark sweep. See IC::Clear.
- return reinterpret_cast<Code*>(builtins_[name]);
+ return reinterpret_cast<Code*>(builtins_[index]);
}
- Address builtin_address(Name name) {
- return reinterpret_cast<Address>(&builtins_[name]);
+ Address builtin_address(int index) {
+ DCHECK(IsBuiltinId(index));
+ return reinterpret_cast<Address>(&builtins_[index]);
}
- V8_EXPORT_PRIVATE Handle<Code> builtin_handle(Name name);
+ V8_EXPORT_PRIVATE Handle<Code> builtin_handle(int index);
+
+ // Used by lazy deserialization to determine whether a given builtin has been
+ // deserialized. See the DeserializeLazy builtin.
+ Object** builtins_table_address() { return &builtins_[0]; }
V8_EXPORT_PRIVATE static Callable CallableFor(Isolate* isolate, Name name);
@@ -95,6 +107,11 @@ class Builtins {
static bool IsCpp(int index);
static bool HasCppImplementation(int index);
+ // Returns true iff the given builtin can be lazy-loaded from the snapshot.
+ // This is true in general for most builtins with the exception of a few
+ // special cases such as CompileLazy and DeserializeLazy.
+ static bool IsLazy(int index);
+
bool is_initialized() const { return initialized_; }
// Used by SetupIsolateDelegate and Deserializer.
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 03db488b7e..ee15025520 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -6,9 +6,11 @@
#include "src/code-factory.h"
#include "src/codegen.h"
+#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
#include "src/frames.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -483,7 +485,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// context and the function left on the stack by the code
// invocation.
}
- __ ret(kPointerSize); // Remove receiver.
+ __ ret(0);
}
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
@@ -615,31 +617,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
static void ReplaceClosureCodeWithOptimizedCode(
MacroAssembler* masm, Register optimized_code, Register closure,
Register scratch1, Register scratch2, Register scratch3) {
- Register native_context = scratch1;
// Store the optimized code in the closure.
__ mov(FieldOperand(closure, JSFunction::kCodeOffset), optimized_code);
__ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Link the closure into the optimized function list.
- __ mov(native_context, NativeContextOperand());
- __ mov(scratch3,
- ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- __ mov(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), scratch3);
- __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch3,
- scratch2, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- const int function_list_offset =
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
- __ mov(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
- closure);
- // Save closure before the write barrier.
- __ mov(scratch3, closure);
- __ RecordWriteContextSlot(native_context, function_list_offset, closure,
- scratch2, kDontSaveFPRegs);
- __ mov(closure, scratch3);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
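With the optimized-function-list bookkeeping deleted above, ReplaceClosureCodeWithOptimizedCode reduces to one store plus a GC write barrier. A hedged C++ model of that remaining pattern (every name below is a stand-in chosen to mirror the assembly, not the V8 API):

    // Stand-in type; the real stub stores into JSFunction::kCodeOffset.
    struct JSFunction {
      void* code;
    };

    // Stand-in barrier: V8's RecordWriteField tells the GC about the store.
    static void RecordWriteField(JSFunction* holder, void** slot) {
      (void)holder;
      (void)slot;
    }

    void ReplaceClosureCode(JSFunction* closure, void* optimized_code) {
      closure->code = optimized_code;             // the single remaining store
      RecordWriteField(closure, &closure->code);  // the write barrier
    }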
@@ -1354,6 +1337,18 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
GenerateTailCallToSharedCode(masm);
}
+void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
+ // Set the code slot inside the JSFunction to the interpreter-entry
+ // trampoline.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
+ __ mov(FieldOperand(edi, JSFunction::kCodeOffset), ecx);
+ __ RecordWriteField(edi, JSFunction::kCodeOffset, ecx, ebx, kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ // Jump to compile lazy.
+ Generate_CompileLazy(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argument count (preserved for callee)
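Before the DeserializeLazy hunk that follows, note that the new CompileLazyDeoptimizedCode stub above is only a code-slot reset that falls through into CompileLazy. In C-like pseudocode, under stand-in declarations (none of these are V8's types):

    struct Code {};
    struct SharedFunctionInfo { Code* code; };
    struct JSFunction { SharedFunctionInfo* shared; Code* code; };

    static void CompileLazy(JSFunction*) { /* body elided in this sketch */ }

    void CompileLazyDeoptimizedCode(JSFunction* fn) {
      // Point the function back at the SFI's code, i.e. the interpreter-entry
      // trampoline; the real stub also emits a GC write barrier here.
      fn->code = fn->shared->code;
      CompileLazy(fn);  // "Jump to compile lazy."
    }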
@@ -1398,6 +1393,92 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
+// Lazy deserialization design doc: http://goo.gl/dxkYDZ.
+void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argument count (preserved for callee)
+ // -- edx : new target (preserved for callee)
+ // -- edi : target function (preserved for callee)
+ // -----------------------------------
+
+ Label deserialize_in_runtime;
+
+ Register target = edi; // Must be preserved
+ Register scratch0 = ebx;
+ Register scratch1 = ecx;
+
+ CHECK(scratch0 != eax && scratch0 != edx && scratch0 != edi);
+ CHECK(scratch1 != eax && scratch1 != edx && scratch1 != edi);
+ CHECK(scratch0 != scratch1);
+
+ // Load the builtin id for lazy deserialization from SharedFunctionInfo.
+
+ __ AssertFunction(target);
+ __ mov(scratch0, FieldOperand(target, JSFunction::kSharedFunctionInfoOffset));
+
+ __ mov(scratch1,
+ FieldOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
+ __ AssertSmi(scratch1);
+
+ // The builtin may already have been deserialized. If that is the case, it is
+ // stored in the builtins table, and we can copy the correct code object to
+ // both the shared function info and the function without calling into
+ // runtime.
+ //
+ // Otherwise, we need to call into runtime to deserialize.
+
+ {
+ // Load the code object at builtins_table[builtin_id] into scratch1.
+
+ __ SmiUntag(scratch1);
+ __ mov(scratch0,
+ Immediate(ExternalReference::builtins_address(masm->isolate())));
+ __ mov(scratch1, Operand(scratch0, scratch1, times_pointer_size, 0));
+
+ // Check if the loaded code object has already been deserialized. This is
+ // the case iff it does not equal DeserializeLazy.
+
+ __ Move(scratch0, masm->CodeObject());
+ __ cmp(scratch1, scratch0);
+ __ j(equal, &deserialize_in_runtime);
+ }
+
+ {
+ // If we've reached this spot, the target builtin has been deserialized and
+ // we simply need to copy it over. First to the shared function info.
+
+ Register target_builtin = scratch1;
+ Register shared = scratch0;
+
+ __ mov(shared, FieldOperand(target, JSFunction::kSharedFunctionInfoOffset));
+
+ __ mov(FieldOperand(shared, SharedFunctionInfo::kCodeOffset),
+ target_builtin);
+ __ push(eax); // Write barrier clobbers these below.
+ __ push(target_builtin);
+ __ RecordWriteField(shared, SharedFunctionInfo::kCodeOffset, target_builtin,
+ eax, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ pop(target_builtin); // eax is popped later, shared is now available.
+
+ // And second to the target function.
+
+ __ mov(FieldOperand(target, JSFunction::kCodeOffset), target_builtin);
+ __ push(target_builtin); // Write barrier clobbers these below.
+ __ RecordWriteField(target, JSFunction::kCodeOffset, target_builtin, eax,
+ kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ pop(target_builtin);
+ __ pop(eax);
+
+ // All copying is done. Jump to the deserialized code object.
+
+ __ lea(target_builtin, FieldOperand(target_builtin, Code::kHeaderSize));
+ __ jmp(target_builtin);
+ }
+
+ __ bind(&deserialize_in_runtime);
+ GenerateTailCallToReturnedCode(masm, Runtime::kDeserializeLazy);
+}
+
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argument count (preserved for callee)
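The control flow of Generate_DeserializeLazy is easier to follow restated in C++. The sketch below paraphrases the assembly above; the table, the code objects, and the runtime entry are all stand-ins, not V8 declarations:

    struct Code { int tag; };
    struct SharedFunctionInfo { int builtin_id; Code* code; };  // id from the function_data Smi
    struct JSFunction { SharedFunctionInfo* shared; Code* code; };

    static Code codes[2];
    static Code* builtins_table[] = {&codes[0], &codes[1]};
    static Code* const kDeserializeLazy = &codes[0];  // masm->CodeObject() stand-in

    static Code* Runtime_DeserializeLazy(JSFunction*) { return &codes[1]; }

    Code* DeserializeLazy(JSFunction* fn) {
      Code* target = builtins_table[fn->shared->builtin_id];
      if (target == kDeserializeLazy) {
        // Slot still holds DeserializeLazy itself: not yet deserialized, so
        // take the slow path (Runtime::kDeserializeLazy in the stub above).
        return Runtime_DeserializeLazy(fn);
      }
      // Fast path: copy the materialized code object to the SFI and the
      // function (each store gets a write barrier in the real stub), then
      // the stub tail-jumps into the code object's instruction start.
      fn->shared->code = target;
      fn->code = target;
      return target;
    }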
@@ -1481,8 +1562,7 @@ void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
// Tear down internal frame.
}
- __ pop(MemOperand(esp, 0)); // Ignore state offset
- __ ret(0); // Return to ContinueToBuiltin stub still on stack.
+ __ Ret(); // Return to ContinueToBuiltin stub still on stack.
}
namespace {
@@ -1536,49 +1616,16 @@ void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
Generate_ContinueToBuiltinHelper(masm, true, true);
}
-static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
- Deoptimizer::BailoutType type) {
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Pass deoptimization type to the runtime system.
- __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
__ CallRuntime(Runtime::kNotifyDeoptimized);
-
// Tear down internal frame.
}
- // Get the full codegen state from the stack and untag it.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
- __ SmiUntag(ecx);
-
- // Switch on the state.
- Label not_no_registers, not_tos_eax;
- __ cmp(ecx, static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS));
- __ j(not_equal, &not_no_registers, Label::kNear);
- __ ret(1 * kPointerSize); // Remove state.
-
- __ bind(&not_no_registers);
DCHECK_EQ(kInterpreterAccumulatorRegister.code(), eax.code());
- __ mov(eax, Operand(esp, 2 * kPointerSize));
- __ cmp(ecx, static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER));
- __ j(not_equal, &not_tos_eax, Label::kNear);
- __ ret(2 * kPointerSize); // Remove state, eax.
-
- __ bind(&not_tos_eax);
- __ Abort(kNoCasesLeft);
-}
-
-void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ ret(1 * kPointerSize); // Remove eax.
}
// static
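The NotifyDeoptimized rewrite above works because, after this change, the deoptimizer is assumed to leave exactly one slot, the saved interpreter accumulator, under the return address; there is no BailoutState word to switch on any more. A sketch of that assumed ia32 exit layout (illustrative only, not a V8 declaration):

    #include <cstdint>

    // Illustrative model of the stack the stub sees on entry.
    struct DeoptExitTop {
      uint32_t return_address;  // esp[0], consumed by ret
      uint32_t accumulator;     // esp[4], reloaded into eax, then dropped
    };
    // Hence: mov eax, [esp + 4]; ret 4  (i.e. __ ret(1 * kPointerSize)).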
@@ -1853,298 +1900,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-// static
-void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : number of arguments
- // -- edi : constructor function
- // -- esi : context
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // 1. Load the first argument into ebx.
- Label no_arguments;
- {
- __ test(eax, eax);
- __ j(zero, &no_arguments, Label::kNear);
- __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
- }
-
- // 2a. Convert the first argument to a number.
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(eax);
- __ EnterBuiltinFrame(esi, edi, eax);
- __ mov(eax, ebx);
- __ Call(BUILTIN_CODE(masm->isolate(), ToNumber), RelocInfo::CODE_TARGET);
- __ LeaveBuiltinFrame(esi, edi, ebx); // Argc popped to ebx.
- __ SmiUntag(ebx);
- }
-
- {
- // Drop all arguments including the receiver.
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(ecx);
- __ Ret();
- }
-
- // 2b. No arguments, return +0 (already in eax).
- __ bind(&no_arguments);
- __ ret(1 * kPointerSize);
-}
-
-// static
-void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : number of arguments
- // -- edi : constructor function
- // -- edx : new target
- // -- esi : context
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // 1. Make sure we operate in the context of the called function.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Store argc in r8.
- __ mov(ecx, eax);
- __ SmiTag(ecx);
-
- // 2. Load the first argument into ebx.
- {
- Label no_arguments, done;
- __ test(eax, eax);
- __ j(zero, &no_arguments, Label::kNear);
- __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
- __ jmp(&done, Label::kNear);
- __ bind(&no_arguments);
- __ Move(ebx, Smi::kZero);
- __ bind(&done);
- }
-
- // 3. Make sure ebx is a number.
- {
- Label done_convert;
- __ JumpIfSmi(ebx, &done_convert);
- __ CompareRoot(FieldOperand(ebx, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(equal, &done_convert);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterBuiltinFrame(esi, edi, ecx);
- __ Push(edx);
- __ Move(eax, ebx);
- __ Call(BUILTIN_CODE(masm->isolate(), ToNumber), RelocInfo::CODE_TARGET);
- __ Move(ebx, eax);
- __ Pop(edx);
- __ LeaveBuiltinFrame(esi, edi, ecx);
- }
- __ bind(&done_convert);
- }
-
- // 4. Check if new target and constructor differ.
- Label drop_frame_and_ret, done_alloc, new_object;
- __ cmp(edx, edi);
- __ j(not_equal, &new_object);
-
- // 5. Allocate a JSValue wrapper for the number.
- __ AllocateJSValue(eax, edi, ebx, esi, &done_alloc);
- __ jmp(&drop_frame_and_ret);
-
- __ bind(&done_alloc);
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); // Restore esi.
-
- // 6. Fallback to the runtime to create new object.
- __ bind(&new_object);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterBuiltinFrame(esi, edi, ecx);
- __ Push(ebx); // the first argument
- __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
- RelocInfo::CODE_TARGET);
- __ Pop(FieldOperand(eax, JSValue::kValueOffset));
- __ LeaveBuiltinFrame(esi, edi, ecx);
- }
-
- __ bind(&drop_frame_and_ret);
- {
- // Drop all arguments including the receiver.
- __ PopReturnAddressTo(esi);
- __ SmiUntag(ecx);
- __ lea(esp, Operand(esp, ecx, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(esi);
- __ Ret();
- }
-}
-
-// static
-void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : number of arguments
- // -- edi : constructor function
- // -- esi : context
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // 1. Load the first argument into eax.
- Label no_arguments;
- {
- __ mov(ebx, eax); // Store argc in ebx.
- __ test(eax, eax);
- __ j(zero, &no_arguments, Label::kNear);
- __ mov(eax, Operand(esp, eax, times_pointer_size, 0));
- }
-
- // 2a. At least one argument, return eax if it's a string, otherwise
- // dispatch to appropriate conversion.
- Label drop_frame_and_ret, to_string, symbol_descriptive_string;
- {
- __ JumpIfSmi(eax, &to_string, Label::kNear);
- STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
- __ j(above, &to_string, Label::kNear);
- __ j(equal, &symbol_descriptive_string, Label::kNear);
- __ jmp(&drop_frame_and_ret, Label::kNear);
- }
-
- // 2b. No arguments, return the empty string (and pop the receiver).
- __ bind(&no_arguments);
- {
- __ LoadRoot(eax, Heap::kempty_stringRootIndex);
- __ ret(1 * kPointerSize);
- }
-
- // 3a. Convert eax to a string.
- __ bind(&to_string);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(ebx);
- __ EnterBuiltinFrame(esi, edi, ebx);
- __ Call(BUILTIN_CODE(masm->isolate(), ToString), RelocInfo::CODE_TARGET);
- __ LeaveBuiltinFrame(esi, edi, ebx);
- __ SmiUntag(ebx);
- }
- __ jmp(&drop_frame_and_ret, Label::kNear);
-
- // 3b. Convert symbol in eax to a string.
- __ bind(&symbol_descriptive_string);
- {
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
- __ Push(eax);
- __ PushReturnAddressFrom(ecx);
- __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
- }
-
- __ bind(&drop_frame_and_ret);
- {
- // Drop all arguments including the receiver.
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(ecx);
- __ Ret();
- }
-}
-
-// static
-void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : number of arguments
- // -- edi : constructor function
- // -- edx : new target
- // -- esi : context
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // 1. Make sure we operate in the context of the called function.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- __ mov(ebx, eax);
-
- // 2. Load the first argument into eax.
- {
- Label no_arguments, done;
- __ test(ebx, ebx);
- __ j(zero, &no_arguments, Label::kNear);
- __ mov(eax, Operand(esp, ebx, times_pointer_size, 0));
- __ jmp(&done, Label::kNear);
- __ bind(&no_arguments);
- __ LoadRoot(eax, Heap::kempty_stringRootIndex);
- __ bind(&done);
- }
-
- // 3. Make sure eax is a string.
- {
- Label convert, done_convert;
- __ JumpIfSmi(eax, &convert, Label::kNear);
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
- __ j(below, &done_convert);
- __ bind(&convert);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(ebx);
- __ EnterBuiltinFrame(esi, edi, ebx);
- __ Push(edx);
- __ Call(BUILTIN_CODE(masm->isolate(), ToString), RelocInfo::CODE_TARGET);
- __ Pop(edx);
- __ LeaveBuiltinFrame(esi, edi, ebx);
- __ SmiUntag(ebx);
- }
- __ bind(&done_convert);
- }
-
- // 4. Check if new target and constructor differ.
- Label drop_frame_and_ret, done_alloc, new_object;
- __ cmp(edx, edi);
- __ j(not_equal, &new_object);
-
- // 5. Allocate a JSValue wrapper for the string.
- // AllocateJSValue can't handle src == dst register. Reuse esi and restore it
- // as needed after the call.
- __ mov(esi, eax);
- __ AllocateJSValue(eax, edi, esi, ecx, &done_alloc);
- __ jmp(&drop_frame_and_ret);
-
- __ bind(&done_alloc);
- {
- // Restore eax to the first argument and esi to the context.
- __ mov(eax, esi);
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- }
-
- // 6. Fallback to the runtime to create new object.
- __ bind(&new_object);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(ebx);
- __ EnterBuiltinFrame(esi, edi, ebx);
- __ Push(eax); // the first argument
- __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
- RelocInfo::CODE_TARGET);
- __ Pop(FieldOperand(eax, JSValue::kValueOffset));
- __ LeaveBuiltinFrame(esi, edi, ebx);
- __ SmiUntag(ebx);
- }
-
- __ bind(&drop_frame_and_ret);
- {
- // Drop all arguments including the receiver.
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(ecx);
- __ Ret();
- }
-}
-
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ push(ebp);
__ mov(ebp, esp);
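The four platform stubs deleted above implemented Number() and String() for call and construct invocations; presumably this patch replaces them with a platform-independent implementation elsewhere (not visible in this hunk). As a hedged reminder of the behavior they encoded, restated in C-like pseudocode with stand-in names only:

    struct Object { int tag; };
    static Object kZero;

    static Object* ToNumber(Object* x) { return x; }  // conversions elided
    static Object* AllocateJSValue(Object* value) { return value; }  // wrapper

    // Number(x): convert the first argument, or return +0 when called bare.
    Object* NumberConstructor(Object** args, int argc) {
      return argc == 0 ? &kZero : ToNumber(args[0]);
    }

    // new Number(x): additionally wrap the converted value in a JSValue
    // (the deleted stub fell back to FastNewObject when new.target differed
    // from the constructor; that branch is elided here).
    Object* NumberConstructor_Construct(Object** args, int argc) {
      Object* value = argc == 0 ? &kZero : ToNumber(args[0]);
      return AllocateJSValue(value);
    }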
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index b280c161d6..e8f846c10a 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -5,10 +5,12 @@
#if V8_TARGET_ARCH_MIPS
#include "src/codegen.h"
+#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
#include "src/frames.h"
+#include "src/objects-inl.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -152,279 +154,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-// static
-void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- a1 : constructor function
- // -- cp : context
- // -- ra : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // 1. Load the first argument into a0.
- Label no_arguments;
- {
- __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
- __ Subu(t1, a0, Operand(1)); // In delay slot.
- __ mov(t0, a0); // Store argc in t0.
- __ Lsa(at, sp, t1, kPointerSizeLog2);
- __ lw(a0, MemOperand(at));
- }
-
- // 2a. Convert first argument to number.
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(t0);
- __ EnterBuiltinFrame(cp, a1, t0);
- __ Call(BUILTIN_CODE(masm->isolate(), ToNumber), RelocInfo::CODE_TARGET);
- __ LeaveBuiltinFrame(cp, a1, t0);
- __ SmiUntag(t0);
- }
-
- {
- // Drop all arguments including the receiver.
- __ Lsa(sp, sp, t0, kPointerSizeLog2);
- __ DropAndRet(1);
- }
-
- // 2b. No arguments, return +0.
- __ bind(&no_arguments);
- __ Move(v0, Smi::kZero);
- __ DropAndRet(1);
-}
-
-// static
-void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- a1 : constructor function
- // -- a3 : new target
- // -- cp : context
- // -- ra : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // 1. Make sure we operate in the context of the called function.
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- // 2. Load the first argument into a0.
- {
- Label no_arguments, done;
- __ mov(t0, a0); // Store argc in t0.
- __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
- __ Subu(t1, a0, Operand(1)); // In delay slot.
- __ Lsa(at, sp, t1, kPointerSizeLog2);
- __ lw(a0, MemOperand(at));
- __ jmp(&done);
- __ bind(&no_arguments);
- __ Move(a0, Smi::kZero);
- __ bind(&done);
- }
-
- // 3. Make sure a0 is a number.
- {
- Label done_convert;
- __ JumpIfSmi(a0, &done_convert);
- __ GetObjectType(a0, a2, a2);
- __ Branch(&done_convert, eq, a2, Operand(HEAP_NUMBER_TYPE));
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(t0);
- __ EnterBuiltinFrame(cp, a1, t0);
- __ Push(a3);
- __ Call(BUILTIN_CODE(masm->isolate(), ToNumber), RelocInfo::CODE_TARGET);
- __ Move(a0, v0);
- __ Pop(a3);
- __ LeaveBuiltinFrame(cp, a1, t0);
- __ SmiUntag(t0);
- }
- __ bind(&done_convert);
- }
-
- // 4. Check if new target and constructor differ.
- Label drop_frame_and_ret, new_object;
- __ Branch(&new_object, ne, a1, Operand(a3));
-
- // 5. Allocate a JSValue wrapper for the number.
- __ AllocateJSValue(v0, a1, a0, a2, t1, &new_object);
- __ jmp(&drop_frame_and_ret);
-
- // 6. Fallback to the runtime to create new object.
- __ bind(&new_object);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(t0);
- __ EnterBuiltinFrame(cp, a1, t0);
- __ Push(a0); // first argument
- __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
- RelocInfo::CODE_TARGET);
- __ Pop(a0);
- __ LeaveBuiltinFrame(cp, a1, t0);
- __ SmiUntag(t0);
- }
- __ sw(a0, FieldMemOperand(v0, JSValue::kValueOffset));
-
- __ bind(&drop_frame_and_ret);
- {
- __ Lsa(sp, sp, t0, kPointerSizeLog2);
- __ DropAndRet(1);
- }
-}
-
-// static
-void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- a1 : constructor function
- // -- cp : context
- // -- ra : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // 1. Load the first argument into a0.
- Label no_arguments;
- {
- __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
- __ Subu(t1, a0, Operand(1));
- __ mov(t0, a0); // Store argc in t0.
- __ Lsa(at, sp, t1, kPointerSizeLog2);
- __ lw(a0, MemOperand(at));
- }
-
- // 2a. At least one argument, return a0 if it's a string, otherwise
- // dispatch to appropriate conversion.
- Label drop_frame_and_ret, to_string, symbol_descriptive_string;
- {
- __ JumpIfSmi(a0, &to_string);
- __ GetObjectType(a0, t1, t1);
- STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
- __ Subu(t1, t1, Operand(FIRST_NONSTRING_TYPE));
- __ Branch(&symbol_descriptive_string, eq, t1, Operand(zero_reg));
- __ Branch(&to_string, gt, t1, Operand(zero_reg));
- __ mov(v0, a0);
- __ jmp(&drop_frame_and_ret);
- }
-
- // 2b. No arguments, return the empty string (and pop the receiver).
- __ bind(&no_arguments);
- {
- __ LoadRoot(v0, Heap::kempty_stringRootIndex);
- __ DropAndRet(1);
- }
-
- // 3a. Convert a0 to a string.
- __ bind(&to_string);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(t0);
- __ EnterBuiltinFrame(cp, a1, t0);
- __ Call(BUILTIN_CODE(masm->isolate(), ToString), RelocInfo::CODE_TARGET);
- __ LeaveBuiltinFrame(cp, a1, t0);
- __ SmiUntag(t0);
- }
- __ jmp(&drop_frame_and_ret);
-
- // 3b. Convert symbol in a0 to a string.
- __ bind(&symbol_descriptive_string);
- {
- __ Lsa(sp, sp, t0, kPointerSizeLog2);
- __ Drop(1);
- __ Push(a0);
- __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
- }
-
- __ bind(&drop_frame_and_ret);
- {
- __ Lsa(sp, sp, t0, kPointerSizeLog2);
- __ DropAndRet(1);
- }
-}
-
-// static
-void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- a1 : constructor function
- // -- a3 : new target
- // -- cp : context
- // -- ra : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // 1. Make sure we operate in the context of the called function.
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- // 2. Load the first argument into a0.
- {
- Label no_arguments, done;
- __ mov(t0, a0); // Store argc in t0.
- __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
- __ Subu(t1, a0, Operand(1));
- __ Lsa(at, sp, t1, kPointerSizeLog2);
- __ lw(a0, MemOperand(at));
- __ jmp(&done);
- __ bind(&no_arguments);
- __ LoadRoot(a0, Heap::kempty_stringRootIndex);
- __ bind(&done);
- }
-
- // 3. Make sure a0 is a string.
- {
- Label convert, done_convert;
- __ JumpIfSmi(a0, &convert);
- __ GetObjectType(a0, a2, a2);
- __ And(t1, a2, Operand(kIsNotStringMask));
- __ Branch(&done_convert, eq, t1, Operand(zero_reg));
- __ bind(&convert);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(t0);
- __ EnterBuiltinFrame(cp, a1, t0);
- __ Push(a3);
- __ Call(BUILTIN_CODE(masm->isolate(), ToString), RelocInfo::CODE_TARGET);
- __ Move(a0, v0);
- __ Pop(a3);
- __ LeaveBuiltinFrame(cp, a1, t0);
- __ SmiUntag(t0);
- }
- __ bind(&done_convert);
- }
-
- // 4. Check if new target and constructor differ.
- Label drop_frame_and_ret, new_object;
- __ Branch(&new_object, ne, a1, Operand(a3));
-
- // 5. Allocate a JSValue wrapper for the string.
- __ AllocateJSValue(v0, a1, a0, a2, t1, &new_object);
- __ jmp(&drop_frame_and_ret);
-
- // 6. Fallback to the runtime to create new object.
- __ bind(&new_object);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(t0);
- __ EnterBuiltinFrame(cp, a1, t0);
- __ Push(a0); // first argument
- __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
- RelocInfo::CODE_TARGET);
- __ Pop(a0);
- __ LeaveBuiltinFrame(cp, a1, t0);
- __ SmiUntag(t0);
- }
- __ sw(a0, FieldMemOperand(v0, JSValue::kValueOffset));
-
- __ bind(&drop_frame_and_ret);
- {
- __ Lsa(sp, sp, t0, kPointerSizeLog2);
- __ DropAndRet(1);
- }
-}
-
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
@@ -954,33 +683,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
static void ReplaceClosureCodeWithOptimizedCode(
MacroAssembler* masm, Register optimized_code, Register closure,
Register scratch1, Register scratch2, Register scratch3) {
- Register native_context = scratch1;
-
// Store code entry in the closure.
__ sw(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
__ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
-
- // Link the closure into the optimized function list.
- __ lw(native_context, NativeContextMemOperand());
- __ lw(scratch2,
- ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- __ sw(scratch2,
- FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
- __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch2,
- scratch3, kRAHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- const int function_list_offset =
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
- __ sw(closure,
- ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- // Save closure before the write barrier.
- __ mov(scratch2, closure);
- __ RecordWriteContextSlot(native_context, function_list_offset, closure,
- scratch3, kRAHasNotBeenSaved, kDontSaveFPRegs);
- __ mov(closure, scratch2);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
@@ -993,7 +701,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
// Leave the frame (also dropping the register file).
- __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ __ LeaveFrame(StackFrame::INTERPRETED);
// Drop receiver + arguments.
__ Addu(sp, sp, args_count);
@@ -1565,6 +1273,18 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
GenerateTailCallToSharedCode(masm);
}
+void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
+ // Set the code slot inside the JSFunction to the interpreter-entry
+ // trampoline.
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+ __ sw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+ __ RecordWriteField(a1, JSFunction::kCodeOffset, a2, t0, kRAHasNotBeenSaved,
+ kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ // Jump to compile lazy.
+ Generate_CompileLazy(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
@@ -1608,6 +1328,93 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
+// Lazy deserialization design doc: http://goo.gl/dxkYDZ.
+void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (preserved for callee)
+ // -- a3 : new target (preserved for callee)
+ // -- a1 : target function (preserved for callee)
+ // -----------------------------------
+
+ Label deserialize_in_runtime;
+
+ Register target = a1; // Must be preserved
+ Register scratch0 = a2;
+ Register scratch1 = t0;
+
+ CHECK(scratch0 != a0 && scratch0 != a3 && scratch0 != a1);
+ CHECK(scratch1 != a0 && scratch1 != a3 && scratch1 != a1);
+ CHECK(scratch0 != scratch1);
+
+ // Load the builtin id for lazy deserialization from SharedFunctionInfo.
+
+ __ AssertFunction(target);
+ __ lw(scratch0,
+ FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
+
+ __ lw(scratch1,
+ FieldMemOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
+ __ AssertSmi(scratch1);
+
+ // The builtin may already have been deserialized. If that is the case, it is
+ // stored in the builtins table, and we can copy the correct code object to
+ // both the shared function info and the function without calling into
+ // runtime.
+ //
+ // Otherwise, we need to call into runtime to deserialize.
+
+ {
+ // Load the code object at builtins_table[builtin_id] into scratch1.
+
+ __ SmiUntag(scratch1);
+ __ li(scratch0,
+ Operand(ExternalReference::builtins_address(masm->isolate())));
+ __ Lsa(scratch1, scratch0, scratch1, kPointerSizeLog2);
+ __ lw(scratch1, MemOperand(scratch1));
+
+ // Check if the loaded code object has already been deserialized. This is
+ // the case iff it does not equal DeserializeLazy.
+
+ __ Move(scratch0, masm->CodeObject());
+ __ Branch(&deserialize_in_runtime, eq, scratch1, Operand(scratch0));
+ }
+
+ {
+ // If we've reached this spot, the target builtin has been deserialized and
+ // we simply need to copy it over. First to the shared function info.
+
+ Register target_builtin = scratch1;
+ Register shared = scratch0;
+
+ __ lw(shared,
+ FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
+
+ CHECK(t1 != target && t1 != scratch0 && t1 != scratch1);
+ CHECK(t3 != target && t3 != scratch0 && t3 != scratch1);
+
+ __ sw(target_builtin,
+ FieldMemOperand(shared, SharedFunctionInfo::kCodeOffset));
+ __ mov(t3, target_builtin); // Write barrier clobbers t3 below.
+ __ RecordWriteField(shared, SharedFunctionInfo::kCodeOffset, t3, t1,
+ kRAHasNotBeenSaved, kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // And second to the target function.
+
+ __ sw(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset));
+ __ mov(t3, target_builtin); // Write barrier clobbers t3 below.
+ __ RecordWriteField(target, JSFunction::kCodeOffset, t3, t1,
+ kRAHasNotBeenSaved, kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // All copying is done. Jump to the deserialized code object.
+
+ __ Jump(target_builtin, Code::kHeaderSize - kHeapObjectTag);
+ }
+
+ __ bind(&deserialize_in_runtime);
+ GenerateTailCallToReturnedCode(masm, Runtime::kDeserializeLazy);
+}
+
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
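On MIPS the table lookup in the block above is a single shift-and-add: Lsa(scratch1, scratch0, scratch1, kPointerSizeLog2) forms table_base + (builtin_id << kPointerSizeLog2) before the lw. The same arithmetic as a worked C++ example (not V8 code; mips32 has 4-byte pointers, so kPointerSizeLog2 is 2 and builtin id 7 lands 28 bytes into the table):

    #include <cstdint>

    // entry_address = table_base + (builtin_id << pointer_size_log2)
    uintptr_t BuiltinSlotAddress(uintptr_t table_base, int builtin_id,
                                 int pointer_size_log2) {
      return table_base +
             (static_cast<uintptr_t>(builtin_id) << pointer_size_log2);
    }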
@@ -1681,7 +1488,6 @@ void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
__ Pop(v0);
}
- __ Addu(sp, sp, Operand(kPointerSize)); // Ignore state
__ Jump(ra); // Jump to the ContinueToBuiltin stub
}
@@ -1735,51 +1541,17 @@ void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
Generate_ContinueToBuiltinHelper(masm, true, true);
}
-static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
- Deoptimizer::BailoutType type) {
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Pass the function and deoptimization type to the runtime system.
- __ li(a0, Operand(Smi::FromInt(static_cast<int>(type))));
- __ push(a0);
__ CallRuntime(Runtime::kNotifyDeoptimized);
}
- // Get the full codegen state from the stack and untag it -> t2.
- __ lw(t2, MemOperand(sp, 0 * kPointerSize));
- __ SmiUntag(t2);
- // Switch on the state.
- Label with_tos_register, unknown_state;
- __ Branch(&with_tos_register, ne, t2,
- Operand(static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS)));
- __ Ret(USE_DELAY_SLOT);
- // Safe to fill delay slot Addu will emit one instruction.
- __ Addu(sp, sp, Operand(1 * kPointerSize)); // Remove state.
-
- __ bind(&with_tos_register);
DCHECK_EQ(kInterpreterAccumulatorRegister.code(), v0.code());
- __ lw(v0, MemOperand(sp, 1 * kPointerSize));
- __ Branch(&unknown_state, ne, t2,
- Operand(static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER)));
-
+ __ lw(v0, MemOperand(sp, 0 * kPointerSize));
__ Ret(USE_DELAY_SLOT);
// Safe to fill delay slot: Addu will emit one instruction.
- __ Addu(sp, sp, Operand(2 * kPointerSize)); // Remove state.
-
- __ bind(&unknown_state);
- __ stop("no cases left");
-}
-
-void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+ __ Addu(sp, sp, Operand(1 * kPointerSize)); // Remove accumulator.
}
static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index b65a796785..f62750b061 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -5,10 +5,12 @@
#if V8_TARGET_ARCH_MIPS64
#include "src/codegen.h"
+#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
#include "src/frames.h"
+#include "src/objects-inl.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -152,281 +154,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-// static
-void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- a1 : constructor function
- // -- cp : context
- // -- ra : return address
- // -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
- // -- sp[argc * 8] : receiver
- // -----------------------------------
-
- // 1. Load the first argument into a0 and get rid of the rest (including the
- // receiver).
- Label no_arguments;
- {
- __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
- __ Dsubu(t1, a0, Operand(1)); // In delay slot.
- __ mov(t0, a0); // Store argc in t0.
- __ Dlsa(at, sp, t1, kPointerSizeLog2);
- __ Ld(a0, MemOperand(at));
- }
-
- // 2a. Convert first argument to number.
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(t0);
- __ EnterBuiltinFrame(cp, a1, t0);
- __ Call(BUILTIN_CODE(masm->isolate(), ToNumber), RelocInfo::CODE_TARGET);
- __ LeaveBuiltinFrame(cp, a1, t0);
- __ SmiUntag(t0);
- }
-
- {
- // Drop all arguments including the receiver.
- __ Dlsa(sp, sp, t0, kPointerSizeLog2);
- __ DropAndRet(1);
- }
-
- // 2b. No arguments, return +0.
- __ bind(&no_arguments);
- __ Move(v0, Smi::kZero);
- __ DropAndRet(1);
-}
-
-void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- a1 : constructor function
- // -- a3 : new target
- // -- cp : context
- // -- ra : return address
- // -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
- // -- sp[argc * 8] : receiver
- // -----------------------------------
-
- // 1. Make sure we operate in the context of the called function.
- __ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- // 2. Load the first argument into a0 and get rid of the rest (including the
- // receiver).
- {
- Label no_arguments, done;
- __ mov(t0, a0); // Store argc in t0.
- __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
- __ Dsubu(a0, a0, Operand(1)); // In delay slot.
- __ Dlsa(at, sp, a0, kPointerSizeLog2);
- __ Ld(a0, MemOperand(at));
- __ jmp(&done);
- __ bind(&no_arguments);
- __ Move(a0, Smi::kZero);
- __ bind(&done);
- }
-
- // 3. Make sure a0 is a number.
- {
- Label done_convert;
- __ JumpIfSmi(a0, &done_convert);
- __ GetObjectType(a0, a2, a2);
- __ Branch(&done_convert, eq, a2, Operand(HEAP_NUMBER_TYPE));
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(t0);
- __ EnterBuiltinFrame(cp, a1, t0);
- __ Push(a3);
- __ Call(BUILTIN_CODE(masm->isolate(), ToNumber), RelocInfo::CODE_TARGET);
- __ Move(a0, v0);
- __ Pop(a3);
- __ LeaveBuiltinFrame(cp, a1, t0);
- __ SmiUntag(t0);
- }
- __ bind(&done_convert);
- }
-
- // 4. Check if new target and constructor differ.
- Label drop_frame_and_ret, new_object;
- __ Branch(&new_object, ne, a1, Operand(a3));
-
- // 5. Allocate a JSValue wrapper for the number.
- __ AllocateJSValue(v0, a1, a0, a2, t1, &new_object);
- __ jmp(&drop_frame_and_ret);
-
- // 6. Fallback to the runtime to create new object.
- __ bind(&new_object);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(t0);
- __ EnterBuiltinFrame(cp, a1, t0);
- __ Push(a0);
- __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
- RelocInfo::CODE_TARGET);
- __ Pop(a0);
- __ LeaveBuiltinFrame(cp, a1, t0);
- __ SmiUntag(t0);
- }
- __ Sd(a0, FieldMemOperand(v0, JSValue::kValueOffset));
-
- __ bind(&drop_frame_and_ret);
- {
- __ Dlsa(sp, sp, t0, kPointerSizeLog2);
- __ DropAndRet(1);
- }
-}
-
-// static
-void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- a1 : constructor function
- // -- cp : context
- // -- ra : return address
- // -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
- // -- sp[argc * 8] : receiver
- // -----------------------------------
-
- // 1. Load the first argument into a0 and get rid of the rest (including the
- // receiver).
- Label no_arguments;
- {
- __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
- __ Dsubu(t1, a0, Operand(1)); // In delay slot.
- __ mov(t0, a0); // Store argc in t0.
- __ Dlsa(at, sp, t1, kPointerSizeLog2);
- __ Ld(a0, MemOperand(at));
- }
-
- // 2a. At least one argument, return a0 if it's a string, otherwise
- // dispatch to appropriate conversion.
- Label drop_frame_and_ret, to_string, symbol_descriptive_string;
- {
- __ JumpIfSmi(a0, &to_string);
- __ GetObjectType(a0, t1, t1);
- STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
- __ Subu(t1, t1, Operand(FIRST_NONSTRING_TYPE));
- __ Branch(&symbol_descriptive_string, eq, t1, Operand(zero_reg));
- __ Branch(&to_string, gt, t1, Operand(zero_reg));
- __ mov(v0, a0);
- __ jmp(&drop_frame_and_ret);
- }
-
- // 2b. No arguments, return the empty string (and pop the receiver).
- __ bind(&no_arguments);
- {
- __ LoadRoot(v0, Heap::kempty_stringRootIndex);
- __ DropAndRet(1);
- }
-
- // 3a. Convert a0 to a string.
- __ bind(&to_string);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(t0);
- __ EnterBuiltinFrame(cp, a1, t0);
- __ Call(BUILTIN_CODE(masm->isolate(), ToString), RelocInfo::CODE_TARGET);
- __ LeaveBuiltinFrame(cp, a1, t0);
- __ SmiUntag(t0);
- }
- __ jmp(&drop_frame_and_ret);
-
- // 3b. Convert symbol in a0 to a string.
- __ bind(&symbol_descriptive_string);
- {
- __ Dlsa(sp, sp, t0, kPointerSizeLog2);
- __ Drop(1);
- __ Push(a0);
- __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
- }
-
- __ bind(&drop_frame_and_ret);
- {
- __ Dlsa(sp, sp, t0, kPointerSizeLog2);
- __ DropAndRet(1);
- }
-}
-
-void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- a1 : constructor function
- // -- a3 : new target
- // -- cp : context
- // -- ra : return address
- // -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
- // -- sp[argc * 8] : receiver
- // -----------------------------------
-
- // 1. Make sure we operate in the context of the called function.
- __ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- // 2. Load the first argument into a0 and get rid of the rest (including the
- // receiver).
- {
- Label no_arguments, done;
- __ mov(t0, a0); // Store argc in t0.
- __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
- __ Dsubu(a0, a0, Operand(1));
- __ Dlsa(at, sp, a0, kPointerSizeLog2);
- __ Ld(a0, MemOperand(at));
- __ jmp(&done);
- __ bind(&no_arguments);
- __ LoadRoot(a0, Heap::kempty_stringRootIndex);
- __ bind(&done);
- }
-
- // 3. Make sure a0 is a string.
- {
- Label convert, done_convert;
- __ JumpIfSmi(a0, &convert);
- __ GetObjectType(a0, a2, a2);
- __ And(t1, a2, Operand(kIsNotStringMask));
- __ Branch(&done_convert, eq, t1, Operand(zero_reg));
- __ bind(&convert);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(t0);
- __ EnterBuiltinFrame(cp, a1, t0);
- __ Push(a3);
- __ Call(BUILTIN_CODE(masm->isolate(), ToString), RelocInfo::CODE_TARGET);
- __ Move(a0, v0);
- __ Pop(a3);
- __ LeaveBuiltinFrame(cp, a1, t0);
- __ SmiUntag(t0);
- }
- __ bind(&done_convert);
- }
-
- // 4. Check if new target and constructor differ.
- Label drop_frame_and_ret, new_object;
- __ Branch(&new_object, ne, a1, Operand(a3));
-
- // 5. Allocate a JSValue wrapper for the string.
- __ AllocateJSValue(v0, a1, a0, a2, t1, &new_object);
- __ jmp(&drop_frame_and_ret);
-
- // 6. Fallback to the runtime to create new object.
- __ bind(&new_object);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(t0);
- __ EnterBuiltinFrame(cp, a1, t0);
- __ Push(a0);
- __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
- RelocInfo::CODE_TARGET);
- __ Pop(a0);
- __ LeaveBuiltinFrame(cp, a1, t0);
- __ SmiUntag(t0);
- }
- __ Sd(a0, FieldMemOperand(v0, JSValue::kValueOffset));
-
- __ bind(&drop_frame_and_ret);
- {
- __ Dlsa(sp, sp, t0, kPointerSizeLog2);
- __ DropAndRet(1);
- }
-}
-
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ Ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
@@ -957,33 +684,12 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
static void ReplaceClosureCodeWithOptimizedCode(
MacroAssembler* masm, Register optimized_code, Register closure,
Register scratch1, Register scratch2, Register scratch3) {
- Register native_context = scratch1;
-
// Store code entry in the closure.
__ Sd(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
__ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
-
- // Link the closure into the optimized function list.
- __ Ld(native_context, NativeContextMemOperand());
- __ Ld(scratch2,
- ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- __ Sd(scratch2,
- FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
- __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch2,
- scratch3, kRAHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- const int function_list_offset =
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
- __ Sd(closure,
- ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- // Save closure before the write barrier.
- __ mov(scratch2, closure);
- __ RecordWriteContextSlot(native_context, function_list_offset, closure,
- scratch3, kRAHasNotBeenSaved, kDontSaveFPRegs);
- __ mov(closure, scratch2);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
@@ -995,7 +701,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
__ Lw(t0, FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
// Leave the frame (also dropping the register file).
- __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ __ LeaveFrame(StackFrame::INTERPRETED);
// Drop receiver + arguments.
__ Daddu(sp, sp, args_count);
@@ -1568,6 +1274,18 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
GenerateTailCallToSharedCode(masm);
}
+void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
+ // Set the code slot inside the JSFunction to the interpreter-entry
+ // trampoline.
+ __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+ __ Sd(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+ __ RecordWriteField(a1, JSFunction::kCodeOffset, a2, a4, kRAHasNotBeenSaved,
+ kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ // Jump to compile lazy.
+ Generate_CompileLazy(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
@@ -1612,6 +1330,95 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
+// Lazy deserialization design doc: http://goo.gl/dxkYDZ.
+void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (preserved for callee)
+ // -- a3 : new target (preserved for callee)
+ // -- a1 : target function (preserved for callee)
+ // -----------------------------------
+
+ Label deserialize_in_runtime;
+
+ Register target = a1; // Must be preserved
+ Register scratch0 = a2;
+ Register scratch1 = t0;
+
+ CHECK(scratch0 != a0 && scratch0 != a3 && scratch0 != a1);
+ CHECK(scratch1 != a0 && scratch1 != a3 && scratch1 != a1);
+ CHECK(scratch0 != scratch1);
+
+ // Load the builtin id for lazy deserialization from SharedFunctionInfo.
+
+ __ AssertFunction(target);
+ __ Ld(scratch0,
+ FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
+
+ __ Ld(scratch1,
+ FieldMemOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
+ __ AssertSmi(scratch1);
+
+ // The builtin may already have been deserialized. If that is the case, it is
+ // stored in the builtins table, and we can copy the correct code object to
+ // both the shared function info and the function without calling into
+ // runtime.
+ //
+ // Otherwise, we need to call into runtime to deserialize.
+
+ {
+ // Load the code object at builtins_table[builtin_id] into scratch1.
+
+ __ SmiUntag(scratch1);
+ __ li(scratch0,
+ Operand(ExternalReference::builtins_address(masm->isolate())));
+ __ Dlsa(scratch1, scratch0, scratch1, kPointerSizeLog2);
+ __ Ld(scratch1, MemOperand(scratch1));
+
+ // Check if the loaded code object has already been deserialized. This is
+ // the case iff it does not equal DeserializeLazy.
+
+ __ Move(scratch0, masm->CodeObject());
+ __ Branch(&deserialize_in_runtime, eq, scratch1, Operand(scratch0));
+ }
+
+ {
+ // If we've reached this spot, the target builtin has been deserialized and
+ // we simply need to copy it over. First to the shared function info.
+
+ Register target_builtin = scratch1;
+ Register shared = scratch0;
+
+ __ Ld(shared,
+ FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
+
+ CHECK(t1 != target && t1 != scratch0 && t1 != scratch1);
+ CHECK(t3 != target && t3 != scratch0 && t3 != scratch1);
+
+ __ Sd(target_builtin,
+ FieldMemOperand(shared, SharedFunctionInfo::kCodeOffset));
+ __ mov(t3, target_builtin); // Write barrier clobbers t3 below.
+ __ RecordWriteField(shared, SharedFunctionInfo::kCodeOffset, t3, t1,
+ kRAHasNotBeenSaved, kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // And second to the target function.
+
+ __ Sd(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset));
+ __ mov(t3, target_builtin); // Write barrier clobbers t3 below.
+ __ RecordWriteField(target, JSFunction::kCodeOffset, t3, t1,
+ kRAHasNotBeenSaved, kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // All copying is done. Jump to the deserialized code object.
+
+ __ Daddu(target_builtin, target_builtin,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(target_builtin);
+ }
+
+ __ bind(&deserialize_in_runtime);
+ GenerateTailCallToReturnedCode(masm, Runtime::kDeserializeLazy);
+}
+
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
@@ -1685,7 +1492,6 @@ void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
__ pop(v0);
}
- __ Daddu(sp, sp, Operand(kPointerSize)); // Ignore state
__ Jump(ra); // Jump to the ContinueToBuiltin stub
}
@@ -1739,52 +1545,17 @@ void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
Generate_ContinueToBuiltinHelper(masm, true, true);
}
-static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
- Deoptimizer::BailoutType type) {
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Pass the function and deoptimization type to the runtime system.
- __ li(a0, Operand(Smi::FromInt(static_cast<int>(type))));
- __ push(a0);
__ CallRuntime(Runtime::kNotifyDeoptimized);
}
- // Get the full codegen state from the stack and untag it -> a6.
- __ Lw(a6, UntagSmiMemOperand(sp, 0 * kPointerSize));
- // Switch on the state.
- Label with_tos_register, unknown_state;
- __ Branch(
- &with_tos_register, ne, a6,
- Operand(static_cast<int64_t>(Deoptimizer::BailoutState::NO_REGISTERS)));
- __ Ret(USE_DELAY_SLOT);
- // Safe to fill delay slot Addu will emit one instruction.
- __ Daddu(sp, sp, Operand(1 * kPointerSize)); // Remove state.
-
- __ bind(&with_tos_register);
DCHECK_EQ(kInterpreterAccumulatorRegister.code(), v0.code());
- __ Ld(v0, MemOperand(sp, 1 * kPointerSize));
- __ Branch(
- &unknown_state, ne, a6,
- Operand(static_cast<int64_t>(Deoptimizer::BailoutState::TOS_REGISTER)));
-
+ __ Ld(v0, MemOperand(sp, 0 * kPointerSize));
__ Ret(USE_DELAY_SLOT);
// Safe to fill delay slot: Daddu will emit one instruction.
- __ Daddu(sp, sp, Operand(2 * kPointerSize)); // Remove state.
-
- __ bind(&unknown_state);
- __ stop("no cases left");
-}
-
-void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+ __ Daddu(sp, sp, Operand(1 * kPointerSize)); // Remove accumulator.
}
static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
@@ -1902,7 +1673,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ bind(&no_arguments);
{
__ mov(a0, zero_reg);
- DCHECK(receiver.is(a1));
+ DCHECK(receiver == a1);
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
}
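The last mips64 hunk above swaps DCHECK(receiver.is(a1)) for DCHECK(receiver == a1), reflecting that Register values are now compared with operator== instead of an .is() member in this V8 revision. A minimal stand-in illustrating the pattern (this Register is not V8's definition):

    // Equality by register code, which is what the DCHECK relies on.
    struct Register {
      int code;
    };
    constexpr bool operator==(Register a, Register b) { return a.code == b.code; }
    constexpr bool operator!=(Register a, Register b) { return !(a == b); }

    static_assert(Register{4} == Register{4},
                  "registers with the same code compare equal");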
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 646f7f62bc..3ed3eb686d 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -4,6 +4,7 @@
#if V8_TARGET_ARCH_PPC
+#include "src/assembler-inl.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
@@ -147,284 +148,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-// static
-void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : number of arguments
- // -- r4 : constructor function
- // -- cp : context
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // 1. Load the first argument into r3.
- Label no_arguments;
- {
- __ mr(r5, r3); // Store argc in r5.
- __ cmpi(r3, Operand::Zero());
- __ beq(&no_arguments);
- __ subi(r3, r3, Operand(1));
- __ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2));
- __ LoadPX(r3, MemOperand(sp, r3));
- }
-
- // 2a. Convert the first argument to a number.
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(r5);
- __ EnterBuiltinFrame(cp, r4, r5);
- __ Call(BUILTIN_CODE(masm->isolate(), ToNumber), RelocInfo::CODE_TARGET);
- __ LeaveBuiltinFrame(cp, r4, r5);
- __ SmiUntag(r5);
- }
-
- {
- // Drop all arguments including the receiver.
- __ Drop(r5);
- __ Ret(1);
- }
-
- // 2b. No arguments, return +0.
- __ bind(&no_arguments);
- __ LoadSmiLiteral(r3, Smi::kZero);
- __ Ret(1);
-}
-
-// static
-void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : number of arguments
- // -- r4 : constructor function
- // -- r6 : new target
- // -- cp : context
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // 1. Make sure we operate in the context of the called function.
- __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
-
- // 2. Load the first argument into r5.
- {
- Label no_arguments, done;
- __ mr(r9, r3); // Store argc in r9.
- __ cmpi(r3, Operand::Zero());
- __ beq(&no_arguments);
- __ subi(r3, r3, Operand(1));
- __ ShiftLeftImm(r5, r3, Operand(kPointerSizeLog2));
- __ LoadPX(r5, MemOperand(sp, r5));
- __ b(&done);
- __ bind(&no_arguments);
- __ LoadSmiLiteral(r5, Smi::kZero);
- __ bind(&done);
- }
-
- // 3. Make sure r5 is a number.
- {
- Label done_convert;
- __ JumpIfSmi(r5, &done_convert);
- __ CompareObjectType(r5, r7, r7, HEAP_NUMBER_TYPE);
- __ beq(&done_convert);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(r9);
- __ EnterBuiltinFrame(cp, r4, r9);
- __ Push(r6);
- __ mr(r3, r5);
- __ Call(BUILTIN_CODE(masm->isolate(), ToNumber), RelocInfo::CODE_TARGET);
- __ mr(r5, r3);
- __ Pop(r6);
- __ LeaveBuiltinFrame(cp, r4, r9);
- __ SmiUntag(r9);
- }
- __ bind(&done_convert);
- }
-
- // 4. Check if new target and constructor differ.
- Label drop_frame_and_ret, new_object;
- __ cmp(r4, r6);
- __ bne(&new_object);
-
- // 5. Allocate a JSValue wrapper for the number.
- __ AllocateJSValue(r3, r4, r5, r7, r8, &new_object);
- __ b(&drop_frame_and_ret);
-
- // 6. Fallback to the runtime to create new object.
- __ bind(&new_object);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(r9);
- __ EnterBuiltinFrame(cp, r4, r9);
- __ Push(r5); // first argument
- __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
- RelocInfo::CODE_TARGET);
- __ Pop(r5);
- __ LeaveBuiltinFrame(cp, r4, r9);
- __ SmiUntag(r9);
- }
- __ StoreP(r5, FieldMemOperand(r3, JSValue::kValueOffset), r0);
-
- __ bind(&drop_frame_and_ret);
- {
- __ Drop(r9);
- __ Ret(1);
- }
-}
-
-// static
-void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : number of arguments
- // -- r4 : constructor function
- // -- cp : context
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // 1. Load the first argument into r3.
- Label no_arguments;
- {
- __ mr(r5, r3); // Store argc in r5.
- __ cmpi(r3, Operand::Zero());
- __ beq(&no_arguments);
- __ subi(r3, r3, Operand(1));
- __ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2));
- __ LoadPX(r3, MemOperand(sp, r3));
- }
-
- // 2a. At least one argument, return r3 if it's a string, otherwise
- // dispatch to appropriate conversion.
- Label drop_frame_and_ret, to_string, symbol_descriptive_string;
- {
- __ JumpIfSmi(r3, &to_string);
- STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
- __ CompareObjectType(r3, r6, r6, FIRST_NONSTRING_TYPE);
- __ bgt(&to_string);
- __ beq(&symbol_descriptive_string);
- __ b(&drop_frame_and_ret);
- }
-
- // 2b. No arguments, return the empty string (and pop the receiver).
- __ bind(&no_arguments);
- {
- __ LoadRoot(r3, Heap::kempty_stringRootIndex);
- __ Ret(1);
- }
-
- // 3a. Convert r3 to a string.
- __ bind(&to_string);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(r5);
- __ EnterBuiltinFrame(cp, r4, r5);
- __ Call(BUILTIN_CODE(masm->isolate(), ToString), RelocInfo::CODE_TARGET);
- __ LeaveBuiltinFrame(cp, r4, r5);
- __ SmiUntag(r5);
- }
- __ b(&drop_frame_and_ret);
-
- // 3b. Convert symbol in r3 to a string.
- __ bind(&symbol_descriptive_string);
- {
- __ Drop(r5);
- __ Drop(1);
- __ Push(r3);
- __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
- }
-
- __ bind(&drop_frame_and_ret);
- {
- __ Drop(r5);
- __ Ret(1);
- }
-}
-
-// static
-void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : number of arguments
- // -- r4 : constructor function
- // -- r6 : new target
- // -- cp : context
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // 1. Make sure we operate in the context of the called function.
- __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
-
- // 2. Load the first argument into r5.
- {
- Label no_arguments, done;
- __ mr(r9, r3); // Store argc in r9.
- __ cmpi(r3, Operand::Zero());
- __ beq(&no_arguments);
- __ subi(r3, r3, Operand(1));
- __ ShiftLeftImm(r5, r3, Operand(kPointerSizeLog2));
- __ LoadPX(r5, MemOperand(sp, r5));
- __ b(&done);
- __ bind(&no_arguments);
- __ LoadRoot(r5, Heap::kempty_stringRootIndex);
- __ bind(&done);
- }
-
- // 3. Make sure r5 is a string.
- {
- Label convert, done_convert;
- __ JumpIfSmi(r5, &convert);
- __ CompareObjectType(r5, r7, r7, FIRST_NONSTRING_TYPE);
- __ blt(&done_convert);
- __ bind(&convert);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(r9);
- __ EnterBuiltinFrame(cp, r4, r9);
- __ Push(r6);
- __ mr(r3, r5);
- __ Call(BUILTIN_CODE(masm->isolate(), ToString), RelocInfo::CODE_TARGET);
- __ mr(r5, r3);
- __ Pop(r6);
- __ LeaveBuiltinFrame(cp, r4, r9);
- __ SmiUntag(r9);
- }
- __ bind(&done_convert);
- }
-
- // 4. Check if new target and constructor differ.
- Label drop_frame_and_ret, new_object;
- __ cmp(r4, r6);
- __ bne(&new_object);
-
- // 5. Allocate a JSValue wrapper for the string.
- __ AllocateJSValue(r3, r4, r5, r7, r8, &new_object);
- __ b(&drop_frame_and_ret);
-
- // 6. Fallback to the runtime to create new object.
- __ bind(&new_object);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(r9);
- __ EnterBuiltinFrame(cp, r4, r9);
- __ Push(r5); // first argument
- __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
- RelocInfo::CODE_TARGET);
- __ Pop(r5);
- __ LeaveBuiltinFrame(cp, r4, r9);
- __ SmiUntag(r9);
- }
- __ StoreP(r5, FieldMemOperand(r3, JSValue::kValueOffset), r0);
-
- __ bind(&drop_frame_and_ret);
- {
- __ Drop(r9);
- __ Ret(1);
- }
-}
-
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ LoadP(ip, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
@@ -977,7 +700,6 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
static void ReplaceClosureCodeWithOptimizedCode(
MacroAssembler* masm, Register optimized_code, Register closure,
Register scratch1, Register scratch2, Register scratch3) {
- Register native_context = scratch1;
// Store code entry in the closure.
__ StoreP(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset),
r0);
@@ -985,29 +707,6 @@ static void ReplaceClosureCodeWithOptimizedCode(
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
-
- // Link the closure into the optimized function list.
- // r7 : code entry
- // r10: native context
- // r4 : closure
- __ LoadP(native_context, NativeContextMemOperand());
- __ LoadP(scratch2, ContextMemOperand(native_context,
- Context::OPTIMIZED_FUNCTIONS_LIST));
- __ StoreP(scratch2,
- FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset), r0);
- __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch2,
- scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- const int function_list_offset =
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
- __ StoreP(
- closure,
- ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0);
- // Save closure before the write barrier.
- __ mr(scratch2, closure);
- __ RecordWriteContextSlot(native_context, function_list_offset, closure,
- scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ mr(closure, scratch2);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
@@ -1020,7 +719,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
// Leave the frame (also dropping the register file).
- __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ __ LeaveFrame(StackFrame::INTERPRETED);
__ add(sp, sp, args_count);
}
@@ -1607,6 +1306,18 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
GenerateTailCallToSharedCode(masm);
}
+void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
+ // Set the code slot inside the JSFunction to the interpreter-entry
+ // trampoline.
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kCodeOffset));
+ __ StoreP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset), r0);
+ __ RecordWriteField(r4, JSFunction::kCodeOffset, r5, r7, kLRHasNotBeenSaved,
+ kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ // Jump to compile lazy.
+ Generate_CompileLazy(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argument count (preserved for callee)
@@ -1654,6 +1365,96 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
+// Lazy deserialization design doc: http://goo.gl/dxkYDZ.
+void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : argument count (preserved for callee)
+ // -- r6 : new target (preserved for callee)
+ // -- r4 : target function (preserved for callee)
+ // -----------------------------------
+
+ Label deserialize_in_runtime;
+
+ Register target = r4; // Must be preserved
+ Register scratch0 = r5;
+ Register scratch1 = r7;
+
+ CHECK(scratch0 != r3 && scratch0 != r6 && scratch0 != r4);
+ CHECK(scratch1 != r3 && scratch1 != r6 && scratch1 != r4);
+ CHECK(scratch0 != scratch1);
+
+ // Load the builtin id for lazy deserialization from SharedFunctionInfo.
+
+ __ AssertFunction(target);
+ __ LoadP(scratch0,
+ FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
+
+ __ LoadP(scratch1,
+ FieldMemOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
+ __ AssertSmi(scratch1);
+
+ // The builtin may already have been deserialized. If that is the case, it is
+ // stored in the builtins table, and we can copy the correct code object to
+ // both the shared function info and the function without calling into runtime.
+ //
+ // Otherwise, we need to call into runtime to deserialize.
+
+ {
+ // Load the code object at builtins_table[builtin_id] into scratch1.
+
+ __ SmiUntag(scratch1);
+ __ mov(scratch0,
+ Operand(ExternalReference::builtins_address(masm->isolate())));
+ __ ShiftLeftImm(scratch1, scratch1, Operand(kPointerSizeLog2));
+ __ LoadPX(scratch1, MemOperand(scratch0, scratch1));
+
+ // Check if the loaded code object has already been deserialized. This is
+ // the case iff it does not equal DeserializeLazy.
+
+ __ Move(scratch0, masm->CodeObject());
+ __ cmp(scratch1, scratch0);
+ __ beq(&deserialize_in_runtime);
+ }
+ {
+ // If we've reached this spot, the target builtin has been deserialized and
+ // we simply need to copy it over. First to the shared function info.
+
+ Register target_builtin = scratch1;
+ Register shared = scratch0;
+
+ __ LoadP(shared,
+ FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
+
+ CHECK(r8 != target && r8 != scratch0 && r8 != scratch1);
+ CHECK(r9 != target && r9 != scratch0 && r9 != scratch1);
+
+ __ StoreP(target_builtin,
+ FieldMemOperand(shared, SharedFunctionInfo::kCodeOffset), r0);
+ __ mr(r9, target_builtin); // Write barrier clobbers r9 below.
+ __ RecordWriteField(shared, SharedFunctionInfo::kCodeOffset, r9, r8,
+ kLRHasNotBeenSaved, kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // And second to the target function.
+
+ __ StoreP(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset),
+ r0);
+ __ mr(r9, target_builtin); // Write barrier clobbers r9 below.
+ __ RecordWriteField(target, JSFunction::kCodeOffset, r9, r8,
+ kLRHasNotBeenSaved, kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // All copying is done. Jump to the deserialized code object.
+
+ __ addi(target_builtin, target_builtin,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(target_builtin);
+ }
+
+ __ bind(&deserialize_in_runtime);
+ GenerateTailCallToReturnedCode(masm, Runtime::kDeserializeLazy);
+}
+
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argument count (preserved for callee)
@@ -1729,7 +1530,6 @@ void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
__ pop(r3);
}
- __ addi(sp, sp, Operand(kPointerSize)); // Ignore state
__ blr(); // Jump to ContinueToBuiltin stub
}
@@ -1785,52 +1585,16 @@ void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
Generate_ContinueToBuiltinHelper(masm, true, true);
}
-static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
- Deoptimizer::BailoutType type) {
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Pass the function and deoptimization type to the runtime system.
- __ LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(type)));
- __ push(r3);
__ CallRuntime(Runtime::kNotifyDeoptimized);
}
- // Get the full codegen state from the stack and untag it -> r9.
- __ LoadP(r9, MemOperand(sp, 0 * kPointerSize));
- __ SmiUntag(r9);
- // Switch on the state.
- Label with_tos_register, unknown_state;
- __ cmpi(
- r9,
- Operand(static_cast<intptr_t>(Deoptimizer::BailoutState::NO_REGISTERS)));
- __ bne(&with_tos_register);
- __ addi(sp, sp, Operand(1 * kPointerSize)); // Remove state.
- __ Ret();
-
- __ bind(&with_tos_register);
DCHECK_EQ(kInterpreterAccumulatorRegister.code(), r3.code());
- __ LoadP(r3, MemOperand(sp, 1 * kPointerSize));
- __ cmpi(
- r9,
- Operand(static_cast<intptr_t>(Deoptimizer::BailoutState::TOS_REGISTER)));
- __ bne(&unknown_state);
- __ addi(sp, sp, Operand(2 * kPointerSize)); // Remove state.
+ __ LoadP(r3, MemOperand(sp, 0 * kPointerSize));
+ __ addi(sp, sp, Operand(1 * kPointerSize));
__ Ret();
-
- __ bind(&unknown_state);
- __ stop("no cases left");
-}
-
-void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
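The Generate_DeserializeLazy addition above (the s390 and x64 ports below get the same builtin) is easier to follow against a high-level model. The sketch below is illustrative pseudocode, not V8 API: the struct fields and helper names are stand-ins. It mirrors the assembly's control flow: read the builtin id from the SharedFunctionInfo's function-data slot, index the isolate's builtins table, and either publish an already-deserialized code object to both holders or fall back to Runtime::kDeserializeLazy.

    // Illustrative model only; all types here are stand-ins, not V8's.
    struct Code {};
    struct SharedFunctionInfo { int builtin_id; Code* code; };
    struct JSFunction { SharedFunctionInfo* shared; Code* code; };

    // Returns the deserialized builtin on the fast path, or nullptr when
    // the slow path (Runtime::kDeserializeLazy) must run instead.
    Code* DeserializeLazyFastPath(Code** builtins_table,
                                  Code* deserialize_lazy_stub,
                                  JSFunction* target) {
      int builtin_id = target->shared->builtin_id;   // Smi-untagged id
      Code* candidate = builtins_table[builtin_id];
      // Still equal to DeserializeLazy itself means "not yet deserialized".
      if (candidate == deserialize_lazy_stub) return nullptr;
      // Fast path: copy the code object to the SharedFunctionInfo and the
      // JSFunction (the assembly emits write barriers for both stores);
      // the caller then jumps to its instruction start.
      target->shared->code = candidate;
      target->code = candidate;
      return candidate;
    }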
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index c965805fc7..e9ef390c69 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -4,6 +4,7 @@
#if V8_TARGET_ARCH_S390
+#include "src/assembler-inl.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
@@ -147,281 +148,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-// static
-void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : number of arguments
- // -- r3 : constructor function
- // -- cp : context
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // 1. Load the first argument into r2.
- Label no_arguments;
- {
- __ LoadRR(r4, r2); // Store argc in r4.
- __ CmpP(r2, Operand::Zero());
- __ beq(&no_arguments);
- __ SubP(r2, r2, Operand(1));
- __ ShiftLeftP(r2, r2, Operand(kPointerSizeLog2));
- __ LoadP(r2, MemOperand(sp, r2));
- }
-
- // 2a. Convert the first argument to a number.
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(r4);
- __ EnterBuiltinFrame(cp, r3, r4);
- __ Call(BUILTIN_CODE(masm->isolate(), ToNumber), RelocInfo::CODE_TARGET);
- __ LeaveBuiltinFrame(cp, r3, r4);
- __ SmiUntag(r4);
- }
-
- {
- // Drop all arguments including the receiver.
- __ Drop(r4);
- __ Ret(1);
- }
-
- // 2b. No arguments, return +0.
- __ bind(&no_arguments);
- __ LoadSmiLiteral(r2, Smi::kZero);
- __ Ret(1);
-}
-
-// static
-void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : number of arguments
- // -- r3 : constructor function
- // -- r5 : new target
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // 1. Make sure we operate in the context of the called function.
- __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
-
- // 2. Load the first argument into r4.
- {
- Label no_arguments, done;
- __ LoadRR(r8, r2); // Store argc in r8.
- __ CmpP(r2, Operand::Zero());
- __ beq(&no_arguments);
- __ SubP(r2, r2, Operand(1));
- __ ShiftLeftP(r4, r2, Operand(kPointerSizeLog2));
- __ LoadP(r4, MemOperand(sp, r4));
- __ b(&done);
- __ bind(&no_arguments);
- __ LoadSmiLiteral(r4, Smi::kZero);
- __ bind(&done);
- }
-
- // 3. Make sure r4 is a number.
- {
- Label done_convert;
- __ JumpIfSmi(r4, &done_convert);
- __ CompareObjectType(r4, r6, r6, HEAP_NUMBER_TYPE);
- __ beq(&done_convert);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(r8);
- __ EnterBuiltinFrame(cp, r3, r8);
- __ Push(r5);
- __ LoadRR(r2, r4);
- __ Call(BUILTIN_CODE(masm->isolate(), ToNumber), RelocInfo::CODE_TARGET);
- __ LoadRR(r4, r2);
- __ Pop(r5);
- __ LeaveBuiltinFrame(cp, r3, r8);
- __ SmiUntag(r8);
- }
- __ bind(&done_convert);
- }
-
- // 4. Check if new target and constructor differ.
- Label drop_frame_and_ret, new_object;
- __ CmpP(r3, r5);
- __ bne(&new_object);
-
- // 5. Allocate a JSValue wrapper for the number.
- __ AllocateJSValue(r2, r3, r4, r6, r7, &new_object);
- __ b(&drop_frame_and_ret);
-
- // 6. Fallback to the runtime to create new object.
- __ bind(&new_object);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(r8);
- __ EnterBuiltinFrame(cp, r3, r8);
- __ Push(r4); // first argument
- __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
- RelocInfo::CODE_TARGET);
- __ Pop(r4);
- __ LeaveBuiltinFrame(cp, r3, r8);
- __ SmiUntag(r8);
- }
- __ StoreP(r4, FieldMemOperand(r2, JSValue::kValueOffset), r0);
-
- __ bind(&drop_frame_and_ret);
- {
- __ Drop(r8);
- __ Ret(1);
- }
-}
-
-// static
-void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : number of arguments
- // -- r3 : constructor function
- // -- cp : context
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
- // -- sp[argc * 4] : receiver
- // -----------------------------------
- // 1. Load the first argument into r2.
- Label no_arguments;
- {
- __ LoadRR(r4, r2); // Store argc in r4
- __ CmpP(r2, Operand::Zero());
- __ beq(&no_arguments);
- __ SubP(r2, r2, Operand(1));
- __ ShiftLeftP(r2, r2, Operand(kPointerSizeLog2));
- __ LoadP(r2, MemOperand(sp, r2));
- }
-
- // 2a. At least one argument, return r2 if it's a string, otherwise
- // dispatch to appropriate conversion.
- Label drop_frame_and_ret, to_string, symbol_descriptive_string;
- {
- __ JumpIfSmi(r2, &to_string);
- STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
- __ CompareObjectType(r2, r5, r5, FIRST_NONSTRING_TYPE);
- __ bgt(&to_string);
- __ beq(&symbol_descriptive_string);
- __ b(&drop_frame_and_ret);
- }
-
- // 2b. No arguments, return the empty string (and pop the receiver).
- __ bind(&no_arguments);
- {
- __ LoadRoot(r2, Heap::kempty_stringRootIndex);
- __ Ret(1);
- }
-
- // 3a. Convert r2 to a string.
- __ bind(&to_string);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(r4);
- __ EnterBuiltinFrame(cp, r3, r4);
- __ Call(BUILTIN_CODE(masm->isolate(), ToString), RelocInfo::CODE_TARGET);
- __ LeaveBuiltinFrame(cp, r3, r4);
- __ SmiUntag(r4);
- }
- __ b(&drop_frame_and_ret);
- // 3b. Convert symbol in r2 to a string.
- __ bind(&symbol_descriptive_string);
- {
- __ Drop(r4);
- __ Drop(1);
- __ Push(r2);
- __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
- }
-
- __ bind(&drop_frame_and_ret);
- {
- __ Drop(r4);
- __ Ret(1);
- }
-}
-
-// static
-void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : number of arguments
- // -- r3 : constructor function
- // -- r5 : new target
- // -- cp : context
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // 1. Make sure we operate in the context of the called function.
- __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
-
- // 2. Load the first argument into r4.
- {
- Label no_arguments, done;
- __ LoadRR(r8, r2); // Store argc in r8.
- __ CmpP(r2, Operand::Zero());
- __ beq(&no_arguments);
- __ SubP(r2, r2, Operand(1));
- __ ShiftLeftP(r4, r2, Operand(kPointerSizeLog2));
- __ LoadP(r4, MemOperand(sp, r4));
- __ b(&done);
- __ bind(&no_arguments);
- __ LoadRoot(r4, Heap::kempty_stringRootIndex);
- __ bind(&done);
- }
-
- // 3. Make sure r4 is a string.
- {
- Label convert, done_convert;
- __ JumpIfSmi(r4, &convert);
- __ CompareObjectType(r4, r6, r6, FIRST_NONSTRING_TYPE);
- __ blt(&done_convert);
- __ bind(&convert);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(r8);
- __ EnterBuiltinFrame(cp, r3, r8);
- __ Push(r5);
- __ LoadRR(r2, r4);
- __ Call(BUILTIN_CODE(masm->isolate(), ToString), RelocInfo::CODE_TARGET);
- __ LoadRR(r4, r2);
- __ Pop(r5);
- __ LeaveBuiltinFrame(cp, r3, r8);
- __ SmiUntag(r8);
- }
- __ bind(&done_convert);
- }
-
- // 4. Check if new target and constructor differ.
- Label drop_frame_and_ret, new_object;
- __ CmpP(r3, r5);
- __ bne(&new_object);
-
- // 5. Allocate a JSValue wrapper for the string.
- __ AllocateJSValue(r2, r3, r4, r6, r7, &new_object);
- __ b(&drop_frame_and_ret);
-
- // 6. Fallback to the runtime to create new object.
- __ bind(&new_object);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(r8);
- __ EnterBuiltinFrame(cp, r3, r8);
- __ Push(r4); // first argument
- __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
- RelocInfo::CODE_TARGET);
- __ Pop(r4);
- __ LeaveBuiltinFrame(cp, r3, r8);
- __ SmiUntag(r8);
- }
- __ StoreP(r4, FieldMemOperand(r2, JSValue::kValueOffset), r0);
-
- __ bind(&drop_frame_and_ret);
- {
- __ Drop(r8);
- __ Ret(1);
- }
-}
-
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ LoadP(ip, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
@@ -976,7 +702,6 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
static void ReplaceClosureCodeWithOptimizedCode(
MacroAssembler* masm, Register optimized_code, Register closure,
Register scratch1, Register scratch2, Register scratch3) {
- Register native_context = scratch1;
// Store code entry in the closure.
__ StoreP(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset),
r0);
@@ -985,29 +710,6 @@ static void ReplaceClosureCodeWithOptimizedCode(
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
-
- // Link the closure into the optimized function list.
- // r6 : code entry
- // r9: native context
- // r3 : closure
- __ LoadP(native_context, NativeContextMemOperand());
- __ LoadP(scratch2, ContextMemOperand(native_context,
- Context::OPTIMIZED_FUNCTIONS_LIST));
- __ StoreP(scratch2,
- FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset), r0);
- __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch2,
- scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- const int function_list_offset =
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
- __ StoreP(
- closure,
- ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0);
- // Save closure before the write barrier.
- __ LoadRR(scratch2, closure);
- __ RecordWriteContextSlot(native_context, function_list_offset, closure,
- scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ LoadRR(closure, scratch2);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
@@ -1020,7 +722,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
// Leave the frame (also dropping the register file).
- __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ __ LeaveFrame(StackFrame::INTERPRETED);
__ AddP(sp, sp, args_count);
}
@@ -1600,6 +1302,18 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
GenerateTailCallToSharedCode(masm);
}
+void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
+ // Set the code slot inside the JSFunction to the interpreter-entry
+ // trampoline.
+ __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r4, FieldMemOperand(r4, SharedFunctionInfo::kCodeOffset));
+ __ StoreP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
+ __ RecordWriteField(r3, JSFunction::kCodeOffset, r4, r6, kLRHasNotBeenSaved,
+ kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ // Jump to compile lazy.
+ Generate_CompileLazy(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argument count (preserved for callee)
@@ -1647,6 +1361,95 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
+// Lazy deserialization design doc: http://goo.gl/dxkYDZ.
+void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : argument count (preserved for callee)
+ // -- r5 : new target (preserved for callee)
+ // -- r3 : target function (preserved for callee)
+ // -----------------------------------
+
+ Label deserialize_in_runtime;
+
+ Register target = r3; // Must be preserved
+ Register scratch0 = r4;
+ Register scratch1 = r6;
+
+ CHECK(scratch0 != r2 && scratch0 != r5 && scratch0 != r3);
+ CHECK(scratch1 != r2 && scratch1 != r5 && scratch1 != r3);
+ CHECK(scratch0 != scratch1);
+
+ // Load the builtin id for lazy deserialization from SharedFunctionInfo.
+
+ __ AssertFunction(target);
+ __ LoadP(scratch0,
+ FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
+
+ __ LoadP(scratch1,
+ FieldMemOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
+ __ AssertSmi(scratch1);
+
+ // The builtin may already have been deserialized. If that is the case, it is
+ // stored in the builtins table, and we can copy the correct code object to
+ // both the shared function info and the function without calling into runtime.
+ //
+ // Otherwise, we need to call into runtime to deserialize.
+
+ {
+ // Load the code object at builtins_table[builtin_id] into scratch1.
+
+ __ SmiUntag(scratch1);
+ __ mov(scratch0,
+ Operand(ExternalReference::builtins_address(masm->isolate())));
+ __ ShiftLeftP(scratch1, scratch1, Operand(kPointerSizeLog2));
+ __ LoadP(scratch1, MemOperand(scratch0, scratch1));
+
+ // Check if the loaded code object has already been deserialized. This is
+ // the case iff it does not equal DeserializeLazy.
+
+ __ Move(scratch0, masm->CodeObject());
+ __ CmpP(scratch1, scratch0);
+ __ beq(&deserialize_in_runtime);
+ }
+ {
+ // If we've reached this spot, the target builtin has been deserialized and
+ // we simply need to copy it over. First to the shared function info.
+
+ Register target_builtin = scratch1;
+ Register shared = scratch0;
+
+ __ LoadP(shared,
+ FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
+
+ CHECK(r7 != target && r7 != scratch0 && r7 != scratch1);
+ CHECK(r8 != target && r8 != scratch0 && r8 != scratch1);
+
+ __ StoreP(target_builtin,
+ FieldMemOperand(shared, SharedFunctionInfo::kCodeOffset));
+ __ LoadRR(r8, target_builtin); // Write barrier clobbers r8 below.
+ __ RecordWriteField(shared, SharedFunctionInfo::kCodeOffset, r8, r7,
+ kLRHasNotBeenSaved, kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // And second to the target function.
+
+ __ StoreP(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset));
+ __ LoadRR(r8, target_builtin); // Write barrier clobbers r8 below.
+ __ RecordWriteField(target, JSFunction::kCodeOffset, r8, r7,
+ kLRHasNotBeenSaved, kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // All copying is done. Jump to the deserialized code object.
+
+ __ AddP(target_builtin, target_builtin,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(target_builtin);
+ }
+
+ __ bind(&deserialize_in_runtime);
+ GenerateTailCallToReturnedCode(masm, Runtime::kDeserializeLazy);
+}
+
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argument count (preserved for callee)
@@ -1722,7 +1525,6 @@ void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
__ pop(r2);
}
- __ AddP(sp, sp, Operand(kPointerSize)); // Ignore state
__ Ret(); // Jump to ContinueToBuiltin stub
}
@@ -1778,52 +1580,15 @@ void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
Generate_ContinueToBuiltinHelper(masm, true, true);
}
-static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
- Deoptimizer::BailoutType type) {
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Pass the function and deoptimization type to the runtime system.
- __ LoadSmiLiteral(r2, Smi::FromInt(static_cast<int>(type)));
- __ push(r2);
__ CallRuntime(Runtime::kNotifyDeoptimized);
}
- // Get the full codegen state from the stack and untag it -> r8.
- __ LoadP(r8, MemOperand(sp, 0 * kPointerSize));
- __ SmiUntag(r8);
- // Switch on the state.
- Label with_tos_register, unknown_state;
- __ CmpP(
- r8,
- Operand(static_cast<intptr_t>(Deoptimizer::BailoutState::NO_REGISTERS)));
- __ bne(&with_tos_register);
- __ la(sp, MemOperand(sp, 1 * kPointerSize)); // Remove state.
- __ Ret();
-
- __ bind(&with_tos_register);
DCHECK_EQ(kInterpreterAccumulatorRegister.code(), r2.code());
- __ LoadP(r2, MemOperand(sp, 1 * kPointerSize));
- __ CmpP(
- r8,
- Operand(static_cast<intptr_t>(Deoptimizer::BailoutState::TOS_REGISTER)));
- __ bne(&unknown_state);
- __ la(sp, MemOperand(sp, 2 * kPointerSize)); // Remove state.
+ __ pop(r2);
__ Ret();
-
- __ bind(&unknown_state);
- __ stop("no cases left");
-}
-
-void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
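The NotifyDeoptimized rewrite in the ppc and s390 ports above (the x64 port below gets the same treatment) also shrinks the stack protocol. A hedged summary of what the diff implies: the removed helper read a Smi-tagged Deoptimizer::BailoutState marker from sp[0] and, for TOS_REGISTER bailouts, the accumulator value from sp[1]; the rewritten builtin assumes the deoptimizer always leaves exactly one slot, the accumulator value, on top of the stack.

    // Stack layouts implied by the diff (illustrative comments, not code).
    //
    // Old protocol, removed:
    //   sp[0] : Smi(BailoutState)   NO_REGISTERS or TOS_REGISTER
    //   sp[1] : accumulator value   present only for TOS_REGISTER
    //
    // New protocol:
    //   sp[0] : accumulator value   always present
    //
    // So the rewritten builtin reduces to: call Runtime::kNotifyDeoptimized
    // inside an internal frame, pop one slot into the interpreter
    // accumulator register (r3 on ppc, r2 on s390, rax on x64), and return.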
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index ab3a84fb01..b21e3f5b99 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -4,11 +4,15 @@
#include "src/setup-isolate.h"
+#include "src/assembler-inl.h"
#include "src/builtins/builtins.h"
#include "src/code-events.h"
#include "src/compiler/code-assembler.h"
+#include "src/handles-inl.h"
#include "src/interface-descriptors.h"
#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/objects/shared-function-info.h"
namespace v8 {
namespace internal {
@@ -17,6 +21,7 @@ namespace internal {
#define FORWARD_DECLARE(Name) \
Object* Builtin_##Name(int argc, Object** args, Isolate* isolate);
BUILTIN_LIST_C(FORWARD_DECLARE)
+#undef FORWARD_DECLARE
namespace {
void PostBuildProfileAndTracing(Isolate* isolate, Code* code,
@@ -37,8 +42,6 @@ void PostBuildProfileAndTracing(Isolate* isolate, Code* code,
typedef void (*MacroAssemblerGenerator)(MacroAssembler*);
typedef void (*CodeAssemblerGenerator)(compiler::CodeAssemblerState*);
-static const ExtraICState kPlaceholderState = 1;
-
Handle<Code> BuildPlaceholder(Isolate* isolate) {
HandleScope scope(isolate);
const size_t buffer_size = 1 * KB;
@@ -51,16 +54,14 @@ Handle<Code> BuildPlaceholder(Isolate* isolate) {
}
CodeDesc desc;
masm.GetCode(isolate, &desc);
- const Code::Flags kPlaceholderFlags =
- Code::ComputeFlags(Code::BUILTIN, kPlaceholderState);
Handle<Code> code =
- isolate->factory()->NewCode(desc, kPlaceholderFlags, masm.CodeObject());
+ isolate->factory()->NewCode(desc, Code::BUILTIN, masm.CodeObject());
return scope.CloseAndEscape(code);
}
Code* BuildWithMacroAssembler(Isolate* isolate,
MacroAssemblerGenerator generator,
- Code::Flags flags, const char* s_name) {
+ const char* s_name) {
HandleScope scope(isolate);
// Canonicalize handles, so that we can share constant pool entries pointing
// to code targets without dereferencing their handles.
@@ -73,14 +74,13 @@ Code* BuildWithMacroAssembler(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
- isolate->factory()->NewCode(desc, flags, masm.CodeObject());
+ isolate->factory()->NewCode(desc, Code::BUILTIN, masm.CodeObject());
PostBuildProfileAndTracing(isolate, *code, s_name);
return *code;
}
Code* BuildAdaptor(Isolate* isolate, Address builtin_address,
- Builtins::ExitFrameType exit_frame_type, Code::Flags flags,
- const char* name) {
+ Builtins::ExitFrameType exit_frame_type, const char* name) {
HandleScope scope(isolate);
// Canonicalize handles, so that we can share constant pool entries pointing
// to code targets without dereferencing their handles.
@@ -93,7 +93,7 @@ Code* BuildAdaptor(Isolate* isolate, Address builtin_address,
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
- isolate->factory()->NewCode(desc, flags, masm.CodeObject());
+ isolate->factory()->NewCode(desc, Code::BUILTIN, masm.CodeObject());
PostBuildProfileAndTracing(isolate, *code, name);
return *code;
}
@@ -101,7 +101,7 @@ Code* BuildAdaptor(Isolate* isolate, Address builtin_address,
// Builder for builtins implemented in TurboFan with JS linkage.
Code* BuildWithCodeStubAssemblerJS(Isolate* isolate,
CodeAssemblerGenerator generator, int argc,
- Code::Flags flags, const char* name) {
+ const char* name) {
HandleScope scope(isolate);
// Canonicalize handles, so that we can share constant pool entries pointing
// to code targets without dereferencing their handles.
@@ -109,8 +109,8 @@ Code* BuildWithCodeStubAssemblerJS(Isolate* isolate,
Zone zone(isolate->allocator(), ZONE_NAME);
const int argc_with_recv =
(argc == SharedFunctionInfo::kDontAdaptArgumentsSentinel) ? 0 : argc + 1;
- compiler::CodeAssemblerState state(isolate, &zone, argc_with_recv, flags,
- name);
+ compiler::CodeAssemblerState state(isolate, &zone, argc_with_recv,
+ Code::BUILTIN, name);
generator(&state);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
PostBuildProfileAndTracing(isolate, *code, name);
@@ -121,8 +121,7 @@ Code* BuildWithCodeStubAssemblerJS(Isolate* isolate,
Code* BuildWithCodeStubAssemblerCS(Isolate* isolate,
CodeAssemblerGenerator generator,
CallDescriptors::Key interface_descriptor,
- Code::Flags flags, const char* name,
- int result_size) {
+ const char* name, int result_size) {
HandleScope scope(isolate);
// Canonicalize handles, so that we can share constant pool entries pointing
// to code targets without dereferencing their handles.
@@ -133,8 +132,8 @@ Code* BuildWithCodeStubAssemblerCS(Isolate* isolate,
CallInterfaceDescriptor descriptor(isolate, interface_descriptor);
// Ensure descriptor is already initialized.
DCHECK_LE(0, descriptor.GetRegisterParameterCount());
- compiler::CodeAssemblerState state(isolate, &zone, descriptor, flags, name,
- result_size);
+ compiler::CodeAssemblerState state(isolate, &zone, descriptor, Code::BUILTIN,
+ name, result_size);
generator(&state);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
PostBuildProfileAndTracing(isolate, *code, name);
@@ -167,8 +166,6 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
DisallowHeapAllocation no_gc;
static const int kRelocMask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- const Code::Flags kPlaceholderFlags =
- Code::ComputeFlags(Code::BUILTIN, kPlaceholderState);
HeapIterator iterator(isolate->heap());
while (HeapObject* obj = iterator.next()) {
if (!obj->IsCode()) continue;
@@ -178,7 +175,7 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
RelocInfo* rinfo = it.rinfo();
if (RelocInfo::IsCodeTarget(rinfo->rmode())) {
Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- if (target->flags() != kPlaceholderFlags) continue;
+ if (!target->is_builtin()) continue;
Code* new_target =
Code::cast(builtins->builtins_[target->builtin_index()]);
rinfo->set_target_address(isolate, new_target->instruction_start(),
@@ -188,7 +185,7 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
Object* object = rinfo->target_object();
if (!object->IsCode()) continue;
Code* target = Code::cast(object);
- if (target->flags() != kPlaceholderFlags) continue;
+ if (!target->is_builtin()) continue;
Code* new_target =
Code::cast(builtins->builtins_[target->builtin_index()]);
rinfo->set_target_object(new_target, UPDATE_WRITE_BARRIER,
@@ -201,16 +198,6 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
code->instruction_size());
}
}
-#ifdef DEBUG
- // Verify that references to all placeholder builtins have been replaced.
- // Skip this check for non-snapshot builds.
- if (isolate->serializer_enabled()) {
- HeapIterator iterator(isolate->heap(), HeapIterator::kFilterUnreachable);
- while (HeapObject* obj = iterator.next()) {
- if (obj->IsCode()) CHECK_NE(kPlaceholderFlags, Code::cast(obj)->flags());
- }
- }
-#endif
}
void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
@@ -223,43 +210,39 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
HandleScope scope(isolate);
int index = 0;
- const Code::Flags kBuiltinFlags = Code::ComputeFlags(Code::BUILTIN);
Code* code;
-#define BUILD_CPP(Name) \
- code = BuildAdaptor(isolate, FUNCTION_ADDR(Builtin_##Name), \
- Builtins::BUILTIN_EXIT, kBuiltinFlags, #Name); \
+#define BUILD_CPP(Name) \
+ code = BuildAdaptor(isolate, FUNCTION_ADDR(Builtin_##Name), \
+ Builtins::BUILTIN_EXIT, #Name); \
AddBuiltin(builtins, index++, code);
#define BUILD_API(Name) \
code = BuildAdaptor(isolate, FUNCTION_ADDR(Builtin_##Name), Builtins::EXIT, \
- kBuiltinFlags, #Name); \
+ #Name); \
AddBuiltin(builtins, index++, code);
#define BUILD_TFJ(Name, Argc, ...) \
code = BuildWithCodeStubAssemblerJS(isolate, &Builtins::Generate_##Name, \
- Argc, kBuiltinFlags, #Name); \
+ Argc, #Name); \
AddBuiltin(builtins, index++, code);
#define BUILD_TFC(Name, InterfaceDescriptor, result_size) \
{ InterfaceDescriptor##Descriptor descriptor(isolate); } \
code = BuildWithCodeStubAssemblerCS(isolate, &Builtins::Generate_##Name, \
CallDescriptors::InterfaceDescriptor, \
- kBuiltinFlags, #Name, result_size); \
+ #Name, result_size); \
AddBuiltin(builtins, index++, code);
-#define BUILD_TFS(Name, ...) \
- /* Return size for generic TF builtins (stub linkage) is always 1. */ \
+#define BUILD_TFS(Name, ...) \
+ /* Return size for generic TF builtins (stub linkage) is always 1. */ \
+ code = BuildWithCodeStubAssemblerCS(isolate, &Builtins::Generate_##Name, \
+ CallDescriptors::Name, #Name, 1); \
+ AddBuiltin(builtins, index++, code);
+#define BUILD_TFH(Name, InterfaceDescriptor) \
+ { InterfaceDescriptor##Descriptor descriptor(isolate); } \
+ /* Return size for IC builtins/handlers is always 1. */ \
code = BuildWithCodeStubAssemblerCS(isolate, &Builtins::Generate_##Name, \
- CallDescriptors::Name, kBuiltinFlags, \
+ CallDescriptors::InterfaceDescriptor, \
#Name, 1); \
AddBuiltin(builtins, index++, code);
-#define BUILD_TFH(Name, Kind, Extra, InterfaceDescriptor) \
- { InterfaceDescriptor##Descriptor descriptor(isolate); } \
- /* Return size for IC builtins/handlers is always 1. */ \
- code = BuildWithCodeStubAssemblerCS(isolate, &Builtins::Generate_##Name, \
- CallDescriptors::InterfaceDescriptor, \
- Code::ComputeFlags(Code::Kind, Extra), \
- #Name, 1); \
- AddBuiltin(builtins, index++, code);
-#define BUILD_ASM(Name) \
- code = BuildWithMacroAssembler(isolate, Builtins::Generate_##Name, \
- kBuiltinFlags, #Name); \
+#define BUILD_ASM(Name) \
+ code = BuildWithMacroAssembler(isolate, Builtins::Generate_##Name, #Name); \
AddBuiltin(builtins, index++, code);
BUILTIN_LIST(BUILD_CPP, BUILD_API, BUILD_TFJ, BUILD_TFC, BUILD_TFS, BUILD_TFH,
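With ExtraICState and per-builtin Code::Flags gone, every BUILD_* macro above now bakes in Code::BUILTIN. The snippet below shows roughly how a single BUILD_ASM entry expands after this change; the concrete builtin name is substituted for the macro parameter purely for illustration.

    // Approximate post-change expansion of BUILD_ASM(CompileLazy):
    code = BuildWithMacroAssembler(isolate, Builtins::Generate_CompileLazy,
                                   "CompileLazy");
    AddBuiltin(builtins, index++, code);
    // Before this change the call also threaded through a kBuiltinFlags
    // value computed by Code::ComputeFlags(Code::BUILTIN).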
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index 981bb65fd1..713475cd34 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -11,6 +11,7 @@
#include "src/frame-constants.h"
#include "src/frames.h"
#include "src/objects-inl.h"
+#include "src/objects/debug-objects.h"
namespace v8 {
namespace internal {
@@ -560,8 +561,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// invocation.
}
- // TODO(X64): Is argument correct? Is there a receiver to remove?
- __ ret(1 * kPointerSize); // Remove receiver.
+ __ ret(0);
}
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
@@ -693,34 +693,17 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ jmp(&stepping_prepared);
}
+// TODO(juliana): if we remove the code below then we don't need all
+// the parameters.
static void ReplaceClosureCodeWithOptimizedCode(
MacroAssembler* masm, Register optimized_code, Register closure,
Register scratch1, Register scratch2, Register scratch3) {
- Register native_context = scratch1;
// Store the optimized code in the closure.
__ movp(FieldOperand(closure, JSFunction::kCodeOffset), optimized_code);
__ movp(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Link the closure into the optimized function list.
- __ movp(native_context, NativeContextOperand());
- __ movp(scratch3,
- ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- __ movp(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), scratch3);
- __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch3,
- scratch2, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- const int function_list_offset =
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
- __ movp(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
- closure);
- // Save closure before the write barrier.
- __ movp(scratch3, closure);
- __ RecordWriteContextSlot(native_context, function_list_offset, closure,
- scratch2, kDontSaveFPRegs);
- __ movp(closure, scratch3);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
@@ -1330,6 +1313,21 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
GenerateTailCallToSharedCode(masm);
}
+// TODO(jupvfranco): investigate whether there is any case where the CompileLazy
+// builtin does not set the code field in the JS function. If there isn't then
+// we do not need this builtin and can jump directly to CompileLazy.
+void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
+ // Set the code slot inside the JSFunction to the interpreter-entry
+ // trampoline.
+ __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movq(rcx, FieldOperand(rcx, SharedFunctionInfo::kCodeOffset));
+ __ movq(FieldOperand(rdi, JSFunction::kCodeOffset), rcx);
+ __ RecordWriteField(rdi, JSFunction::kCodeOffset, rcx, r15, kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ // Jump to compile lazy.
+ Generate_CompileLazy(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argument count (preserved for callee)
@@ -1374,6 +1372,91 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
+// Lazy deserialization design doc: http://goo.gl/dxkYDZ.
+void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : argument count (preserved for callee)
+ // -- rdx : new target (preserved for callee)
+ // -- rdi : target function (preserved for callee)
+ // -----------------------------------
+
+ Label deserialize_in_runtime;
+
+ Register target = rdi; // Must be preserved
+ Register scratch0 = rbx;
+ Register scratch1 = r12;
+
+ CHECK(scratch0 != rax && scratch0 != rdx && scratch0 != rdi);
+ CHECK(scratch1 != rax && scratch1 != rdx && scratch1 != rdi);
+ CHECK(scratch0 != scratch1);
+
+ // Load the builtin id for lazy deserialization from SharedFunctionInfo.
+
+ __ AssertFunction(target);
+ __ movp(scratch0,
+ FieldOperand(target, JSFunction::kSharedFunctionInfoOffset));
+
+ __ movp(scratch1,
+ FieldOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
+ __ AssertSmi(scratch1);
+
+ // The builtin may already have been deserialized. If that is the case, it is
+ // stored in the builtins table, and we can copy the correct code object to
+ // both the shared function info and the function without calling into runtime.
+ //
+ // Otherwise, we need to call into runtime to deserialize.
+
+ {
+ // Load the code object at builtins_table[builtin_id] into scratch1.
+
+ __ SmiToInteger32(scratch1, scratch1);
+ __ Move(scratch0, ExternalReference::builtins_address(masm->isolate()));
+ __ movp(scratch1, Operand(scratch0, scratch1, times_pointer_size, 0));
+
+ // Check if the loaded code object has already been deserialized. This is
+ // the case iff it does not equal DeserializeLazy.
+
+ __ Move(scratch0, masm->CodeObject());
+ __ cmpp(scratch1, scratch0);
+ __ j(equal, &deserialize_in_runtime);
+ }
+
+ {
+ // If we've reached this spot, the target builtin has been deserialized and
+ // we simply need to copy it over. First to the shared function info.
+
+ Register target_builtin = scratch1;
+ Register shared = scratch0;
+
+ __ movp(shared,
+ FieldOperand(target, JSFunction::kSharedFunctionInfoOffset));
+
+ CHECK(r14 != target && r14 != scratch0 && r14 != scratch1);
+ CHECK(r15 != target && r15 != scratch0 && r15 != scratch1);
+
+ __ movp(FieldOperand(shared, SharedFunctionInfo::kCodeOffset),
+ target_builtin);
+ __ movp(r14, target_builtin); // Write barrier clobbers r14 below.
+ __ RecordWriteField(shared, SharedFunctionInfo::kCodeOffset, r14, r15,
+ kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // And second to the target function.
+
+ __ movp(FieldOperand(target, JSFunction::kCodeOffset), target_builtin);
+ __ movp(r14, target_builtin); // Write barrier clobbers r14 below.
+ __ RecordWriteField(target, JSFunction::kCodeOffset, r14, r15,
+ kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // All copying is done. Jump to the deserialized code object.
+
+ __ leap(target_builtin, FieldOperand(target_builtin, Code::kHeaderSize));
+ __ jmp(target_builtin);
+ }
+
+ __ bind(&deserialize_in_runtime);
+ GenerateTailCallToReturnedCode(masm, Runtime::kDeserializeLazy);
+}
+
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argument count (preserved for callee)
@@ -1457,7 +1540,6 @@ void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
// Tear down internal frame.
}
- __ DropUnderReturnAddress(1); // Ignore state offset
__ ret(0); // Return to ContinueToBuiltin stub still on stack.
}
@@ -1512,51 +1594,17 @@ void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
Generate_ContinueToBuiltinHelper(masm, true, true);
}
-static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
- Deoptimizer::BailoutType type) {
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Pass the deoptimization type to the runtime system.
- __ Push(Smi::FromInt(static_cast<int>(type)));
-
__ CallRuntime(Runtime::kNotifyDeoptimized);
// Tear down internal frame.
}
- // Get the full codegen state from the stack and untag it.
- __ SmiToInteger32(kScratchRegister, Operand(rsp, kPCOnStackSize));
-
- // Switch on the state.
- Label not_no_registers, not_tos_rax;
- __ cmpp(kScratchRegister,
- Immediate(static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS)));
- __ j(not_equal, &not_no_registers, Label::kNear);
- __ ret(1 * kPointerSize); // Remove state.
-
- __ bind(&not_no_registers);
DCHECK_EQ(kInterpreterAccumulatorRegister.code(), rax.code());
- __ movp(rax, Operand(rsp, kPCOnStackSize + kPointerSize));
- __ cmpp(kScratchRegister,
- Immediate(static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER)));
- __ j(not_equal, &not_tos_rax, Label::kNear);
- __ ret(2 * kPointerSize); // Remove state, rax.
-
- __ bind(&not_tos_rax);
- __ Abort(kNoCasesLeft);
-}
-
-void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+ __ movp(rax, Operand(rsp, kPCOnStackSize));
+ __ ret(1 * kPointerSize); // Remove rax.
}
// static
@@ -1843,288 +1891,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-// static
-void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : number of arguments
- // -- rdi : constructor function
- // -- rsi : context
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // 1. Load the first argument into rbx.
- Label no_arguments;
- {
- StackArgumentsAccessor args(rsp, rax);
- __ testp(rax, rax);
- __ j(zero, &no_arguments, Label::kNear);
- __ movp(rbx, args.GetArgumentOperand(1));
- }
-
- // 2a. Convert the first argument to a number.
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ Integer32ToSmi(rax, rax);
- __ EnterBuiltinFrame(rsi, rdi, rax);
- __ movp(rax, rbx);
- __ Call(BUILTIN_CODE(masm->isolate(), ToNumber), RelocInfo::CODE_TARGET);
- __ LeaveBuiltinFrame(rsi, rdi, rbx); // Argc popped to rbx.
- __ SmiToInteger32(rbx, rbx);
- }
-
- {
- // Drop all arguments including the receiver.
- __ PopReturnAddressTo(rcx);
- __ leap(rsp, Operand(rsp, rbx, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(rcx);
- __ Ret();
- }
-
- // 2b. No arguments, return +0 (already in rax).
- __ bind(&no_arguments);
- __ ret(1 * kPointerSize);
-}
-
-// static
-void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : number of arguments
- // -- rdi : constructor function
- // -- rdx : new target
- // -- rsi : context
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // 1. Make sure we operate in the context of the called function.
- __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Store argc in r8.
- __ Integer32ToSmi(r8, rax);
-
- // 2. Load the first argument into rbx.
- {
- StackArgumentsAccessor args(rsp, rax);
- Label no_arguments, done;
- __ testp(rax, rax);
- __ j(zero, &no_arguments, Label::kNear);
- __ movp(rbx, args.GetArgumentOperand(1));
- __ jmp(&done, Label::kNear);
- __ bind(&no_arguments);
- __ Move(rbx, Smi::kZero);
- __ bind(&done);
- }
-
- // 3. Make sure rbx is a number.
- {
- Label done_convert;
- __ JumpIfSmi(rbx, &done_convert);
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(equal, &done_convert);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterBuiltinFrame(rsi, rdi, r8);
- __ Push(rdx);
- __ Move(rax, rbx);
- __ Call(BUILTIN_CODE(masm->isolate(), ToNumber), RelocInfo::CODE_TARGET);
- __ Move(rbx, rax);
- __ Pop(rdx);
- __ LeaveBuiltinFrame(rsi, rdi, r8);
- }
- __ bind(&done_convert);
- }
-
- // 4. Check if new target and constructor differ.
- Label drop_frame_and_ret, new_object;
- __ cmpp(rdx, rdi);
- __ j(not_equal, &new_object);
-
- // 5. Allocate a JSValue wrapper for the number.
- __ AllocateJSValue(rax, rdi, rbx, rcx, &new_object);
- __ jmp(&drop_frame_and_ret, Label::kNear);
-
- // 6. Fallback to the runtime to create new object.
- __ bind(&new_object);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterBuiltinFrame(rsi, rdi, r8);
- __ Push(rbx); // the first argument
- __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
- RelocInfo::CODE_TARGET);
- __ Pop(FieldOperand(rax, JSValue::kValueOffset));
- __ LeaveBuiltinFrame(rsi, rdi, r8);
- }
-
- __ bind(&drop_frame_and_ret);
- {
- // Drop all arguments including the receiver.
- __ PopReturnAddressTo(rcx);
- __ SmiToInteger32(r8, r8);
- __ leap(rsp, Operand(rsp, r8, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(rcx);
- __ Ret();
- }
-}
-
-// static
-void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : number of arguments
- // -- rdi : constructor function
- // -- rsi : context
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // 1. Load the first argument into rax.
- Label no_arguments;
- {
- StackArgumentsAccessor args(rsp, rax);
- __ Integer32ToSmi(r8, rax); // Store argc in r8.
- __ testp(rax, rax);
- __ j(zero, &no_arguments, Label::kNear);
- __ movp(rax, args.GetArgumentOperand(1));
- }
-
- // 2a. At least one argument, return rax if it's a string, otherwise
- // dispatch to appropriate conversion.
- Label drop_frame_and_ret, to_string, symbol_descriptive_string;
- {
- __ JumpIfSmi(rax, &to_string, Label::kNear);
- STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
- __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
- __ j(above, &to_string, Label::kNear);
- __ j(equal, &symbol_descriptive_string, Label::kNear);
- __ jmp(&drop_frame_and_ret, Label::kNear);
- }
-
- // 2b. No arguments, return the empty string (and pop the receiver).
- __ bind(&no_arguments);
- {
- __ LoadRoot(rax, Heap::kempty_stringRootIndex);
- __ ret(1 * kPointerSize);
- }
-
- // 3a. Convert rax to a string.
- __ bind(&to_string);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterBuiltinFrame(rsi, rdi, r8);
- __ Call(BUILTIN_CODE(masm->isolate(), ToString), RelocInfo::CODE_TARGET);
- __ LeaveBuiltinFrame(rsi, rdi, r8);
- }
- __ jmp(&drop_frame_and_ret, Label::kNear);
-
- // 3b. Convert symbol in rax to a string.
- __ bind(&symbol_descriptive_string);
- {
- __ PopReturnAddressTo(rcx);
- __ SmiToInteger32(r8, r8);
- __ leap(rsp, Operand(rsp, r8, times_pointer_size, kPointerSize));
- __ Push(rax);
- __ PushReturnAddressFrom(rcx);
- __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
- }
-
- __ bind(&drop_frame_and_ret);
- {
- // Drop all arguments including the receiver.
- __ PopReturnAddressTo(rcx);
- __ SmiToInteger32(r8, r8);
- __ leap(rsp, Operand(rsp, r8, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(rcx);
- __ Ret();
- }
-}
-
-// static
-void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : number of arguments
- // -- rdi : constructor function
- // -- rdx : new target
- // -- rsi : context
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // 1. Make sure we operate in the context of the called function.
- __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Store argc in r8.
- __ Integer32ToSmi(r8, rax);
-
- // 2. Load the first argument into rbx.
- {
- StackArgumentsAccessor args(rsp, rax);
- Label no_arguments, done;
- __ testp(rax, rax);
- __ j(zero, &no_arguments, Label::kNear);
- __ movp(rbx, args.GetArgumentOperand(1));
- __ jmp(&done, Label::kNear);
- __ bind(&no_arguments);
- __ LoadRoot(rbx, Heap::kempty_stringRootIndex);
- __ bind(&done);
- }
-
- // 3. Make sure rbx is a string.
- {
- Label convert, done_convert;
- __ JumpIfSmi(rbx, &convert, Label::kNear);
- __ CmpObjectType(rbx, FIRST_NONSTRING_TYPE, rcx);
- __ j(below, &done_convert);
- __ bind(&convert);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterBuiltinFrame(rsi, rdi, r8);
- __ Push(rdx);
- __ Move(rax, rbx);
- __ Call(BUILTIN_CODE(masm->isolate(), ToString), RelocInfo::CODE_TARGET);
- __ Move(rbx, rax);
- __ Pop(rdx);
- __ LeaveBuiltinFrame(rsi, rdi, r8);
- }
- __ bind(&done_convert);
- }
-
- // 4. Check if new target and constructor differ.
- Label drop_frame_and_ret, new_object;
- __ cmpp(rdx, rdi);
- __ j(not_equal, &new_object);
-
- // 5. Allocate a JSValue wrapper for the string.
- __ AllocateJSValue(rax, rdi, rbx, rcx, &new_object);
- __ jmp(&drop_frame_and_ret, Label::kNear);
-
- // 6. Fallback to the runtime to create new object.
- __ bind(&new_object);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterBuiltinFrame(rsi, rdi, r8);
- __ Push(rbx); // the first argument
- __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
- RelocInfo::CODE_TARGET);
- __ Pop(FieldOperand(rax, JSValue::kValueOffset));
- __ LeaveBuiltinFrame(rsi, rdi, r8);
- }
-
- __ bind(&drop_frame_and_ret);
- {
- // Drop all arguments including the receiver.
- __ PopReturnAddressTo(rcx);
- __ SmiToInteger32(r8, r8);
- __ leap(rsp, Operand(rsp, r8, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(rcx);
- __ Ret();
- }
-}
-
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ pushq(rbp);
__ movp(rbp, rsp);
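Generate_CompileLazyDeoptimizedCode, added to all three ports in this commit, does one thing before deferring to CompileLazy: it points the function's code slot back at the code held on the SharedFunctionInfo (the interpreter-entry trampoline), so the deoptimized code is no longer reachable through the function. An illustrative model with stand-in types, not V8 API:

    struct Code {};
    struct SharedFunctionInfo { Code* code; };  // interpreter-entry trampoline
    struct JSFunction { SharedFunctionInfo* shared; Code* code; };

    void CompileLazy(JSFunction* function);     // assumed declared elsewhere

    void CompileLazyDeoptimizedCode(JSFunction* function) {
      // Reset the code slot to the trampoline; the assembly versions emit a
      // write barrier for this store.
      function->code = function->shared->code;
      // Then behave exactly like CompileLazy.
      CompileLazy(function);
    }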
diff --git a/deps/v8/src/char-predicates.h b/deps/v8/src/char-predicates.h
index 88208d04f6..c1107e6568 100644
--- a/deps/v8/src/char-predicates.h
+++ b/deps/v8/src/char-predicates.h
@@ -80,7 +80,7 @@ struct WhiteSpace {
// as well as \u0009 - \u000d and \ufeff.
struct WhiteSpaceOrLineTerminator {
static inline bool Is(uc32 c) {
- return WhiteSpace::Is(c) || unibrow::LineTerminator::Is(c);
+ return WhiteSpace::Is(c) || unibrow::IsLineTerminator(c);
}
};
diff --git a/deps/v8/src/code-events.h b/deps/v8/src/code-events.h
index ca92e2b5e4..122d907881 100644
--- a/deps/v8/src/code-events.h
+++ b/deps/v8/src/code-events.h
@@ -27,46 +27,15 @@ class String;
V(SHARED_FUNC_MOVE_EVENT, "sfi-move") \
V(SNAPSHOT_CODE_NAME_EVENT, "snapshot-code-name") \
V(TICK_EVENT, "tick") \
- V(REPEAT_META_EVENT, "repeat") \
V(BUILTIN_TAG, "Builtin") \
- V(CALL_DEBUG_BREAK_TAG, "CallDebugBreak") \
- V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn") \
- V(CALL_INITIALIZE_TAG, "CallInitialize") \
- V(CALL_MEGAMORPHIC_TAG, "CallMegamorphic") \
- V(CALL_MISS_TAG, "CallMiss") \
- V(CALL_NORMAL_TAG, "CallNormal") \
- V(LOAD_INITIALIZE_TAG, "LoadInitialize") \
- V(LOAD_MEGAMORPHIC_TAG, "LoadMegamorphic") \
- V(STORE_INITIALIZE_TAG, "StoreInitialize") \
- V(STORE_GENERIC_TAG, "StoreGeneric") \
- V(STORE_MEGAMORPHIC_TAG, "StoreMegamorphic") \
- V(KEYED_CALL_DEBUG_BREAK_TAG, "KeyedCallDebugBreak") \
- V(KEYED_CALL_DEBUG_PREPARE_STEP_IN_TAG, "KeyedCallDebugPrepareStepIn") \
- V(KEYED_CALL_INITIALIZE_TAG, "KeyedCallInitialize") \
- V(KEYED_CALL_MEGAMORPHIC_TAG, "KeyedCallMegamorphic") \
- V(KEYED_CALL_MISS_TAG, "KeyedCallMiss") \
- V(KEYED_CALL_NORMAL_TAG, "KeyedCallNormal") \
V(CALLBACK_TAG, "Callback") \
V(EVAL_TAG, "Eval") \
V(FUNCTION_TAG, "Function") \
V(HANDLER_TAG, "Handler") \
V(BYTECODE_HANDLER_TAG, "BytecodeHandler") \
- V(KEYED_LOAD_IC_TAG, "KeyedLoadIC") \
- V(KEYED_LOAD_POLYMORPHIC_IC_TAG, "KeyedLoadPolymorphicIC") \
- V(KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG, "KeyedExternalArrayLoadIC") \
- V(KEYED_STORE_IC_TAG, "KeyedStoreIC") \
- V(KEYED_STORE_POLYMORPHIC_IC_TAG, "KeyedStorePolymorphicIC") \
- V(KEYED_EXTERNAL_ARRAY_STORE_IC_TAG, "KeyedExternalArrayStoreIC") \
V(LAZY_COMPILE_TAG, "LazyCompile") \
- V(CALL_IC_TAG, "CallIC") \
- V(LOAD_IC_TAG, "LoadIC") \
- V(LOAD_GLOBAL_IC_TAG, "LoadGlobalIC") \
- V(LOAD_POLYMORPHIC_IC_TAG, "LoadPolymorphicIC") \
V(REG_EXP_TAG, "RegExp") \
V(SCRIPT_TAG, "Script") \
- V(STORE_IC_TAG, "StoreIC") \
- V(STORE_GLOBAL_IC_TAG, "StoreGlobalIC") \
- V(STORE_POLYMORPHIC_IC_TAG, "StorePolymorphicIC") \
V(STUB_TAG, "Stub") \
V(NATIVE_FUNCTION_TAG, "Function") \
V(NATIVE_LAZY_COMPILE_TAG, "LazyCompile") \
diff --git a/deps/v8/src/code-factory.cc b/deps/v8/src/code-factory.cc
index 9f6207cd98..dfb6bda9e1 100644
--- a/deps/v8/src/code-factory.cc
+++ b/deps/v8/src/code-factory.cc
@@ -63,42 +63,17 @@ Callable CodeFactory::LoadGlobalICInOptimizedCode(Isolate* isolate,
LoadGlobalWithVectorDescriptor(isolate));
}
-// static
-Callable CodeFactory::StoreIC(Isolate* isolate, LanguageMode language_mode) {
- return Callable(language_mode == STRICT
- ? BUILTIN_CODE(isolate, StoreICStrictTrampoline)
- : BUILTIN_CODE(isolate, StoreICTrampoline),
- StoreDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::StoreICInOptimizedCode(Isolate* isolate,
- LanguageMode language_mode) {
- return Callable(language_mode == STRICT ? BUILTIN_CODE(isolate, StoreICStrict)
- : BUILTIN_CODE(isolate, StoreIC),
- StoreWithVectorDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::StoreIC_Uninitialized(Isolate* isolate,
- LanguageMode language_mode) {
- return Callable(language_mode == STRICT
- ? BUILTIN_CODE(isolate, StoreICStrict_Uninitialized)
- : BUILTIN_CODE(isolate, StoreIC_Uninitialized),
- StoreWithVectorDescriptor(isolate));
-}
-
Callable CodeFactory::StoreOwnIC(Isolate* isolate) {
// TODO(ishell): Currently we use StoreOwnIC only for storing properties that
   // already exist in the boilerplate, so we can use StoreIC.
- return Callable(BUILTIN_CODE(isolate, StoreICStrictTrampoline),
+ return Callable(BUILTIN_CODE(isolate, StoreICTrampoline),
StoreDescriptor(isolate));
}
Callable CodeFactory::StoreOwnICInOptimizedCode(Isolate* isolate) {
// TODO(ishell): Currently we use StoreOwnIC only for storing properties that
   // already exist in the boilerplate, so we can use StoreIC.
- return Callable(BUILTIN_CODE(isolate, StoreICStrict),
+ return Callable(BUILTIN_CODE(isolate, StoreIC),
StoreWithVectorDescriptor(isolate));
}
@@ -106,9 +81,7 @@ Callable CodeFactory::StoreOwnICInOptimizedCode(Isolate* isolate) {
Callable CodeFactory::StoreGlobalIC(Isolate* isolate,
LanguageMode language_mode) {
// TODO(ishell): Use StoreGlobalIC[Strict]Trampoline when it's ready.
- return Callable(language_mode == STRICT
- ? BUILTIN_CODE(isolate, StoreICStrictTrampoline)
- : BUILTIN_CODE(isolate, StoreICTrampoline),
+ return Callable(BUILTIN_CODE(isolate, StoreICTrampoline),
StoreDescriptor(isolate));
}
@@ -116,35 +89,7 @@ Callable CodeFactory::StoreGlobalIC(Isolate* isolate,
Callable CodeFactory::StoreGlobalICInOptimizedCode(Isolate* isolate,
LanguageMode language_mode) {
// TODO(ishell): Use StoreGlobalIC[Strict] when it's ready.
- return Callable(language_mode == STRICT ? BUILTIN_CODE(isolate, StoreICStrict)
- : BUILTIN_CODE(isolate, StoreIC),
- StoreWithVectorDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::KeyedStoreIC(Isolate* isolate,
- LanguageMode language_mode) {
- return Callable(language_mode == STRICT
- ? BUILTIN_CODE(isolate, KeyedStoreICStrictTrampoline)
- : BUILTIN_CODE(isolate, KeyedStoreICTrampoline),
- StoreDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::KeyedStoreICInOptimizedCode(Isolate* isolate,
- LanguageMode language_mode) {
- return Callable(language_mode == STRICT
- ? BUILTIN_CODE(isolate, KeyedStoreICStrict)
- : BUILTIN_CODE(isolate, KeyedStoreIC),
- StoreWithVectorDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::KeyedStoreIC_Megamorphic(Isolate* isolate,
- LanguageMode language_mode) {
- return Callable(language_mode == STRICT
- ? BUILTIN_CODE(isolate, KeyedStoreIC_Megamorphic_Strict)
- : BUILTIN_CODE(isolate, KeyedStoreIC_Megamorphic),
+ return Callable(BUILTIN_CODE(isolate, StoreIC),
StoreWithVectorDescriptor(isolate));
}
@@ -258,13 +203,6 @@ Callable CodeFactory::HandleDebuggerStatement(Isolate* isolate) {
}
// static
-Callable CodeFactory::FastCloneShallowArray(
- Isolate* isolate, AllocationSiteMode allocation_mode) {
- return Callable(isolate->builtins()->NewCloneShallowArray(allocation_mode),
- FastCloneShallowArrayDescriptor(isolate));
-}
-
-// static
Callable CodeFactory::FastNewFunctionContext(Isolate* isolate,
ScopeType scope_type) {
return Callable(isolate->builtins()->NewFunctionContext(scope_type),
@@ -272,12 +210,6 @@ Callable CodeFactory::FastNewFunctionContext(Isolate* isolate,
}
// static
-Callable CodeFactory::AllocateHeapNumber(Isolate* isolate) {
- AllocateHeapNumberStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
Callable CodeFactory::ArgumentAdaptor(Isolate* isolate) {
return Callable(BUILTIN_CODE(isolate, ArgumentsAdaptorTrampoline),
ArgumentAdaptorDescriptor(isolate));
diff --git a/deps/v8/src/code-factory.h b/deps/v8/src/code-factory.h
index 9047785539..1719cb549f 100644
--- a/deps/v8/src/code-factory.h
+++ b/deps/v8/src/code-factory.h
@@ -32,15 +32,8 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable StoreGlobalIC(Isolate* isolate, LanguageMode mode);
static Callable StoreGlobalICInOptimizedCode(Isolate* isolate,
LanguageMode mode);
- static Callable StoreIC(Isolate* isolate, LanguageMode mode);
- static Callable StoreICInOptimizedCode(Isolate* isolate, LanguageMode mode);
- static Callable StoreIC_Uninitialized(Isolate* isolate, LanguageMode mode);
static Callable StoreOwnIC(Isolate* isolate);
static Callable StoreOwnICInOptimizedCode(Isolate* isolate);
- static Callable KeyedStoreIC(Isolate* isolate, LanguageMode mode);
- static Callable KeyedStoreICInOptimizedCode(Isolate* isolate,
- LanguageMode mode);
- static Callable KeyedStoreIC_Megamorphic(Isolate* isolate, LanguageMode mode);
static Callable ResumeGenerator(Isolate* isolate);
@@ -67,14 +60,9 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable StringCompare(Isolate* isolate, Token::Value token);
static Callable SubString(Isolate* isolate);
- static Callable FastCloneShallowArray(Isolate* isolate,
- AllocationSiteMode allocation_mode);
-
static Callable FastNewFunctionContext(Isolate* isolate,
ScopeType scope_type);
- static Callable AllocateHeapNumber(Isolate* isolate);
-
static Callable ArgumentAdaptor(Isolate* isolate);
static Callable Call(Isolate* isolate,
ConvertReceiverMode mode = ConvertReceiverMode::kAny);
diff --git a/deps/v8/src/code-stub-assembler.cc b/deps/v8/src/code-stub-assembler.cc
index 35261955db..0c64d011d4 100644
--- a/deps/v8/src/code-stub-assembler.cc
+++ b/deps/v8/src/code-stub-assembler.cc
@@ -227,6 +227,21 @@ bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(Node* test) {
return false;
}
+bool CodeStubAssembler::TryGetIntPtrOrSmiConstantValue(Node* maybe_constant,
+ int* value) {
+ int32_t int32_constant;
+ if (ToInt32Constant(maybe_constant, int32_constant)) {
+ *value = int32_constant;
+ return true;
+ }
+ Smi* smi_constant;
+ if (ToSmiConstant(maybe_constant, smi_constant)) {
+ *value = Smi::ToInt(smi_constant);
+ return true;
+ }
+ return false;
+}
+
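
TryGetIntPtrOrSmiConstantValue follows the usual try-get shape: return false when the node is not a compile-time constant, otherwise report the value through the out-parameter. A standalone sketch of the call-site pattern, with Node reduced to a toy (not the real compiler node type):

    #include <cassert>

    struct Node { bool is_constant; int value; };  // toy stand-in

    static bool TryGetIntConstant(const Node* n, int* out) {
      if (!n->is_constant) return false;
      *out = n->value;
      return true;
    }

    int main() {
      Node capacity{true, 16};
      int v;
      if (TryGetIntConstant(&capacity, &v) && v > 0) {
        // Capacity is statically known and positive, so a caller can emit a
        // specialized code path with no runtime branch (see AllocateJSArray).
        assert(v == 16);
      }
    }
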
Node* CodeStubAssembler::IntPtrRoundUpToPowerOfTwo32(Node* value) {
Comment("IntPtrRoundUpToPowerOfTwo32");
CSA_ASSERT(this, UintPtrLessThanOrEqual(value, IntPtrConstant(0x80000000u)));
@@ -489,6 +504,10 @@ TNode<Smi> CodeStubAssembler::SmiTag(SloppyTNode<IntPtrT> value) {
}
TNode<IntPtrT> CodeStubAssembler::SmiUntag(SloppyTNode<Smi> value) {
+ intptr_t constant_value;
+ if (ToIntPtrConstant(value, constant_value)) {
+ return IntPtrConstant(constant_value >> (kSmiShiftSize + kSmiTagSize));
+ }
return UncheckedCast<IntPtrT>(
WordSar(BitcastTaggedToWord(value), SmiShiftBitsConstant()));
}
@@ -510,6 +529,44 @@ TNode<Smi> CodeStubAssembler::SmiMin(SloppyTNode<Smi> a, SloppyTNode<Smi> b) {
return SelectTaggedConstant(SmiLessThan(a, b), a, b);
}
+TNode<Object> CodeStubAssembler::NumberMax(SloppyTNode<Object> a,
+ SloppyTNode<Object> b) {
+ // TODO(danno): This could be optimized by specifically handling smi cases.
+ VARIABLE(result, MachineRepresentation::kTagged);
+ Label done(this), greater_than_equal_a(this), greater_than_equal_b(this);
+ GotoIfNumberGreaterThanOrEqual(a, b, &greater_than_equal_a);
+ GotoIfNumberGreaterThanOrEqual(b, a, &greater_than_equal_b);
+ result.Bind(NanConstant());
+ Goto(&done);
+ BIND(&greater_than_equal_a);
+ result.Bind(a);
+ Goto(&done);
+ BIND(&greater_than_equal_b);
+ result.Bind(b);
+ Goto(&done);
+ BIND(&done);
+ return TNode<Object>::UncheckedCast(result.value());
+}
+
+TNode<Object> CodeStubAssembler::NumberMin(SloppyTNode<Object> a,
+ SloppyTNode<Object> b) {
+ // TODO(danno): This could be optimized by specifically handling smi cases.
+ VARIABLE(result, MachineRepresentation::kTagged);
+ Label done(this), greater_than_equal_a(this), greater_than_equal_b(this);
+ GotoIfNumberGreaterThanOrEqual(a, b, &greater_than_equal_a);
+ GotoIfNumberGreaterThanOrEqual(b, a, &greater_than_equal_b);
+ result.Bind(NanConstant());
+ Goto(&done);
+ BIND(&greater_than_equal_a);
+ result.Bind(b);
+ Goto(&done);
+ BIND(&greater_than_equal_b);
+ result.Bind(a);
+ Goto(&done);
+ BIND(&done);
+ return TNode<Object>::UncheckedCast(result.value());
+}
+
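
The branch structure of NumberMax and NumberMin encodes NaN propagation without an explicit NaN check: when neither a >= b nor b >= a holds, one operand must be NaN. The same logic on plain doubles, as a sketch:

    #include <cassert>
    #include <cmath>

    static double NumberMax(double a, double b) {
      if (a >= b) return a;
      if (b >= a) return b;
      return std::nan("");  // both comparisons fail only if an operand is NaN
    }

    int main() {
      assert(NumberMax(1.0, 2.0) == 2.0);
      assert(std::isnan(NumberMax(1.0, std::nan(""))));
    }
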
Node* CodeStubAssembler::SmiMod(Node* a, Node* b) {
VARIABLE(var_result, MachineRepresentation::kTagged);
Label return_result(this, &var_result),
@@ -769,28 +826,29 @@ void CodeStubAssembler::BranchIfJSObject(Node* object, Label* if_true,
if_true, if_false);
}
-void CodeStubAssembler::BranchIfFastJSArray(
- Node* object, Node* context, CodeStubAssembler::FastJSArrayAccessMode mode,
- Label* if_true, Label* if_false) {
+void CodeStubAssembler::BranchIfFastJSArray(Node* object, Node* context,
+ Label* if_true, Label* if_false) {
// Bailout if receiver is a Smi.
GotoIf(TaggedIsSmi(object), if_false);
Node* map = LoadMap(object);
-
- // Bailout if instance type is not JS_ARRAY_TYPE.
- GotoIf(Word32NotEqual(LoadMapInstanceType(map), Int32Constant(JS_ARRAY_TYPE)),
- if_false);
-
- Node* elements_kind = LoadMapElementsKind(map);
+ GotoIfNot(IsJSArrayMap(map), if_false);
// Bailout if receiver has slow elements.
+ Node* elements_kind = LoadMapElementsKind(map);
GotoIfNot(IsFastElementsKind(elements_kind), if_false);
- // Check prototype chain if receiver does not have packed elements.
- if (mode == FastJSArrayAccessMode::INBOUNDS_READ) {
- GotoIfNot(IsHoleyFastElementsKind(elements_kind), if_true);
- }
- BranchIfPrototypesHaveNoElements(map, if_true, if_false);
+  // Check prototype chain if receiver does not have packed elements.
+ GotoIfNot(IsPrototypeInitialArrayPrototype(context, map), if_false);
+
+ Branch(IsArrayProtectorCellInvalid(), if_false, if_true);
+}
+
+void CodeStubAssembler::BranchIfFastJSArrayForCopy(Node* object, Node* context,
+ Label* if_true,
+ Label* if_false) {
+ GotoIf(IsSpeciesProtectorCellInvalid(), if_false);
+ BranchIfFastJSArray(object, context, if_true, if_false);
}
Node* CodeStubAssembler::AllocateRaw(Node* size_in_bytes, AllocationFlags flags,
@@ -977,22 +1035,21 @@ Node* CodeStubAssembler::IsRegularHeapObjectSize(Node* size) {
void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
Label* if_false) {
- Label if_valueissmi(this), if_valueisnotsmi(this),
- if_valueisheapnumber(this, Label::kDeferred);
-
+ Label if_smi(this), if_notsmi(this), if_heapnumber(this, Label::kDeferred),
+ if_bigint(this, Label::kDeferred);
// Rule out false {value}.
GotoIf(WordEqual(value, BooleanConstant(false)), if_false);
// Check if {value} is a Smi or a HeapObject.
- Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+ Branch(TaggedIsSmi(value), &if_smi, &if_notsmi);
- BIND(&if_valueissmi);
+ BIND(&if_smi);
{
// The {value} is a Smi, only need to check against zero.
BranchIfSmiEqual(value, SmiConstant(0), if_false, if_true);
}
- BIND(&if_valueisnotsmi);
+ BIND(&if_notsmi);
{
// Check if {value} is the empty string.
GotoIf(IsEmptyString(value), if_false);
@@ -1006,9 +1063,10 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
// We still need to handle numbers specially, but all other {value}s
// that make it here yield true.
- Branch(IsHeapNumberMap(value_map), &if_valueisheapnumber, if_true);
+ GotoIf(IsHeapNumberMap(value_map), &if_heapnumber);
+ Branch(IsBigInt(value), &if_bigint, if_true);
- BIND(&if_valueisheapnumber);
+ BIND(&if_heapnumber);
{
// Load the floating point value of {value}.
Node* value_value = LoadObjectField(value, HeapNumber::kValueOffset,
@@ -1018,6 +1076,14 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
Branch(Float64LessThan(Float64Constant(0.0), Float64Abs(value_value)),
if_true, if_false);
}
+
+ BIND(&if_bigint);
+ {
+ Node* result =
+ CallRuntime(Runtime::kBigIntToBoolean, NoContextConstant(), value);
+ CSA_ASSERT(this, IsBoolean(result));
+ Branch(WordEqual(result, BooleanConstant(true)), if_true, if_false);
+ }
}
}
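
The new BigInt arm slots into the existing ToBoolean ladder: false, the empty string, 0, -0, NaN, and (via the runtime call) a zero BigInt are falsy. Note how the HeapNumber case tests 0.0 < |value| rather than value != 0.0, which classifies NaN as falsy with a single comparison. A sketch of the ladder over a toy tagged type:

    #include <cassert>
    #include <cmath>
    #include <string>
    #include <variant>

    using Value = std::variant<bool, double, std::string>;  // toy subset of JS values

    static bool ToBoolean(const Value& v) {
      if (const bool* b = std::get_if<bool>(&v)) return *b;
      if (const double* d = std::get_if<double>(&v)) {
        return 0.0 < std::fabs(*d);  // 0, -0 and NaN all fail this comparison
      }
      return !std::get<std::string>(v).empty();  // "" is falsy
    }

    int main() {
      assert(!ToBoolean(Value(std::nan(""))));
      assert(!ToBoolean(Value(-0.0)));
      assert(ToBoolean(Value(std::string("x"))));
    }
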
@@ -1187,8 +1253,8 @@ TNode<Int32T> CodeStubAssembler::LoadHashForJSObject(
{
Node* length_and_hash_int32 = LoadAndUntagToWord32ObjectField(
properties_or_hash, PropertyArray::kLengthAndHashOffset);
- var_hash.Bind(Word32And(length_and_hash_int32,
- Int32Constant(PropertyArray::kHashMask)));
+ var_hash.Bind(
+ DecodeWord32<PropertyArray::HashField>(length_and_hash_int32));
Goto(&done);
}
@@ -1361,6 +1427,12 @@ TNode<Object> CodeStubAssembler::LoadMapConstructor(SloppyTNode<Map> map) {
return result;
}
+Node* CodeStubAssembler::LoadMapEnumLength(SloppyTNode<Map> map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
+ Node* bit_field3 = LoadMapBitField3(map);
+ return DecodeWordFromWord32<Map::EnumLengthBits>(bit_field3);
+}
+
Node* CodeStubAssembler::LoadNameHashField(Node* name) {
CSA_ASSERT(this, IsName(name));
return LoadObjectField(name, Name::kHashFieldOffset, MachineType::Uint32());
@@ -1412,6 +1484,9 @@ Node* CodeStubAssembler::LoadWeakCellValue(Node* weak_cell, Label* if_cleared) {
Node* CodeStubAssembler::LoadFixedArrayElement(Node* object, Node* index_node,
int additional_offset,
ParameterMode parameter_mode) {
+ CSA_SLOW_ASSERT(this, IntPtrGreaterThanOrEqual(
+ ParameterToWord(index_node, parameter_mode),
+ IntPtrConstant(0)));
int32_t header_size =
FixedArray::kHeaderSize + additional_offset - kHeapObjectTag;
Node* offset = ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS,
@@ -2247,6 +2322,7 @@ Node* CodeStubAssembler::AllocateNameDictionary(Node* at_least_space_for) {
Node* CodeStubAssembler::AllocateNameDictionaryWithCapacity(Node* capacity) {
CSA_ASSERT(this, WordIsPowerOfTwo(capacity));
+ CSA_ASSERT(this, IntPtrGreaterThan(capacity, IntPtrConstant(0)));
Node* length = EntryToIndex<NameDictionary>(capacity);
Node* store_size = IntPtrAdd(TimesPointerSize(length),
IntPtrConstant(NameDictionary::kHeaderSize));
@@ -2267,7 +2343,7 @@ Node* CodeStubAssembler::AllocateNameDictionaryWithCapacity(Node* capacity) {
StoreFixedArrayElement(result, NameDictionary::kCapacityIndex,
SmiTag(capacity), SKIP_WRITE_BARRIER);
// Initialize Dictionary fields.
- Node* filler = LoadRoot(Heap::kUndefinedValueRootIndex);
+ Node* filler = UndefinedConstant();
StoreFixedArrayElement(result, NameDictionary::kNextEnumerationIndexIndex,
SmiConstant(PropertyDetails::kInitialIndex),
SKIP_WRITE_BARRIER);
@@ -2303,6 +2379,30 @@ Node* CodeStubAssembler::CopyNameDictionary(Node* dictionary,
return properties;
}
+Node* CodeStubAssembler::AllocateStruct(Node* map, AllocationFlags flags) {
+ Comment("AllocateStruct");
+ CSA_ASSERT(this, IsMap(map));
+ Node* size = TimesPointerSize(LoadMapInstanceSize(map));
+ Node* object = Allocate(size, flags);
+ StoreMapNoWriteBarrier(object, map);
+ InitializeStructBody(object, map, size, Struct::kHeaderSize);
+ return object;
+}
+
+void CodeStubAssembler::InitializeStructBody(Node* object, Node* map,
+ Node* size, int start_offset) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
+ Comment("InitializeStructBody");
+ Node* filler = UndefinedConstant();
+ // Calculate the untagged field addresses.
+ object = BitcastTaggedToWord(object);
+ Node* start_address =
+ IntPtrAdd(object, IntPtrConstant(start_offset - kHeapObjectTag));
+ Node* end_address =
+ IntPtrSub(IntPtrAdd(object, size), IntPtrConstant(kHeapObjectTag));
+ StoreFieldsNoWriteBarrier(start_address, end_address, filler);
+}
+
Node* CodeStubAssembler::AllocateJSObjectFromMap(Node* map, Node* properties,
Node* elements,
AllocationFlags flags) {
@@ -2346,7 +2446,7 @@ void CodeStubAssembler::InitializeJSObjectBody(Node* object, Node* map,
CSA_SLOW_ASSERT(this, IsMap(map));
// TODO(cbruni): activate in-object slack tracking machinery.
Comment("InitializeJSObjectBody");
- Node* filler = LoadRoot(Heap::kUndefinedValueRootIndex);
+ Node* filler = UndefinedConstant();
// Calculate the untagged field addresses.
object = BitcastTaggedToWord(object);
Node* start_address =
@@ -2371,7 +2471,7 @@ void CodeStubAssembler::StoreFieldsNoWriteBarrier(Node* start_address,
}
Node* CodeStubAssembler::AllocateUninitializedJSArrayWithoutElements(
- ElementsKind kind, Node* array_map, Node* length, Node* allocation_site) {
+ Node* array_map, Node* length, Node* allocation_site) {
Comment("begin allocation of JSArray without elements");
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
CSA_SLOW_ASSERT(this, IsMap(array_map));
@@ -2381,8 +2481,8 @@ Node* CodeStubAssembler::AllocateUninitializedJSArrayWithoutElements(
}
Node* size = IntPtrConstant(base_size);
- Node* array = AllocateUninitializedJSArray(kind, array_map, length,
- allocation_site, size);
+ Node* array =
+ AllocateUninitializedJSArray(array_map, length, allocation_site, size);
return array;
}
@@ -2405,8 +2505,8 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
base_size += FixedArray::kHeaderSize;
Node* size = ElementOffsetFromIndex(capacity, kind, capacity_mode, base_size);
- Node* array = AllocateUninitializedJSArray(kind, array_map, length,
- allocation_site, size);
+ Node* array =
+ AllocateUninitializedJSArray(array_map, length, allocation_site, size);
Node* elements = InnerAllocate(array, elements_offset);
StoreObjectFieldNoWriteBarrier(array, JSObject::kElementsOffset, elements);
@@ -2417,13 +2517,14 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
: Heap::kFixedArrayMapRootIndex;
DCHECK(Heap::RootIsImmortalImmovable(elements_map_index));
StoreMapNoWriteBarrier(elements, elements_map_index);
+ Node* capacity_smi = ParameterToTagged(capacity, capacity_mode);
+ CSA_ASSERT(this, SmiGreaterThan(capacity_smi, SmiConstant(0)));
StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset,
- ParameterToTagged(capacity, capacity_mode));
+ capacity_smi);
return {array, elements};
}
-Node* CodeStubAssembler::AllocateUninitializedJSArray(ElementsKind kind,
- Node* array_map,
+Node* CodeStubAssembler::AllocateUninitializedJSArray(Node* array_map,
Node* length,
Node* allocation_site,
Node* size_in_bytes) {
@@ -2456,15 +2557,17 @@ Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map,
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, capacity_mode));
+ int capacity_as_constant;
Node *array = nullptr, *elements = nullptr;
if (IsIntPtrOrSmiConstantZero(capacity)) {
// Array is empty. Use the shared empty fixed array instead of allocating a
// new one.
- array = AllocateUninitializedJSArrayWithoutElements(kind, array_map, length,
+ array = AllocateUninitializedJSArrayWithoutElements(array_map, length,
allocation_site);
StoreObjectFieldRoot(array, JSArray::kElementsOffset,
Heap::kEmptyFixedArrayRootIndex);
- } else {
+ } else if (TryGetIntPtrOrSmiConstantValue(capacity, &capacity_as_constant) &&
+ capacity_as_constant > 0) {
// Allocate both array and elements object, and initialize the JSArray.
std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
kind, array_map, length, allocation_site, capacity, capacity_mode);
@@ -2472,6 +2575,40 @@ Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map,
FillFixedArrayWithValue(kind, elements,
IntPtrOrSmiConstant(0, capacity_mode), capacity,
Heap::kTheHoleValueRootIndex, capacity_mode);
+ } else {
+ Label out(this), empty(this), nonempty(this);
+ VARIABLE(var_array, MachineRepresentation::kTagged);
+
+ Branch(SmiEqual(ParameterToTagged(capacity, capacity_mode), SmiConstant(0)),
+ &empty, &nonempty);
+
+ BIND(&empty);
+ {
+ // Array is empty. Use the shared empty fixed array instead of allocating
+ // a new one.
+ var_array.Bind(AllocateUninitializedJSArrayWithoutElements(
+ array_map, length, allocation_site));
+ StoreObjectFieldRoot(var_array.value(), JSArray::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ Goto(&out);
+ }
+
+ BIND(&nonempty);
+ {
+ // Allocate both array and elements object, and initialize the JSArray.
+ Node* array;
+ std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
+ kind, array_map, length, allocation_site, capacity, capacity_mode);
+ var_array.Bind(array);
+ // Fill in the elements with holes.
+ FillFixedArrayWithValue(kind, elements,
+ IntPtrOrSmiConstant(0, capacity_mode), capacity,
+ Heap::kTheHoleValueRootIndex, capacity_mode);
+ Goto(&out);
+ }
+
+ BIND(&out);
+ array = var_array.value();
}
return array;
@@ -2508,7 +2645,8 @@ void CodeStubAssembler::InitializePropertyArrayLength(Node* property_array,
CSA_ASSERT(
this,
IntPtrOrSmiLessThanOrEqual(
- length, IntPtrOrSmiConstant(PropertyArray::kMaxLength, mode), mode));
+ length, IntPtrOrSmiConstant(PropertyArray::LengthField::kMax, mode),
+ mode));
StoreObjectFieldNoWriteBarrier(
property_array, PropertyArray::kLengthAndHashOffset,
ParameterToTagged(length, mode), MachineRepresentation::kTaggedSigned);
@@ -2539,8 +2677,7 @@ void CodeStubAssembler::FillPropertyArrayWithUndefined(Node* array,
CSA_SLOW_ASSERT(this, IsPropertyArray(array));
STATIC_ASSERT(kHoleNanLower32 == kHoleNanUpper32);
ElementsKind kind = PACKED_ELEMENTS;
- Node* value = LoadRoot(Heap::kUndefinedValueRootIndex);
-
+ Node* value = UndefinedConstant();
BuildFastFixedArrayForEach(array, kind, from_node, to_node,
[this, value](Node* array, Node* offset) {
StoreNoWriteBarrier(
@@ -3404,6 +3541,23 @@ Node* CodeStubAssembler::ThrowIfNotInstanceType(Node* context, Node* value,
return var_value_map.value();
}
+void CodeStubAssembler::ThrowRangeError(Node* context,
+ MessageTemplate::Template message,
+ Node* arg0, Node* arg1, Node* arg2) {
+ Node* template_index = SmiConstant(message);
+ if (arg0 == nullptr) {
+ CallRuntime(Runtime::kThrowRangeError, context, template_index);
+ } else if (arg1 == nullptr) {
+ CallRuntime(Runtime::kThrowRangeError, context, template_index, arg0);
+ } else if (arg2 == nullptr) {
+ CallRuntime(Runtime::kThrowRangeError, context, template_index, arg0, arg1);
+ } else {
+ CallRuntime(Runtime::kThrowRangeError, context, template_index, arg0, arg1,
+ arg2);
+ }
+ Unreachable();
+}
+
void CodeStubAssembler::ThrowTypeError(Node* context,
MessageTemplate::Template message,
char const* arg0, char const* arg1) {
@@ -3481,6 +3635,22 @@ Node* CodeStubAssembler::IsArrayProtectorCellInvalid() {
return WordEqual(cell_value, invalid);
}
+Node* CodeStubAssembler::IsSpeciesProtectorCellInvalid() {
+ Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
+ Node* cell = LoadRoot(Heap::kSpeciesProtectorRootIndex);
+ Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
+ return WordEqual(cell_value, invalid);
+}
+
+Node* CodeStubAssembler::IsPrototypeInitialArrayPrototype(Node* context,
+ Node* map) {
+ Node* const native_context = LoadNativeContext(context);
+ Node* const initial_array_prototype = LoadContextElement(
+ native_context, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
+ Node* proto = LoadMapPrototype(map);
+ return WordEqual(proto, initial_array_prototype);
+}
+
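
Both new predicates read a protector cell, V8's mechanism for guarding optimized fast paths: a PropertyCell starts out holding kProtectorValid and is flipped to kProtectorInvalid once the guarded invariant can no longer be assumed (for the species protector, for example, when Symbol.species is tampered with on Array). A sketch of the idea with the V8 types reduced to toys (the payload values are an assumption):

    #include <cassert>

    enum { kProtectorValid = 1, kProtectorInvalid = 0 };  // assumed Smi payloads

    struct PropertyCell { int value = kProtectorValid; };  // toy stand-in

    static bool IsProtectorCellInvalid(const PropertyCell& cell) {
      return cell.value == kProtectorInvalid;
    }

    int main() {
      PropertyCell species;
      assert(!IsProtectorCellInvalid(species));  // fast array-copy path allowed
      species.value = kProtectorInvalid;         // the invariant was broken
      assert(IsProtectorCellInvalid(species));   // guarded fast paths now bail
    }
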
Node* CodeStubAssembler::IsCallable(Node* object) {
return IsCallableMap(LoadMap(object));
}
@@ -3715,8 +3885,7 @@ Node* CodeStubAssembler::IsName(Node* object) {
}
Node* CodeStubAssembler::IsString(Node* object) {
- return Int32LessThan(LoadInstanceType(object),
- Int32Constant(FIRST_NONSTRING_TYPE));
+ return IsStringInstanceType(LoadInstanceType(object));
}
Node* CodeStubAssembler::IsSymbolInstanceType(Node* instance_type) {
@@ -3727,6 +3896,14 @@ Node* CodeStubAssembler::IsSymbol(Node* object) {
return IsSymbolMap(LoadMap(object));
}
+Node* CodeStubAssembler::IsBigIntInstanceType(Node* instance_type) {
+ return Word32Equal(instance_type, Int32Constant(BIGINT_TYPE));
+}
+
+Node* CodeStubAssembler::IsBigInt(Node* object) {
+ return IsBigIntInstanceType(LoadInstanceType(object));
+}
+
Node* CodeStubAssembler::IsPrimitiveInstanceType(Node* instance_type) {
return Int32LessThanOrEqual(instance_type,
Int32Constant(LAST_PRIMITIVE_TYPE));
@@ -3839,6 +4016,36 @@ Node* CodeStubAssembler::IsNumberPositive(Node* number) {
MachineRepresentation::kWord32);
}
+Node* CodeStubAssembler::IsNumberArrayIndex(Node* number) {
+ VARIABLE(var_result, MachineRepresentation::kWord32, Int32Constant(1));
+
+ Label check_upper_bound(this), check_is_integer(this), out(this),
+ return_false(this);
+
+ GotoIfNumberGreaterThanOrEqual(number, NumberConstant(0), &check_upper_bound);
+ Goto(&return_false);
+
+ BIND(&check_upper_bound);
+ GotoIfNumberGreaterThanOrEqual(number, NumberConstant(kMaxUInt32),
+ &return_false);
+ Goto(&check_is_integer);
+
+ BIND(&check_is_integer);
+ GotoIf(TaggedIsSmi(number), &out);
+ // Check that the HeapNumber is a valid uint32
+ Node* value = LoadHeapNumberValue(number);
+ Node* int_value = ChangeFloat64ToUint32(value);
+ GotoIf(Float64Equal(value, ChangeUint32ToFloat64(int_value)), &out);
+ Goto(&return_false);
+
+ BIND(&return_false);
+ var_result.Bind(Int32Constant(0));
+ Goto(&out);
+
+ BIND(&out);
+ return var_result.value();
+}
+
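
IsNumberArrayIndex combines a range check against kMaxUInt32 with an integrality check done by round-tripping the value through uint32. On plain doubles the same test looks like this (a sketch; 4294967295.0 is 2^32-1):

    #include <cassert>
    #include <cstdint>

    static bool IsArrayIndex(double n) {
      if (!(n >= 0.0)) return false;        // also rejects NaN
      if (n >= 4294967295.0) return false;  // keep the range [0, 2^32-1)
      return static_cast<double>(static_cast<uint32_t>(n)) == n;  // integral?
    }

    int main() {
      assert(IsArrayIndex(0.0));
      assert(!IsArrayIndex(1.5));
      assert(!IsArrayIndex(4294967295.0));  // 2^32-1 is a length, not an index
    }
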
TNode<Uint32T> CodeStubAssembler::StringCharCodeAt(
SloppyTNode<String> string, Node* index, ParameterMode parameter_mode) {
CSA_ASSERT(this, MatchesParameterMode(index, parameter_mode));
@@ -3880,8 +4087,8 @@ TNode<Uint32T> CodeStubAssembler::StringCharCodeAt(
BIND(&if_runtime);
{
- Node* result = CallRuntime(Runtime::kStringCharCodeAtRT,
- NoContextConstant(), string, SmiTag(index));
+ Node* result = CallRuntime(Runtime::kStringCharCodeAt, NoContextConstant(),
+ string, SmiTag(index));
var_result.Bind(SmiToWord32(result));
Goto(&return_result);
}
@@ -4046,39 +4253,37 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
// encoding at this point.
Label external_string(this);
{
- if (FLAG_string_slices) {
- Label next(this);
-
- // Short slice. Copy instead of slicing.
- GotoIf(SmiLessThan(substr_length, SmiConstant(SlicedString::kMinLength)),
- &next);
+ Label next(this);
- // Allocate new sliced string.
+ // Short slice. Copy instead of slicing.
+ GotoIf(SmiLessThan(substr_length, SmiConstant(SlicedString::kMinLength)),
+ &next);
- Counters* counters = isolate()->counters();
- IncrementCounter(counters->sub_string_native(), 1);
+ // Allocate new sliced string.
- Label one_byte_slice(this), two_byte_slice(this);
- Branch(IsOneByteStringInstanceType(to_direct.instance_type()),
- &one_byte_slice, &two_byte_slice);
+ Counters* counters = isolate()->counters();
+ IncrementCounter(counters->sub_string_native(), 1);
- BIND(&one_byte_slice);
- {
- var_result.Bind(
- AllocateSlicedOneByteString(substr_length, direct_string, offset));
- Goto(&end);
- }
+ Label one_byte_slice(this), two_byte_slice(this);
+ Branch(IsOneByteStringInstanceType(to_direct.instance_type()),
+ &one_byte_slice, &two_byte_slice);
- BIND(&two_byte_slice);
- {
- var_result.Bind(
- AllocateSlicedTwoByteString(substr_length, direct_string, offset));
- Goto(&end);
- }
+ BIND(&one_byte_slice);
+ {
+ var_result.Bind(
+ AllocateSlicedOneByteString(substr_length, direct_string, offset));
+ Goto(&end);
+ }
- BIND(&next);
+ BIND(&two_byte_slice);
+ {
+ var_result.Bind(
+ AllocateSlicedTwoByteString(substr_length, direct_string, offset));
+ Goto(&end);
}
+ BIND(&next);
+
// The subject string can only be external or sequential string of either
// encoding at this point.
GotoIf(to_direct.is_external(), &external_string);
@@ -4086,7 +4291,6 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
var_result.Bind(AllocAndCopyStringCharacters(
context, direct_string, instance_type, offset, substr_length));
- Counters* counters = isolate()->counters();
IncrementCounter(counters->sub_string_native(), 1);
Goto(&end);
@@ -5275,12 +5479,24 @@ TNode<IntPtrT> CodeStubAssembler::HashTableComputeCapacity(
TNode<IntPtrT> CodeStubAssembler::IntPtrMax(SloppyTNode<IntPtrT> left,
SloppyTNode<IntPtrT> right) {
+ intptr_t left_constant;
+ intptr_t right_constant;
+ if (ToIntPtrConstant(left, left_constant) &&
+ ToIntPtrConstant(right, right_constant)) {
+ return IntPtrConstant(std::max(left_constant, right_constant));
+ }
return SelectConstant(IntPtrGreaterThanOrEqual(left, right), left, right,
MachineType::PointerRepresentation());
}
TNode<IntPtrT> CodeStubAssembler::IntPtrMin(SloppyTNode<IntPtrT> left,
SloppyTNode<IntPtrT> right) {
+ intptr_t left_constant;
+ intptr_t right_constant;
+ if (ToIntPtrConstant(left, left_constant) &&
+ ToIntPtrConstant(right, right_constant)) {
+ return IntPtrConstant(std::min(left_constant, right_constant));
+ }
return SelectConstant(IntPtrLessThanOrEqual(left, right), left, right,
MachineType::PointerRepresentation());
}
@@ -6564,7 +6780,7 @@ Node* CodeStubAssembler::LoadFeedbackVectorForStub() {
}
void CodeStubAssembler::UpdateFeedback(Node* feedback, Node* feedback_vector,
- Node* slot_id, Node* function) {
+ Node* slot_id) {
// This method is used for binary op and compare feedback. These
// vector nodes are initialized with a smi 0, so we can simply OR
// our new feedback in place.
@@ -6849,10 +7065,15 @@ Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
}
VARIABLE(var_result, rep);
- Label done(this, &var_result), if_smi(this);
+ Label done(this, &var_result), if_smi(this), if_heapnumber(this);
GotoIf(TaggedIsSmi(input), &if_smi);
- // Try to convert a heap number to a Smi.
- GotoIfNot(IsHeapNumber(input), bailout);
+ // We can handle both HeapNumber and Oddball here, since Oddball has the
+ // same layout as the HeapNumber for the HeapNumber::value field. This
+ // way we can also properly optimize stores of oddballs to typed arrays.
+ GotoIf(IsHeapNumber(input), &if_heapnumber);
+ Branch(HasInstanceType(input, ODDBALL_TYPE), &if_heapnumber, bailout);
+
+ BIND(&if_heapnumber);
{
Node* value = LoadHeapNumberValue(input);
if (rep == MachineRepresentation::kWord32) {
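
The Oddball case above works because Oddball caches its numeric value at the same field offset that HeapNumber uses for its payload, so one unchecked load serves both object shapes. A sketch of the layout property being relied on (field names and layout here are illustrative, not V8's real object layout):

    #include <cstddef>

    struct HeapNumber { void* map; double value; };          // illustrative
    struct Oddball    { void* map; double to_number_raw; };  // illustrative

    static_assert(offsetof(HeapNumber, value) == offsetof(Oddball, to_number_raw),
                  "one load at a shared offset can read either object's number");

    int main() {}
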
@@ -6896,6 +7117,7 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
ElementsKind elements_kind,
KeyedAccessStoreMode store_mode,
Label* bailout) {
+ CSA_ASSERT(this, Word32BinaryNot(IsJSProxy(object)));
Node* elements = LoadElements(object);
if (IsSmiOrObjectElementsKind(elements_kind) &&
store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
@@ -7074,11 +7296,8 @@ void CodeStubAssembler::TransitionElementsKind(Node* object, Node* map,
Comment("Non-simple map transition");
Node* elements = LoadElements(object);
- Node* empty_fixed_array =
- HeapConstant(isolate()->factory()->empty_fixed_array());
-
Label done(this);
- GotoIf(WordEqual(elements, empty_fixed_array), &done);
+ GotoIf(WordEqual(elements, EmptyFixedArrayConstant()), &done);
// TODO(ishell): Use OptimalParameterMode().
ParameterMode mode = INTPTR_PARAMETERS;
@@ -7086,6 +7305,8 @@ void CodeStubAssembler::TransitionElementsKind(Node* object, Node* map,
Node* array_length =
is_jsarray ? SmiUntag(LoadFastJSArrayLength(object)) : elements_length;
+ CSA_ASSERT(this, WordNotEqual(elements_length, IntPtrConstant(0)));
+
GrowElementsCapacity(object, elements, from_kind, to_kind, array_length,
elements_length, mode, bailout);
Goto(&done);
@@ -7232,6 +7453,7 @@ void CodeStubAssembler::HandleSlackTracking(Node* context, Node* object,
// Perform in-object slack tracking if requested.
Node* bit_field3 = LoadMapBitField3(initial_map);
Label end(this), slack_tracking(this), finalize(this, Label::kDeferred);
+ STATIC_ASSERT(Map::kNoSlackTracking == 0);
GotoIf(IsSetWord32<Map::ConstructionCounter>(bit_field3), &slack_tracking);
// Initialize remaining fields.
@@ -7521,11 +7743,12 @@ void CodeStubAssembler::BranchIfNumericRelationalComparison(
}
}
-void CodeStubAssembler::GotoUnlessNumberLessThan(Node* lhs, Node* rhs,
- Label* if_false) {
- Label if_true(this);
- BranchIfNumericRelationalComparison(kLessThan, lhs, rhs, &if_true, if_false);
- BIND(&if_true);
+void CodeStubAssembler::GotoIfNumberGreaterThanOrEqual(Node* lhs, Node* rhs,
+ Label* if_true) {
+ Label if_false(this);
+ BranchIfNumericRelationalComparison(kGreaterThanOrEqual, lhs, rhs, if_true,
+ &if_false);
+ BIND(&if_false);
}
Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
@@ -8439,6 +8662,7 @@ Node* CodeStubAssembler::Equal(Node* lhs, Node* rhs, Node* context,
BIND(&if_lhsisreceiver);
{
+ CSA_ASSERT(this, IsJSReceiverInstanceType(lhs_instance_type));
// Check if the {rhs} is also a JSReceiver.
Label if_rhsisreceiver(this), if_rhsisnotreceiver(this);
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
@@ -8527,8 +8751,7 @@ Node* CodeStubAssembler::Equal(Node* lhs, Node* rhs, Node* context,
Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
Variable* var_type_feedback) {
- // Here's pseudo-code for the algorithm below in case of kDontNegateResult
- // mode; for kNegateResult mode we properly negate the result.
+ // Pseudo-code for the algorithm below:
//
// if (lhs == rhs) {
// if (lhs->IsHeapNumber()) return HeapNumber::cast(lhs)->value() != NaN;
@@ -8554,6 +8777,12 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
// } else {
// return false;
// }
+ // } else if (lhs->IsBigInt()) {
+ // if (rhs->IsBigInt()) {
+ // return %BigIntEqual(lhs, rhs);
+ // } else {
+ // return false;
+ // }
// } else {
// return false;
// }
@@ -8590,8 +8819,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_notsame);
{
- // The {lhs} and {rhs} reference different objects, yet for Smi, HeapNumber
- // and String they can still be considered equal.
+ // The {lhs} and {rhs} reference different objects, yet for Smi, HeapNumber,
+ // BigInt and String they can still be considered equal.
if (var_type_feedback != nullptr) {
var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kAny));
@@ -8674,9 +8903,6 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
// Load the instance type of {lhs}.
Node* lhs_instance_type = LoadMapInstanceType(lhs_map);
- // Load the instance type of {rhs}.
- Node* rhs_instance_type = LoadInstanceType(rhs);
-
// Check if {lhs} is a String.
Label if_lhsisstring(this), if_lhsisnotstring(this);
Branch(IsStringInstanceType(lhs_instance_type), &if_lhsisstring,
@@ -8684,6 +8910,9 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_lhsisstring);
{
+ // Load the instance type of {rhs}.
+ Node* rhs_instance_type = LoadInstanceType(rhs);
+
// Check if {rhs} is also a String.
Label if_rhsisstring(this, Label::kDeferred),
if_rhsisnotstring(this);
@@ -8709,7 +8938,45 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
}
BIND(&if_lhsisnotstring);
+
+ // Check if {lhs} is a BigInt.
+ Label if_lhsisbigint(this), if_lhsisnotbigint(this);
+ Branch(IsBigIntInstanceType(lhs_instance_type), &if_lhsisbigint,
+ &if_lhsisnotbigint);
+
+ BIND(&if_lhsisbigint);
+ {
+ // Load the instance type of {rhs}.
+ Node* rhs_instance_type = LoadInstanceType(rhs);
+
+ // Check if {rhs} is also a BigInt.
+ Label if_rhsisbigint(this, Label::kDeferred),
+ if_rhsisnotbigint(this);
+ Branch(IsBigIntInstanceType(rhs_instance_type), &if_rhsisbigint,
+ &if_rhsisnotbigint);
+
+ BIND(&if_rhsisbigint);
+ {
+ if (var_type_feedback != nullptr) {
+ CSA_ASSERT(
+ this,
+ WordEqual(var_type_feedback->value(),
+ SmiConstant(CompareOperationFeedback::kAny)));
+ }
+ result.Bind(CallRuntime(Runtime::kBigIntEqual,
+ NoContextConstant(), lhs, rhs));
+ Goto(&end);
+ }
+
+ BIND(&if_rhsisnotbigint);
+ Goto(&if_notequal);
+ }
+
+ BIND(&if_lhsisnotbigint);
if (var_type_feedback != nullptr) {
+ // Load the instance type of {rhs}.
+ Node* rhs_instance_type = LoadInstanceType(rhs);
+
Label if_lhsissymbol(this), if_lhsisreceiver(this);
GotoIf(IsJSReceiverInstanceType(lhs_instance_type),
&if_lhsisreceiver);
@@ -8805,88 +9072,107 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
// ECMA#sec-samevalue
// This algorithm differs from the Strict Equality Comparison Algorithm in its
// treatment of signed zeroes and NaNs.
-Node* CodeStubAssembler::SameValue(Node* lhs, Node* rhs) {
- VARIABLE(var_result, MachineRepresentation::kWord32);
- Label strict_equal(this), out(this);
+void CodeStubAssembler::BranchIfSameValue(Node* lhs, Node* rhs, Label* if_true,
+ Label* if_false) {
+ VARIABLE(var_lhs_value, MachineRepresentation::kFloat64);
+ VARIABLE(var_rhs_value, MachineRepresentation::kFloat64);
+ Label do_fcmp(this);
- Node* const int_false = Int32Constant(0);
- Node* const int_true = Int32Constant(1);
+ // Immediately jump to {if_true} if {lhs} == {rhs}, because - unlike
+ // StrictEqual - SameValue considers two NaNs to be equal.
+ GotoIf(WordEqual(lhs, rhs), if_true);
- Label if_equal(this), if_notequal(this);
- Branch(WordEqual(lhs, rhs), &if_equal, &if_notequal);
+ // Check if the {lhs} is a Smi.
+ Label if_lhsissmi(this), if_lhsisheapobject(this);
+ Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisheapobject);
- BIND(&if_equal);
+ BIND(&if_lhsissmi);
{
- // This covers the case when {lhs} == {rhs}. We can simply return true
- // because SameValue considers two NaNs to be equal.
-
- var_result.Bind(int_true);
- Goto(&out);
+ // Since {lhs} is a Smi, the comparison can only yield true
+ // iff the {rhs} is a HeapNumber with the same float64 value.
+ GotoIf(TaggedIsSmi(rhs), if_false);
+ GotoIfNot(IsHeapNumber(rhs), if_false);
+ var_lhs_value.Bind(SmiToFloat64(lhs));
+ var_rhs_value.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fcmp);
}
- BIND(&if_notequal);
+ BIND(&if_lhsisheapobject);
{
- // This covers the case when {lhs} != {rhs}. We only handle numbers here
- // and defer to StrictEqual for the rest.
-
- Node* const lhs_float = TryTaggedToFloat64(lhs, &strict_equal);
- Node* const rhs_float = TryTaggedToFloat64(rhs, &strict_equal);
-
- Label if_lhsisnan(this), if_lhsnotnan(this);
- BranchIfFloat64IsNaN(lhs_float, &if_lhsisnan, &if_lhsnotnan);
+ // Check if the {rhs} is a Smi.
+ Label if_rhsissmi(this), if_rhsisheapobject(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisheapobject);
- BIND(&if_lhsisnan);
+ BIND(&if_rhsissmi);
{
- // Return true iff {rhs} is NaN.
-
- Node* const result =
- SelectConstant(Float64Equal(rhs_float, rhs_float), int_false,
- int_true, MachineRepresentation::kWord32);
- var_result.Bind(result);
- Goto(&out);
+ // Since {rhs} is a Smi, the comparison can only yield true
+ // iff the {lhs} is a HeapNumber with the same float64 value.
+ GotoIfNot(IsHeapNumber(lhs), if_false);
+ var_lhs_value.Bind(LoadHeapNumberValue(lhs));
+ var_rhs_value.Bind(SmiToFloat64(rhs));
+ Goto(&do_fcmp);
}
- BIND(&if_lhsnotnan);
+ BIND(&if_rhsisheapobject);
{
- Label if_floatisequal(this), if_floatnotequal(this);
- Branch(Float64Equal(lhs_float, rhs_float), &if_floatisequal,
- &if_floatnotequal);
-
- BIND(&if_floatisequal);
+ // Now this can only yield true if either both {lhs} and {rhs}
+ // are HeapNumbers with the same value or both {lhs} and {rhs}
+ // are Strings with the same character sequence.
+ Label if_lhsisheapnumber(this), if_lhsisstring(this);
+ Node* const lhs_map = LoadMap(lhs);
+ GotoIf(IsHeapNumberMap(lhs_map), &if_lhsisheapnumber);
+ Node* const lhs_instance_type = LoadMapInstanceType(lhs_map);
+ Branch(IsStringInstanceType(lhs_instance_type), &if_lhsisstring,
+ if_false);
+
+ BIND(&if_lhsisheapnumber);
{
- // We still need to handle the case when {lhs} and {rhs} are -0.0 and
- // 0.0 (or vice versa). Compare the high word to
- // distinguish between the two.
-
- Node* const lhs_hi_word = Float64ExtractHighWord32(lhs_float);
- Node* const rhs_hi_word = Float64ExtractHighWord32(rhs_float);
-
- // If x is +0 and y is -0, return false.
- // If x is -0 and y is +0, return false.
-
- Node* const result = Word32Equal(lhs_hi_word, rhs_hi_word);
- var_result.Bind(result);
- Goto(&out);
+ GotoIfNot(IsHeapNumber(rhs), if_false);
+ var_lhs_value.Bind(LoadHeapNumberValue(lhs));
+ var_rhs_value.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fcmp);
}
- BIND(&if_floatnotequal);
+ BIND(&if_lhsisstring);
{
- var_result.Bind(int_false);
- Goto(&out);
+ // Now we can only yield true if {rhs} is also a String
+ // with the same sequence of characters.
+ GotoIfNot(IsString(rhs), if_false);
+ Node* const result =
+ CallBuiltin(Builtins::kStringEqual, NoContextConstant(), lhs, rhs);
+ Branch(IsTrue(result), if_true, if_false);
}
}
}
- BIND(&strict_equal);
+ BIND(&do_fcmp);
{
- Node* const is_equal = StrictEqual(lhs, rhs);
- Node* const result = WordEqual(is_equal, TrueConstant());
- var_result.Bind(result);
- Goto(&out);
- }
+ Node* const lhs_value = var_lhs_value.value();
+ Node* const rhs_value = var_rhs_value.value();
- BIND(&out);
- return var_result.value();
+ Label if_equal(this), if_notequal(this);
+ Branch(Float64Equal(lhs_value, rhs_value), &if_equal, &if_notequal);
+
+ BIND(&if_equal);
+ {
+ // We still need to handle the case when {lhs} and {rhs} are -0.0 and
+ // 0.0 (or vice versa). Compare the high word to
+ // distinguish between the two.
+ Node* const lhs_hi_word = Float64ExtractHighWord32(lhs_value);
+ Node* const rhs_hi_word = Float64ExtractHighWord32(rhs_value);
+
+ // If x is +0 and y is -0, return false.
+ // If x is -0 and y is +0, return false.
+ Branch(Word32Equal(lhs_hi_word, rhs_hi_word), if_true, if_false);
+ }
+
+ BIND(&if_notequal);
+ {
+ // Return true iff both {rhs} and {lhs} are NaN.
+ GotoIf(Float64Equal(lhs_value, lhs_value), if_false);
+ Branch(Float64Equal(rhs_value, rhs_value), if_false, if_true);
+ }
+ }
}
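
The rewrite reduces every SameValue case to pointer identity, a string comparison, or the float64 comparison at do_fcmp, which handles the two residual cases: equal floats may still be +0 vs -0 (distinguished via the high word, i.e. the sign bit), and unequal floats are still SameValue when both are NaN. Expressed on plain doubles:

    #include <cassert>
    #include <cmath>

    static bool SameValueDouble(double a, double b) {
      if (a == b) return std::signbit(a) == std::signbit(b);  // split +0 from -0
      return std::isnan(a) && std::isnan(b);                  // NaN equals NaN here
    }

    int main() {
      assert(!SameValueDouble(0.0, -0.0));
      assert(SameValueDouble(std::nan(""), std::nan("")));
      assert(SameValueDouble(1.5, 1.5));
    }
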
Node* CodeStubAssembler::HasProperty(Node* object, Node* key, Node* context,
@@ -9039,7 +9325,7 @@ Node* CodeStubAssembler::Typeof(Node* value) {
Label return_number(this, Label::kDeferred), if_oddball(this),
return_function(this), return_undefined(this), return_object(this),
- return_string(this), return_result(this);
+ return_string(this), return_bigint(this), return_result(this);
GotoIf(TaggedIsSmi(value), &return_number);
@@ -9066,6 +9352,8 @@ Node* CodeStubAssembler::Typeof(Node* value) {
GotoIf(IsStringInstanceType(instance_type), &return_string);
+ GotoIf(IsBigIntInstanceType(instance_type), &return_bigint);
+
CSA_ASSERT(this, Word32Equal(instance_type, Int32Constant(SYMBOL_TYPE)));
result_var.Bind(HeapConstant(isolate()->factory()->symbol_string()));
Goto(&return_result);
@@ -9107,6 +9395,12 @@ Node* CodeStubAssembler::Typeof(Node* value) {
Goto(&return_result);
}
+ BIND(&return_bigint);
+ {
+ result_var.Bind(HeapConstant(isolate()->factory()->bigint_string()));
+ Goto(&return_result);
+ }
+
BIND(&return_result);
return result_var.value();
}
@@ -9162,6 +9456,15 @@ Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable,
GotoIfNot(WordEqual(inst_of_handler, function_has_instance),
&if_otherhandler);
{
+ // TODO(6786): A direct call to a TFJ builtin breaks the lazy
+ // deserialization mechanism in two ways: first, we always pass in a
+ // callable containing the DeserializeLazy code object (assuming that
+ // FunctionPrototypeHasInstance is lazy). Second, a direct call (without
+ // going through CodeFactory::Call) to DeserializeLazy will not initialize
+ // new_target properly. For now we can avoid this by marking
+ // FunctionPrototypeHasInstance as eager, but this should be fixed at some
+ // point.
+ //
// Call to Function.prototype[@@hasInstance] directly.
Callable builtin(BUILTIN_CODE(isolate(), FunctionPrototypeHasInstance),
CallTrampolineDescriptor(isolate()));
@@ -9323,6 +9626,66 @@ Node* CodeStubAssembler::NumberDec(Node* value) {
return var_result.value();
}
+Node* CodeStubAssembler::NumberAdd(Node* a, Node* b) {
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ VARIABLE(var_fadd_value, MachineRepresentation::kFloat64);
+ Label float_add(this, Label::kDeferred), end(this);
+ GotoIf(TaggedIsNotSmi(a), &float_add);
+ GotoIf(TaggedIsNotSmi(b), &float_add);
+
+ // Try fast Smi addition first.
+ Node* pair =
+ IntPtrAddWithOverflow(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
+ Node* overflow = Projection(1, pair);
+
+ // Check if the Smi addition overflowed.
+ Label if_overflow(this), if_notoverflow(this);
+ GotoIf(overflow, &float_add);
+
+ var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
+ Goto(&end);
+
+ BIND(&float_add);
+ {
+ var_result.Bind(ChangeFloat64ToTagged(
+ Float64Add(ChangeNumberToFloat64(a), ChangeNumberToFloat64(b))));
+ Goto(&end);
+ }
+
+ BIND(&end);
+ return var_result.value();
+}
+
+Node* CodeStubAssembler::NumberSub(Node* a, Node* b) {
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ VARIABLE(var_fsub_value, MachineRepresentation::kFloat64);
+ Label float_sub(this, Label::kDeferred), end(this);
+ GotoIf(TaggedIsNotSmi(a), &float_sub);
+ GotoIf(TaggedIsNotSmi(b), &float_sub);
+
+ // Try fast Smi subtraction first.
+ Node* pair =
+ IntPtrSubWithOverflow(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
+ Node* overflow = Projection(1, pair);
+
+ // Check if the Smi subtraction overflowed.
+ Label if_overflow(this), if_notoverflow(this);
+ GotoIf(overflow, &float_sub);
+
+ var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
+ Goto(&end);
+
+ BIND(&float_sub);
+ {
+ var_result.Bind(ChangeFloat64ToTagged(
+ Float64Sub(ChangeNumberToFloat64(a), ChangeNumberToFloat64(b))));
+ Goto(&end);
+ }
+
+ BIND(&end);
+ return var_result.value();
+}
+
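
NumberAdd and NumberSub try the tagged-integer fast path first and fall back to float64 arithmetic on a non-Smi input or on overflow. The same structure with a checked integer add (using the GCC/Clang __builtin_add_overflow intrinsic), as a sketch:

    #include <cassert>
    #include <cstdint>

    // Toy NumberAdd: integer fast path with an overflow check, float fallback.
    static double NumberAdd(int32_t a, int32_t b) {
      int32_t sum;
      if (!__builtin_add_overflow(a, b, &sum)) {
        return sum;  // fast path: the result fits in an int32
      }
      return static_cast<double>(a) + static_cast<double>(b);  // slow path
    }

    int main() {
      assert(NumberAdd(1, 2) == 3.0);
      assert(NumberAdd(INT32_MAX, 1) == 2147483648.0);  // overflow -> float path
    }
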
void CodeStubAssembler::GotoIfNotNumber(Node* input, Label* is_not_number) {
Label is_number(this);
GotoIf(TaggedIsSmi(input), &is_number);
@@ -9391,8 +9754,7 @@ Node* CodeStubAssembler::CreateArrayIterator(Node* array, Node* array_map,
BIND(&if_isgeneric);
{
Label if_isfast(this), if_isslow(this);
- BranchIfFastJSArray(array, context, FastJSArrayAccessMode::INBOUNDS_READ,
- &if_isfast, &if_isslow);
+ BranchIfFastJSArray(array, context, &if_isfast, &if_isslow);
BIND(&if_isfast);
{
@@ -9426,8 +9788,7 @@ Node* CodeStubAssembler::CreateArrayIterator(Node* array, Node* array_map,
BIND(&if_isgeneric);
{
Label if_isfast(this), if_isslow(this);
- BranchIfFastJSArray(array, context, FastJSArrayAccessMode::INBOUNDS_READ,
- &if_isfast, &if_isslow);
+ BranchIfFastJSArray(array, context, &if_isfast, &if_isslow);
BIND(&if_isfast);
{
@@ -9443,12 +9804,7 @@ Node* CodeStubAssembler::CreateArrayIterator(Node* array, Node* array_map,
// its initial state (because the protector cell is only tracked for
      // the initial Array and Object prototypes). Check these conditions
// here, and take the slow path if any fail.
- Node* protector_cell = LoadRoot(Heap::kArrayProtectorRootIndex);
- DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
- GotoIfNot(WordEqual(LoadObjectField(protector_cell,
- PropertyCell::kValueOffset),
- SmiConstant(Isolate::kProtectorValid)),
- &if_isslow);
+ GotoIf(IsArrayProtectorCellInvalid(), &if_isslow);
Node* native_context = LoadNativeContext(context);
@@ -9773,9 +10129,6 @@ Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map,
shared_info);
StoreObjectFieldNoWriteBarrier(fun, JSFunction::kContextOffset, context);
StoreObjectFieldNoWriteBarrier(fun, JSFunction::kCodeOffset, code);
- StoreObjectFieldRoot(fun, JSFunction::kNextFunctionLinkOffset,
- Heap::kUndefinedValueRootIndex);
-
return fun;
}
@@ -9813,6 +10166,86 @@ Node* CodeStubAssembler::MarkerIsNotFrameType(Node* marker_or_function,
IntPtrConstant(StackFrame::TypeToMarker(frame_type)));
}
+void CodeStubAssembler::CheckPrototypeEnumCache(Node* receiver,
+ Node* receiver_map,
+ Label* if_fast,
+ Label* if_slow) {
+ VARIABLE(var_object, MachineRepresentation::kTagged, receiver);
+ VARIABLE(var_object_map, MachineRepresentation::kTagged, receiver_map);
+
+ Label loop(this, {&var_object, &var_object_map}), done_loop(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ // Check that there are no elements on the current {object}.
+ Label if_no_elements(this);
+ Node* object = var_object.value();
+ Node* object_map = var_object_map.value();
+
+ // The following relies on the elements only aliasing with JSProxy::target,
+  // which is a JavaScript value and hence cannot be confused with an elements
+ // backing store.
+ STATIC_ASSERT(JSObject::kElementsOffset == JSProxy::kTargetOffset);
+ Node* object_elements = LoadObjectField(object, JSObject::kElementsOffset);
+ GotoIf(IsEmptyFixedArray(object_elements), &if_no_elements);
+ GotoIf(IsEmptySlowElementDictionary(object_elements), &if_no_elements);
+
+ // It might still be an empty JSArray.
+ GotoIfNot(IsJSArrayMap(object_map), if_slow);
+ Node* object_length = LoadObjectField(object, JSArray::kLengthOffset);
+ Branch(WordEqual(object_length, SmiConstant(Smi::kZero)), &if_no_elements,
+ if_slow);
+
+    // Continue with the {object}'s prototype.
+ BIND(&if_no_elements);
+ object = LoadMapPrototype(object_map);
+ GotoIf(IsNull(object), if_fast);
+
+ // For all {object}s but the {receiver}, check that the cache is empty.
+ var_object.Bind(object);
+ object_map = LoadMap(object);
+ var_object_map.Bind(object_map);
+ Node* object_enum_length = LoadMapEnumLength(object_map);
+ Branch(WordEqual(object_enum_length, IntPtrConstant(0)), &loop, if_slow);
+ }
+}
+
+Node* CodeStubAssembler::CheckEnumCache(Node* receiver, Label* if_empty,
+ Label* if_runtime) {
+ Label if_fast(this), if_cache(this), if_no_cache(this, Label::kDeferred);
+ Node* receiver_map = LoadMap(receiver);
+
+ // Check if the enum length field of the {receiver} is properly initialized,
+ // indicating that there is an enum cache.
+ Node* receiver_enum_length = LoadMapEnumLength(receiver_map);
+ Branch(WordEqual(receiver_enum_length,
+ IntPtrConstant(kInvalidEnumCacheSentinel)),
+ &if_no_cache, &if_cache);
+
+ BIND(&if_no_cache);
+ {
+ // Avoid runtime-call for empty dictionary receivers.
+ GotoIfNot(IsDictionaryMap(receiver_map), if_runtime);
+ Node* properties = LoadSlowProperties(receiver);
+ Node* length = LoadFixedArrayElement(
+ properties, NameDictionary::kNumberOfElementsIndex);
+ GotoIfNot(WordEqual(length, SmiConstant(Smi::kZero)), if_runtime);
+ // Check that there are no elements on the {receiver} and its prototype
+ // chain. Given that we do not create an EnumCache for dict-mode objects,
+ // directly jump to {if_empty} if there are no elements and no properties
+ // on the {receiver}.
+ CheckPrototypeEnumCache(receiver, receiver_map, if_empty, if_runtime);
+ }
+
+ // Check that there are no elements on the fast {receiver} and its
+ // prototype chain.
+ BIND(&if_cache);
+ CheckPrototypeEnumCache(receiver, receiver_map, &if_fast, if_runtime);
+
+ BIND(&if_fast);
+ return receiver_map;
+}
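
CheckPrototypeEnumCache succeeds only when no object on the chain has elements and every link past the receiver has an empty enum cache, which lets for..in iterate using the receiver's cache alone. The shape of the walk, sketched over toy objects:

    #include <cassert>

    struct Obj {             // toy stand-in for a JS object
      bool has_elements;
      int enum_length;       // 0 means no own enumerable cache entries
      const Obj* prototype;  // nullptr terminates the chain
    };

    static bool PrototypeChainSupportsFastForIn(const Obj* receiver) {
      for (const Obj* o = receiver; o != nullptr; o = o->prototype) {
        if (o->has_elements) return false;  // indexed properties: bail out
        if (o != receiver && o->enum_length != 0) return false;  // cache in use
      }
      return true;
    }

    int main() {
      Obj proto{false, 0, nullptr};
      Obj receiver{false, 3, &proto};  // the receiver's own cache may be non-empty
      assert(PrototypeChainSupportsFastForIn(&receiver));
    }
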
+
void CodeStubAssembler::Print(const char* s) {
std::string formatted(s);
formatted += "\n";
diff --git a/deps/v8/src/code-stub-assembler.h b/deps/v8/src/code-stub-assembler.h
index 8379663297..a2d5e80015 100644
--- a/deps/v8/src/code-stub-assembler.h
+++ b/deps/v8/src/code-stub-assembler.h
@@ -31,6 +31,8 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(EmptyPropertyDictionary, empty_property_dictionary, \
EmptyPropertyDictionary) \
V(EmptyFixedArray, empty_fixed_array, EmptyFixedArray) \
+ V(EmptySlowElementDictionary, empty_slow_element_dictionary, \
+ EmptySlowElementDictionary) \
V(empty_string, empty_string, EmptyString) \
V(EmptyWeakCell, empty_weak_cell, EmptyWeakCell) \
V(FalseValue, false_value, False) \
@@ -176,6 +178,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* IntPtrOrSmiConstant(int value, ParameterMode mode);
bool IsIntPtrOrSmiConstantZero(Node* test);
+ bool TryGetIntPtrOrSmiConstantValue(Node* maybe_constant, int* value);
// Round the 32bits payload of the provided word up to the next power of two.
Node* IntPtrRoundUpToPowerOfTwo32(Node* value);
@@ -192,6 +195,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Float64T> Float64Round(SloppyTNode<Float64T> x);
TNode<Float64T> Float64RoundToEven(SloppyTNode<Float64T> x);
TNode<Float64T> Float64Trunc(SloppyTNode<Float64T> x);
+  // Select the maximum of the two provided Number values.
+ TNode<Object> NumberMax(SloppyTNode<Object> left, SloppyTNode<Object> right);
+ // Select the minimum of the two provided Number values.
+ TNode<Object> NumberMin(SloppyTNode<Object> left, SloppyTNode<Object> right);
// Tag a Word as a Smi value.
TNode<Smi> SmiTag(SloppyTNode<IntPtrT> value);
@@ -272,6 +279,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Smi | HeapNumber operations.
Node* NumberInc(Node* value);
Node* NumberDec(Node* value);
+ Node* NumberAdd(Node* a, Node* b);
+ Node* NumberSub(Node* a, Node* b);
void GotoIfNotNumber(Node* value, Label* is_not_number);
void GotoIfNumber(Node* value, Label* is_number);
@@ -362,6 +371,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<BoolT> WordIsWordAligned(SloppyTNode<WordT> word);
TNode<BoolT> WordIsPowerOfTwo(SloppyTNode<IntPtrT> value);
+ Node* IsNotTheHole(Node* value) { return Word32BinaryNot(IsTheHole(value)); }
+
#if DEBUG
void Bind(Label* label, AssemblerDebugInfo debug_info);
#else
@@ -392,10 +403,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void BranchIfJSReceiver(Node* object, Label* if_true, Label* if_false);
void BranchIfJSObject(Node* object, Label* if_true, Label* if_false);
- enum class FastJSArrayAccessMode { INBOUNDS_READ, ANY_ACCESS };
- void BranchIfFastJSArray(Node* object, Node* context,
- FastJSArrayAccessMode mode, Label* if_true,
+ void BranchIfFastJSArray(Node* object, Node* context, Label* if_true,
Label* if_false);
+ void BranchIfFastJSArrayForCopy(Node* object, Node* context, Label* if_true,
+ Label* if_false);
// Load value from current frame by given offset in bytes.
Node* LoadFromFrame(int offset, MachineType rep = MachineType::AnyTagged());
@@ -486,6 +497,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<IntPtrT> LoadMapConstructorFunctionIndex(SloppyTNode<Map> map);
// Load the constructor of a Map (equivalent to Map::GetConstructor()).
TNode<Object> LoadMapConstructor(SloppyTNode<Map> map);
+ // Load the EnumLength of a Map.
+ Node* LoadMapEnumLength(SloppyTNode<Map> map);
// This is only used on a newly allocated PropertyArray which
// doesn't have an existing hash.
@@ -684,6 +697,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* AllocateNameDictionaryWithCapacity(Node* capacity);
Node* CopyNameDictionary(Node* dictionary, Label* large_object_fallback);
+ Node* AllocateStruct(Node* map, AllocationFlags flags = kNone);
+ void InitializeStructBody(Node* object, Node* map, Node* size,
+ int start_offset = Struct::kHeaderSize);
Node* AllocateJSObjectFromMap(Node* map, Node* properties = nullptr,
Node* elements = nullptr,
AllocationFlags flags = kNone);
@@ -696,8 +712,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
int start_offset = JSObject::kHeaderSize);
// Allocate a JSArray without elements and initialize the header fields.
- Node* AllocateUninitializedJSArrayWithoutElements(ElementsKind kind,
- Node* array_map,
+ Node* AllocateUninitializedJSArrayWithoutElements(Node* array_map,
Node* length,
Node* allocation_site);
// Allocate and return a JSArray with initialized header fields and its
@@ -852,6 +867,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* ThrowIfNotInstanceType(Node* context, Node* value,
InstanceType instance_type,
char const* method_name);
+
+ void ThrowRangeError(Node* context, MessageTemplate::Template message,
+ Node* arg0 = nullptr, Node* arg1 = nullptr,
+ Node* arg2 = nullptr);
void ThrowTypeError(Node* context, MessageTemplate::Template message,
char const* arg0 = nullptr, char const* arg1 = nullptr);
void ThrowTypeError(Node* context, MessageTemplate::Template message,
@@ -928,10 +947,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* IsString(Node* object);
Node* IsSymbolInstanceType(Node* instance_type);
Node* IsSymbol(Node* object);
+ Node* IsBigIntInstanceType(Node* instance_type);
+ Node* IsBigInt(Node* object);
Node* IsUnseededNumberDictionary(Node* object);
Node* IsWeakCell(Node* object);
Node* IsUndetectableMap(Node* map);
Node* IsArrayProtectorCellInvalid();
+ Node* IsSpeciesProtectorCellInvalid();
+ Node* IsPrototypeInitialArrayPrototype(Node* context, Node* map);
// True iff |object| is a Smi or a HeapNumber.
Node* IsNumber(Node* object);
@@ -940,6 +963,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// within Smi range.
Node* IsNumberNormalized(Node* number);
Node* IsNumberPositive(Node* number);
+  // True iff {number} is a non-negative number and a valid array index in the
+  // range [0, 2^32-1).
+ Node* IsNumberArrayIndex(Node* number);
// ElementsKind helpers:
Node* IsFastElementsKind(Node* elements_kind);
@@ -1411,8 +1437,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* LoadFeedbackVector(Node* closure);
// Update the type feedback vector.
- void UpdateFeedback(Node* feedback, Node* feedback_vector, Node* slot_id,
- Node* function);
+ void UpdateFeedback(Node* feedback, Node* feedback_vector, Node* slot_id);
// Combine the new feedback with the existing_feedback.
void CombineFeedback(Variable* existing_feedback, Node* feedback);
@@ -1558,7 +1583,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* lhs, Node* rhs, Label* if_true,
Label* if_false);
- void GotoUnlessNumberLessThan(Node* lhs, Node* rhs, Label* if_false);
+ void GotoIfNumberGreaterThanOrEqual(Node* lhs, Node* rhs, Label* if_false);
Node* Equal(Node* lhs, Node* rhs, Node* context,
Variable* var_type_feedback = nullptr);
@@ -1569,9 +1594,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// ECMA#sec-samevalue
// Similar to StrictEqual except that NaNs are treated as equal and minus zero
// differs from positive zero.
- // Unlike Equal and StrictEqual, returns a value suitable for use in Branch
- // instructions, e.g. Branch(SameValue(...), &label).
- Node* SameValue(Node* lhs, Node* rhs);
+ void BranchIfSameValue(Node* lhs, Node* rhs, Label* if_true, Label* if_false);
enum HasPropertyLookupMode { kHasProperty, kForInHasProperty };
@@ -1612,6 +1635,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* MarkerIsNotFrameType(Node* marker_or_function,
StackFrame::Type frame_type);
+ // for..in helpers
+ void CheckPrototypeEnumCache(Node* receiver, Node* receiver_map,
+ Label* if_fast, Label* if_slow);
+ Node* CheckEnumCache(Node* receiver, Label* if_empty, Label* if_runtime);
+
// Support for printf-style debugging
void Print(const char* s);
void Print(const char* prefix, Node* tagged_value);
@@ -1671,8 +1699,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* top_address, Node* limit_address);
// Allocate and return a JSArray of given total size in bytes with header
// fields initialized.
- Node* AllocateUninitializedJSArray(ElementsKind kind, Node* array_map,
- Node* length, Node* allocation_site,
+ Node* AllocateUninitializedJSArray(Node* array_map, Node* length,
+ Node* allocation_site,
Node* size_in_bytes);
Node* SmiShiftBitsConstant();
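
The hunks above convert SameValue from a value-producing predicate into BranchIfSameValue, which jumps straight to caller-supplied labels. A minimal standalone C++ sketch of that shape, using toy helpers rather than the real CodeStubAssembler API (the SameValue semantics follow the comment in the hunk: NaNs compare equal, minus zero differs from positive zero):

    #include <cmath>
    #include <cstdio>
    #include <functional>

    // Value style: compute a boolean and let the caller branch on it.
    bool SameValue(double lhs, double rhs) {
      if (std::isnan(lhs) && std::isnan(rhs)) return true;  // NaNs compare equal
      if (lhs == 0.0 && rhs == 0.0)                         // distinguish +0 / -0
        return std::signbit(lhs) == std::signbit(rhs);
      return lhs == rhs;
    }

    // Branch style: the predicate drives control flow itself, the way the new
    // BranchIfSameValue does with CSA labels.
    void BranchIfSameValue(double lhs, double rhs,
                           const std::function<void()>& if_true,
                           const std::function<void()>& if_false) {
      SameValue(lhs, rhs) ? if_true() : if_false();
    }

    int main() {
      BranchIfSameValue(0.0, -0.0, [] { std::puts("same"); },
                        [] { std::puts("different"); });  // prints "different"
    }
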
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 5b57a6f01b..4721642d4a 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -28,11 +28,6 @@ namespace internal {
using compiler::CodeAssemblerState;
-RUNTIME_FUNCTION(UnexpectedStubMiss) {
- FATAL("Unexpected deopt of a stub");
- return Smi::kZero;
-}
-
CodeStubDescriptor::CodeStubDescriptor(CodeStub* stub)
: isolate_(stub->isolate()),
call_descriptor_(stub->GetCallInterfaceDescriptor()),
@@ -100,23 +95,6 @@ void CodeStub::RecordCodeGeneration(Handle<Code> code) {
}
-Code::Kind CodeStub::GetCodeKind() const {
- return Code::STUB;
-}
-
-
-Code::Flags CodeStub::GetCodeFlags() const {
- return Code::ComputeFlags(GetCodeKind(), GetExtraICState());
-}
-
-Handle<Code> CodeStub::GetCodeCopy(const FindAndReplacePattern& pattern) {
- Handle<Code> ic = GetCode();
- ic = isolate()->factory()->CopyCode(ic);
- ic->FindAndReplace(pattern);
- RecordCodeGeneration(ic);
- return ic;
-}
-
void CodeStub::DeleteStubFromCacheForTesting() {
Heap* heap = isolate_->heap();
Handle<UnseededNumberDictionary> dict(heap->code_stubs());
@@ -147,9 +125,8 @@ Handle<Code> PlatformCodeStub::GenerateCode() {
CodeDesc desc;
masm.GetCode(isolate(), &desc);
// Copy the generated code into a heap object.
- Code::Flags flags = Code::ComputeFlags(GetCodeKind(), GetExtraICState());
Handle<Code> new_object = factory->NewCode(
- desc, flags, masm.CodeObject(), NeedsImmovableCode());
+ desc, Code::STUB, masm.CodeObject(), NeedsImmovableCode());
return new_object;
}
@@ -157,10 +134,9 @@ Handle<Code> PlatformCodeStub::GenerateCode() {
Handle<Code> CodeStub::GetCode() {
Heap* heap = isolate()->heap();
Code* code;
- if (UseSpecialCache() ? FindCodeInSpecialCache(&code)
- : FindCodeInCache(&code)) {
- DCHECK(GetCodeKind() == code->kind());
- return Handle<Code>(code);
+ if (FindCodeInCache(&code)) {
+ DCHECK(code->is_stub());
+ return handle(code);
}
{
@@ -185,14 +161,10 @@ Handle<Code> CodeStub::GetCode() {
}
#endif
- if (UseSpecialCache()) {
- AddToSpecialCache(new_object);
- } else {
- // Update the dictionary and the root in Heap.
- Handle<UnseededNumberDictionary> dict = UnseededNumberDictionary::Set(
- handle(heap->code_stubs()), GetKey(), new_object);
- heap->SetRootCodeStubs(*dict);
- }
+ // Update the dictionary and the root in Heap.
+ Handle<UnseededNumberDictionary> dict = UnseededNumberDictionary::Set(
+ handle(heap->code_stubs()), GetKey(), new_object);
+ heap->SetRootCodeStubs(*dict);
code = *new_object;
}
@@ -202,6 +174,9 @@ Handle<Code> CodeStub::GetCode() {
return Handle<Code>(code, isolate());
}
+CodeStub::Major CodeStub::GetMajorKey(Code* code_stub) {
+ return MajorKeyFromKey(code_stub->stub_key());
+}
const char* CodeStub::MajorName(CodeStub::Major major_key) {
switch (major_key) {
@@ -266,8 +241,7 @@ void CodeStub::InitializeDescriptor(Isolate* isolate, uint32_t key,
void CodeStub::GetCodeDispatchCall(CodeStub* stub, void** value_out) {
Handle<Code>* code_out = reinterpret_cast<Handle<Code>*>(value_out);
- // Code stubs with special cache cannot be recreated from stub key.
- *code_out = stub->UseSpecialCache() ? Handle<Code>() : stub->GetCode();
+ *code_out = stub->GetCode();
}
@@ -321,8 +295,8 @@ Handle<Code> TurboFanCodeStub::GenerateCode() {
const char* name = CodeStub::MajorName(MajorKey());
Zone zone(isolate()->allocator(), ZONE_NAME);
CallInterfaceDescriptor descriptor(GetCallInterfaceDescriptor());
- compiler::CodeAssemblerState state(isolate(), &zone, descriptor,
- GetCodeFlags(), name);
+ compiler::CodeAssemblerState state(isolate(), &zone, descriptor, Code::STUB,
+ name);
GenerateAssembly(&state);
return compiler::CodeAssembler::GenerateCode(&state);
}
@@ -364,20 +338,6 @@ TF_STUB(ElementsTransitionAndStoreStub, CodeStubAssembler) {
}
}
-// TODO(ishell): move to builtins.
-TF_STUB(AllocateHeapNumberStub, CodeStubAssembler) {
- Node* result = AllocateHeapNumber();
- Return(result);
-}
-
-// TODO(ishell): move to builtins-handler-gen.
-TF_STUB(StringLengthStub, CodeStubAssembler) {
- Node* value = Parameter(Descriptor::kReceiver);
- Node* string = LoadJSValueValue(value);
- Node* result = LoadStringLength(string);
- Return(result);
-}
-
TF_STUB(TransitionElementsKindStub, CodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* object = Parameter(Descriptor::kObject);
@@ -517,13 +477,6 @@ void JSEntryStub::FinishCode(Handle<Code> code) {
}
-void AllocateHeapNumberStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- descriptor->Initialize(
- Runtime::FunctionForId(Runtime::kAllocateHeapNumber)->entry);
-}
-
-
// TODO(ishell): move to builtins.
TF_STUB(GetPropertyStub, CodeStubAssembler) {
Label call_runtime(this, Label::kDeferred), return_undefined(this), end(this);
@@ -791,5 +744,18 @@ ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate)
InternalArrayConstructorStub::InternalArrayConstructorStub(Isolate* isolate)
: PlatformCodeStub(isolate) {}
+CommonArrayConstructorStub::CommonArrayConstructorStub(
+ Isolate* isolate, ElementsKind kind,
+ AllocationSiteOverrideMode override_mode)
+ : TurboFanCodeStub(isolate) {
+ // It only makes sense to override local allocation site behavior
+ // if there is a difference between the global allocation site policy
+ // for an ElementsKind and the desired usage of the stub.
+ DCHECK(override_mode != DISABLE_ALLOCATION_SITES ||
+ AllocationSite::ShouldTrack(kind));
+ set_sub_minor_key(ElementsKindBits::encode(kind) |
+ AllocationSiteOverrideModeBits::encode(override_mode));
+}
+
} // namespace internal
} // namespace v8
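
With the special cache removed, CodeStub::GetCode is left with a single lookup path: consult the stub dictionary by key, and generate-and-record on a miss. A rough standalone analogue of that flow, assuming a toy cache keyed by a 32-bit stub key (names are illustrative, not V8's):

    #include <cstdint>
    #include <cstdio>
    #include <unordered_map>

    struct Code { uint32_t key; };

    class StubCache {
     public:
      const Code& GetCode(uint32_t key) {
        auto it = cache_.find(key);
        if (it != cache_.end()) return it->second;       // FindCodeInCache hit
        auto inserted = cache_.emplace(key, Code{key});  // generate + record
        return inserted.first->second;
      }
     private:
      std::unordered_map<uint32_t, Code> cache_;
    };

    int main() {
      StubCache cache;
      const Code& a = cache.GetCode(42);
      const Code& b = cache.GetCode(42);
      std::printf("%s\n", &a == &b ? "cached" : "recompiled");  // cached
    }
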
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index b1b2592618..6e23fb9a9d 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -9,7 +9,6 @@
#include "src/assembler.h"
#include "src/codegen.h"
#include "src/factory.h"
-#include "src/find-and-replace-pattern.h"
#include "src/globals.h"
#include "src/interface-descriptors.h"
#include "src/macro-assembler.h"
@@ -45,11 +44,9 @@ class Node;
V(SubString) \
V(NameDictionaryLookup) \
/* --- TurboFanCodeStubs --- */ \
- V(AllocateHeapNumber) \
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
V(ArrayNArgumentsConstructor) \
- V(StringLength) \
V(InternalArrayNoArgumentConstructor) \
V(InternalArraySingleArgumentConstructor) \
V(ElementsTransitionAndStore) \
@@ -143,9 +140,6 @@ class CodeStub : public ZoneObject {
// Retrieve the code for the stub. Generate the code if needed.
Handle<Code> GetCode();
- // Retrieve the code for the stub, make and return a copy of the code.
- Handle<Code> GetCodeCopy(const FindAndReplacePattern& pattern);
-
static Major MajorKeyFromKey(uint32_t key) {
return static_cast<Major>(MajorKeyBits::decode(key));
}
@@ -154,9 +148,7 @@ class CodeStub : public ZoneObject {
}
// Gets the major key from a code object that is a code stub or binary op IC.
- static Major GetMajorKey(Code* code_stub) {
- return MajorKeyFromKey(code_stub->stub_key());
- }
+ static Major GetMajorKey(Code* code_stub);
static uint32_t NoCacheKey() { return MajorKeyBits::encode(NoCache); }
@@ -196,13 +188,6 @@ class CodeStub : public ZoneObject {
virtual Major MajorKey() const = 0;
uint32_t MinorKey() const { return minor_key_; }
- // BinaryOpStub needs to override this.
- virtual Code::Kind GetCodeKind() const;
-
- virtual ExtraICState GetExtraICState() const { return kNoExtraICState; }
-
- Code::Flags GetCodeFlags() const;
-
friend std::ostream& operator<<(std::ostream& os, const CodeStub& s) {
s.PrintName(os);
return os;
@@ -252,19 +237,6 @@ class CodeStub : public ZoneObject {
// registering stub in the stub cache.
virtual void Activate(Code* code) { }
- // Add the code to a specialized cache, specific to an individual
- // stub type. Please note, this method must add the code object to a
- // roots object, otherwise we will remove the code during GC.
- virtual void AddToSpecialCache(Handle<Code> new_object) { }
-
- // Find code in a specialized cache, work is delegated to the specific stub.
- virtual bool FindCodeInSpecialCache(Code** code_out) {
- return false;
- }
-
- // If a stub uses a special cache override this.
- virtual bool UseSpecialCache() { return false; }
-
// We use this dispatch to statically instantiate the correct code stub for
// the given stub key and call the passed function with that code stub.
typedef void (*DispatchedCall)(CodeStub* stub, void** value_out);
@@ -489,24 +461,10 @@ class TurboFanCodeStub : public CodeStub {
namespace v8 {
namespace internal {
-class StringLengthStub : public TurboFanCodeStub {
- public:
- explicit StringLengthStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- Code::Kind GetCodeKind() const override { return Code::HANDLER; }
- ExtraICState GetExtraICState() const override { return Code::LOAD_IC; }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
- DEFINE_TURBOFAN_CODE_STUB(StringLength, TurboFanCodeStub);
-};
-
class StoreInterceptorStub : public TurboFanCodeStub {
public:
explicit StoreInterceptorStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
- Code::Kind GetCodeKind() const override { return Code::HANDLER; }
- ExtraICState GetExtraICState() const override { return Code::STORE_IC; }
-
DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
DEFINE_TURBOFAN_CODE_STUB(StoreInterceptor, TurboFanCodeStub);
};
@@ -547,9 +505,6 @@ class LoadIndexedInterceptorStub : public TurboFanCodeStub {
explicit LoadIndexedInterceptorStub(Isolate* isolate)
: TurboFanCodeStub(isolate) {}
- Code::Kind GetCodeKind() const override { return Code::HANDLER; }
- ExtraICState GetExtraICState() const override { return Code::KEYED_LOAD_IC; }
-
DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
DEFINE_TURBOFAN_CODE_STUB(LoadIndexedInterceptor, TurboFanCodeStub);
};
@@ -657,9 +612,6 @@ class KeyedLoadSloppyArgumentsStub : public TurboFanCodeStub {
explicit KeyedLoadSloppyArgumentsStub(Isolate* isolate)
: TurboFanCodeStub(isolate) {}
- Code::Kind GetCodeKind() const override { return Code::HANDLER; }
- ExtraICState GetExtraICState() const override { return Code::LOAD_IC; }
-
protected:
DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
DEFINE_TURBOFAN_CODE_STUB(KeyedLoadSloppyArguments, TurboFanCodeStub);
@@ -676,9 +628,6 @@ class KeyedStoreSloppyArgumentsStub : public TurboFanCodeStub {
minor_key_ = CommonStoreModeBits::encode(mode);
}
- Code::Kind GetCodeKind() const override { return Code::HANDLER; }
- ExtraICState GetExtraICState() const override { return Code::STORE_IC; }
-
protected:
DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
DEFINE_TURBOFAN_CODE_STUB(KeyedStoreSloppyArguments, TurboFanCodeStub);
@@ -765,7 +714,7 @@ class CEntryStub : public PlatformCodeStub {
minor_key_ = SaveDoublesBits::encode(save_doubles == kSaveFPRegs) |
FrameTypeBits::encode(builtin_exit_frame) |
ArgvMode::encode(argv_mode == kArgvInRegister);
- DCHECK(result_size == 1 || result_size == 2 || result_size == 3);
+ DCHECK(result_size == 1 || result_size == 2);
minor_key_ = ResultSizeBits::update(minor_key_, result_size);
}
@@ -895,8 +844,6 @@ class ScriptContextFieldStub : public TurboFanCodeStub {
SlotIndexBits::encode(lookup_result->slot_index);
}
- Code::Kind GetCodeKind() const override { return Code::HANDLER; }
-
int context_index() const { return ContextIndexBits::decode(minor_key_); }
int slot_index() const { return SlotIndexBits::decode(minor_key_); }
@@ -923,8 +870,6 @@ class LoadScriptContextFieldStub : public ScriptContextFieldStub {
Isolate* isolate, const ScriptContextTable::LookupResult* lookup_result)
: ScriptContextFieldStub(isolate, lookup_result) {}
- ExtraICState GetExtraICState() const override { return Code::LOAD_IC; }
-
private:
DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
DEFINE_TURBOFAN_CODE_STUB(LoadScriptContextField, ScriptContextFieldStub);
@@ -937,8 +882,6 @@ class StoreScriptContextFieldStub : public ScriptContextFieldStub {
Isolate* isolate, const ScriptContextTable::LookupResult* lookup_result)
: ScriptContextFieldStub(isolate, lookup_result) {}
- ExtraICState GetExtraICState() const override { return Code::STORE_IC; }
-
private:
DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
DEFINE_TURBOFAN_CODE_STUB(StoreScriptContextField, ScriptContextFieldStub);
@@ -966,9 +909,6 @@ class StoreFastElementStub : public TurboFanCodeStub {
return CommonStoreModeBits::decode(minor_key_);
}
- Code::Kind GetCodeKind() const override { return Code::HANDLER; }
- ExtraICState GetExtraICState() const override { return Code::KEYED_STORE_IC; }
-
private:
class ElementsKindBits
: public BitField<ElementsKind, CommonStoreModeBits::kNext, 8> {};
@@ -979,30 +919,10 @@ class StoreFastElementStub : public TurboFanCodeStub {
};
-class AllocateHeapNumberStub : public TurboFanCodeStub {
- public:
- explicit AllocateHeapNumberStub(Isolate* isolate)
- : TurboFanCodeStub(isolate) {}
-
- void InitializeDescriptor(CodeStubDescriptor* descriptor) override;
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateHeapNumber);
- DEFINE_TURBOFAN_CODE_STUB(AllocateHeapNumber, TurboFanCodeStub);
-};
-
class CommonArrayConstructorStub : public TurboFanCodeStub {
protected:
CommonArrayConstructorStub(Isolate* isolate, ElementsKind kind,
- AllocationSiteOverrideMode override_mode)
- : TurboFanCodeStub(isolate) {
- // It only makes sense to override local allocation site behavior
- // if there is a difference between the global allocation site policy
- // for an ElementsKind and the desired usage of the stub.
- DCHECK(override_mode != DISABLE_ALLOCATION_SITES ||
- AllocationSite::ShouldTrack(kind));
- set_sub_minor_key(ElementsKindBits::encode(kind) |
- AllocationSiteOverrideModeBits::encode(override_mode));
- }
+ AllocationSiteOverrideMode override_mode);
void set_sub_minor_key(uint32_t key) { minor_key_ = key; }
@@ -1118,9 +1038,6 @@ class StoreSlowElementStub : public TurboFanCodeStub {
minor_key_ = CommonStoreModeBits::encode(mode);
}
- Code::Kind GetCodeKind() const override { return Code::HANDLER; }
- ExtraICState GetExtraICState() const override { return Code::KEYED_STORE_IC; }
-
private:
DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
DEFINE_TURBOFAN_CODE_STUB(StoreSlowElement, TurboFanCodeStub);
@@ -1144,9 +1061,6 @@ class ElementsTransitionAndStoreStub : public TurboFanCodeStub {
return CommonStoreModeBits::decode(minor_key_);
}
- Code::Kind GetCodeKind() const override { return Code::HANDLER; }
- ExtraICState GetExtraICState() const override { return Code::KEYED_STORE_IC; }
-
private:
class FromBits
: public BitField<ElementsKind, CommonStoreModeBits::kNext, 8> {};
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index 4c9e0f2967..c313a1139a 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -10,14 +10,12 @@
#include <memory>
-#include "src/ast/prettyprinter.h"
#include "src/bootstrapper.h"
#include "src/compilation-info.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/eh-frame.h"
#include "src/objects-inl.h"
-#include "src/parsing/parse-info.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -69,45 +67,6 @@ UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction)
#undef UNARY_MATH_FUNCTION
-void CodeGenerator::MakeCodePrologue(ParseInfo* parse_info,
- CompilationInfo* info, const char* kind) {
- bool print_ast = false;
- const char* ftype;
-
- if (info->isolate()->bootstrapper()->IsActive()) {
- print_ast = FLAG_print_builtin_ast;
- ftype = "builtin";
- } else {
- print_ast = FLAG_print_ast;
- ftype = "user-defined";
- }
-
- if (!FLAG_trace_codegen && !print_ast) return;
-
- // Requires internalizing the AST, so make sure we are on the main thread and
- // allow handle dereference and allocations.
- // TODO(rmcilroy): Make ast-printer print ast raw strings instead of
- // internalized strings to avoid internalizing here.
- DCHECK(ThreadId::Current().Equals(info->isolate()->thread_id()));
- AllowHandleDereference allow_deref;
- AllowHandleAllocation allow_handles;
- AllowHeapAllocation allow_gc;
- parse_info->ast_value_factory()->Internalize(info->isolate());
-
- if (FLAG_trace_codegen || print_ast) {
- std::unique_ptr<char[]> name = info->GetDebugName();
- PrintF("[generating %s code for %s function: %s]\n", kind, ftype,
- name.get());
- }
-
-#ifdef DEBUG
- if (!info->IsStub() && print_ast) {
- PrintF("--- AST ---\n%s\n",
- AstPrinter(info->isolate()).PrintProgram(info->literal()));
- }
-#endif // DEBUG
-}
-
Handle<Code> CodeGenerator::MakeCodeEpilogue(TurboAssembler* tasm,
EhFrameWriter* eh_frame_writer,
CompilationInfo* info,
@@ -116,12 +75,11 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(TurboAssembler* tasm,
// Allocate and install the code.
CodeDesc desc;
- Code::Flags flags = info->code_flags();
tasm->GetCode(isolate, &desc);
if (eh_frame_writer) eh_frame_writer->GetEhFrame(&desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, flags, self_reference, false, info->prologue_offset());
+ Handle<Code> code = isolate->factory()->NewCode(desc, info->code_kind(),
+ self_reference, false);
isolate->counters()->total_compiled_code_size()->Increment(
code->instruction_size());
return code;
@@ -240,12 +198,12 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
bool print_source = code->kind() == Code::OPTIMIZED_FUNCTION;
if (print_source) {
Handle<SharedFunctionInfo> shared = info->shared_info();
- Handle<Script> script = info->script();
- if (!script->IsUndefined(isolate) &&
- !script->source()->IsUndefined(isolate)) {
+ if (shared->script()->IsScript() &&
+ !Script::cast(shared->script())->source()->IsUndefined(isolate)) {
os << "--- Raw source ---\n";
- StringCharacterStream stream(String::cast(script->source()),
- shared->start_position());
+ StringCharacterStream stream(
+ String::cast(Script::cast(shared->script())->source()),
+ shared->start_position());
// fun->end_position() points to the last character in the stream. We
// need to compensate by adding one to calculate the length.
int source_len = shared->end_position() - shared->start_position() + 1;
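
PrintCode now reaches the raw source through the SharedFunctionInfo's script rather than a script handle stored on CompilationInfo, guarding on shared->script()->IsScript() first. A simplified sketch of that lookup with the V8 types reduced to plain structs (all names here are stand-ins):

    #include <cstdio>
    #include <string>

    struct Script { std::string source; };
    struct SharedFunctionInfo {
      const Script* script;  // may be null when there is no script
      int start_position;
      int end_position;
    };

    void PrintRawSource(const SharedFunctionInfo& shared) {
      if (shared.script == nullptr) return;  // the IsScript() guard, roughly
      // end_position points at the last character, hence the +1.
      int len = shared.end_position - shared.start_position + 1;
      std::printf("--- Raw source ---\n%s\n",
                  shared.script->source.substr(shared.start_position, len).c_str());
    }

    int main() {
      Script script{"function f() {}"};
      SharedFunctionInfo shared{&script, 0, 14};
      PrintRawSource(shared);
    }
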
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 2d203cfef5..712d283c6b 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -10,10 +10,9 @@
#include "src/runtime/runtime.h"
// Include the declaration of the architecture defined class CodeGenerator.
-// The contract to the shared code is that the the CodeGenerator is a subclass
+// The contract to the shared code is that the CodeGenerator is a subclass
// of Visitor and that the following methods are available publicly:
// MakeCode
-// MakeCodePrologue
// MakeCodeEpilogue
// masm
// frame
@@ -68,14 +67,9 @@ namespace internal {
class CompilationInfo;
class EhFrameWriter;
-class ParseInfo;
class CodeGenerator {
public:
- // Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(ParseInfo* parse_info, CompilationInfo* info,
- const char* kind);
-
// Allocate and install the code.
static Handle<Code> MakeCodeEpilogue(TurboAssembler* tasm,
EhFrameWriter* unwinding,
diff --git a/deps/v8/src/collector.h b/deps/v8/src/collector.h
index abb2fbb25b..307e8b886f 100644
--- a/deps/v8/src/collector.h
+++ b/deps/v8/src/collector.h
@@ -5,8 +5,9 @@
#ifndef V8_COLLECTOR_H_
#define V8_COLLECTOR_H_
+#include <vector>
+
#include "src/checks.h"
-#include "src/list-inl.h"
#include "src/vector.h"
namespace v8 {
@@ -32,8 +33,8 @@ class Collector {
virtual ~Collector() {
// Free backing store (in reverse allocation order).
current_chunk_.Dispose();
- for (int i = chunks_.length() - 1; i >= 0; i--) {
- chunks_.at(i).Dispose();
+ for (auto rit = chunks_.rbegin(); rit != chunks_.rend(); ++rit) {
+ rit->Dispose();
}
}
@@ -86,8 +87,7 @@ class Collector {
void WriteTo(Vector<T> destination) {
DCHECK(size_ <= destination.length());
int position = 0;
- for (int i = 0; i < chunks_.length(); i++) {
- Vector<T> chunk = chunks_.at(i);
+ for (const Vector<T>& chunk : chunks_) {
for (int j = 0; j < chunk.length(); j++) {
destination[position] = chunk[j];
position++;
@@ -111,10 +111,10 @@ class Collector {
// Resets the collector to be empty.
virtual void Reset() {
- for (int i = chunks_.length() - 1; i >= 0; i--) {
- chunks_.at(i).Dispose();
+ for (auto rit = chunks_.rbegin(); rit != chunks_.rend(); ++rit) {
+ rit->Dispose();
}
- chunks_.Rewind(0);
+ chunks_.clear();
index_ = 0;
size_ = 0;
}
@@ -124,7 +124,7 @@ class Collector {
protected:
static const int kMinCapacity = 16;
- List<Vector<T> > chunks_;
+ std::vector<Vector<T>> chunks_;
Vector<T> current_chunk_; // Block of memory currently being written into.
int index_; // Current index in current chunk.
int size_; // Total number of elements in collector.
@@ -159,7 +159,7 @@ class Collector {
virtual void NewChunk(int new_capacity) {
Vector<T> new_chunk = Vector<T>::New(new_capacity);
if (index_ > 0) {
- chunks_.Add(current_chunk_.SubVector(0, index_));
+ chunks_.push_back(current_chunk_.SubVector(0, index_));
} else {
current_chunk_.Dispose();
}
@@ -231,7 +231,8 @@ class SequenceCollector : public Collector<T, growth_factor, max_growth> {
new_chunk[i] = this->current_chunk_[sequence_start_ + i];
}
if (sequence_start_ > 0) {
- this->chunks_.Add(this->current_chunk_.SubVector(0, sequence_start_));
+ this->chunks_.push_back(
+ this->current_chunk_.SubVector(0, sequence_start_));
} else {
this->current_chunk_.Dispose();
}
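
The Collector changes above swap List<Vector<T>> for std::vector<Vector<T>>: reverse-order disposal becomes a reverse-iterator loop and Rewind(0) becomes clear(). A self-contained sketch of the same pattern (Chunk is a stand-in for Vector<T>):

    #include <cstdio>
    #include <vector>

    struct Chunk {
      int id;
      void Dispose() { std::printf("dispose %d\n", id); }
    };

    int main() {
      std::vector<Chunk> chunks{{0}, {1}, {2}};
      // Free backing store in reverse allocation order, as the destructor does.
      for (auto rit = chunks.rbegin(); rit != chunks.rend(); ++rit) {
        rit->Dispose();  // prints 2, 1, 0
      }
      chunks.clear();  // replaces List::Rewind(0)
    }
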
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index 5183008df8..3e1bd04664 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -95,14 +95,15 @@ CompilationCacheScript::CompilationCacheScript(Isolate* isolate)
// script originates from the same place. This is to avoid issues
// when reporting errors, etc.
bool CompilationCacheScript::HasOrigin(Handle<SharedFunctionInfo> function_info,
- Handle<Object> name, int line_offset,
- int column_offset,
+ MaybeHandle<Object> maybe_name,
+ int line_offset, int column_offset,
ScriptOriginOptions resource_options) {
Handle<Script> script =
Handle<Script>(Script::cast(function_info->script()), isolate());
// If the script name isn't set, the boilerplate script should have
// an undefined name to have the same origin.
- if (name.is_null()) {
+ Handle<Object> name;
+ if (!maybe_name.ToHandle(&name)) {
return script->name()->IsUndefined(isolate());
}
// Do the fast bailout checks first.
@@ -123,7 +124,7 @@ bool CompilationCacheScript::HasOrigin(Handle<SharedFunctionInfo> function_info,
// will be cached, but subsequent code from different source / line
// won't.
InfoVectorPair CompilationCacheScript::Lookup(
- Handle<String> source, Handle<Object> name, int line_offset,
+ Handle<String> source, MaybeHandle<Object> name, int line_offset,
int column_offset, ScriptOriginOptions resource_options,
Handle<Context> context, LanguageMode language_mode) {
InfoVectorPair result;
@@ -263,7 +264,7 @@ void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
}
InfoVectorPair CompilationCache::LookupScript(
- Handle<String> source, Handle<Object> name, int line_offset,
+ Handle<String> source, MaybeHandle<Object> name, int line_offset,
int column_offset, ScriptOriginOptions resource_options,
Handle<Context> context, LanguageMode language_mode) {
InfoVectorPair empty_result;
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h
index 907faf38b3..50c3dea59e 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/compilation-cache.h
@@ -79,7 +79,7 @@ class CompilationCacheScript : public CompilationSubCache {
public:
explicit CompilationCacheScript(Isolate* isolate);
- InfoVectorPair Lookup(Handle<String> source, Handle<Object> name,
+ InfoVectorPair Lookup(Handle<String> source, MaybeHandle<Object> name,
int line_offset, int column_offset,
ScriptOriginOptions resource_options,
Handle<Context> context, LanguageMode language_mode);
@@ -89,8 +89,8 @@ class CompilationCacheScript : public CompilationSubCache {
Handle<Cell> literals);
private:
- bool HasOrigin(Handle<SharedFunctionInfo> function_info, Handle<Object> name,
- int line_offset, int column_offset,
+ bool HasOrigin(Handle<SharedFunctionInfo> function_info,
+ MaybeHandle<Object> name, int line_offset, int column_offset,
ScriptOriginOptions resource_options);
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheScript);
@@ -152,7 +152,7 @@ class CompilationCache {
// Finds the script shared function info for a source
// string. Returns an empty handle if the cache doesn't contain a
// script for the given source string with the right origin.
- InfoVectorPair LookupScript(Handle<String> source, Handle<Object> name,
+ InfoVectorPair LookupScript(Handle<String> source, MaybeHandle<Object> name,
int line_offset, int column_offset,
ScriptOriginOptions resource_options,
Handle<Context> context,
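
Switching the script name from Handle<Object> to MaybeHandle<Object> moves the "no name" case into the type, so callers go through ToHandle() instead of testing is_null(). A rough std::optional analogue of the HasOrigin pattern above (types and names are stand-ins, not V8's handle machinery):

    #include <cstdio>
    #include <optional>
    #include <string>

    bool HasOrigin(const std::optional<std::string>& maybe_name,
                   const std::string& script_name) {
      std::string name;
      if (!maybe_name.has_value()) {
        // No name supplied: origins match only if the script is nameless too,
        // mirroring the undefined-name check in the hunk.
        return script_name.empty();
      }
      name = *maybe_name;
      return name == script_name;
    }

    int main() {
      std::printf("%d\n", HasOrigin(std::nullopt, ""));             // 1
      std::printf("%d\n", HasOrigin(std::string("a.js"), "a.js"));  // 1
    }
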
diff --git a/deps/v8/src/compilation-info.cc b/deps/v8/src/compilation-info.cc
index f4566d29bd..cf3ca63642 100644
--- a/deps/v8/src/compilation-info.cc
+++ b/deps/v8/src/compilation-info.cc
@@ -16,11 +16,13 @@
namespace v8 {
namespace internal {
+// TODO(mvstanton): the Code::OPTIMIZED_FUNCTION constant below is
+// bogus, it's just that I've eliminated Code::FUNCTION and there isn't
+// a "better" value to put in this place.
CompilationInfo::CompilationInfo(Zone* zone, Isolate* isolate,
ParseInfo* parse_info,
FunctionLiteral* literal)
- : CompilationInfo(parse_info->script(), {},
- Code::ComputeFlags(Code::FUNCTION), BASE, isolate, zone) {
+ : CompilationInfo({}, Code::OPTIMIZED_FUNCTION, BASE, isolate, zone) {
// NOTE: The parse_info passed here represents the global information gathered
// during parsing, but does not represent specific details of the actual
// function literal being compiled for this CompilationInfo. As such,
@@ -36,11 +38,9 @@ CompilationInfo::CompilationInfo(Zone* zone, Isolate* isolate,
}
CompilationInfo::CompilationInfo(Zone* zone, Isolate* isolate,
- Handle<Script> script,
Handle<SharedFunctionInfo> shared,
Handle<JSFunction> closure)
- : CompilationInfo(script, {}, Code::ComputeFlags(Code::OPTIMIZED_FUNCTION),
- OPTIMIZE, isolate, zone) {
+ : CompilationInfo({}, Code::OPTIMIZED_FUNCTION, OPTIMIZE, isolate, zone) {
shared_info_ = shared;
closure_ = closure;
optimization_id_ = isolate->NextOptimizationId();
@@ -58,26 +58,22 @@ CompilationInfo::CompilationInfo(Zone* zone, Isolate* isolate,
CompilationInfo::CompilationInfo(Vector<const char> debug_name,
Isolate* isolate, Zone* zone,
- Code::Flags code_flags)
- : CompilationInfo(Handle<Script>::null(), debug_name, code_flags, STUB,
- isolate, zone) {}
+ Code::Kind code_kind)
+ : CompilationInfo(debug_name, code_kind, STUB, isolate, zone) {}
-CompilationInfo::CompilationInfo(Handle<Script> script,
- Vector<const char> debug_name,
- Code::Flags code_flags, Mode mode,
+CompilationInfo::CompilationInfo(Vector<const char> debug_name,
+ Code::Kind code_kind, Mode mode,
Isolate* isolate, Zone* zone)
: isolate_(isolate),
- script_(script),
literal_(nullptr),
flags_(0),
- code_flags_(code_flags),
+ code_kind_(code_kind),
mode_(mode),
osr_offset_(BailoutId::None()),
zone_(zone),
deferred_handles_(nullptr),
dependencies_(isolate, zone),
bailout_reason_(kNoReason),
- prologue_offset_(Code::kPrologueOffsetNotSet),
parameter_count_(0),
optimization_id_(-1),
osr_expr_stack_height_(-1),
@@ -105,12 +101,6 @@ int CompilationInfo::num_parameters_including_this() const {
bool CompilationInfo::is_this_defined() const { return !IsStub(); }
-// Primitive functions are unlikely to be picked up by the stack-walking
-// profiler, so they trigger their own optimization when they're called
-// for the SharedFunctionInfo::kCallsUntilPrimitiveOptimization-th time.
-// TODO(6409) Remove when Full-Codegen dies.
-bool CompilationInfo::ShouldSelfOptimize() { return false; }
-
void CompilationInfo::set_deferred_handles(
std::shared_ptr<DeferredHandles> deferred_handles) {
DCHECK(deferred_handles_.get() == nullptr);
@@ -123,9 +113,6 @@ void CompilationInfo::set_deferred_handles(DeferredHandles* deferred_handles) {
}
void CompilationInfo::ReopenHandlesInNewHandleScope() {
- if (!script_.is_null()) {
- script_ = Handle<Script>(*script_);
- }
if (!shared_info_.is_null()) {
shared_info_ = Handle<SharedFunctionInfo>(*shared_info_);
}
@@ -155,14 +142,10 @@ std::unique_ptr<char[]> CompilationInfo::GetDebugName() const {
}
StackFrame::Type CompilationInfo::GetOutputStackFrameType() const {
- switch (output_code_kind()) {
+ switch (code_kind()) {
case Code::STUB:
case Code::BYTECODE_HANDLER:
- case Code::HANDLER:
case Code::BUILTIN:
-#define CASE_KIND(kind) case Code::kind:
- IC_KIND_LIST(CASE_KIND)
-#undef CASE_KIND
return StackFrame::STUB;
case Code::WASM_FUNCTION:
return StackFrame::WASM_COMPILED;
@@ -216,9 +199,5 @@ int CompilationInfo::AddInlinedFunction(
return id;
}
-Code::Kind CompilationInfo::output_code_kind() const {
- return Code::ExtractKindFromFlags(code_flags_);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compilation-info.h b/deps/v8/src/compilation-info.h
index 815b922bf9..ab4fda5223 100644
--- a/deps/v8/src/compilation-info.h
+++ b/deps/v8/src/compilation-info.h
@@ -53,16 +53,14 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
CompilationInfo(Zone* zone, Isolate* isolate, ParseInfo* parse_info,
FunctionLiteral* literal);
// Construct a compilation info for optimized compilation.
- CompilationInfo(Zone* zone, Isolate* isolate, Handle<Script> script,
+ CompilationInfo(Zone* zone, Isolate* isolate,
Handle<SharedFunctionInfo> shared,
Handle<JSFunction> closure);
// Construct a compilation info for stub compilation (or testing).
CompilationInfo(Vector<const char> debug_name, Isolate* isolate, Zone* zone,
- Code::Flags code_flags);
+ Code::Kind code_kind);
~CompilationInfo();
- Handle<Script> script() const { return script_; }
-
FunctionLiteral* literal() const { return literal_; }
void set_literal(FunctionLiteral* literal) {
DCHECK_NOT_NULL(literal);
@@ -87,7 +85,7 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
bool has_shared_info() const { return !shared_info().is_null(); }
Handle<JSFunction> closure() const { return closure_; }
Handle<Code> code() const { return code_; }
- Code::Flags code_flags() const { return code_flags_; }
+ Code::Kind code_kind() const { return code_kind_; }
BailoutId osr_offset() const { return osr_offset_; }
JavaScriptFrame* osr_frame() const { return osr_frame_; }
int num_parameters() const;
@@ -173,19 +171,13 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
// Accessors for the different compilation modes.
bool IsOptimizing() const { return mode_ == OPTIMIZE; }
bool IsStub() const { return mode_ == STUB; }
- bool IsWasm() const { return output_code_kind() == Code::WASM_FUNCTION; }
+ bool IsWasm() const { return code_kind() == Code::WASM_FUNCTION; }
void SetOptimizingForOsr(BailoutId osr_offset, JavaScriptFrame* osr_frame) {
DCHECK(IsOptimizing());
osr_offset_ = osr_offset;
osr_frame_ = osr_frame;
}
- // Deoptimization support.
- bool ShouldEnsureSpaceForLazyDeopt() { return !IsStub(); }
-
- // Determines whether or not to insert a self-optimization header.
- bool ShouldSelfOptimize();
-
void set_deferred_handles(std::shared_ptr<DeferredHandles> deferred_handles);
void set_deferred_handles(DeferredHandles* deferred_handles);
std::shared_ptr<DeferredHandles> deferred_handles() {
@@ -208,16 +200,6 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
BailoutReason bailout_reason() const { return bailout_reason_; }
- int prologue_offset() const {
- DCHECK_NE(Code::kPrologueOffsetNotSet, prologue_offset_);
- return prologue_offset_;
- }
-
- void set_prologue_offset(int prologue_offset) {
- DCHECK_EQ(Code::kPrologueOffsetNotSet, prologue_offset_);
- prologue_offset_ = prologue_offset;
- }
-
CompilationDependencies* dependencies() { return &dependencies_; }
int optimization_id() const {
@@ -264,8 +246,6 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
std::unique_ptr<char[]> GetDebugName() const;
- Code::Kind output_code_kind() const;
-
StackFrame::Type GetOutputStackFrameType() const;
int GetDeclareGlobalsFlags() const;
@@ -284,9 +264,8 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
// OPTIMIZE is optimized code generated by the Hydrogen-based backend.
enum Mode { BASE, OPTIMIZE, STUB };
- CompilationInfo(Handle<Script> script, Vector<const char> debug_name,
- Code::Flags code_flags, Mode mode, Isolate* isolate,
- Zone* zone);
+ CompilationInfo(Vector<const char> debug_name, Code::Kind code_kind,
+ Mode mode, Isolate* isolate, Zone* zone);
void SetMode(Mode mode) { mode_ = mode; }
@@ -299,13 +278,12 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
bool GetFlag(Flag flag) const { return (flags_ & flag) != 0; }
Isolate* isolate_;
- Handle<Script> script_;
FunctionLiteral* literal_;
SourceRangeMap* source_range_map_; // Used when block coverage is enabled.
unsigned flags_;
- Code::Flags code_flags_;
+ Code::Kind code_kind_;
Handle<SharedFunctionInfo> shared_info_;
@@ -337,8 +315,6 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
BailoutReason bailout_reason_;
- int prologue_offset_;
-
InlinedFunctionList inlined_functions_;
// Number of parameters used for compilation of stubs that require arguments.
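
CompilationInfo now records a plain Code::Kind instead of packed Code::Flags, so consumers such as GetOutputStackFrameType switch on the kind directly rather than decoding it. A compilable sketch of that simplification, with both enums abbreviated to a few illustrative values:

    #include <cstdio>

    enum class CodeKind { STUB, BYTECODE_HANDLER, BUILTIN, OPTIMIZED_FUNCTION };
    enum class FrameType { STUB, JAVA_SCRIPT };

    // The kind is stored directly, so no ExtractKindFromFlags step remains.
    FrameType GetOutputStackFrameType(CodeKind kind) {
      switch (kind) {
        case CodeKind::STUB:
        case CodeKind::BYTECODE_HANDLER:
        case CodeKind::BUILTIN:
          return FrameType::STUB;
        default:
          return FrameType::JAVA_SCRIPT;
      }
    }

    int main() {
      std::printf("%d\n",
                  static_cast<int>(GetOutputStackFrameType(CodeKind::BUILTIN)));
    }
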
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index 0e948c01f2..2de39c0ac4 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -20,12 +20,12 @@ namespace {
void DisposeCompilationJob(CompilationJob* job, bool restore_function_code) {
if (restore_function_code) {
Handle<JSFunction> function = job->compilation_info()->closure();
- function->ReplaceCode(function->shared()->code());
+ function->set_code(function->shared()->code());
if (function->IsInOptimizationQueue()) {
function->ClearOptimizationMarker();
}
- // TODO(mvstanton): We can't call ensureliterals here due to allocation,
- // but we probably shouldn't call ReplaceCode either, as this
+ // TODO(mvstanton): We can't call EnsureLiterals here due to allocation,
+ // but we probably shouldn't call set_code either, as this
// sometimes runs on the worker thread!
// JSFunction::EnsureLiterals(function);
}
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 8c0393f843..b1eaf448c1 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -7,6 +7,7 @@
#include <algorithm>
#include <memory>
+#include "src/api.h"
#include "src/asmjs/asm-js.h"
#include "src/assembler-inl.h"
#include "src/ast/ast-numbering.h"
@@ -201,6 +202,7 @@ Isolate* CompilationJob::isolate() const {
namespace {
void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
+ Handle<Script> script,
CompilationInfo* compilation_info) {
// Log the code generation. If source information is available include
// script name and line number. Check explicitly whether logging is
@@ -208,7 +210,6 @@ void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
if (compilation_info->isolate()->logger()->is_logging_code_events() ||
compilation_info->isolate()->is_profiling()) {
Handle<SharedFunctionInfo> shared = compilation_info->shared_info();
- Handle<Script> script = compilation_info->script();
Handle<AbstractCode> abstract_code =
compilation_info->has_bytecode_array()
? Handle<AbstractCode>::cast(compilation_info->bytecode_array())
@@ -287,7 +288,7 @@ void InstallUnoptimizedCode(CompilationInfo* compilation_info) {
}
DCHECK(!compilation_info->code().is_null());
- shared->ReplaceCode(*compilation_info->code());
+ shared->set_code(*compilation_info->code());
if (compilation_info->has_bytecode_array()) {
DCHECK(!shared->HasBytecodeArray()); // Only compiled once.
DCHECK(!compilation_info->has_asm_wasm_data());
@@ -298,8 +299,7 @@ void InstallUnoptimizedCode(CompilationInfo* compilation_info) {
// Install coverage info on the shared function info.
if (compilation_info->has_coverage_info()) {
- DCHECK(FLAG_block_coverage &&
- compilation_info->isolate()->is_block_code_coverage());
+ DCHECK(compilation_info->isolate()->is_block_code_coverage());
compilation_info->isolate()->debug()->InstallCoverageInfo(
compilation_info->shared_info(), compilation_info->coverage_info());
}
@@ -351,7 +351,7 @@ CompilationJob::Status FinalizeUnoptimizedCompilationJob(CompilationJob* job) {
log_tag = parse_info->lazy_compile() ? CodeEventListener::LAZY_COMPILE_TAG
: CodeEventListener::FUNCTION_TAG;
}
- RecordFunctionCompilation(log_tag, compilation_info);
+ RecordFunctionCompilation(log_tag, parse_info->script(), compilation_info);
job->RecordUnoptimizedCompilationStats();
}
return status;
@@ -549,7 +549,7 @@ bool GetOptimizedCodeNow(CompilationJob* job) {
DCHECK(!isolate->has_pending_exception());
InsertCodeIntoOptimizedCodeCache(compilation_info);
RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG,
- compilation_info);
+ job->parse_info()->script(), compilation_info);
return true;
}
@@ -726,14 +726,14 @@ CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job) {
} else if (job->FinalizeJob() == CompilationJob::SUCCEEDED) {
job->RecordOptimizedCompilationStats();
RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG,
- compilation_info);
+ job->parse_info()->script(), compilation_info);
InsertCodeIntoOptimizedCodeCache(compilation_info);
if (FLAG_trace_opt) {
PrintF("[completed optimizing ");
compilation_info->closure()->ShortPrint();
PrintF("]\n");
}
- compilation_info->closure()->ReplaceCode(*compilation_info->code());
+ compilation_info->closure()->set_code(*compilation_info->code());
return CompilationJob::SUCCEEDED;
}
}
@@ -745,7 +745,7 @@ CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job) {
PrintF(" because: %s]\n",
GetBailoutReason(compilation_info->bailout_reason()));
}
- compilation_info->closure()->ReplaceCode(shared->code());
+ compilation_info->closure()->set_code(shared->code());
// Clear the InOptimizationQueue marker, if it exists.
if (compilation_info->closure()->IsInOptimizationQueue()) {
compilation_info->closure()->ClearOptimizationMarker();
@@ -951,7 +951,7 @@ bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag) {
}
// Install code on closure.
- function->ReplaceCode(*code);
+ function->set_code(*code);
// Check postconditions on success.
DCHECK(!isolate->has_pending_exception());
@@ -977,7 +977,7 @@ bool Compiler::CompileOptimized(Handle<JSFunction> function,
}
// Install code on closure.
- function->ReplaceCode(*code);
+ function->set_code(*code);
// Check postconditions on success.
DCHECK(!isolate->has_pending_exception());
@@ -1005,7 +1005,7 @@ MaybeHandle<JSArray> Compiler::CompileForLiveEdit(Handle<Script> script) {
// Start a compilation.
ParseInfo parse_info(script);
- parse_info.set_is_debug();
+ parse_info.set_eager();
// TODO(635): support extensions.
Handle<JSArray> infos;
@@ -1079,7 +1079,23 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
}
script->set_origin_options(options);
script->set_compilation_type(Script::COMPILATION_TYPE_EVAL);
- Script::SetEvalOrigin(script, outer_info, eval_position);
+
+ script->set_eval_from_shared(*outer_info);
+ if (eval_position == kNoSourcePosition) {
+ // If the position is missing, attempt to get the code offset by
+ // walking the stack. Do not translate the code offset into a source
+ // position, but store it as a negative value for lazy translation.
+ StackTraceFrameIterator it(script->GetIsolate());
+ if (!it.done() && it.is_javascript()) {
+ FrameSummary summary = FrameSummary::GetTop(it.javascript_frame());
+ script->set_eval_from_shared(
+ summary.AsJavaScript().function()->shared());
+ eval_position = -summary.code_offset();
+ } else {
+ eval_position = 0;
+ }
+ }
+ script->set_eval_from_position(eval_position);
ParseInfo parse_info(script);
parse_info.set_eval();
@@ -1142,6 +1158,11 @@ bool ContainsAsmModule(Handle<Script> script) {
return false;
}
+bool ShouldProduceCodeCache(ScriptCompiler::CompileOptions options) {
+ return options == ScriptCompiler::kProduceCodeCache ||
+ options == ScriptCompiler::kProduceFullCodeCache;
+}
+
} // namespace
bool Compiler::CodeGenerationFromStringsAllowed(Isolate* isolate,
@@ -1187,18 +1208,18 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromString(
eval_scope_position, eval_position);
}
-Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
- Handle<String> source, Handle<Object> script_name, int line_offset,
- int column_offset, ScriptOriginOptions resource_options,
- Handle<Object> source_map_url, Handle<Context> context,
+MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
+ Handle<String> source, MaybeHandle<Object> maybe_script_name,
+ int line_offset, int column_offset, ScriptOriginOptions resource_options,
+ MaybeHandle<Object> maybe_source_map_url, Handle<Context> context,
v8::Extension* extension, ScriptData** cached_data,
ScriptCompiler::CompileOptions compile_options, NativesFlag natives,
- Handle<FixedArray> host_defined_options) {
+ MaybeHandle<FixedArray> maybe_host_defined_options) {
Isolate* isolate = source->GetIsolate();
if (compile_options == ScriptCompiler::kNoCompileOptions) {
cached_data = NULL;
} else if (compile_options == ScriptCompiler::kProduceParserCache ||
- compile_options == ScriptCompiler::kProduceCodeCache) {
+ ShouldProduceCodeCache(compile_options)) {
DCHECK(cached_data && !*cached_data);
DCHECK(extension == NULL);
DCHECK(!isolate->debug()->is_loaded());
@@ -1216,14 +1237,14 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
CompilationCache* compilation_cache = isolate->compilation_cache();
// Do a lookup in the compilation cache but not for extensions.
- Handle<SharedFunctionInfo> result;
+ MaybeHandle<SharedFunctionInfo> maybe_result;
Handle<Cell> vector;
if (extension == NULL) {
// First check per-isolate compilation cache.
InfoVectorPair pair = compilation_cache->LookupScript(
- source, script_name, line_offset, column_offset, resource_options,
+ source, maybe_script_name, line_offset, column_offset, resource_options,
context, language_mode);
- if (!pair.has_shared() && FLAG_serialize_toplevel &&
+ if (!pair.has_shared() &&
compile_options == ScriptCompiler::kConsumeCodeCache &&
!isolate->debug()->is_loaded()) {
// Then check cached code provided by embedder.
@@ -1249,7 +1270,7 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
// Deserializer failed. Fall through to compile.
} else {
if (pair.has_shared()) {
- result = Handle<SharedFunctionInfo>(pair.shared(), isolate);
+ maybe_result = MaybeHandle<SharedFunctionInfo>(pair.shared(), isolate);
}
if (pair.has_vector()) {
vector = Handle<Cell>(pair.vector(), isolate);
@@ -1258,14 +1279,11 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
}
base::ElapsedTimer timer;
- if (FLAG_profile_deserialization && FLAG_serialize_toplevel &&
- compile_options == ScriptCompiler::kProduceCodeCache) {
+ if (FLAG_profile_deserialization && ShouldProduceCodeCache(compile_options)) {
timer.Start();
}
- if (result.is_null() ||
- (FLAG_serialize_toplevel &&
- compile_options == ScriptCompiler::kProduceCodeCache)) {
+ if (maybe_result.is_null() || ShouldProduceCodeCache(compile_options)) {
// No cache entry found, or embedder wants a code cache. Compile the script.
// Create a script object describing the script to be compiled.
@@ -1280,16 +1298,19 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
} else if (natives == INSPECTOR_CODE) {
script->set_type(Script::TYPE_INSPECTOR);
}
- if (!script_name.is_null()) {
+ Handle<Object> script_name;
+ if (maybe_script_name.ToHandle(&script_name)) {
script->set_name(*script_name);
script->set_line_offset(line_offset);
script->set_column_offset(column_offset);
}
script->set_origin_options(resource_options);
- if (!source_map_url.is_null()) {
+ Handle<Object> source_map_url;
+ if (maybe_source_map_url.ToHandle(&source_map_url)) {
script->set_source_mapping_url(*source_map_url);
}
- if (!host_defined_options.is_null()) {
+ Handle<FixedArray> host_defined_options;
+ if (maybe_host_defined_options.ToHandle(&host_defined_options)) {
script->set_host_defined_options(*host_defined_options);
}
@@ -1305,15 +1326,17 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
if (!context->IsNativeContext()) {
parse_info.set_outer_scope_info(handle(context->scope_info()));
}
- if (FLAG_serialize_toplevel &&
- compile_options == ScriptCompiler::kProduceCodeCache) {
+ if (ShouldProduceCodeCache(compile_options)) {
parse_info.set_will_serialize();
+ parse_info.set_eager(compile_options ==
+ ScriptCompiler::kProduceFullCodeCache);
}
parse_info.set_language_mode(
static_cast<LanguageMode>(parse_info.language_mode() | language_mode));
- CompileToplevel(&parse_info, isolate).ToHandle(&result);
- if (extension == NULL && !result.is_null()) {
+ maybe_result = CompileToplevel(&parse_info, isolate);
+ Handle<SharedFunctionInfo> result;
+ if (extension == NULL && maybe_result.ToHandle(&result)) {
// We need a feedback vector.
DCHECK(result->is_compiled());
Handle<FeedbackVector> feedback_vector =
@@ -1321,8 +1344,7 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
vector = isolate->factory()->NewCell(feedback_vector);
compilation_cache->PutScript(source, context, language_mode, result,
vector);
- if (FLAG_serialize_toplevel &&
- compile_options == ScriptCompiler::kProduceCodeCache &&
+ if (ShouldProduceCodeCache(compile_options) &&
!ContainsAsmModule(script)) {
HistogramTimerScope histogram_timer(
isolate->counters()->compile_serialize());
@@ -1338,7 +1360,7 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
}
}
- if (result.is_null()) {
+ if (maybe_result.is_null()) {
if (natives != EXTENSION_CODE && natives != NATIVES_CODE) {
isolate->ReportPendingMessages();
}
@@ -1346,7 +1368,7 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
isolate->debug()->OnAfterCompile(script);
}
}
- return result;
+ return maybe_result;
}
Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForStreamedScript(
@@ -1488,7 +1510,7 @@ void Compiler::PostInstantiation(Handle<JSFunction> function,
// Caching of optimized code enabled and optimized code found.
DCHECK(!code->marked_for_deoptimization());
DCHECK(function->shared()->is_compiled());
- function->ReplaceCode(code);
+ function->set_code(code);
}
}
}
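
The inlined eval-origin logic above stores a stack-derived code offset negated when no source position is supplied, deferring translation to a real source position until it is needed. A tiny sketch of that encoding convention (the sentinel and helper are hypothetical; only the sign trick mirrors the hunk):

    #include <cstdio>

    constexpr int kNoSourcePosition = -1;  // hypothetical sentinel

    // A known position is kept as-is; a stack-derived code offset is stored
    // negated for lazy translation; 0 means no JavaScript frame was available.
    int EncodeEvalPosition(int eval_position, int code_offset, bool have_frame) {
      if (eval_position != kNoSourcePosition) return eval_position;
      return have_frame ? -code_offset : 0;
    }

    int main() {
      std::printf("%d\n", EncodeEvalPosition(kNoSourcePosition, 17, true));   // -17
      std::printf("%d\n", EncodeEvalPosition(kNoSourcePosition, 17, false));  // 0
    }
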
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 98de5aabfc..5bd7b53f66 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -107,13 +107,14 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
ParseRestriction restriction, int parameters_end_pos);
// Create a shared function info object for a String source within a context.
- static Handle<SharedFunctionInfo> GetSharedFunctionInfoForScript(
- Handle<String> source, Handle<Object> script_name, int line_offset,
- int column_offset, ScriptOriginOptions resource_options,
- Handle<Object> source_map_url, Handle<Context> context,
+ static MaybeHandle<SharedFunctionInfo> GetSharedFunctionInfoForScript(
+ Handle<String> source, MaybeHandle<Object> maybe_script_name,
+ int line_offset, int column_offset, ScriptOriginOptions resource_options,
+ MaybeHandle<Object> maybe_source_map_url, Handle<Context> context,
v8::Extension* extension, ScriptData** cached_data,
ScriptCompiler::CompileOptions compile_options,
- NativesFlag is_natives_code, Handle<FixedArray> host_defined_options);
+ NativesFlag is_natives_code,
+ MaybeHandle<FixedArray> maybe_host_defined_options);
// Create a shared function info object for a Script that has already been
// parsed while the script was being loaded from a streamed source.
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 1115a46c5c..02f59f493b 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -174,16 +174,6 @@ FieldAccess AccessBuilder::ForJSFunctionCode() {
}
// static
-FieldAccess AccessBuilder::ForJSFunctionNextFunctionLink() {
- FieldAccess access = {
- kTaggedBase, JSFunction::kNextFunctionLinkOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::AnyTagged(),
- kPointerWriteBarrier};
- return access;
-}
-
-// static
FieldAccess AccessBuilder::ForJSBoundFunctionBoundTargetFunction() {
FieldAccess access = {
kTaggedBase, JSBoundFunction::kBoundTargetFunctionOffset,
@@ -509,20 +499,9 @@ FieldAccess AccessBuilder::ForFixedTypedArrayBaseExternalPointer() {
}
// static
-FieldAccess AccessBuilder::ForDescriptorArrayEnumCacheBridge() {
- FieldAccess access = {
- kTaggedBase, DescriptorArray::kEnumCacheBridgeOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::OtherInternal(), MachineType::TaggedPointer(),
- kPointerWriteBarrier};
- return access;
-}
-
-
-// static
-FieldAccess AccessBuilder::ForDescriptorArrayEnumCacheBridgeCache() {
+FieldAccess AccessBuilder::ForDescriptorArrayEnumCache() {
FieldAccess access = {
- kTaggedBase, DescriptorArray::kEnumCacheBridgeCacheOffset,
+ kTaggedBase, DescriptorArray::kEnumCacheOffset,
Handle<Name>(), MaybeHandle<Map>(),
Type::OtherInternal(), MachineType::TaggedPointer(),
kPointerWriteBarrier};
@@ -940,10 +919,20 @@ ElementAccess AccessBuilder::ForFixedDoubleArrayElement() {
}
// static
-ElementAccess AccessBuilder::ForDescriptorArrayEnumCacheBridgeCacheElement() {
- ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize,
- Type::InternalizedString(),
- MachineType::TaggedPointer(), kPointerWriteBarrier};
+FieldAccess AccessBuilder::ForEnumCacheKeys() {
+ FieldAccess access = {kTaggedBase, EnumCache::kKeysOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForEnumCacheIndices() {
+ FieldAccess access = {kTaggedBase, EnumCache::kIndicesOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
@@ -996,9 +985,6 @@ ElementAccess AccessBuilder::ForTypedArrayElement(ExternalArrayType type,
}
}
UNREACHABLE();
- ElementAccess access = {kUntaggedBase, 0, Type::None(), MachineType::None(),
- kNoWriteBarrier};
- return access;
}
// static
@@ -1093,6 +1079,16 @@ FieldAccess AccessBuilder::ForOrderedHashTableBaseNumberOfElements() {
}
// static
+ElementAccess AccessBuilder::ForOrderedHashMapEntryValue() {
+ ElementAccess const access = {kTaggedBase,
+ OrderedHashMap::kHashTableStartOffset +
+ OrderedHashMap::kValueOffset * kPointerSize,
+ Type::Any(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForDictionaryMaxNumberKey() {
FieldAccess access = {
kTaggedBase,
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index 1709c58e97..d1f6acfc56 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -73,9 +73,6 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSFunction::code() field.
static FieldAccess ForJSFunctionCode();
- // Provides access to JSFunction::next_function_link() field.
- static FieldAccess ForJSFunctionNextFunctionLink();
-
// Provides access to JSBoundFunction::bound_target_function() field.
static FieldAccess ForJSBoundFunctionBoundTargetFunction();
@@ -169,11 +166,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to FixedTypedArrayBase::external_pointer() field.
static FieldAccess ForFixedTypedArrayBaseExternalPointer();
- // Provides access to DescriptorArray::enum_cache_bridge() field.
- static FieldAccess ForDescriptorArrayEnumCacheBridge();
-
- // Provides access to DescriptorArray::enum_cache_bridge_cache() field.
- static FieldAccess ForDescriptorArrayEnumCacheBridgeCache();
+ // Provides access to DescriptorArray::enum_cache() field.
+ static FieldAccess ForDescriptorArrayEnumCache();
// Provides access to Map::bit_field() byte.
static FieldAccess ForMapBitField();
@@ -285,8 +279,11 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to FixedDoubleArray elements.
static ElementAccess ForFixedDoubleArrayElement();
- // Provides access to EnumCache elements.
- static ElementAccess ForDescriptorArrayEnumCacheBridgeCacheElement();
+ // Provides access to EnumCache::keys() field.
+ static FieldAccess ForEnumCacheKeys();
+
+ // Provides access to EnumCache::indices() field.
+ static FieldAccess ForEnumCacheIndices();
// Provides access to Fixed{type}TypedArray and External{type}Array elements.
static ElementAccess ForTypedArrayElement(ExternalArrayType type,
@@ -303,6 +300,9 @@ class V8_EXPORT_PRIVATE AccessBuilder final
static FieldAccess ForOrderedHashTableBaseNumberOfElements();
static FieldAccess ForOrderedHashTableBaseNumberOfDeletedElements();
+ // Provides access to OrderedHashMap elements.
+ static ElementAccess ForOrderedHashMapEntryValue();
+
// Provides access to Dictionary fields.
static FieldAccess ForDictionaryMaxNumberKey();
static FieldAccess ForDictionaryNextEnumerationIndex();
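
The enum-cache accessors above reflect a layout change: the two "bridge" slots on DescriptorArray are replaced by a dedicated EnumCache object with separate keys and indices fields. A plain-struct sketch of the resulting shape (offsets, types, and field layout are simplified stand-ins):

    #include <cstdio>

    struct FixedArray { int length; };
    struct EnumCache {
      FixedArray* keys;     // reached via EnumCache::kKeysOffset
      FixedArray* indices;  // reached via EnumCache::kIndicesOffset
    };
    struct DescriptorArray {
      EnumCache* enum_cache;  // reached via DescriptorArray::kEnumCacheOffset
    };

    int main() {
      FixedArray keys{2}, indices{2};
      EnumCache cache{&keys, &indices};
      DescriptorArray desc{&cache};
      std::printf("%d\n", desc.enum_cache->keys->length);  // 2
    }
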
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 134e9d4ea1..b1c680e517 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -96,6 +96,13 @@ PropertyAccessInfo PropertyAccessInfo::AccessorConstant(
return PropertyAccessInfo(kAccessorConstant, holder, constant, receiver_maps);
}
+// static
+PropertyAccessInfo PropertyAccessInfo::ModuleExport(
+ MapHandles const& receiver_maps, Handle<Cell> cell) {
+ return PropertyAccessInfo(kModuleExport, MaybeHandle<JSObject>(), cell,
+ receiver_maps);
+}
+
PropertyAccessInfo::PropertyAccessInfo()
: kind_(kInvalid),
field_representation_(MachineRepresentation::kNone),
@@ -209,11 +216,19 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
that->receiver_maps_.end());
return true;
}
+ case kModuleExport: {
+ return false;
+ }
}
UNREACHABLE();
}
+Handle<Cell> PropertyAccessInfo::export_cell() const {
+ DCHECK_EQ(kModuleExport, kind_);
+ return Handle<Cell>::cast(constant_);
+}
+
AccessInfoFactory::AccessInfoFactory(CompilationDependencies* dependencies,
Handle<Context> native_context, Zone* zone)
: dependencies_(dependencies),
@@ -400,6 +415,27 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
return true;
} else {
DCHECK_EQ(kAccessor, details.kind());
+ if (map->instance_type() == JS_MODULE_NAMESPACE_TYPE) {
+ DCHECK(map->is_prototype_map());
+ Handle<PrototypeInfo> proto_info =
+ Map::GetOrCreatePrototypeInfo(map, isolate());
+ DCHECK(proto_info->weak_cell()->IsWeakCell());
+ Handle<JSModuleNamespace> module_namespace(
+ JSModuleNamespace::cast(
+ WeakCell::cast(proto_info->weak_cell())->value()),
+ isolate());
+ Handle<Cell> cell(
+ Cell::cast(module_namespace->module()->exports()->Lookup(
+ isolate(), name, Smi::ToInt(name->GetHash()))),
+ isolate());
+ if (cell->value()->IsTheHole(isolate())) {
+ // This module has not been fully initialized yet.
+ return false;
+ }
+ *access_info = PropertyAccessInfo::ModuleExport(
+ MapHandles{receiver_map}, cell);
+ return true;
+ }
Handle<Object> accessors(descriptors->GetValue(number), isolate());
if (!accessors->IsAccessorPair()) return false;
Handle<Object> accessor(
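The new kModuleExport access info is intended to let a later lowering replace a module-namespace property load with a direct load from the export cell. A hedged consumer-side sketch (the actual lowering lives elsewhere in the compiler and may differ; AccessBuilder::ForCellValue() is assumed here):

    if (access_info.IsModuleExport()) {
      // Bake the export cell in as a constant and load its current value.
      Node* cell = jsgraph()->Constant(access_info.export_cell());
      value = effect = graph()->NewNode(
          simplified()->LoadField(AccessBuilder::ForCellValue()), cell,
          effect, control);
    }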
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index 7ec8deb8f0..dcdb0f35f0 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -63,7 +63,8 @@ class PropertyAccessInfo final {
kDataConstant,
kDataField,
kDataConstantField,
- kAccessorConstant
+ kAccessorConstant,
+ kModuleExport
};
static PropertyAccessInfo NotFound(MapHandles const& receiver_maps,
@@ -80,6 +81,8 @@ class PropertyAccessInfo final {
static PropertyAccessInfo AccessorConstant(MapHandles const& receiver_maps,
Handle<Object> constant,
MaybeHandle<JSObject> holder);
+ static PropertyAccessInfo ModuleExport(MapHandles const& receiver_maps,
+ Handle<Cell> cell);
PropertyAccessInfo();
@@ -93,6 +96,7 @@ class PropertyAccessInfo final {
// is done.
bool IsDataConstantField() const { return kind() == kDataConstantField; }
bool IsAccessorConstant() const { return kind() == kAccessorConstant; }
+ bool IsModuleExport() const { return kind() == kModuleExport; }
bool HasTransitionMap() const { return !transition_map().is_null(); }
@@ -107,6 +111,7 @@ class PropertyAccessInfo final {
}
MaybeHandle<Map> field_map() const { return field_map_; }
MapHandles const& receiver_maps() const { return receiver_maps_; }
+ Handle<Cell> export_cell() const;
private:
PropertyAccessInfo(MaybeHandle<JSObject> holder,
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index bc5d61fc19..fa9f6a027e 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -222,6 +222,12 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ CheckPageFlag(value_, scratch0_,
MemoryChunk::kPointersToHereAreInterestingMask, eq,
exit());
+ if (index_ == no_reg) {
+ __ add(scratch1_, object_, Operand(index_immediate_));
+ } else {
+ DCHECK_EQ(0, index_immediate_);
+ __ add(scratch1_, object_, Operand(index_));
+ }
RememberedSetAction const remembered_set_action =
mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
: OMIT_REMEMBERED_SET;
@@ -232,15 +238,14 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ Push(lr);
unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset());
}
- if (index_.is(no_reg)) {
- __ add(scratch1_, object_, Operand(index_immediate_));
- } else {
- DCHECK_EQ(0, index_immediate_);
- __ add(scratch1_, object_, Operand(index_));
- }
+#ifdef V8_CSA_WRITE_BARRIER
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode);
+#else
__ CallStubDelayed(
new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode));
+#endif
if (must_save_lr_) {
__ Pop(lr);
unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
@@ -250,7 +255,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
private:
Register const object_;
Register const index_;
- int32_t const index_immediate_; // Valid if index_.is(no_reg).
+ int32_t const index_immediate_; // Valid if index_ == no_reg.
Register const value_;
Register const scratch0_;
Register const scratch1_;
@@ -419,51 +424,51 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
__ dmb(ISH); \
} while (0)
-#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr) \
- do { \
- Label exchange; \
- __ dmb(ISH); \
- __ bind(&exchange); \
- __ add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ load_instr(i.OutputRegister(0), i.TempRegister(0)); \
- __ store_instr(i.TempRegister(0), i.InputRegister(2), i.TempRegister(0)); \
- __ teq(i.TempRegister(0), Operand(0)); \
- __ b(ne, &exchange); \
- __ dmb(ISH); \
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr) \
+ do { \
+ Label exchange; \
+ __ add(i.InputRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ dmb(ISH); \
+ __ bind(&exchange); \
+ __ load_instr(i.OutputRegister(0), i.InputRegister(0)); \
+ __ store_instr(i.TempRegister(0), i.InputRegister(2), i.InputRegister(0)); \
+ __ teq(i.TempRegister(0), Operand(0)); \
+ __ b(ne, &exchange); \
+ __ dmb(ISH); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_instr, store_instr) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ __ add(i.InputRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ dmb(ISH); \
+ __ bind(&compareExchange); \
+ __ load_instr(i.OutputRegister(0), i.InputRegister(0)); \
+ __ teq(i.InputRegister(2), Operand(i.OutputRegister(0))); \
+ __ b(ne, &exit); \
+ __ store_instr(i.TempRegister(0), i.InputRegister(3), i.InputRegister(0)); \
+ __ teq(i.TempRegister(0), Operand(0)); \
+ __ b(ne, &compareExchange); \
+ __ bind(&exit); \
+ __ dmb(ISH); \
} while (0)
-#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_instr, store_instr) \
+#define ASSEMBLE_ATOMIC_BINOP(load_instr, store_instr, bin_instr) \
do { \
- Label compareExchange; \
- Label exit; \
+ Label binop; \
+ __ add(i.InputRegister(0), i.InputRegister(0), i.InputRegister(1)); \
__ dmb(ISH); \
- __ bind(&compareExchange); \
- __ add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ load_instr(i.OutputRegister(0), i.TempRegister(0)); \
- __ teq(i.TempRegister(1), Operand(i.OutputRegister(0))); \
- __ b(ne, &exit); \
- __ store_instr(i.TempRegister(0), i.InputRegister(3), i.TempRegister(0)); \
- __ teq(i.TempRegister(0), Operand(0)); \
- __ b(ne, &compareExchange); \
- __ bind(&exit); \
+ __ bind(&binop); \
+ __ load_instr(i.OutputRegister(0), i.InputRegister(0)); \
+ __ bin_instr(i.TempRegister(0), i.OutputRegister(0), \
+ Operand(i.InputRegister(2))); \
+ __ store_instr(i.TempRegister(1), i.TempRegister(0), i.InputRegister(0)); \
+ __ teq(i.TempRegister(1), Operand(0)); \
+ __ b(ne, &binop); \
__ dmb(ISH); \
} while (0)
-#define ASSEMBLE_ATOMIC_BINOP(load_instr, store_instr, bin_instr) \
- do { \
- Label binop; \
- __ add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ dmb(ISH); \
- __ bind(&binop); \
- __ load_instr(i.OutputRegister(0), i.TempRegister(0)); \
- __ bin_instr(i.TempRegister(1), i.OutputRegister(0), \
- Operand(i.InputRegister(2))); \
- __ store_instr(i.TempRegister(1), i.TempRegister(1), i.TempRegister(0)); \
- __ teq(i.TempRegister(1), Operand(0)); \
- __ b(ne, &binop); \
- __ dmb(ISH); \
- } while (0)
-
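For readers less familiar with LL/SC sequences: the rewritten compare-exchange macro is semantically a sequentially consistent compare-and-swap. The ldrex/strex pair retries until the store-exclusive succeeds, and the dmb ISH instructions fence both sides. A portable C++11 equivalent, for illustration only (this is not what the macro expands to):

    #include <atomic>
    #include <cstdint>

    // Returns the value observed at *addr, like the macro's OutputRegister(0).
    uint32_t CompareExchangeSeqCst(std::atomic<uint32_t>* addr,
                                   uint32_t expected, uint32_t desired) {
      // On failure, compare_exchange_strong writes the observed value back
      // into |expected|; on success the observed value equals |expected|.
      addr->compare_exchange_strong(expected, desired,
                                    std::memory_order_seq_cst);
      return expected;
    }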
#define ASSEMBLE_IEEE754_BINOP(name) \
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
@@ -498,10 +503,10 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
Simd128Register dst = i.OutputSimd128Register(), \
src0 = i.InputSimd128Register(0), \
src1 = i.InputSimd128Register(1); \
- if (dst.is(src0) && dst.is(src1)) { \
+ if (dst == src0 && dst == src1) { \
__ vqmovn(dt, dst.low(), src0); \
__ vmov(dst.high(), dst.low()); \
- } else if (dst.is(src0)) { \
+ } else if (dst == src0) { \
__ vqmovn(dt, dst.low(), src0); \
__ vqmovn(dt, dst.high(), src1); \
} else { \
@@ -515,9 +520,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
Simd128Register dst = i.OutputSimd128Register(), \
src0 = i.InputSimd128Register(0), \
src1 = i.InputSimd128Register(1); \
- if (dst.is(src0)) { \
+ if (dst == src0) { \
__ op(size, dst.low(), src0.low(), src0.high()); \
- if (dst.is(src1)) { \
+ if (dst == src1) { \
__ vmov(dst.high(), dst.low()); \
} else { \
__ op(size, dst.high(), src1.low(), src1.high()); \
@@ -590,7 +595,7 @@ void FlushPendingPushRegisters(TurboAssembler* tasm,
break;
}
frame_access_state->IncreaseSPDelta(pending_pushes->size());
- pending_pushes->resize(0);
+ pending_pushes->clear();
}
void AdjustStackPointerForTailCall(
@@ -659,6 +664,26 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
first_unused_stack_slot);
}
+// Check if the code object is marked for deoptimization. If it is, then it
+// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we
+// need to:
+//    1. load the address of the current instruction;
+//    2. read from memory the word that contains the marking bit, which can
+//       be found in the first set of flags ({kKindSpecificFlags1Offset});
+//    3. test kMarkedForDeoptimizationBit in those flags; and
+//    4. if the bit is set, jump to the builtin.
+void CodeGenerator::BailoutIfDeoptimized() {
+ int pc_offset = __ pc_offset();
+ int offset =
+ Code::kKindSpecificFlags1Offset - (Code::kHeaderSize + pc_offset + 8);
+ // Reading the pc register yields the address of the current instruction
+ // plus 8, so pc - 8 addresses this instruction (hence the "+ 8" above).
+ __ ldr(ip, MemOperand(pc, offset));
+ __ tst(ip, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ Handle<Code> code = isolate()->builtins()->builtin_handle(
+ Builtins::kCompileLazyDeoptimizedCode);
+ __ Jump(code, RelocInfo::CODE_TARGET, ne);
+}
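The displacement above follows from the ARM convention that reading pc yields the current instruction's address plus 8. Working through the arithmetic (made-up helper, not V8 API):

    // Address the ldr reads through pc:
    //   read_addr  = code_start + Code::kHeaderSize + pc_offset + 8
    // Address of the flags word we want:
    //   flags_addr = code_start + Code::kKindSpecificFlags1Offset
    // Displacement passed to MemOperand(pc, offset):
    //   offset = flags_addr - read_addr
    constexpr int Displacement(int flags_offset, int header_size,
                               int pc_offset) {
      return flags_offset - (header_size + pc_offset + 8);
    }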
+
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -743,13 +768,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchSaveCallerRegisters: {
+ fp_mode_ =
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
+ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// kReturnRegister0 should have been saved before entering the stub.
- __ PushCallerSaved(kSaveFPRegs, kReturnRegister0);
+ int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
+ DCHECK_EQ(0, bytes % kPointerSize);
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ DCHECK(!caller_registers_saved_);
+ caller_registers_saved_ = true;
break;
}
case kArchRestoreCallerRegisters: {
+ DCHECK(fp_mode_ ==
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
+ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// Don't overwrite the returned value.
- __ PopCallerSaved(kSaveFPRegs, kReturnRegister0);
+ int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(-(bytes / kPointerSize));
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ DCHECK(caller_registers_saved_);
+ caller_registers_saved_ = false;
break;
}
case kArchPrepareTailCall:
@@ -765,7 +805,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ CallCFunction(func, num_parameters);
}
frame_access_state()->SetFrameAccessToDefault();
+ // Ideally, we should decrement the SP delta to match the change of the
+ // stack pointer in CallCFunction. However, on certain architectures (e.g.
+ // ARM), there may be a stricter alignment requirement, causing the old SP
+ // to be saved on the stack. In those cases, we cannot calculate the SP
+ // delta statically.
frame_access_state()->ClearSPDelta();
+ if (caller_registers_saved_) {
+ // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
+ // Here, we assume the sequence to be:
+ // kArchSaveCallerRegisters;
+ // kArchCallCFunction;
+ // kArchRestoreCallerRegisters;
+ int bytes =
+ __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ }
break;
}
case kArchJmp:
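The save/restore cases above keep the frame's SP delta balanced across the assumed save, C call, restore sequence. A standalone model of that accounting (hypothetical; kPointerSize and the byte count are stand-ins):

    #include <cassert>

    constexpr int kPointerSize = 4;  // ARM: 32-bit pointers

    void ModelSpDelta(int saved_bytes) {
      int sp_delta = 0;
      sp_delta += saved_bytes / kPointerSize;  // kArchSaveCallerRegisters
      sp_delta = 0;                            // CallCFunction clears the delta
      sp_delta += saved_bytes / kPointerSize;  // re-sync after the C call
      sp_delta -= saved_bytes / kPointerSize;  // kArchRestoreCallerRegisters
      assert(sp_delta == 0);                   // the frame is balanced again
    }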
@@ -781,7 +836,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArchDebugAbort:
- DCHECK(i.InputRegister(0).is(r1));
+ DCHECK(i.InputRegister(0) == r1);
if (!frame_access_state()->has_frame()) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
@@ -873,12 +928,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchStackSlot: {
FrameOffset offset =
frame_access_state()->GetFrameOffset(i.InputInt32(0));
- Register base;
- if (offset.from_stack_pointer()) {
- base = sp;
- } else {
- base = fp;
- }
+ Register base = offset.from_stack_pointer() ? sp : fp;
__ add(i.OutputRegister(0), base, Operand(offset.offset()));
break;
}
@@ -1185,7 +1235,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
DCHECK(instr->InputAt(1)->IsImmediate());
// 0.0 is the only immediate supported by vcmp instructions.
- DCHECK(i.InputFloat32(1) == 0.0f);
+ DCHECK_EQ(0.0f, i.InputFloat32(1));
__ VFPCompareAndSetFlags(i.InputFloatRegister(0), i.InputFloat32(1));
}
DCHECK_EQ(SetCC, i.OutputSBit());
@@ -1236,7 +1286,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
DCHECK(instr->InputAt(1)->IsImmediate());
// 0.0 is the only immediate supported by vcmp instructions.
- DCHECK(i.InputDouble(1) == 0.0);
+ DCHECK_EQ(0.0, i.InputDouble(1));
__ VFPCompareAndSetFlags(i.InputDoubleRegister(0), i.InputDouble(1));
}
DCHECK_EQ(SetCC, i.OutputSBit());
@@ -1516,7 +1566,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SwVfpRegister result = i.OutputFloatRegister();
SwVfpRegister left = i.InputFloatRegister(0);
SwVfpRegister right = i.InputFloatRegister(1);
- if (left.is(right)) {
+ if (left == right) {
__ Move(result, left);
} else {
auto ool = new (zone()) OutOfLineFloat32Max(this, result, left, right);
@@ -1530,7 +1580,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DwVfpRegister result = i.OutputDoubleRegister();
DwVfpRegister left = i.InputDoubleRegister(0);
DwVfpRegister right = i.InputDoubleRegister(1);
- if (left.is(right)) {
+ if (left == right) {
__ Move(result, left);
} else {
auto ool = new (zone()) OutOfLineFloat64Max(this, result, left, right);
@@ -1544,7 +1594,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SwVfpRegister result = i.OutputFloatRegister();
SwVfpRegister left = i.InputFloatRegister(0);
SwVfpRegister right = i.InputFloatRegister(1);
- if (left.is(right)) {
+ if (left == right) {
__ Move(result, left);
} else {
auto ool = new (zone()) OutOfLineFloat32Min(this, result, left, right);
@@ -1558,7 +1608,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DwVfpRegister result = i.OutputDoubleRegister();
DwVfpRegister left = i.InputDoubleRegister(0);
DwVfpRegister right = i.InputDoubleRegister(1);
- if (left.is(right)) {
+ if (left == right) {
__ Move(result, left);
} else {
auto ool = new (zone()) OutOfLineFloat64Min(this, result, left, right);
@@ -1647,9 +1697,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
// Make sure we don't overwrite source data before it's used.
- if (dst.is(src0)) {
+ if (dst == src0) {
__ vpadd(dst.low(), src0.low(), src0.high());
- if (dst.is(src1)) {
+ if (dst == src1) {
__ vmov(dst.high(), dst.low());
} else {
__ vpadd(dst.high(), src1.low(), src1.high());
@@ -2144,14 +2194,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmS128Select: {
Simd128Register dst = i.OutputSimd128Register();
- DCHECK(dst.is(i.InputSimd128Register(0)));
+ DCHECK(dst == i.InputSimd128Register(0));
__ vbsl(dst, i.InputSimd128Register(1), i.InputSimd128Register(2));
break;
}
case kArmS32x4ZipLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
- DCHECK(dst.is(i.InputSimd128Register(0)));
+ DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3], src1 = [4, 5, 6, 7]
__ vmov(dst.high(), src1.low()); // dst = [0, 1, 4, 5]
__ vtrn(Neon32, dst.low(), dst.high()); // dst = [0, 4, 1, 5]
@@ -2160,7 +2210,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS32x4ZipRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
- DCHECK(dst.is(i.InputSimd128Register(0)));
+ DCHECK(dst == i.InputSimd128Register(0));
// src0 = [4, 5, 6, 7], src1 = [0, 1, 2, 3] (flipped from ZipLeft).
__ vmov(dst.low(), src1.high()); // dst = [2, 3, 6, 7]
__ vtrn(Neon32, dst.low(), dst.high()); // dst = [2, 6, 3, 7]
@@ -2169,7 +2219,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS32x4UnzipLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
- DCHECK(dst.is(i.InputSimd128Register(0)));
+ DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3], src1 = [4, 5, 6, 7]
__ vmov(kScratchQuadReg, src1);
__ vuzp(Neon32, dst, kScratchQuadReg); // dst = [0, 2, 4, 6]
@@ -2178,7 +2228,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS32x4UnzipRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
- DCHECK(dst.is(i.InputSimd128Register(0)));
+ DCHECK(dst == i.InputSimd128Register(0));
// src0 = [4, 5, 6, 7], src1 = [0, 1, 2, 3] (flipped from UnzipLeft).
__ vmov(kScratchQuadReg, src1);
__ vuzp(Neon32, kScratchQuadReg, dst); // dst = [1, 3, 5, 7]
@@ -2187,7 +2237,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS32x4TransposeLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
- DCHECK(dst.is(i.InputSimd128Register(0)));
+ DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3], src1 = [4, 5, 6, 7]
__ vmov(kScratchQuadReg, src1);
__ vtrn(Neon32, dst, kScratchQuadReg); // dst = [0, 4, 2, 6]
@@ -2199,10 +2249,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
src1 = i.InputSimd128Register(1);
// Check for in-place shuffles.
// If dst == src0 == src1, then the shuffle is unary and we only use src0.
- if (dst.is(src0)) {
+ if (dst == src0) {
__ vmov(kScratchQuadReg, src0);
src0 = kScratchQuadReg;
- } else if (dst.is(src1)) {
+ } else if (dst == src1) {
__ vmov(kScratchQuadReg, src1);
src1 = kScratchQuadReg;
}
@@ -2226,7 +2276,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS32x4TransposeRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
- DCHECK(dst.is(i.InputSimd128Register(0)));
+ DCHECK(dst == i.InputSimd128Register(0));
// src0 = [4, 5, 6, 7], src1 = [0, 1, 2, 3] (flipped from TransposeLeft).
__ vmov(kScratchQuadReg, src1);
__ vtrn(Neon32, kScratchQuadReg, dst); // dst = [1, 5, 3, 7]
@@ -2236,7 +2286,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
// src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15]
- DCHECK(dst.is(i.InputSimd128Register(0)));
+ DCHECK(dst == i.InputSimd128Register(0));
__ vmov(dst.high(), src1.low()); // dst = [0, 1, 2, 3, 8, ... 11]
__ vzip(Neon16, dst.low(), dst.high()); // dst = [0, 8, 1, 9, ... 11]
break;
@@ -2244,7 +2294,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS16x8ZipRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
- DCHECK(dst.is(i.InputSimd128Register(0)));
+ DCHECK(dst == i.InputSimd128Register(0));
// src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped).
__ vmov(dst.low(), src1.high());
__ vzip(Neon16, dst.low(), dst.high()); // dst = [4, 12, 5, 13, ... 15]
@@ -2253,7 +2303,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS16x8UnzipLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
- DCHECK(dst.is(i.InputSimd128Register(0)));
+ DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15]
__ vmov(kScratchQuadReg, src1);
__ vuzp(Neon16, dst, kScratchQuadReg); // dst = [0, 2, 4, 6, ... 14]
@@ -2262,7 +2312,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS16x8UnzipRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
- DCHECK(dst.is(i.InputSimd128Register(0)));
+ DCHECK(dst == i.InputSimd128Register(0));
// src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped).
__ vmov(kScratchQuadReg, src1);
__ vuzp(Neon16, kScratchQuadReg, dst); // dst = [1, 3, 5, 7, ... 15]
@@ -2271,7 +2321,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS16x8TransposeLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
- DCHECK(dst.is(i.InputSimd128Register(0)));
+ DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15]
__ vmov(kScratchQuadReg, src1);
__ vtrn(Neon16, dst, kScratchQuadReg); // dst = [0, 8, 2, 10, ... 14]
@@ -2280,7 +2330,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS16x8TransposeRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
- DCHECK(dst.is(i.InputSimd128Register(0)));
+ DCHECK(dst == i.InputSimd128Register(0));
// src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped).
__ vmov(kScratchQuadReg, src1);
__ vtrn(Neon16, kScratchQuadReg, dst); // dst = [1, 9, 3, 11, ... 15]
@@ -2289,7 +2339,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS8x16ZipLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
- DCHECK(dst.is(i.InputSimd128Register(0)));
+ DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 31]
__ vmov(dst.high(), src1.low());
__ vzip(Neon8, dst.low(), dst.high()); // dst = [0, 16, 1, 17, ... 23]
@@ -2298,7 +2348,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS8x16ZipRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
- DCHECK(dst.is(i.InputSimd128Register(0)));
+ DCHECK(dst == i.InputSimd128Register(0));
// src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped).
__ vmov(dst.low(), src1.high());
__ vzip(Neon8, dst.low(), dst.high()); // dst = [8, 24, 9, 25, ... 31]
@@ -2307,7 +2357,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS8x16UnzipLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
- DCHECK(dst.is(i.InputSimd128Register(0)));
+ DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 31]
__ vmov(kScratchQuadReg, src1);
__ vuzp(Neon8, dst, kScratchQuadReg); // dst = [0, 2, 4, 6, ... 30]
@@ -2316,7 +2366,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS8x16UnzipRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
- DCHECK(dst.is(i.InputSimd128Register(0)));
+ DCHECK(dst == i.InputSimd128Register(0));
// src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped).
__ vmov(kScratchQuadReg, src1);
__ vuzp(Neon8, kScratchQuadReg, dst); // dst = [1, 3, 5, 7, ... 31]
@@ -2325,7 +2375,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS8x16TransposeLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
- DCHECK(dst.is(i.InputSimd128Register(0)));
+ DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 31]
__ vmov(kScratchQuadReg, src1);
__ vtrn(Neon8, dst, kScratchQuadReg); // dst = [0, 16, 2, 18, ... 30]
@@ -2334,7 +2384,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS8x16TransposeRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
- DCHECK(dst.is(i.InputSimd128Register(0)));
+ DCHECK(dst == i.InputSimd128Register(0));
// src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped).
__ vmov(kScratchQuadReg, src1);
__ vtrn(Neon8, kScratchQuadReg, dst); // dst = [1, 17, 3, 19, ... 31]
@@ -2352,8 +2402,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DwVfpRegister table_base = src0.low();
// If unary shuffle, table is src0 (2 d-registers), otherwise src0 and
// src1. They must be consecutive.
- int table_size = src0.is(src1) ? 2 : 4;
- DCHECK_IMPLIES(!src0.is(src1), src0.code() + 1 == src1.code());
+ int table_size = src0 == src1 ? 2 : 4;
+ DCHECK_IMPLIES(src0 != src1, src0.code() + 1 == src1.code());
// The shuffle lane mask is a byte mask, materialize in kScratchQuadReg.
int scratch_s_base = kScratchQuadReg.code() * 4;
for (int j = 0; j < 4; j++) {
@@ -2364,7 +2414,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Float32(four_lanes));
}
NeonListOperand table(table_base, table_size);
- if (!dst.is(src0) && !dst.is(src1)) {
+ if (dst != src0 && dst != src1) {
__ vtbl(dst.low(), table, kScratchQuadReg.low());
__ vtbl(dst.high(), table, kScratchQuadReg.high());
} else {
@@ -2538,25 +2588,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrex, strex);
break;
case kAtomicCompareExchangeInt8:
- __ uxtb(i.TempRegister(1), i.InputRegister(2));
+ __ uxtb(i.InputRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb);
__ sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
case kAtomicCompareExchangeUint8:
- __ uxtb(i.TempRegister(1), i.InputRegister(2));
+ __ uxtb(i.InputRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb);
break;
case kAtomicCompareExchangeInt16:
- __ uxth(i.TempRegister(1), i.InputRegister(2));
+ __ uxth(i.InputRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh);
__ sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
case kAtomicCompareExchangeUint16:
- __ uxth(i.TempRegister(1), i.InputRegister(2));
+ __ uxth(i.InputRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh);
break;
case kAtomicCompareExchangeWord32:
- __ mov(i.TempRegister(1), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrex, strex);
break;
#define ATOMIC_BINOP_CASE(op, inst) \
@@ -2583,6 +2632,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, orr)
ATOMIC_BINOP_CASE(Xor, eor)
#undef ATOMIC_BINOP_CASE
+#undef ASSEMBLE_CHECKED_LOAD_FP
+#undef ASSEMBLE_CHECKED_LOAD_INTEGER
+#undef ASSEMBLE_CHECKED_STORE_FP
+#undef ASSEMBLE_CHECKED_STORE_INTEGER
+#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
+#undef ASSEMBLE_ATOMIC_STORE_INTEGER
+#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
+#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
+#undef ASSEMBLE_ATOMIC_BINOP
+#undef ASSEMBLE_IEEE754_BINOP
+#undef ASSEMBLE_IEEE754_UNOP
+#undef ASSEMBLE_NEON_NARROWING_OP
+#undef ASSEMBLE_NEON_PAIRWISE_OP
}
return kSuccess;
} // NOLINT(readability/fn_size)
@@ -2598,6 +2660,10 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
+void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
+ BranchInfo* branch) {
+ AssembleArchBranch(instr, branch);
+}
void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
@@ -2641,6 +2707,9 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
+ CallDescriptor* descriptor = gen_->linkage()->GetIncomingDescriptor();
+ int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ __ Drop(pop_count);
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
@@ -2718,7 +2787,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
if (saves_fp != 0) {
// Save callee-saved FP registers.
- STATIC_ASSERT(DwVfpRegister::kMaxNumRegisters == 32);
+ STATIC_ASSERT(DwVfpRegister::kNumRegisters == 32);
uint32_t last = base::bits::CountLeadingZeros32(saves_fp) - 1;
uint32_t first = base::bits::CountTrailingZeros32(saves_fp);
DCHECK_EQ((last - first + 1), base::bits::CountPopulation32(saves_fp));
@@ -2815,7 +2884,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (saves_fp != 0) {
// Save callee-saved FP registers.
- STATIC_ASSERT(DwVfpRegister::kMaxNumRegisters == 32);
+ STATIC_ASSERT(DwVfpRegister::kNumRegisters == 32);
uint32_t last = base::bits::CountLeadingZeros32(saves_fp) - 1;
uint32_t first = base::bits::CountTrailingZeros32(saves_fp);
DCHECK_EQ((last - first + 1), base::bits::CountPopulation32(saves_fp));
@@ -2842,7 +2911,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
// Restore FP registers.
const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
if (saves_fp != 0) {
- STATIC_ASSERT(DwVfpRegister::kMaxNumRegisters == 32);
+ STATIC_ASSERT(DwVfpRegister::kNumRegisters == 32);
uint32_t last = base::bits::CountLeadingZeros32(saves_fp) - 1;
uint32_t first = base::bits::CountTrailingZeros32(saves_fp);
__ vldm(ia_w, sp, DwVfpRegister::from_code(first),
@@ -3169,6 +3238,7 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
}
#undef __
+#undef kScratchReg
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index 75021eb9d3..391356e960 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -2275,10 +2275,10 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
InstructionOperand inputs[3];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
- inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseRegister(index);
inputs[input_count++] = g.UseUniqueRegister(value);
InstructionOperand outputs[1];
- outputs[0] = g.UseUniqueRegister(node);
+ outputs[0] = g.DefineAsRegister(node);
InstructionOperand temp[1];
temp[0] = g.TempRegister();
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
@@ -2312,16 +2312,15 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
InstructionOperand inputs[4];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
- inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseRegister(index);
inputs[input_count++] = g.UseUniqueRegister(old_value);
inputs[input_count++] = g.UseUniqueRegister(new_value);
InstructionOperand outputs[1];
- outputs[0] = g.UseUniqueRegister(node);
- InstructionOperand temp[2];
+ outputs[0] = g.DefineAsRegister(node);
+ InstructionOperand temp[1];
temp[0] = g.TempRegister();
- temp[1] = g.TempRegister();
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 1, outputs, input_count, inputs, 2, temp);
+ Emit(code, 1, outputs, input_count, inputs, 1, temp);
}
void InstructionSelector::VisitAtomicBinaryOperation(
@@ -2352,15 +2351,16 @@ void InstructionSelector::VisitAtomicBinaryOperation(
InstructionOperand inputs[3];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
- inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseRegister(index);
inputs[input_count++] = g.UseUniqueRegister(value);
InstructionOperand outputs[1];
- outputs[0] = g.UseUniqueRegister(node);
+ outputs[0] = g.DefineAsRegister(node);
InstructionOperand temps[2];
- temps[0] = g.TempRegister();
- temps[1] = g.TempRegister();
+ size_t temp_count = 0;
+ temps[temp_count++] = g.TempRegister();
+ temps[temp_count++] = g.TempRegister();
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 1, outputs, input_count, inputs, 2, temps);
+ Emit(code, 1, outputs, input_count, inputs, temp_count, temps);
}
#define VISIT_ATOMIC_BINOP(op) \
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index 53a4f11131..2836e77c51 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -40,7 +40,7 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
CPURegister InputFloat32OrZeroRegister(size_t index) {
if (instr_->InputAt(index)->IsImmediate()) {
- DCHECK(bit_cast<int32_t>(InputFloat32(index)) == 0);
+ DCHECK_EQ(0, bit_cast<int32_t>(InputFloat32(index)));
return wzr;
}
DCHECK(instr_->InputAt(index)->IsFPRegister());
@@ -49,7 +49,7 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
CPURegister InputFloat64OrZeroRegister(size_t index) {
if (instr_->InputAt(index)->IsImmediate()) {
- DCHECK(bit_cast<int64_t>(InputDouble(index)) == 0);
+ DCHECK_EQ(0, bit_cast<int64_t>(InputDouble(index)));
return xzr;
}
DCHECK(instr_->InputAt(index)->IsDoubleRegister());
@@ -328,6 +328,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ CheckPageFlagClear(value_, scratch0_,
MemoryChunk::kPointersToHereAreInterestingMask,
exit());
+ __ Add(scratch1_, object_, index_);
RememberedSetAction const remembered_set_action =
mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
: OMIT_REMEMBERED_SET;
@@ -339,10 +340,14 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(),
__ StackPointer());
}
- __ Add(scratch1_, object_, index_);
+#ifdef V8_CSA_WRITE_BARRIER
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode);
+#else
__ CallStubDelayed(
new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode));
+#endif
if (must_save_lr_) {
__ Pop(lr);
unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
@@ -657,6 +662,28 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
first_unused_stack_slot);
}
+// Check if the code object is marked for deoptimization. If it is, then it
+// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we
+// need to:
+//    1. load the address of the current instruction;
+//    2. read from memory the word that contains the marking bit, which can
+//       be found in the first set of flags ({kKindSpecificFlags1Offset});
+//    3. test kMarkedForDeoptimizationBit in those flags; and
+//    4. if the bit is set, jump to the builtin.
+void CodeGenerator::BailoutIfDeoptimized() {
+ Label current;
+ // The Adr instruction gets the address of the current instruction.
+ __ Adr(x2, &current);
+ __ Bind(&current);
+ int pc = __ pc_offset();
+ int offset = Code::kKindSpecificFlags1Offset - (Code::kHeaderSize + pc);
+ __ Ldr(x2, MemOperand(x2, offset));
+ __ Tst(x2, Immediate(1 << Code::kMarkedForDeoptimizationBit));
+ Handle<Code> code = isolate()->builtins()->builtin_handle(
+ Builtins::kCompileLazyDeoptimizedCode);
+ __ Jump(code, RelocInfo::CODE_TARGET, ne);
+}
+
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -764,13 +791,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
UNREACHABLE();
break;
case kArchSaveCallerRegisters: {
+ fp_mode_ =
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
+ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// kReturnRegister0 should have been saved before entering the stub.
- __ PushCallerSaved(kSaveFPRegs, kReturnRegister0);
+ int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
+ DCHECK_EQ(0, bytes % kPointerSize);
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ DCHECK(!caller_registers_saved_);
+ caller_registers_saved_ = true;
break;
}
case kArchRestoreCallerRegisters: {
+ DCHECK(fp_mode_ ==
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
+ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// Don't overwrite the returned value.
- __ PopCallerSaved(kSaveFPRegs, kReturnRegister0);
+ int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(-(bytes / kPointerSize));
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ DCHECK(caller_registers_saved_);
+ caller_registers_saved_ = false;
break;
}
case kArchPrepareTailCall:
@@ -786,7 +828,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ CallCFunction(func, num_parameters, 0);
}
frame_access_state()->SetFrameAccessToDefault();
+ // Ideally, we should decrement the SP delta to match the change of the
+ // stack pointer in CallCFunction. However, on certain architectures (e.g.
+ // ARM), there may be a stricter alignment requirement, causing the old SP
+ // to be saved on the stack. In those cases, we cannot calculate the SP
+ // delta statically.
frame_access_state()->ClearSPDelta();
+ if (caller_registers_saved_) {
+ // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
+ // Here, we assume the sequence to be:
+ // kArchSaveCallerRegisters;
+ // kArchCallCFunction;
+ // kArchRestoreCallerRegisters;
+ int bytes =
+ __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ }
break;
}
case kArchJmp:
@@ -881,12 +938,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchStackSlot: {
FrameOffset offset =
frame_access_state()->GetFrameOffset(i.InputInt32(0));
- Register base;
- if (offset.from_stack_pointer()) {
- base = __ StackPointer();
- } else {
- base = fp;
- }
+ Register base = offset.from_stack_pointer() ? __ StackPointer() : fp;
__ Add(i.OutputRegister(0), base, Operand(offset.offset()));
break;
}
@@ -1352,7 +1404,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
DCHECK(instr->InputAt(1)->IsImmediate());
// 0.0 is the only immediate supported by fcmp instructions.
- DCHECK(i.InputFloat32(1) == 0.0f);
+ DCHECK_EQ(0.0f, i.InputFloat32(1));
__ Fcmp(i.InputFloat32Register(0), i.InputFloat32(1));
}
break;
@@ -1387,7 +1439,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
DCHECK(instr->InputAt(1)->IsImmediate());
// 0.0 is the only immediate supported by fcmp instructions.
- DCHECK(i.InputDouble(1) == 0.0);
+ DCHECK_EQ(0.0, i.InputDouble(1));
__ Fcmp(i.InputDoubleRegister(0), i.InputDouble(1));
}
break;
@@ -2273,6 +2325,10 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ B(flabel); // no fallthru to flabel.
}
+void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
+ BranchInfo* branch) {
+ AssembleArchBranch(instr, branch);
+}
void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ B(GetLabel(target));
@@ -2311,6 +2367,10 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
+ CallDescriptor* descriptor = gen_->linkage()->GetIncomingDescriptor();
+ int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ pop_count += (pop_count & 1); // align csp to 16 bytes
+ __ Drop(pop_count);
__ Ret();
} else {
DCHECK(csp.Is(__ StackPointer()));
@@ -2401,6 +2461,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
int saved_count = saves_fp.Count();
if (saved_count != 0) {
DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedV().list());
+ DCHECK_EQ(saved_count % 2, 0);
frame->AllocateSavedCalleeRegisterSlots(saved_count *
(kDoubleSize / kPointerSize));
}
@@ -2409,6 +2470,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
descriptor->CalleeSavedRegisters());
saved_count = saves.Count();
if (saved_count != 0) {
+ DCHECK_EQ(saved_count % 2, 0);
frame->AllocateSavedCalleeRegisterSlots(saved_count);
}
}
@@ -2419,7 +2481,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ AssertCspAligned();
}
- int fixed_frame_size = descriptor->CalculateFixedFrameSize();
int shrink_slots =
frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
@@ -2440,12 +2501,9 @@ void CodeGenerator::AssembleConstructFrame() {
__ Abort(kShouldNotDirectlyEnterOsrFunction);
// Unoptimized code jumps directly to this entrypoint while the
- // unoptimized
- // frame is still on the stack. Optimized code uses OSR values directly
- // from
- // the unoptimized frame. Thus, all that needs to be done is to allocate
- // the
- // remaining stack slots.
+ // unoptimized frame is still on the stack. Optimized code uses OSR values
+ // directly from the unoptimized frame. Thus, all that needs to be done is
+ // to allocate the remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
@@ -2466,9 +2524,9 @@ void CodeGenerator::AssembleConstructFrame() {
__ Mov(scratch, Operand(ExternalReference::address_of_real_stack_limit(
__ isolate())));
__ Ldr(scratch, MemOperand(scratch));
- __ Add(scratch, scratch, Operand(shrink_slots * kPointerSize));
+ __ Add(scratch, scratch, shrink_slots * kPointerSize);
__ Cmp(__ StackPointer(), scratch);
- __ B(cs, &done);
+ __ B(hs, &done);
}
if (!frame_access_state()->has_frame()) {
@@ -2482,7 +2540,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ AssertStackConsistency();
// Initialize the jssp because it is required for the runtime call.
__ Mov(jssp, csp);
- __ Move(cp, Smi::kZero);
+ __ Mov(cp, Smi::kZero);
__ CallRuntimeDelayed(zone(), Runtime::kThrowWasmStackOverflow);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
@@ -2493,46 +2551,53 @@ void CodeGenerator::AssembleConstructFrame() {
}
__ SetStackPointer(csp);
__ AssertStackConsistency();
- __ bind(&done);
+ __ Bind(&done);
}
// Build remainder of frame, including accounting for and filling-in
- // frame-specific header information, e.g. claiming the extra slot that
- // other platforms explicitly push for STUB frames and frames recording
- // their argument count.
- __ Claim(shrink_slots + (fixed_frame_size & 1));
- if (descriptor->PushArgumentCount()) {
- __ Str(kJavaScriptCallArgCountRegister,
- MemOperand(fp, OptimizedBuiltinFrameConstants::kArgCOffset));
- }
- bool is_stub_frame =
- !descriptor->IsJSFunctionCall() && !descriptor->IsCFunctionCall();
- if (is_stub_frame) {
- UseScratchRegisterScope temps(tasm());
- Register temp = temps.AcquireX();
- __ Mov(temp, StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
- __ Str(temp, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
+ // frame-specific header information, i.e. claiming the extra slot that
+ // other platforms explicitly push for STUB (code object) frames and frames
+ // recording their argument count.
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallJSFunction:
+ if (descriptor->PushArgumentCount()) {
+ __ Claim(shrink_slots + 1); // Claim extra slot for argc.
+ __ Str(kJavaScriptCallArgCountRegister,
+ MemOperand(fp, OptimizedBuiltinFrameConstants::kArgCOffset));
+ } else {
+ __ Claim(shrink_slots);
+ }
+ break;
+ case CallDescriptor::kCallCodeObject: {
+ UseScratchRegisterScope temps(tasm());
+ __ Claim(shrink_slots + 1); // Claim extra slot for frame type marker.
+ Register scratch = temps.AcquireX();
+ __ Mov(scratch,
+ StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
+ __ Str(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
+ } break;
+ case CallDescriptor::kCallAddress:
+ __ Claim(shrink_slots);
+ break;
+ default:
+ UNREACHABLE();
}
}
// Save FP registers.
CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
descriptor->CalleeSavedFPRegisters());
- int saved_count = saves_fp.Count();
- if (saved_count != 0) {
- DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedV().list());
- __ PushCPURegList(saves_fp);
- }
+ DCHECK_IMPLIES(saves_fp.Count() != 0,
+ saves_fp.list() == CPURegList::GetCalleeSavedV().list());
+ __ PushCPURegList(saves_fp);
+
// Save registers.
// TODO(palfia): TF save list is not in sync with
// CPURegList::GetCalleeSaved(): x30 is missing.
// DCHECK(saves.list() == CPURegList::GetCalleeSaved().list());
CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
descriptor->CalleeSavedRegisters());
- saved_count = saves.Count();
- if (saved_count != 0) {
- __ PushCPURegList(saves);
- }
+ __ PushCPURegList(saves);
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
@@ -2541,16 +2606,12 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
// Restore registers.
CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
descriptor->CalleeSavedRegisters());
- if (saves.Count() != 0) {
- __ PopCPURegList(saves);
- }
+ __ PopCPURegList(saves);
// Restore fp registers.
CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
descriptor->CalleeSavedFPRegisters());
- if (saves_fp.Count() != 0) {
- __ PopCPURegList(saves_fp);
- }
+ __ PopCPURegList(saves_fp);
unwinding_info_writer_.MarkBlockWillExit();
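One arm64-specific detail in this file's trap handling is the pop_count adjustment: csp must stay 16-byte aligned, so an odd count of 8-byte stack-parameter slots is rounded up before being dropped. The rounding in isolation:

    // Round an 8-byte-slot count up to an even number so that dropping the
    // slots keeps csp 16-byte aligned (the `pop_count & 1` trick above).
    int AlignedPopCount(int pop_count) {
      return pop_count + (pop_count & 1);
    }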
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index 8b4e81cf5f..47bc685b8b 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -67,7 +67,7 @@ class Arm64OperandGenerator final : public OperandGenerator {
if (node->opcode() == IrOpcode::kInt32Constant) {
return OpParameter<int32_t>(node);
}
- DCHECK(node->opcode() == IrOpcode::kInt64Constant);
+ DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
return OpParameter<int64_t>(node);
}
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index b553adf333..e11847e502 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -272,7 +272,7 @@ void BranchElimination::ControlPathConditions::Merge(
// Then we go through both lists in lock-step until we find
// the common tail.
while (head_ != other_condition) {
- DCHECK(condition_count_ > 0);
+ DCHECK_LT(0, condition_count_);
condition_count_--;
other_condition = other_condition->next;
head_ = head_->next;
diff --git a/deps/v8/src/compiler/bytecode-analysis.cc b/deps/v8/src/compiler/bytecode-analysis.cc
index 87ba02e30f..4ee30bcdf2 100644
--- a/deps/v8/src/compiler/bytecode-analysis.cc
+++ b/deps/v8/src/compiler/bytecode-analysis.cc
@@ -12,7 +12,9 @@ namespace v8 {
namespace internal {
namespace compiler {
-using namespace interpreter;
+using interpreter::Bytecode;
+using interpreter::Bytecodes;
+using interpreter::OperandType;
BytecodeLoopAssignments::BytecodeLoopAssignments(int parameter_count,
int register_count, Zone* zone)
@@ -74,7 +76,7 @@ BytecodeAnalysis::BytecodeAnalysis(Handle<BytecodeArray> bytecode_array,
namespace {
void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState& in_liveness,
- const BytecodeArrayAccessor& accessor) {
+ const interpreter::BytecodeArrayAccessor& accessor) {
int num_operands = Bytecodes::NumberOfOperands(bytecode);
const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
@@ -157,6 +159,7 @@ void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState& in_liveness,
in_liveness.MarkRegisterLive(r.index() + j);
}
}
+ break;
}
default:
DCHECK(!Bytecodes::IsRegisterInputOperandType(operand_types[i]));
@@ -167,7 +170,7 @@ void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState& in_liveness,
void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState& out_liveness,
BytecodeLivenessState* next_bytecode_in_liveness,
- const BytecodeArrayAccessor& accessor,
+ const interpreter::BytecodeArrayAccessor& accessor,
const BytecodeLivenessMap& liveness_map) {
int current_offset = accessor.current_offset();
const Handle<BytecodeArray>& bytecode_array = accessor.bytecode_array();
@@ -219,7 +222,7 @@ void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState& out_liveness,
}
void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments& assignments,
- const BytecodeArrayAccessor& accessor) {
+ const interpreter::BytecodeArrayAccessor& accessor) {
int num_operands = Bytecodes::NumberOfOperands(bytecode);
const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
@@ -260,7 +263,7 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
int osr_loop_end_offset =
osr_bailout_id.IsNone() ? -1 : osr_bailout_id.ToInt();
- BytecodeArrayRandomIterator iterator(bytecode_array(), zone());
+ interpreter::BytecodeArrayRandomIterator iterator(bytecode_array(), zone());
for (iterator.GoToEnd(); iterator.IsValid(); --iterator) {
Bytecode bytecode = iterator.current_bytecode();
int current_offset = iterator.current_offset();
@@ -495,7 +498,7 @@ std::ostream& BytecodeAnalysis::PrintLivenessTo(std::ostream& os) const {
#if DEBUG
bool BytecodeAnalysis::LivenessIsValid() {
- BytecodeArrayRandomIterator iterator(bytecode_array(), zone());
+ interpreter::BytecodeArrayRandomIterator iterator(bytecode_array(), zone());
BytecodeLivenessState previous_liveness(bytecode_array()->register_count(),
zone());
@@ -585,7 +588,7 @@ bool BytecodeAnalysis::LivenessIsValid() {
int loop_indent = 0;
- BytecodeArrayIterator forward_iterator(bytecode_array());
+ interpreter::BytecodeArrayIterator forward_iterator(bytecode_array());
for (; !forward_iterator.done(); forward_iterator.Advance()) {
int current_offset = forward_iterator.current_offset();
const BitVector& in_liveness =
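The top of this file's diff swaps a broad `using namespace interpreter;` directive for targeted using-declarations, qualifying the remaining uses explicitly. A minimal illustration of the pattern with toy namespaces (not V8 code):

    namespace interp {
    struct Bytecode {};
    struct Bytecodes {};
    struct Accessor {};  // intentionally not imported below
    }  // namespace interp

    using interp::Bytecode;   // only the names that are actually needed
    using interp::Bytecodes;  // get injected into this scope

    int main() {
      Bytecode b;          // OK: imported by name
      interp::Accessor a;  // still reachable, but only fully qualified
      (void)b;
      (void)a;
      return 0;
    }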
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 2d68ed8b03..ca3a70ab1f 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -476,8 +476,8 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
Zone* local_zone, Handle<SharedFunctionInfo> shared_info,
Handle<FeedbackVector> feedback_vector, BailoutId osr_offset,
JSGraph* jsgraph, CallFrequency invocation_frequency,
- SourcePositionTable* source_positions, int inlining_id,
- JSTypeHintLowering::Flags flags, bool stack_check)
+ SourcePositionTable* source_positions, Handle<Context> native_context,
+ int inlining_id, JSTypeHintLowering::Flags flags, bool stack_check)
: local_zone_(local_zone),
jsgraph_(jsgraph),
invocation_frequency_(invocation_frequency),
@@ -505,7 +505,8 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
exit_controls_(local_zone),
state_values_cache_(jsgraph),
source_positions_(source_positions),
- start_position_(shared_info->start_position(), inlining_id) {}
+ start_position_(shared_info->start_position(), inlining_id),
+ native_context_(native_context) {}
Node* BytecodeGraphBuilder::GetFunctionClosure() {
if (!function_closure_.is_set()) {
@@ -1254,15 +1255,17 @@ void BytecodeGraphBuilder::VisitLdaNamedProperty() {
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
const Operator* op = javascript()->LoadNamed(name, feedback);
+ JSTypeHintLowering::LoweringResult lowering =
+ TryBuildSimplifiedLoadNamed(op, object, feedback.slot());
+ if (lowering.IsExit()) return;
+
Node* node = nullptr;
- if (Node* simplified =
- TryBuildSimplifiedLoadNamed(op, object, feedback.slot())) {
- if (environment() == nullptr) return;
- node = simplified;
+ if (lowering.IsSideEffectFree()) {
+ node = lowering.value();
} else {
+ DCHECK(!lowering.Changed());
node = NewNode(op, object);
}
-
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
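The LoweringResult pattern above replaces the old convention of returning a Node* and separately probing environment() == nullptr with an explicit tri-state: exit, side-effect-free replacement, or no change. A self-contained sketch of such a result type (hypothetical; the real JSTypeHintLowering::LoweringResult may differ):

    #include <cassert>

    struct Node;  // opaque stand-in for compiler graph nodes

    class LoweringResult {
     public:
      static LoweringResult SideEffectFree(Node* value) {
        return LoweringResult(kSideEffectFree, value);
      }
      static LoweringResult NoChange() {
        return LoweringResult(kNoChange, nullptr);
      }
      static LoweringResult Exit() { return LoweringResult(kExit, nullptr); }

      bool IsExit() const { return kind_ == kExit; }
      bool IsSideEffectFree() const { return kind_ == kSideEffectFree; }
      bool Changed() const { return kind_ != kNoChange; }
      Node* value() const {
        assert(IsSideEffectFree());  // only a replacement carries a value
        return value_;
      }

     private:
      enum Kind { kNoChange, kSideEffectFree, kExit };
      LoweringResult(Kind kind, Node* value) : kind_(kind), value_(value) {}
      Kind kind_;
      Node* value_;
    };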
@@ -1275,20 +1278,21 @@ void BytecodeGraphBuilder::VisitLdaKeyedProperty() {
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
const Operator* op = javascript()->LoadProperty(feedback);
+ JSTypeHintLowering::LoweringResult lowering =
+ TryBuildSimplifiedLoadKeyed(op, object, key, feedback.slot());
+ if (lowering.IsExit()) return;
+
Node* node = nullptr;
- if (Node* simplified =
- TryBuildSimplifiedLoadKeyed(op, object, key, feedback.slot())) {
- if (environment() == nullptr) return;
- node = simplified;
+ if (lowering.IsSideEffectFree()) {
+ node = lowering.value();
} else {
+ DCHECK(!lowering.Changed());
node = NewNode(op, object, key);
}
-
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::BuildNamedStore(LanguageMode language_mode,
- StoreMode store_mode) {
+void BytecodeGraphBuilder::BuildNamedStore(StoreMode store_mode) {
PrepareEagerCheckpoint();
Node* value = environment()->LookupAccumulator();
Node* object =
@@ -1304,37 +1308,35 @@ void BytecodeGraphBuilder::BuildNamedStore(LanguageMode language_mode,
feedback.vector()->GetKind(feedback.slot()));
op = javascript()->StoreNamedOwn(name, feedback);
} else {
- DCHECK(store_mode == StoreMode::kNormal);
- DCHECK_EQ(feedback.vector()->GetLanguageMode(feedback.slot()),
- language_mode);
+ DCHECK_EQ(StoreMode::kNormal, store_mode);
+ LanguageMode language_mode =
+ feedback.vector()->GetLanguageMode(feedback.slot());
op = javascript()->StoreNamed(language_mode, name, feedback);
}
+ JSTypeHintLowering::LoweringResult lowering =
+ TryBuildSimplifiedStoreNamed(op, object, value, feedback.slot());
+ if (lowering.IsExit()) return;
+
Node* node = nullptr;
- if (Node* simplified =
- TryBuildSimplifiedStoreNamed(op, object, value, feedback.slot())) {
- if (environment() == nullptr) return;
- node = simplified;
+ if (lowering.IsSideEffectFree()) {
+ node = lowering.value();
} else {
+ DCHECK(!lowering.Changed());
node = NewNode(op, object, value);
}
-
environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::VisitStaNamedPropertySloppy() {
- BuildNamedStore(LanguageMode::SLOPPY, StoreMode::kNormal);
-}
-
-void BytecodeGraphBuilder::VisitStaNamedPropertyStrict() {
- BuildNamedStore(LanguageMode::STRICT, StoreMode::kNormal);
+void BytecodeGraphBuilder::VisitStaNamedProperty() {
+ BuildNamedStore(StoreMode::kNormal);
}
void BytecodeGraphBuilder::VisitStaNamedOwnProperty() {
- BuildNamedStore(LanguageMode::STRICT, StoreMode::kOwn);
+ BuildNamedStore(StoreMode::kOwn);
}
-void BytecodeGraphBuilder::BuildKeyedStore(LanguageMode language_mode) {
+void BytecodeGraphBuilder::VisitStaKeyedProperty() {
PrepareEagerCheckpoint();
Node* value = environment()->LookupAccumulator();
Node* object =
@@ -1343,29 +1345,25 @@ void BytecodeGraphBuilder::BuildKeyedStore(LanguageMode language_mode) {
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
VectorSlotPair feedback =
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
- DCHECK_EQ(feedback.vector()->GetLanguageMode(feedback.slot()), language_mode);
+ LanguageMode language_mode =
+ feedback.vector()->GetLanguageMode(feedback.slot());
const Operator* op = javascript()->StoreProperty(language_mode, feedback);
+ JSTypeHintLowering::LoweringResult lowering =
+ TryBuildSimplifiedStoreKeyed(op, object, key, value, feedback.slot());
+ if (lowering.IsExit()) return;
+
Node* node = nullptr;
- if (Node* simplified = TryBuildSimplifiedStoreKeyed(op, object, key, value,
- feedback.slot())) {
- if (environment() == nullptr) return;
- node = simplified;
+ if (lowering.IsSideEffectFree()) {
+ node = lowering.value();
} else {
+ DCHECK(!lowering.Changed());
node = NewNode(op, object, key, value);
}
environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::VisitStaKeyedPropertySloppy() {
- BuildKeyedStore(LanguageMode::SLOPPY);
-}
-
-void BytecodeGraphBuilder::VisitStaKeyedPropertyStrict() {
- BuildKeyedStore(LanguageMode::STRICT);
-}
-
void BytecodeGraphBuilder::VisitLdaModuleVariable() {
int32_t cell_index = bytecode_iterator().GetImmediateOperand(0);
uint32_t depth = bytecode_iterator().GetUnsignedImmediateOperand(1);
@@ -1482,11 +1480,11 @@ void BytecodeGraphBuilder::VisitCreateRestParameter() {
void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
Handle<String> constant_pattern =
Handle<String>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
- int literal_index = bytecode_iterator().GetIndexOperand(1);
+ int const slot_id = bytecode_iterator().GetIndexOperand(1);
+ VectorSlotPair pair = CreateVectorSlotPair(slot_id);
int literal_flags = bytecode_iterator().GetFlagOperand(2);
- Node* literal = NewNode(javascript()->CreateLiteralRegExp(
- constant_pattern, literal_flags, literal_index),
- GetFunctionClosure());
+ Node* literal = NewNode(
+ javascript()->CreateLiteralRegExp(constant_pattern, pair, literal_flags));
environment()->BindAccumulator(literal, Environment::kAttachFrameState);
}
@@ -1494,7 +1492,8 @@ void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
Handle<ConstantElementsPair> constant_elements =
Handle<ConstantElementsPair>::cast(
bytecode_iterator().GetConstantForIndexOperand(0));
- int literal_index = bytecode_iterator().GetIndexOperand(1);
+ int const slot_id = bytecode_iterator().GetIndexOperand(1);
+ VectorSlotPair pair = CreateVectorSlotPair(slot_id);
int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
int literal_flags =
interpreter::CreateArrayLiteralFlags::FlagsBits::decode(bytecode_flags);
@@ -1506,17 +1505,15 @@ void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
// TODO(mstarzinger): Thread through number of elements. The below number is
// only an estimate and does not match {ArrayLiteral::values::length}.
int number_of_elements = constant_elements->constant_values()->length();
- Node* literal = NewNode(
- javascript()->CreateLiteralArray(constant_elements, literal_flags,
- literal_index, number_of_elements),
- GetFunctionClosure());
+ Node* literal = NewNode(javascript()->CreateLiteralArray(
+ constant_elements, pair, literal_flags, number_of_elements));
environment()->BindAccumulator(literal, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitCreateEmptyArrayLiteral() {
- int literal_index = bytecode_iterator().GetIndexOperand(0);
- Node* literal = NewNode(javascript()->CreateEmptyLiteralArray(literal_index),
- GetFunctionClosure());
+ int const slot_id = bytecode_iterator().GetIndexOperand(0);
+ VectorSlotPair pair = CreateVectorSlotPair(slot_id);
+ Node* literal = NewNode(javascript()->CreateEmptyLiteralArray(pair));
environment()->BindAccumulator(literal);
}
@@ -1524,17 +1521,16 @@ void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
Handle<BoilerplateDescription> constant_properties =
Handle<BoilerplateDescription>::cast(
bytecode_iterator().GetConstantForIndexOperand(0));
- int literal_index = bytecode_iterator().GetIndexOperand(1);
+ int const slot_id = bytecode_iterator().GetIndexOperand(1);
+ VectorSlotPair pair = CreateVectorSlotPair(slot_id);
int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
int literal_flags =
interpreter::CreateObjectLiteralFlags::FlagsBits::decode(bytecode_flags);
// TODO(mstarzinger): Thread through number of properties. The below number is
// only an estimate and does not match {ObjectLiteral::properties_count}.
int number_of_properties = constant_properties->size();
- Node* literal = NewNode(
- javascript()->CreateLiteralObject(constant_properties, literal_flags,
- literal_index, number_of_properties),
- GetFunctionClosure());
+ Node* literal = NewNode(javascript()->CreateLiteralObject(
+ constant_properties, pair, literal_flags, number_of_properties));
environment()->BindRegister(bytecode_iterator().GetRegisterOperand(3),
literal, Environment::kAttachFrameState);
}
@@ -1545,7 +1541,20 @@ void BytecodeGraphBuilder::VisitCreateEmptyObjectLiteral() {
environment()->BindAccumulator(literal);
}
-Node* const* BytecodeGraphBuilder::GetCallArgumentsFromRegister(
+void BytecodeGraphBuilder::VisitGetTemplateObject() {
+ Handle<TemplateObjectDescription> description =
+ Handle<TemplateObjectDescription>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0));
+ // It's not observable when the template object is created, so we
+ // can just create it eagerly during graph building and bake in
+ // the JSArray constant here.
+ Node* template_object =
+ jsgraph()->HeapConstant(TemplateObjectDescription::GetTemplateObject(
+ description, native_context()));
+ environment()->BindAccumulator(template_object);
+}
+
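The comment above carries the key insight: nothing in the language can observe when a tagged template's object is allocated, so the optimizing compiler may allocate it during graph building and embed it as a heap constant. A hedged sketch of the per-call-site caching idea — CallSite and its fields are hypothetical stand-ins for V8's TemplateObjectDescription machinery:

    // Sketch of per-call-site template-object caching (illustrative only).
    #include <memory>
    #include <string>
    #include <vector>

    struct TemplateObject { std::vector<std::string> cooked; };

    struct CallSite {
      std::vector<std::string> cooked_strings;
      std::shared_ptr<TemplateObject> cached;  // allocated at most once
    };

    const TemplateObject& GetTemplateObject(CallSite& site) {
      if (!site.cached) {
        site.cached = std::make_shared<TemplateObject>(
            TemplateObject{site.cooked_strings});
      }
      return *site.cached;  // same object on every evaluation of the site
    }
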
+Node* const* BytecodeGraphBuilder::GetCallArgumentsFromRegisters(
Node* callee, Node* receiver, interpreter::Register first_arg,
int arg_count) {
// The arity of the Call node -- includes the callee, receiver and function
@@ -1583,8 +1592,8 @@ Node* BytecodeGraphBuilder::ProcessCallArguments(const Operator* call_op,
interpreter::Register first_arg = interpreter::Register(receiver.index() + 1);
int arg_count = static_cast<int>(reg_count) - 1;
- Node* const* call_args =
- GetCallArgumentsFromRegister(callee, receiver_node, first_arg, arg_count);
+ Node* const* call_args = GetCallArgumentsFromRegisters(callee, receiver_node,
+ first_arg, arg_count);
return ProcessCallArguments(call_op, call_args, 2 + arg_count);
}
@@ -1601,48 +1610,59 @@ void BytecodeGraphBuilder::BuildCall(ConvertReceiverMode receiver_mode,
CallFrequency frequency = ComputeCallFrequency(slot_id);
const Operator* op =
javascript()->Call(arg_count, frequency, feedback, receiver_mode);
+ JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedCall(
+ op, args, static_cast<int>(arg_count), feedback.slot());
+ if (lowering.IsExit()) return;
+
Node* node = nullptr;
- if (Node* simplified = TryBuildSimplifiedCall(
- op, args, static_cast<int>(arg_count), feedback.slot())) {
- if (environment() == nullptr) return;
- node = simplified;
+ if (lowering.IsSideEffectFree()) {
+ node = lowering.value();
} else {
+ DCHECK(!lowering.Changed());
node = ProcessCallArguments(op, args, static_cast<int>(arg_count));
}
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::BuildCallVarArgs(ConvertReceiverMode receiver_mode) {
- DCHECK_EQ(interpreter::Bytecodes::GetReceiverMode(
- bytecode_iterator().current_bytecode()),
- receiver_mode);
- Node* callee =
- environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(1);
- size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2);
- int const slot_id = bytecode_iterator().GetIndexOperand(3);
-
+Node* const* BytecodeGraphBuilder::ProcessCallVarArgs(
+ ConvertReceiverMode receiver_mode, Node* callee,
+ interpreter::Register first_reg, int arg_count) {
+ DCHECK_GE(arg_count, 0);
Node* receiver_node;
interpreter::Register first_arg;
- int arg_count;
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// The receiver is implicit (and undefined), the arguments are in
// consecutive registers.
receiver_node = jsgraph()->UndefinedConstant();
first_arg = first_reg;
- arg_count = static_cast<int>(reg_count);
} else {
// The receiver is the first register, followed by the arguments in the
// consecutive registers.
- DCHECK_GE(reg_count, 1);
receiver_node = environment()->LookupRegister(first_reg);
first_arg = interpreter::Register(first_reg.index() + 1);
- arg_count = static_cast<int>(reg_count) - 1;
}
+ Node* const* call_args = GetCallArgumentsFromRegisters(callee, receiver_node,
+ first_arg, arg_count);
+ return call_args;
+}
+
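ProcessCallVarArgs now owns the receiver bookkeeping that BuildCallVarArgs (below) used to carry inline: with an implicit receiver every register is an argument, otherwise the first register holds the receiver and the arguments follow it. A standalone restatement of the two layouts, where Layout is a helper invented purely for this sketch:

    // Standalone restatement of the two register layouts handled above.
    #include <cassert>

    struct Layout {
      int receiver_reg;   // -1 when the receiver is implicitly undefined
      int first_arg_reg;
      int arg_count;
    };

    Layout ComputeLayout(int first_reg, int reg_count, bool explicit_receiver) {
      if (!explicit_receiver) {
        // kNullOrUndefined: all reg_count registers are arguments.
        return {-1, first_reg, reg_count};
      }
      assert(reg_count >= 1);  // the receiver occupies the first register
      return {first_reg, first_reg + 1, reg_count - 1};
    }
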
+void BytecodeGraphBuilder::BuildCallVarArgs(ConvertReceiverMode receiver_mode) {
+ DCHECK_EQ(interpreter::Bytecodes::GetReceiverMode(
+ bytecode_iterator().current_bytecode()),
+ receiver_mode);
+ Node* callee =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(1);
+ size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2);
+ int const slot_id = bytecode_iterator().GetIndexOperand(3);
+
+ int arg_count = receiver_mode == ConvertReceiverMode::kNullOrUndefined
+ ? static_cast<int>(reg_count)
+ : static_cast<int>(reg_count) - 1;
Node* const* call_args =
- GetCallArgumentsFromRegister(callee, receiver_node, first_arg, arg_count);
+ ProcessCallVarArgs(receiver_mode, callee, first_reg, arg_count);
BuildCall(receiver_mode, call_args, static_cast<size_t>(2 + arg_count),
slot_id);
}
@@ -1736,20 +1756,24 @@ void BytecodeGraphBuilder::VisitCallWithSpread() {
size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2);
interpreter::Register first_arg = interpreter::Register(receiver.index() + 1);
int arg_count = static_cast<int>(reg_count) - 1;
- Node* const* args =
- GetCallArgumentsFromRegister(callee, receiver_node, first_arg, arg_count);
+ Node* const* args = GetCallArgumentsFromRegisters(callee, receiver_node,
+ first_arg, arg_count);
int const slot_id = bytecode_iterator().GetIndexOperand(3);
VectorSlotPair feedback = CreateVectorSlotPair(slot_id);
CallFrequency frequency = ComputeCallFrequency(slot_id);
const Operator* op = javascript()->CallWithSpread(
static_cast<int>(reg_count + 1), frequency, feedback);
+
+ JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedCall(
+ op, args, static_cast<int>(arg_count), feedback.slot());
+ if (lowering.IsExit()) return;
+
Node* node = nullptr;
- if (Node* simplified = TryBuildSimplifiedCall(
- op, args, static_cast<int>(arg_count), feedback.slot())) {
- if (environment() == nullptr) return;
- node = simplified;
+ if (lowering.IsSideEffectFree()) {
+ node = lowering.value();
} else {
+ DCHECK(!lowering.Changed());
node = ProcessCallArguments(op, args, 2 + arg_count);
}
environment()->BindAccumulator(node, Environment::kAttachFrameState);
@@ -1757,14 +1781,16 @@ void BytecodeGraphBuilder::VisitCallWithSpread() {
void BytecodeGraphBuilder::VisitCallJSRuntime() {
PrepareEagerCheckpoint();
- Node* callee =
- BuildLoadNativeContextField(bytecode_iterator().GetIndexOperand(0));
- interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
+ Node* callee = BuildLoadNativeContextField(
+ bytecode_iterator().GetNativeContextIndexOperand(0));
+ interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(1);
size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2);
+ int arg_count = static_cast<int>(reg_count);
- // Create node to perform the JS runtime call.
- const Operator* call = javascript()->Call(reg_count + 1);
- Node* value = ProcessCallArguments(call, callee, receiver, reg_count);
+ const Operator* call = javascript()->Call(2 + arg_count);
+ Node* const* call_args = ProcessCallVarArgs(
+ ConvertReceiverMode::kNullOrUndefined, callee, first_reg, arg_count);
+ Node* value = ProcessCallArguments(call, call_args, 2 + arg_count);
environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
@@ -1850,12 +1876,15 @@ void BytecodeGraphBuilder::VisitConstruct() {
int arg_count = static_cast<int>(reg_count);
Node* const* args = GetConstructArgumentsFromRegister(callee, new_target,
first_reg, arg_count);
+ JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedConstruct(
+ op, args, static_cast<int>(arg_count), feedback.slot());
+ if (lowering.IsExit()) return;
+
Node* node = nullptr;
- if (Node* simplified = TryBuildSimplifiedConstruct(
- op, args, static_cast<int>(arg_count), feedback.slot())) {
- if (environment() == nullptr) return;
- node = simplified;
+ if (lowering.IsSideEffectFree()) {
+ node = lowering.value();
} else {
+ DCHECK(!lowering.Changed());
node = ProcessConstructArguments(op, args, 2 + arg_count);
}
environment()->BindAccumulator(node, Environment::kAttachFrameState);
@@ -1878,12 +1907,15 @@ void BytecodeGraphBuilder::VisitConstructWithSpread() {
int arg_count = static_cast<int>(reg_count);
Node* const* args = GetConstructArgumentsFromRegister(callee, new_target,
first_reg, arg_count);
+ JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedConstruct(
+ op, args, static_cast<int>(arg_count), feedback.slot());
+ if (lowering.IsExit()) return;
+
Node* node = nullptr;
- if (Node* simplified = TryBuildSimplifiedConstruct(
- op, args, static_cast<int>(arg_count), feedback.slot())) {
- if (environment() == nullptr) return;
- node = simplified;
+ if (lowering.IsSideEffectFree()) {
+ node = lowering.value();
} else {
+ DCHECK(!lowering.Changed());
node = ProcessConstructArguments(op, args, 2 + arg_count);
}
environment()->BindAccumulator(node, Environment::kAttachFrameState);
@@ -1912,6 +1944,16 @@ void BytecodeGraphBuilder::VisitThrow() {
MergeControlToLeaveFunction(control);
}
+void BytecodeGraphBuilder::VisitAbort() {
+ BuildLoopExitsForFunctionExit(bytecode_analysis()->GetOutLivenessFor(
+ bytecode_iterator().current_offset()));
+ BailoutReason reason =
+ static_cast<BailoutReason>(bytecode_iterator().GetIndexOperand(0));
+ NewNode(simplified()->RuntimeAbort(reason));
+ Node* control = NewNode(common()->Throw());
+ MergeControlToLeaveFunction(control);
+}
+
void BytecodeGraphBuilder::VisitReThrow() {
BuildLoopExitsForFunctionExit(bytecode_analysis()->GetOutLivenessFor(
bytecode_iterator().current_offset()));
@@ -1932,7 +1974,7 @@ void BytecodeGraphBuilder::BuildHoleCheckAndThrow(
Node* node;
const Operator* op = javascript()->CallRuntime(runtime_id);
if (runtime_id == Runtime::kThrowReferenceError) {
- DCHECK(name != nullptr);
+ DCHECK_NOT_NULL(name);
node = NewNode(op, name);
} else {
DCHECK(runtime_id == Runtime::kThrowSuperAlreadyCalledError ||
@@ -1979,12 +2021,17 @@ void BytecodeGraphBuilder::BuildBinaryOp(const Operator* op) {
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* right = environment()->LookupAccumulator();
- Node* node = nullptr;
FeedbackSlot slot = feedback_vector()->ToSlot(
bytecode_iterator().GetIndexOperand(kBinaryOperationHintIndex));
- if (Node* simplified = TryBuildSimplifiedBinaryOp(op, left, right, slot)) {
- node = simplified;
+ JSTypeHintLowering::LoweringResult lowering =
+ TryBuildSimplifiedBinaryOp(op, left, right, slot);
+ if (lowering.IsExit()) return;
+
+ Node* node = nullptr;
+ if (lowering.IsSideEffectFree()) {
+ node = lowering.value();
} else {
+ DCHECK(!lowering.Changed());
node = NewNode(op, left, right);
}
@@ -2012,6 +2059,23 @@ CompareOperationHint BytecodeGraphBuilder::GetCompareOperationHint() {
return nexus.GetCompareOperationFeedback();
}
+// Helper function to derive the for-in mode from the recorded type feedback.
+ForInMode BytecodeGraphBuilder::GetForInMode(int operand_index) {
+ FeedbackSlot slot = feedback_vector()->ToSlot(
+ bytecode_iterator().GetIndexOperand(operand_index));
+ ForInICNexus nexus(feedback_vector(), slot);
+ switch (nexus.GetForInFeedback()) {
+ case ForInHint::kNone:
+ case ForInHint::kEnumCacheKeysAndIndices:
+ return ForInMode::kUseEnumCacheKeysAndIndices;
+ case ForInHint::kEnumCacheKeys:
+ return ForInMode::kUseEnumCacheKeys;
+ case ForInHint::kAny:
+ return ForInMode::kGeneric;
+ }
+ UNREACHABLE();
+}
+
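Note the optimistic treatment of kNone: if the loop has never executed there is no feedback, so the builder assumes the fastest enum-cache path and relies on deoptimization if that guess turns out wrong; only kAny (megamorphic) forces the generic path. The mapping, restated so it compiles on its own with both enums redeclared locally:

    // Local restatement of the hint-to-mode mapping above.
    enum class ForInHint { kNone, kEnumCacheKeysAndIndices, kEnumCacheKeys, kAny };
    enum class ForInMode { kUseEnumCacheKeysAndIndices, kUseEnumCacheKeys, kGeneric };

    ForInMode ModeForHint(ForInHint hint) {
      switch (hint) {
        case ForInHint::kNone:  // no feedback yet: assume the fast path
        case ForInHint::kEnumCacheKeysAndIndices:
          return ForInMode::kUseEnumCacheKeysAndIndices;
        case ForInHint::kEnumCacheKeys:
          return ForInMode::kUseEnumCacheKeys;
        case ForInHint::kAny:  // megamorphic: fall back to the generic path
          return ForInMode::kGeneric;
      }
      return ForInMode::kGeneric;  // unreachable; keeps compilers quiet
    }
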
CallFrequency BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const {
if (invocation_frequency_.IsUnknown()) return CallFrequency();
CallICNexus nexus(feedback_vector(), feedback_vector()->ToSlot(slot_id));
@@ -2019,6 +2083,58 @@ CallFrequency BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const {
invocation_frequency_.value());
}
+void BytecodeGraphBuilder::VisitNegate() {
+ PrepareEagerCheckpoint();
+
+ // TODO(adamk): Create a JSNegate operator, as this desugaring is
+ // invalid for BigInts.
+ const Operator* op = javascript()->Multiply();
+ Node* operand = environment()->LookupAccumulator();
+ Node* multiplier = jsgraph()->SmiConstant(-1);
+
+ FeedbackSlot slot = feedback_vector()->ToSlot(
+ bytecode_iterator().GetIndexOperand(kUnaryOperationHintIndex));
+ JSTypeHintLowering::LoweringResult lowering =
+ TryBuildSimplifiedBinaryOp(op, operand, multiplier, slot);
+ if (lowering.IsExit()) return;
+
+ Node* node = nullptr;
+ if (lowering.IsSideEffectFree()) {
+ node = lowering.value();
+ } else {
+ DCHECK(!lowering.Changed());
+ node = NewNode(op, operand, multiplier);
+ }
+
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
+}
+
+void BytecodeGraphBuilder::VisitBitwiseNot() {
+ PrepareEagerCheckpoint();
+
+ // TODO(adamk): Create a JSBitwiseNot operator, as this desugaring is
+ // invalid for BigInts.
+ const Operator* op = javascript()->BitwiseXor();
+ Node* operand = environment()->LookupAccumulator();
+ Node* xor_value = jsgraph()->SmiConstant(-1);
+
+ FeedbackSlot slot = feedback_vector()->ToSlot(
+ bytecode_iterator().GetIndexOperand(kUnaryOperationHintIndex));
+ JSTypeHintLowering::LoweringResult lowering =
+ TryBuildSimplifiedBinaryOp(op, operand, xor_value, slot);
+ if (lowering.IsExit()) return;
+
+ Node* node = nullptr;
+ if (lowering.IsSideEffectFree()) {
+ node = lowering.value();
+ } else {
+ DCHECK(!lowering.Changed());
+ node = NewNode(op, operand, xor_value);
+ }
+
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
+}
+
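Both unary visitors reuse the binary lowering machinery through algebraic identities: negation becomes multiplication by -1 and bitwise-not becomes xor with -1. As the TODOs note, this only holds for Numbers — mixing a BigInt operand with a Number constant would throw. A quick standalone check of the identities on two's-complement integers:

    // Standalone check of the two desugaring identities used above,
    // valid for Number-typed (two's-complement) inputs.
    #include <cassert>
    #include <cstdint>

    int main() {
      for (int32_t x : {-7, 0, 5, 12345}) {
        assert(-x == x * -1);    // VisitNegate: multiply by -1
        assert(~x == (x ^ -1));  // VisitBitwiseNot: xor with -1
      }
      return 0;
    }
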
void BytecodeGraphBuilder::VisitAdd() {
BuildBinaryOp(
javascript()->Add(GetBinaryOperationHint(kBinaryOperationHintIndex)));
@@ -2067,15 +2183,19 @@ void BytecodeGraphBuilder::BuildBinaryOpWithImmediate(const Operator* op) {
Node* left = environment()->LookupAccumulator();
Node* right = jsgraph()->Constant(bytecode_iterator().GetImmediateOperand(0));
- Node* node = nullptr;
FeedbackSlot slot = feedback_vector()->ToSlot(
bytecode_iterator().GetIndexOperand(kBinaryOperationSmiHintIndex));
- if (Node* simplified = TryBuildSimplifiedBinaryOp(op, left, right, slot)) {
- node = simplified;
+ JSTypeHintLowering::LoweringResult lowering =
+ TryBuildSimplifiedBinaryOp(op, left, right, slot);
+ if (lowering.IsExit()) return;
+
+ Node* node = nullptr;
+ if (lowering.IsSideEffectFree()) {
+ node = lowering.value();
} else {
+ DCHECK(!lowering.Changed());
node = NewNode(op, left, right);
}
-
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -2132,15 +2252,19 @@ void BytecodeGraphBuilder::VisitInc() {
Node* right = jsgraph()->Constant(-1);
const Operator* op = javascript()->Subtract();
- Node* node = nullptr;
FeedbackSlot slot = feedback_vector()->ToSlot(
bytecode_iterator().GetIndexOperand(kCountOperationHintIndex));
- if (Node* simplified = TryBuildSimplifiedBinaryOp(op, left, right, slot)) {
- node = simplified;
+ JSTypeHintLowering::LoweringResult lowering =
+ TryBuildSimplifiedBinaryOp(op, left, right, slot);
+ if (lowering.IsExit()) return;
+
+ Node* node = nullptr;
+ if (lowering.IsSideEffectFree()) {
+ node = lowering.value();
} else {
+ DCHECK(!lowering.Changed());
node = NewNode(op, left, right);
}
-
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -2150,15 +2274,19 @@ void BytecodeGraphBuilder::VisitDec() {
Node* right = jsgraph()->OneConstant();
const Operator* op = javascript()->Subtract();
- Node* node = nullptr;
FeedbackSlot slot = feedback_vector()->ToSlot(
bytecode_iterator().GetIndexOperand(kCountOperationHintIndex));
- if (Node* simplified = TryBuildSimplifiedBinaryOp(op, left, right, slot)) {
- node = simplified;
+ JSTypeHintLowering::LoweringResult lowering =
+ TryBuildSimplifiedBinaryOp(op, left, right, slot);
+ if (lowering.IsExit()) return;
+
+ Node* node = nullptr;
+ if (lowering.IsSideEffectFree()) {
+ node = lowering.value();
} else {
+ DCHECK(!lowering.Changed());
node = NewNode(op, left, right);
}
-
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -2214,10 +2342,15 @@ void BytecodeGraphBuilder::BuildCompareOp(const Operator* op) {
int slot_index = bytecode_iterator().GetIndexOperand(1);
FeedbackSlot slot = feedback_vector()->ToSlot(slot_index);
+ JSTypeHintLowering::LoweringResult lowering =
+ TryBuildSimplifiedBinaryOp(op, left, right, slot);
+ if (lowering.IsExit()) return;
+
Node* node = nullptr;
- if (Node* simplified = TryBuildSimplifiedBinaryOp(op, left, right, slot)) {
- node = simplified;
+ if (lowering.IsSideEffectFree()) {
+ node = lowering.value();
} else {
+ DCHECK(!lowering.Changed());
node = NewNode(op, left, right);
}
environment()->BindAccumulator(node, Environment::kAttachFrameState);
@@ -2344,7 +2477,6 @@ void BytecodeGraphBuilder::VisitTestTypeOf() {
break;
case interpreter::TestTypeOfFlags::LiteralFlag::kOther:
UNREACHABLE(); // Should never be emitted.
- result = nullptr;
break;
}
environment()->BindAccumulator(result);
@@ -2368,17 +2500,20 @@ void BytecodeGraphBuilder::VisitToNumber() {
PrepareEagerCheckpoint();
Node* object = environment()->LookupAccumulator();
- Node* node = nullptr;
FeedbackSlot slot =
- feedback_vector()->ToSlot(bytecode_iterator().GetIndexOperand(1));
- if (Node* simplified = TryBuildSimplifiedToNumber(object, slot)) {
- node = simplified;
+ feedback_vector()->ToSlot(bytecode_iterator().GetIndexOperand(0));
+ JSTypeHintLowering::LoweringResult lowering =
+ TryBuildSimplifiedToNumber(object, slot);
+
+ Node* node = nullptr;
+ if (lowering.IsSideEffectFree()) {
+ node = lowering.value();
} else {
+ DCHECK(!lowering.Changed());
node = NewNode(javascript()->ToNumber(), object);
}
- environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0), node,
- Environment::kAttachFrameState);
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitJump() { BuildJump(); }
@@ -2504,8 +2639,6 @@ DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
#undef DEBUG_BREAK
void BytecodeGraphBuilder::VisitIncBlockCounter() {
- DCHECK(FLAG_block_coverage);
-
Node* closure = GetFunctionClosure();
Node* coverage_array_slot =
jsgraph()->Constant(bytecode_iterator().GetIndexOperand(0));
@@ -2515,14 +2648,26 @@ void BytecodeGraphBuilder::VisitIncBlockCounter() {
NewNode(op, closure, coverage_array_slot);
}
-void BytecodeGraphBuilder::VisitForInPrepare() {
- PrepareEagerCheckpoint();
+void BytecodeGraphBuilder::VisitForInEnumerate() {
Node* receiver =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Node* prepare = NewNode(javascript()->ForInPrepare(), receiver);
+ Node* enumerator = NewNode(javascript()->ForInEnumerate(), receiver);
+ environment()->BindAccumulator(enumerator, Environment::kAttachFrameState);
+}
+
+void BytecodeGraphBuilder::VisitForInPrepare() {
+ PrepareEagerCheckpoint();
+ Node* enumerator = environment()->LookupAccumulator();
+
+ FeedbackSlot slot =
+ feedback_vector()->ToSlot(bytecode_iterator().GetIndexOperand(1));
+ JSTypeHintLowering::LoweringResult lowering =
+ TryBuildSimplifiedForInPrepare(enumerator, slot);
+ if (lowering.IsExit()) return;
+ DCHECK(!lowering.Changed());
+ Node* node = NewNode(javascript()->ForInPrepare(GetForInMode(1)), enumerator);
environment()->BindRegistersToProjections(
- bytecode_iterator().GetRegisterOperand(1), prepare,
- Environment::kAttachFrameState);
+ bytecode_iterator().GetRegisterOperand(0), node);
}
void BytecodeGraphBuilder::VisitForInContinue() {
@@ -2531,10 +2676,10 @@ void BytecodeGraphBuilder::VisitForInContinue() {
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* cache_length =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
- Node* exit_cond =
- NewNode(javascript()->LessThan(CompareOperationHint::kSignedSmall), index,
- cache_length);
- environment()->BindAccumulator(exit_cond, Environment::kAttachFrameState);
+ Node* exit_cond = NewNode(simplified()->SpeculativeNumberLessThan(
+ NumberOperationHint::kSignedSmall),
+ index, cache_length);
+ environment()->BindAccumulator(exit_cond);
}
void BytecodeGraphBuilder::VisitForInNext() {
@@ -2554,18 +2699,15 @@ void BytecodeGraphBuilder::VisitForInNext() {
index = graph()->NewNode(common()->TypeGuard(Type::UnsignedSmall()), index,
environment()->GetControlDependency());
- Node* node = nullptr;
FeedbackSlot slot =
feedback_vector()->ToSlot(bytecode_iterator().GetIndexOperand(3));
- if (Node* simplified = TryBuildSimplifiedForInNext(receiver, cache_array,
- cache_type, index, slot)) {
- if (environment() == nullptr) return;
- node = simplified;
- } else {
- node = NewNode(javascript()->ForInNext(), receiver, cache_array, cache_type,
- index);
- }
+ JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedForInNext(
+ receiver, cache_array, cache_type, index, slot);
+ if (lowering.IsExit()) return;
+ DCHECK(!lowering.Changed());
+ Node* node = NewNode(javascript()->ForInNext(GetForInMode(3)), receiver,
+ cache_array, cache_type, index);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -2828,146 +2970,147 @@ void BytecodeGraphBuilder::BuildJumpIfJSReceiver() {
BuildJumpIf(condition);
}
-Node* BytecodeGraphBuilder::TryBuildSimplifiedBinaryOp(const Operator* op,
- Node* left, Node* right,
- FeedbackSlot slot) {
+JSTypeHintLowering::LoweringResult
+BytecodeGraphBuilder::TryBuildSimplifiedBinaryOp(const Operator* op, Node* left,
+ Node* right,
+ FeedbackSlot slot) {
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
- Reduction early_reduction = type_hint_lowering().ReduceBinaryOperation(
- op, left, right, effect, control, slot);
- if (early_reduction.Changed()) {
- ApplyEarlyReduction(early_reduction);
- return early_reduction.replacement();
- }
- return nullptr;
+ JSTypeHintLowering::LoweringResult result =
+ type_hint_lowering().ReduceBinaryOperation(op, left, right, effect,
+ control, slot);
+ ApplyEarlyReduction(result);
+ return result;
}
-Node* BytecodeGraphBuilder::TryBuildSimplifiedForInNext(Node* receiver,
- Node* cache_array,
- Node* cache_type,
- Node* index,
- FeedbackSlot slot) {
+JSTypeHintLowering::LoweringResult
+BytecodeGraphBuilder::TryBuildSimplifiedForInNext(Node* receiver,
+ Node* cache_array,
+ Node* cache_type, Node* index,
+ FeedbackSlot slot) {
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
- Reduction early_reduction = type_hint_lowering().ReduceForInNextOperation(
- receiver, cache_array, cache_type, index, effect, control, slot);
- if (early_reduction.Changed()) {
- ApplyEarlyReduction(early_reduction);
- return early_reduction.replacement();
- }
- return nullptr;
+ JSTypeHintLowering::LoweringResult result =
+ type_hint_lowering().ReduceForInNextOperation(
+ receiver, cache_array, cache_type, index, effect, control, slot);
+ ApplyEarlyReduction(result);
+ return result;
}
-Node* BytecodeGraphBuilder::TryBuildSimplifiedToNumber(Node* value,
- FeedbackSlot slot) {
+JSTypeHintLowering::LoweringResult
+BytecodeGraphBuilder::TryBuildSimplifiedForInPrepare(Node* enumerator,
+ FeedbackSlot slot) {
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
- Reduction early_reduction = type_hint_lowering().ReduceToNumberOperation(
- value, effect, control, slot);
- if (early_reduction.Changed()) {
- ApplyEarlyReduction(early_reduction);
- return early_reduction.replacement();
- }
- return nullptr;
+ JSTypeHintLowering::LoweringResult result =
+ type_hint_lowering().ReduceForInPrepareOperation(enumerator, effect,
+ control, slot);
+ ApplyEarlyReduction(result);
+ return result;
}
-Node* BytecodeGraphBuilder::TryBuildSimplifiedCall(const Operator* op,
- Node* const* args,
- int arg_count,
- FeedbackSlot slot) {
+JSTypeHintLowering::LoweringResult
+BytecodeGraphBuilder::TryBuildSimplifiedToNumber(Node* value,
+ FeedbackSlot slot) {
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
- Reduction early_reduction = type_hint_lowering().ReduceCallOperation(
- op, args, arg_count, effect, control, slot);
- if (early_reduction.Changed()) {
- ApplyEarlyReduction(early_reduction);
- return early_reduction.replacement();
- }
- return nullptr;
+ JSTypeHintLowering::LoweringResult result =
+ type_hint_lowering().ReduceToNumberOperation(value, effect, control,
+ slot);
+ ApplyEarlyReduction(result);
+ return result;
}
-Node* BytecodeGraphBuilder::TryBuildSimplifiedConstruct(const Operator* op,
- Node* const* args,
- int arg_count,
- FeedbackSlot slot) {
+JSTypeHintLowering::LoweringResult BytecodeGraphBuilder::TryBuildSimplifiedCall(
+ const Operator* op, Node* const* args, int arg_count, FeedbackSlot slot) {
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
- Reduction early_reduction = type_hint_lowering().ReduceConstructOperation(
- op, args, arg_count, effect, control, slot);
- if (early_reduction.Changed()) {
- ApplyEarlyReduction(early_reduction);
- return early_reduction.replacement();
- }
- return nullptr;
+ JSTypeHintLowering::LoweringResult result =
+ type_hint_lowering().ReduceCallOperation(op, args, arg_count, effect,
+ control, slot);
+ ApplyEarlyReduction(result);
+ return result;
}
-Node* BytecodeGraphBuilder::TryBuildSimplifiedLoadNamed(const Operator* op,
- Node* receiver,
- FeedbackSlot slot) {
+JSTypeHintLowering::LoweringResult
+BytecodeGraphBuilder::TryBuildSimplifiedConstruct(const Operator* op,
+ Node* const* args,
+ int arg_count,
+ FeedbackSlot slot) {
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
- Reduction early_reduction = type_hint_lowering().ReduceLoadNamedOperation(
- op, receiver, effect, control, slot);
- if (early_reduction.Changed()) {
- ApplyEarlyReduction(early_reduction);
- return early_reduction.replacement();
- }
- return nullptr;
+ JSTypeHintLowering::LoweringResult result =
+ type_hint_lowering().ReduceConstructOperation(op, args, arg_count, effect,
+ control, slot);
+ ApplyEarlyReduction(result);
+ return result;
}
-Node* BytecodeGraphBuilder::TryBuildSimplifiedLoadKeyed(const Operator* op,
- Node* receiver,
- Node* key,
- FeedbackSlot slot) {
+JSTypeHintLowering::LoweringResult
+BytecodeGraphBuilder::TryBuildSimplifiedLoadNamed(const Operator* op,
+ Node* receiver,
+ FeedbackSlot slot) {
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
- Reduction early_reduction = type_hint_lowering().ReduceLoadKeyedOperation(
- op, receiver, key, effect, control, slot);
- if (early_reduction.Changed()) {
- ApplyEarlyReduction(early_reduction);
- return early_reduction.replacement();
- }
- return nullptr;
+ JSTypeHintLowering::LoweringResult early_reduction =
+ type_hint_lowering().ReduceLoadNamedOperation(op, receiver, effect,
+ control, slot);
+ ApplyEarlyReduction(early_reduction);
+ return early_reduction;
}
-Node* BytecodeGraphBuilder::TryBuildSimplifiedStoreNamed(const Operator* op,
- Node* receiver,
- Node* value,
- FeedbackSlot slot) {
+JSTypeHintLowering::LoweringResult
+BytecodeGraphBuilder::TryBuildSimplifiedLoadKeyed(const Operator* op,
+ Node* receiver, Node* key,
+ FeedbackSlot slot) {
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
- Reduction early_reduction = type_hint_lowering().ReduceStoreNamedOperation(
- op, receiver, value, effect, control, slot);
- if (early_reduction.Changed()) {
- ApplyEarlyReduction(early_reduction);
- return early_reduction.replacement();
- }
- return nullptr;
+ JSTypeHintLowering::LoweringResult result =
+ type_hint_lowering().ReduceLoadKeyedOperation(op, receiver, key, effect,
+ control, slot);
+ ApplyEarlyReduction(result);
+ return result;
}
-Node* BytecodeGraphBuilder::TryBuildSimplifiedStoreKeyed(const Operator* op,
- Node* receiver,
- Node* key, Node* value,
- FeedbackSlot slot) {
+JSTypeHintLowering::LoweringResult
+BytecodeGraphBuilder::TryBuildSimplifiedStoreNamed(const Operator* op,
+ Node* receiver, Node* value,
+ FeedbackSlot slot) {
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
- Reduction early_reduction = type_hint_lowering().ReduceStoreKeyedOperation(
- op, receiver, key, value, effect, control, slot);
- if (early_reduction.Changed()) {
- ApplyEarlyReduction(early_reduction);
- return early_reduction.replacement();
- }
- return nullptr;
+ JSTypeHintLowering::LoweringResult result =
+ type_hint_lowering().ReduceStoreNamedOperation(op, receiver, value,
+ effect, control, slot);
+ ApplyEarlyReduction(result);
+ return result;
}
-void BytecodeGraphBuilder::ApplyEarlyReduction(Reduction reduction) {
- Node* node = reduction.replacement();
- DCHECK(node->op()->HasProperty(Operator::kNoWrite));
- if (node->op()->EffectOutputCount() > 0) {
- environment()->UpdateEffectDependency(node);
- }
- if (IrOpcode::IsGraphTerminator(node->opcode())) {
- MergeControlToLeaveFunction(node);
+JSTypeHintLowering::LoweringResult
+BytecodeGraphBuilder::TryBuildSimplifiedStoreKeyed(const Operator* op,
+ Node* receiver, Node* key,
+ Node* value,
+ FeedbackSlot slot) {
+ Node* effect = environment()->GetEffectDependency();
+ Node* control = environment()->GetControlDependency();
+ JSTypeHintLowering::LoweringResult result =
+ type_hint_lowering().ReduceStoreKeyedOperation(op, receiver, key, value,
+ effect, control, slot);
+ ApplyEarlyReduction(result);
+ return result;
+}
+
+void BytecodeGraphBuilder::ApplyEarlyReduction(
+ JSTypeHintLowering::LoweringResult reduction) {
+ if (reduction.IsExit()) {
+ MergeControlToLeaveFunction(reduction.control());
+ } else if (reduction.IsSideEffectFree()) {
+ environment()->UpdateEffectDependency(reduction.effect());
+ environment()->UpdateControlDependency(reduction.control());
+ } else {
+ DCHECK(!reduction.Changed());
+    // At the moment, we assume side-effect-free reductions. To support
+    // side effects, we would have to invalidate the eager checkpoint,
+    // so that deoptimization does not repeat the side effect.
}
}
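The rewritten ApplyEarlyReduction is the single place where a lowering touches the environment: an exit reroutes control to the function exit, a side-effect-free result rewires both the effect and control dependencies, and an unchanged result leaves the eager checkpoint intact. A sketch of that dispatch with hypothetical stand-ins for the V8 types:

    // Three-way environment update, as performed by ApplyEarlyReduction.
    struct Node {};
    struct Env { Node* effect; Node* control; };

    enum class State { kNoChange, kSideEffectFree, kExit };

    void Apply(State state, Node* effect, Node* control, Env& env,
               void (*merge_to_exit)(Node*)) {
      switch (state) {
        case State::kExit:  // dead end: route control to the function exit
          merge_to_exit(control);
          break;
        case State::kSideEffectFree:  // adopt the lowering's dependencies
          env.effect = effect;
          env.control = control;
          break;
        case State::kNoChange:  // nothing emitted; checkpoint stays valid
          break;
      }
    }
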
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index 0ec8a1f473..94fbd5099f 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -29,7 +29,7 @@ class BytecodeGraphBuilder {
Zone* local_zone, Handle<SharedFunctionInfo> shared,
Handle<FeedbackVector> feedback_vector, BailoutId osr_offset,
JSGraph* jsgraph, CallFrequency invocation_frequency,
- SourcePositionTable* source_positions,
+ SourcePositionTable* source_positions, Handle<Context> native_context,
int inlining_id = SourcePosition::kNotInlined,
JSTypeHintLowering::Flags flags = JSTypeHintLowering::kNoFlags,
bool stack_check = true);
@@ -120,9 +120,12 @@ class BytecodeGraphBuilder {
Node** EnsureInputBufferSize(int size);
- Node* const* GetCallArgumentsFromRegister(Node* callee, Node* receiver,
- interpreter::Register first_arg,
- int arg_count);
+ Node* const* GetCallArgumentsFromRegisters(Node* callee, Node* receiver,
+ interpreter::Register first_arg,
+ int arg_count);
+ Node* const* ProcessCallVarArgs(ConvertReceiverMode receiver_mode,
+ Node* callee, interpreter::Register first_reg,
+ int arg_count);
Node* ProcessCallArguments(const Operator* call_op, Node* const* args,
int arg_count);
Node* ProcessCallArguments(const Operator* call_op, Node* callee,
@@ -157,8 +160,7 @@ class BytecodeGraphBuilder {
// Store value to the receiver without checking the prototype chain.
kOwn,
};
- void BuildNamedStore(LanguageMode language_mode, StoreMode store_mode);
- void BuildKeyedStore(LanguageMode language_mode);
+ void BuildNamedStore(StoreMode store_mode);
void BuildLdaLookupSlot(TypeofMode typeof_mode);
void BuildLdaLookupContextSlot(TypeofMode typeof_mode);
void BuildLdaLookupGlobalSlot(TypeofMode typeof_mode);
@@ -178,31 +180,36 @@ class BytecodeGraphBuilder {
void BuildHoleCheckAndThrow(Node* condition, Runtime::FunctionId runtime_id,
Node* name = nullptr);
- // Optional early lowering to the simplified operator level. Returns the node
- // representing the lowered operation or {nullptr} if no lowering available.
- // Note that the result has already been wired into the environment just like
+ // Optional early lowering to the simplified operator level. Note that
+ // the result has already been wired into the environment just like
// any other invocation of {NewNode} would do.
- Node* TryBuildSimplifiedBinaryOp(const Operator* op, Node* left, Node* right,
- FeedbackSlot slot);
- Node* TryBuildSimplifiedForInNext(Node* receiver, Node* cache_array,
- Node* cache_type, Node* index,
- FeedbackSlot slot);
- Node* TryBuildSimplifiedToNumber(Node* input, FeedbackSlot slot);
- Node* TryBuildSimplifiedCall(const Operator* op, Node* const* args,
- int arg_count, FeedbackSlot slot);
- Node* TryBuildSimplifiedConstruct(const Operator* op, Node* const* args,
- int arg_count, FeedbackSlot slot);
- Node* TryBuildSimplifiedLoadNamed(const Operator* op, Node* receiver,
- FeedbackSlot slot);
- Node* TryBuildSimplifiedLoadKeyed(const Operator* op, Node* receiver,
- Node* key, FeedbackSlot slot);
- Node* TryBuildSimplifiedStoreNamed(const Operator* op, Node* receiver,
- Node* value, FeedbackSlot slot);
- Node* TryBuildSimplifiedStoreKeyed(const Operator* op, Node* receiver,
- Node* key, Node* value, FeedbackSlot slot);
+ JSTypeHintLowering::LoweringResult TryBuildSimplifiedBinaryOp(
+ const Operator* op, Node* left, Node* right, FeedbackSlot slot);
+ JSTypeHintLowering::LoweringResult TryBuildSimplifiedForInNext(
+ Node* receiver, Node* cache_array, Node* cache_type, Node* index,
+ FeedbackSlot slot);
+ JSTypeHintLowering::LoweringResult TryBuildSimplifiedForInPrepare(
+ Node* receiver, FeedbackSlot slot);
+ JSTypeHintLowering::LoweringResult TryBuildSimplifiedToNumber(
+ Node* input, FeedbackSlot slot);
+ JSTypeHintLowering::LoweringResult TryBuildSimplifiedCall(const Operator* op,
+ Node* const* args,
+ int arg_count,
+ FeedbackSlot slot);
+ JSTypeHintLowering::LoweringResult TryBuildSimplifiedConstruct(
+ const Operator* op, Node* const* args, int arg_count, FeedbackSlot slot);
+ JSTypeHintLowering::LoweringResult TryBuildSimplifiedLoadNamed(
+ const Operator* op, Node* receiver, FeedbackSlot slot);
+ JSTypeHintLowering::LoweringResult TryBuildSimplifiedLoadKeyed(
+ const Operator* op, Node* receiver, Node* key, FeedbackSlot slot);
+ JSTypeHintLowering::LoweringResult TryBuildSimplifiedStoreNamed(
+ const Operator* op, Node* receiver, Node* value, FeedbackSlot slot);
+ JSTypeHintLowering::LoweringResult TryBuildSimplifiedStoreKeyed(
+ const Operator* op, Node* receiver, Node* key, Node* value,
+ FeedbackSlot slot);
// Applies the given early reduction onto the current environment.
- void ApplyEarlyReduction(Reduction reduction);
+ void ApplyEarlyReduction(JSTypeHintLowering::LoweringResult reduction);
// Check the context chain for extensions, for lookup fast paths.
Environment* CheckContextExtensions(uint32_t depth);
@@ -215,6 +222,9 @@ class BytecodeGraphBuilder {
// type feedback.
CompareOperationHint GetCompareOperationHint();
+ // Helper function to derive the for-in mode from the recorded type feedback.
+ ForInMode GetForInMode(int operand_index);
+
// Helper function to compute call frequency from the recorded type
// feedback.
CallFrequency ComputeCallFrequency(int slot_id) const;
@@ -337,6 +347,8 @@ class BytecodeGraphBuilder {
needs_eager_checkpoint_ = value;
}
+ Handle<Context> native_context() const { return native_context_; }
+
#define DECLARE_VISIT_BYTECODE(name, ...) void Visit##name();
BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
#undef DECLARE_VISIT_BYTECODE
@@ -387,9 +399,13 @@ class BytecodeGraphBuilder {
SourcePosition const start_position_;
+ // The native context for which we optimize.
+ Handle<Context> const native_context_;
+
static int const kBinaryOperationHintIndex = 1;
static int const kCountOperationHintIndex = 0;
static int const kBinaryOperationSmiHintIndex = 1;
+ static int const kUnaryOperationHintIndex = 0;
DISALLOW_COPY_AND_ASSIGN(BytecodeGraphBuilder);
};
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index c70b4c1924..9523ef4e08 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -171,7 +171,7 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(
#endif
// Add return location(s).
- CHECK(locations.return_count_ <= 2);
+ CHECK_GE(2, locations.return_count_);
if (locations.return_count_ > 0) {
locations.AddReturn(LinkageLocation::ForRegister(kReturnRegister0.code(),
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 92866e15e1..a0ed0af93d 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -43,35 +43,35 @@ namespace compiler {
CodeAssemblerState::CodeAssemblerState(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
- Code::Flags flags, const char* name, size_t result_size)
+ Code::Kind kind, const char* name, size_t result_size)
: CodeAssemblerState(
isolate, zone,
Linkage::GetStubCallDescriptor(
isolate, zone, descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kNoFlags, Operator::kNoProperties,
MachineType::AnyTagged(), result_size),
- flags, name) {}
+ kind, name) {}
CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
- int parameter_count, Code::Flags flags,
+ int parameter_count, Code::Kind kind,
const char* name)
- : CodeAssemblerState(isolate, zone,
- Linkage::GetJSCallDescriptor(
- zone, false, parameter_count,
- Code::ExtractKindFromFlags(flags) == Code::BUILTIN
- ? CallDescriptor::kPushArgumentCount
- : CallDescriptor::kNoFlags),
- flags, name) {}
+ : CodeAssemblerState(
+ isolate, zone,
+ Linkage::GetJSCallDescriptor(zone, false, parameter_count,
+ kind == Code::BUILTIN
+ ? CallDescriptor::kPushArgumentCount
+ : CallDescriptor::kNoFlags),
+ kind, name) {}
CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
CallDescriptor* call_descriptor,
- Code::Flags flags, const char* name)
+ Code::Kind kind, const char* name)
: raw_assembler_(new RawMachineAssembler(
isolate, new (zone) Graph(zone), call_descriptor,
MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements())),
- flags_(flags),
+ kind_(kind),
name_(name),
code_generated_(false),
variables_(zone) {}
@@ -161,7 +161,7 @@ Handle<Code> CodeAssembler::GenerateCode(CodeAssemblerState* state) {
Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
rasm->isolate(), rasm->call_descriptor(), rasm->graph(), schedule,
- state->flags_, state->name_, should_optimize_jumps ? &jump_opt : nullptr);
+ state->kind_, state->name_, should_optimize_jumps ? &jump_opt : nullptr);
if (jump_opt.is_optimizable()) {
jump_opt.set_optimizing();
@@ -169,7 +169,7 @@ Handle<Code> CodeAssembler::GenerateCode(CodeAssemblerState* state) {
// Regenerate machine code
code = Pipeline::GenerateCodeForCodeStub(
rasm->isolate(), rasm->call_descriptor(), rasm->graph(), schedule,
- state->flags_, state->name_, &jump_opt);
+ state->kind_, state->name_, &jump_opt);
}
state->code_generated_ = true;
@@ -438,6 +438,27 @@ TNode<WordT> CodeAssembler::IntPtrSub(SloppyTNode<WordT> left,
return UncheckedCast<IntPtrT>(raw_assembler()->IntPtrSub(left, right));
}
+TNode<WordT> CodeAssembler::IntPtrMul(SloppyTNode<WordT> left,
+ SloppyTNode<WordT> right) {
+ intptr_t left_constant;
+ bool is_left_constant = ToIntPtrConstant(left, left_constant);
+ intptr_t right_constant;
+ bool is_right_constant = ToIntPtrConstant(right, right_constant);
+ if (is_left_constant) {
+ if (is_right_constant) {
+ return IntPtrConstant(left_constant * right_constant);
+ }
+ if (left_constant == 1) {
+ return right;
+ }
+ } else if (is_right_constant) {
+ if (right_constant == 1) {
+ return left;
+ }
+ }
+ return UncheckedCast<IntPtrT>(raw_assembler()->IntPtrMul(left, right));
+}
+
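IntPtrMul and the Word* helpers added below all share one shape: fold when both operands are compile-time constants, apply a cheap algebraic identity when only one is (x * 1, x | 0, x << 0, ...), and otherwise emit the machine operator. Note that the And/Xor variants only handle the both-constant case here, leaving identity operands to later machine-level optimization. A sketch of the shape under hypothetical names, with Expr standing in for Node*:

    // Construction-time folding pattern shared by the helpers below;
    // Expr and EmitFn are stand-ins invented for this sketch only.
    #include <cstdint>
    #include <optional>

    struct Expr { std::optional<intptr_t> constant; };
    using EmitFn = Expr (*)(Expr, Expr);

    Expr Mul(Expr left, Expr right, EmitFn emit_machine_op) {
      if (left.constant && right.constant)
        return Expr{*left.constant * *right.constant};  // fold outright
      if (left.constant && *left.constant == 1) return right;   // 1 * x
      if (right.constant && *right.constant == 1) return left;  // x * 1
      return emit_machine_op(left, right);  // no shortcut available
    }
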
TNode<WordT> CodeAssembler::WordShl(SloppyTNode<WordT> value, int shift) {
return (shift != 0) ? WordShl(value, IntPtrConstant(shift)) : value;
}
@@ -450,6 +471,318 @@ TNode<Word32T> CodeAssembler::Word32Shr(SloppyTNode<Word32T> value, int shift) {
return (shift != 0) ? Word32Shr(value, Int32Constant(shift)) : value;
}
+TNode<WordT> CodeAssembler::WordOr(SloppyTNode<WordT> left,
+ SloppyTNode<WordT> right) {
+ intptr_t left_constant;
+ bool is_left_constant = ToIntPtrConstant(left, left_constant);
+ intptr_t right_constant;
+ bool is_right_constant = ToIntPtrConstant(right, right_constant);
+ if (is_left_constant) {
+ if (is_right_constant) {
+ return IntPtrConstant(left_constant | right_constant);
+ }
+ if (left_constant == 0) {
+ return right;
+ }
+ } else if (is_right_constant) {
+ if (right_constant == 0) {
+ return left;
+ }
+ }
+ return UncheckedCast<WordT>(raw_assembler()->WordOr(left, right));
+}
+
+TNode<WordT> CodeAssembler::WordAnd(SloppyTNode<WordT> left,
+ SloppyTNode<WordT> right) {
+ intptr_t left_constant;
+ bool is_left_constant = ToIntPtrConstant(left, left_constant);
+ intptr_t right_constant;
+ bool is_right_constant = ToIntPtrConstant(right, right_constant);
+ if (is_left_constant) {
+ if (is_right_constant) {
+ return IntPtrConstant(left_constant & right_constant);
+ }
+ }
+ return UncheckedCast<WordT>(raw_assembler()->WordAnd(left, right));
+}
+
+TNode<WordT> CodeAssembler::WordXor(SloppyTNode<WordT> left,
+ SloppyTNode<WordT> right) {
+ intptr_t left_constant;
+ bool is_left_constant = ToIntPtrConstant(left, left_constant);
+ intptr_t right_constant;
+ bool is_right_constant = ToIntPtrConstant(right, right_constant);
+ if (is_left_constant) {
+ if (is_right_constant) {
+ return IntPtrConstant(left_constant ^ right_constant);
+ }
+ }
+ return UncheckedCast<WordT>(raw_assembler()->WordXor(left, right));
+}
+
+TNode<WordT> CodeAssembler::WordShl(SloppyTNode<WordT> left,
+ SloppyTNode<IntegralT> right) {
+ intptr_t left_constant;
+ bool is_left_constant = ToIntPtrConstant(left, left_constant);
+ intptr_t right_constant;
+ bool is_right_constant = ToIntPtrConstant(right, right_constant);
+ if (is_left_constant) {
+ if (is_right_constant) {
+ return IntPtrConstant(left_constant << right_constant);
+ }
+ } else if (is_right_constant) {
+ if (right_constant == 0) {
+ return left;
+ }
+ }
+ return UncheckedCast<WordT>(raw_assembler()->WordShl(left, right));
+}
+
+TNode<WordT> CodeAssembler::WordShr(SloppyTNode<WordT> left,
+ SloppyTNode<IntegralT> right) {
+ intptr_t left_constant;
+ bool is_left_constant = ToIntPtrConstant(left, left_constant);
+ intptr_t right_constant;
+ bool is_right_constant = ToIntPtrConstant(right, right_constant);
+ if (is_left_constant) {
+ if (is_right_constant) {
+ return IntPtrConstant(static_cast<uintptr_t>(left_constant) >>
+ right_constant);
+ }
+ } else if (is_right_constant) {
+ if (right_constant == 0) {
+ return left;
+ }
+ }
+ return UncheckedCast<WordT>(raw_assembler()->WordShr(left, right));
+}
+
+TNode<WordT> CodeAssembler::WordSar(SloppyTNode<WordT> left,
+ SloppyTNode<IntegralT> right) {
+ intptr_t left_constant;
+ bool is_left_constant = ToIntPtrConstant(left, left_constant);
+ intptr_t right_constant;
+ bool is_right_constant = ToIntPtrConstant(right, right_constant);
+ if (is_left_constant) {
+ if (is_right_constant) {
+ return IntPtrConstant(left_constant >> right_constant);
+ }
+ } else if (is_right_constant) {
+ if (right_constant == 0) {
+ return left;
+ }
+ }
+ return UncheckedCast<WordT>(raw_assembler()->WordSar(left, right));
+}
+
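The only difference between the WordShr and WordSar folds is the cast on the left operand: Shr folds through uintptr_t, so vacated bits fill with zeros, while Sar shifts the signed value and preserves the sign. A quick standalone check, assuming the usual arithmetic right shift on signed types:

    // Logical vs. arithmetic right shift, as distinguished by the casts
    // in WordShr/WordSar above; assumes two's-complement targets.
    #include <cassert>
    #include <cstdint>

    int main() {
      intptr_t x = -8;
      // WordShr folds via uintptr_t: the sign bit is shifted in as data.
      assert((static_cast<uintptr_t>(x) >> 1) == (UINTPTR_MAX >> 1) - 3);
      // WordSar shifts the signed value: the sign is preserved.
      assert((x >> 1) == -4);
      return 0;
    }
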
+TNode<Word32T> CodeAssembler::Word32Or(SloppyTNode<Word32T> left,
+ SloppyTNode<Word32T> right) {
+ int32_t left_constant;
+ bool is_left_constant = ToInt32Constant(left, left_constant);
+ int32_t right_constant;
+ bool is_right_constant = ToInt32Constant(right, right_constant);
+ if (is_left_constant) {
+ if (is_right_constant) {
+ return Int32Constant(left_constant | right_constant);
+ }
+ if (left_constant == 0) {
+ return right;
+ }
+ } else if (is_right_constant) {
+ if (right_constant == 0) {
+ return left;
+ }
+ }
+ return UncheckedCast<Word32T>(raw_assembler()->Word32Or(left, right));
+}
+
+TNode<Word32T> CodeAssembler::Word32And(SloppyTNode<Word32T> left,
+ SloppyTNode<Word32T> right) {
+ int32_t left_constant;
+ bool is_left_constant = ToInt32Constant(left, left_constant);
+ int32_t right_constant;
+ bool is_right_constant = ToInt32Constant(right, right_constant);
+ if (is_left_constant) {
+ if (is_right_constant) {
+ return Int32Constant(left_constant & right_constant);
+ }
+ }
+ return UncheckedCast<Word32T>(raw_assembler()->Word32And(left, right));
+}
+
+TNode<Word32T> CodeAssembler::Word32Xor(SloppyTNode<Word32T> left,
+ SloppyTNode<Word32T> right) {
+ int32_t left_constant;
+ bool is_left_constant = ToInt32Constant(left, left_constant);
+ int32_t right_constant;
+ bool is_right_constant = ToInt32Constant(right, right_constant);
+ if (is_left_constant) {
+ if (is_right_constant) {
+ return Int32Constant(left_constant ^ right_constant);
+ }
+ }
+ return UncheckedCast<Word32T>(raw_assembler()->Word32Xor(left, right));
+}
+
+TNode<Word32T> CodeAssembler::Word32Shl(SloppyTNode<Word32T> left,
+ SloppyTNode<Word32T> right) {
+ int32_t left_constant;
+ bool is_left_constant = ToInt32Constant(left, left_constant);
+ int32_t right_constant;
+ bool is_right_constant = ToInt32Constant(right, right_constant);
+ if (is_left_constant) {
+ if (is_right_constant) {
+ return Int32Constant(left_constant << right_constant);
+ }
+ } else if (is_right_constant) {
+ if (right_constant == 0) {
+ return left;
+ }
+ }
+ return UncheckedCast<Word32T>(raw_assembler()->Word32Shl(left, right));
+}
+
+TNode<Word32T> CodeAssembler::Word32Shr(SloppyTNode<Word32T> left,
+ SloppyTNode<Word32T> right) {
+ int32_t left_constant;
+ bool is_left_constant = ToInt32Constant(left, left_constant);
+ int32_t right_constant;
+ bool is_right_constant = ToInt32Constant(right, right_constant);
+ if (is_left_constant) {
+ if (is_right_constant) {
+ return Int32Constant(static_cast<uint32_t>(left_constant) >>
+ right_constant);
+ }
+ } else if (is_right_constant) {
+ if (right_constant == 0) {
+ return left;
+ }
+ }
+ return UncheckedCast<Word32T>(raw_assembler()->Word32Shr(left, right));
+}
+
+TNode<Word32T> CodeAssembler::Word32Sar(SloppyTNode<Word32T> left,
+ SloppyTNode<Word32T> right) {
+ int32_t left_constant;
+ bool is_left_constant = ToInt32Constant(left, left_constant);
+ int32_t right_constant;
+ bool is_right_constant = ToInt32Constant(right, right_constant);
+ if (is_left_constant) {
+ if (is_right_constant) {
+ return Int32Constant(left_constant >> right_constant);
+ }
+ } else if (is_right_constant) {
+ if (right_constant == 0) {
+ return left;
+ }
+ }
+ return UncheckedCast<Word32T>(raw_assembler()->Word32Sar(left, right));
+}
+
+TNode<Word64T> CodeAssembler::Word64Or(SloppyTNode<Word64T> left,
+ SloppyTNode<Word64T> right) {
+ int64_t left_constant;
+ bool is_left_constant = ToInt64Constant(left, left_constant);
+ int64_t right_constant;
+ bool is_right_constant = ToInt64Constant(right, right_constant);
+ if (is_left_constant) {
+ if (is_right_constant) {
+ return Int64Constant(left_constant | right_constant);
+ }
+ if (left_constant == 0) {
+ return right;
+ }
+ } else if (is_right_constant) {
+ if (right_constant == 0) {
+ return left;
+ }
+ }
+ return UncheckedCast<Word64T>(raw_assembler()->Word64Or(left, right));
+}
+
+TNode<Word64T> CodeAssembler::Word64And(SloppyTNode<Word64T> left,
+ SloppyTNode<Word64T> right) {
+ int64_t left_constant;
+ bool is_left_constant = ToInt64Constant(left, left_constant);
+ int64_t right_constant;
+ bool is_right_constant = ToInt64Constant(right, right_constant);
+ if (is_left_constant) {
+ if (is_right_constant) {
+ return Int64Constant(left_constant & right_constant);
+ }
+ }
+ return UncheckedCast<Word64T>(raw_assembler()->Word64And(left, right));
+}
+
+TNode<Word64T> CodeAssembler::Word64Xor(SloppyTNode<Word64T> left,
+ SloppyTNode<Word64T> right) {
+ int64_t left_constant;
+ bool is_left_constant = ToInt64Constant(left, left_constant);
+ int64_t right_constant;
+ bool is_right_constant = ToInt64Constant(right, right_constant);
+ if (is_left_constant) {
+ if (is_right_constant) {
+ return Int64Constant(left_constant ^ right_constant);
+ }
+ }
+ return UncheckedCast<Word64T>(raw_assembler()->Word64Xor(left, right));
+}
+
+TNode<Word64T> CodeAssembler::Word64Shl(SloppyTNode<Word64T> left,
+ SloppyTNode<Word64T> right) {
+ int64_t left_constant;
+ bool is_left_constant = ToInt64Constant(left, left_constant);
+ int64_t right_constant;
+ bool is_right_constant = ToInt64Constant(right, right_constant);
+ if (is_left_constant) {
+ if (is_right_constant) {
+ return Int64Constant(left_constant << right_constant);
+ }
+ } else if (is_right_constant) {
+ if (right_constant == 0) {
+ return left;
+ }
+ }
+ return UncheckedCast<Word64T>(raw_assembler()->Word64Shl(left, right));
+}
+
+TNode<Word64T> CodeAssembler::Word64Shr(SloppyTNode<Word64T> left,
+ SloppyTNode<Word64T> right) {
+ int64_t left_constant;
+ bool is_left_constant = ToInt64Constant(left, left_constant);
+ int64_t right_constant;
+ bool is_right_constant = ToInt64Constant(right, right_constant);
+ if (is_left_constant) {
+ if (is_right_constant) {
+ return Int64Constant(static_cast<uint64_t>(left_constant) >>
+ right_constant);
+ }
+ } else if (is_right_constant) {
+ if (right_constant == 0) {
+ return left;
+ }
+ }
+ return UncheckedCast<Word64T>(raw_assembler()->Word64Shr(left, right));
+}
+
+TNode<Word64T> CodeAssembler::Word64Sar(SloppyTNode<Word64T> left,
+ SloppyTNode<Word64T> right) {
+ int64_t left_constant;
+ bool is_left_constant = ToInt64Constant(left, left_constant);
+ int64_t right_constant;
+ bool is_right_constant = ToInt64Constant(right, right_constant);
+ if (is_left_constant) {
+ if (is_right_constant) {
+ return Int64Constant(left_constant >> right_constant);
+ }
+ } else if (is_right_constant) {
+ if (right_constant == 0) {
+ return left;
+ }
+ }
+ return UncheckedCast<Word64T>(raw_assembler()->Word64Sar(left, right));
+}
+
TNode<UintPtrT> CodeAssembler::ChangeUint32ToWord(SloppyTNode<Word32T> value) {
if (raw_assembler()->machine()->Is64()) {
return UncheckedCast<UintPtrT>(
@@ -783,11 +1116,11 @@ Node* CodeAssembler::CallCFunction1(MachineType return_type,
}
Node* CodeAssembler::CallCFunction1WithCallerSavedRegisters(
- MachineType return_type, MachineType arg0_type, Node* function,
- Node* arg0) {
+ MachineType return_type, MachineType arg0_type, Node* function, Node* arg0,
+ SaveFPRegsMode mode) {
DCHECK(return_type.LessThanOrEqualPointerSize());
return raw_assembler()->CallCFunction1WithCallerSavedRegisters(
- return_type, arg0_type, function, arg0);
+ return_type, arg0_type, function, arg0, mode);
}
Node* CodeAssembler::CallCFunction2(MachineType return_type,
@@ -809,10 +1142,31 @@ Node* CodeAssembler::CallCFunction3(MachineType return_type,
Node* CodeAssembler::CallCFunction3WithCallerSavedRegisters(
MachineType return_type, MachineType arg0_type, MachineType arg1_type,
- MachineType arg2_type, Node* function, Node* arg0, Node* arg1, Node* arg2) {
+ MachineType arg2_type, Node* function, Node* arg0, Node* arg1, Node* arg2,
+ SaveFPRegsMode mode) {
DCHECK(return_type.LessThanOrEqualPointerSize());
return raw_assembler()->CallCFunction3WithCallerSavedRegisters(
- return_type, arg0_type, arg1_type, arg2_type, function, arg0, arg1, arg2);
+ return_type, arg0_type, arg1_type, arg2_type, function, arg0, arg1, arg2,
+ mode);
+}
+
+Node* CodeAssembler::CallCFunction4(
+ MachineType return_type, MachineType arg0_type, MachineType arg1_type,
+ MachineType arg2_type, MachineType arg3_type, Node* function, Node* arg0,
+ Node* arg1, Node* arg2, Node* arg3) {
+ return raw_assembler()->CallCFunction4(return_type, arg0_type, arg1_type,
+ arg2_type, arg3_type, function, arg0,
+ arg1, arg2, arg3);
+}
+
+Node* CodeAssembler::CallCFunction5(
+ MachineType return_type, MachineType arg0_type, MachineType arg1_type,
+ MachineType arg2_type, MachineType arg3_type, MachineType arg4_type,
+ Node* function, Node* arg0, Node* arg1, Node* arg2, Node* arg3,
+ Node* arg4) {
+ return raw_assembler()->CallCFunction5(
+ return_type, arg0_type, arg1_type, arg2_type, arg3_type, arg4_type,
+ function, arg0, arg1, arg2, arg3, arg4);
}
Node* CodeAssembler::CallCFunction6(
@@ -873,8 +1227,8 @@ void CodeAssembler::Switch(Node* index, Label* default_label,
for (size_t i = 0; i < case_count; ++i) {
labels[i] = case_labels[i]->label_;
case_labels[i]->MergeVariables();
- default_label->MergeVariables();
}
+ default_label->MergeVariables();
return raw_assembler()->Switch(index, default_label->label_, case_values,
labels, case_count);
}
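This hunk fixes a subtle bookkeeping bug: the default label's variables were merged once per case instead of once per switch, producing case_count redundant merges (and, presumably, none at all when the case list was empty). Hoisting the call out of the loop restores the intended once-per-switch behavior. A minimal illustration with a stand-in Label:

    // Stand-in illustration of the fix: MergeVariables on the default
    // label now runs exactly once per Switch.
    #include <cassert>
    #include <vector>

    struct Label { int merges = 0; void MergeVariables() { ++merges; } };

    void BindSwitch(std::vector<Label*>& cases, Label& default_label) {
      for (Label* l : cases) l->MergeVariables();
      default_label.MergeVariables();  // previously inside the loop
    }

    int main() {
      Label a, b, dflt;
      std::vector<Label*> cases{&a, &b};
      BindSwitch(cases, dflt);
      assert(dflt.merges == 1);  // was 2 before the fix
      return 0;
    }
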
@@ -1102,7 +1456,7 @@ void CodeAssemblerLabel::UpdateVariablesAfterBind() {
auto i = variable_merges_.find(var);
if (i != variable_merges_.end()) {
for (auto value : i->second) {
- DCHECK(value != nullptr);
+ DCHECK_NOT_NULL(value);
if (value != shared_value) {
if (shared_value == nullptr) {
shared_value = value;
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index ef18213bf6..64e959a1c0 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -108,16 +108,18 @@ enum class ObjectType {
#undef ENUM_STRUCT_ELEMENT
class AccessCheckNeeded;
-class CodeCacheHashTable;
class CompilationCacheTable;
class Constructor;
class Filler;
class InternalizedString;
+class JSArgumentsObject;
class JSContextExtensionObject;
class JSError;
+class JSSloppyArgumentsObject;
class MapCache;
class MutableHeapNumber;
class NativeContext;
+class SloppyArgumentsElements;
class StringWrapper;
class Undetectable;
class UniqueName;
@@ -188,7 +190,7 @@ class TNode {
TNode() : node_(nullptr) {}
TNode operator=(TNode other) {
- DCHECK(node_ == nullptr);
+ DCHECK_NULL(node_);
node_ = other.node_;
return *this;
}
@@ -268,7 +270,6 @@ class SloppyTNode : public TNode<A> {
V(Float64InsertHighWord32, Float64T, Float64T, Word32T) \
V(IntPtrAddWithOverflow, IntPtrT, IntPtrT, IntPtrT) \
V(IntPtrSubWithOverflow, IntPtrT, IntPtrT, IntPtrT) \
- V(IntPtrMul, IntPtrT, IntPtrT, IntPtrT) \
V(Int32Add, Word32T, Word32T, Word32T) \
V(Int32AddWithOverflow, Int32T, Int32T, Int32T) \
V(Int32Sub, Word32T, Word32T, Word32T) \
@@ -276,25 +277,8 @@ class SloppyTNode : public TNode<A> {
V(Int32MulWithOverflow, Int32T, Int32T, Int32T) \
V(Int32Div, Int32T, Int32T, Int32T) \
V(Int32Mod, Int32T, Int32T, Int32T) \
- V(WordOr, WordT, WordT, WordT) \
- V(WordAnd, WordT, WordT, WordT) \
- V(WordXor, WordT, WordT, WordT) \
- V(WordShl, WordT, WordT, IntegralT) \
- V(WordShr, WordT, WordT, IntegralT) \
- V(WordSar, WordT, WordT, IntegralT) \
V(WordRor, WordT, WordT, IntegralT) \
- V(Word32Or, Word32T, Word32T, Word32T) \
- V(Word32And, Word32T, Word32T, Word32T) \
- V(Word32Xor, Word32T, Word32T, Word32T) \
- V(Word32Shl, Word32T, Word32T, Word32T) \
- V(Word32Shr, Word32T, Word32T, Word32T) \
- V(Word32Sar, Word32T, Word32T, Word32T) \
V(Word32Ror, Word32T, Word32T, Word32T) \
- V(Word64Or, Word64T, Word64T, Word64T) \
- V(Word64And, Word64T, Word64T, Word64T) \
- V(Word64Xor, Word64T, Word64T, Word64T) \
- V(Word64Shr, Word64T, Word64T, Word64T) \
- V(Word64Sar, Word64T, Word64T, Word64T) \
V(Word64Ror, Word64T, Word64T, Word64T)
TNode<Float64T> Float64Add(TNode<Float64T> a, TNode<Float64T> b);
@@ -625,6 +609,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<WordT> IntPtrAdd(SloppyTNode<WordT> left, SloppyTNode<WordT> right);
TNode<WordT> IntPtrSub(SloppyTNode<WordT> left, SloppyTNode<WordT> right);
+ TNode<WordT> IntPtrMul(SloppyTNode<WordT> left, SloppyTNode<WordT> right);
TNode<IntPtrT> IntPtrAdd(TNode<IntPtrT> left, TNode<IntPtrT> right) {
return Signed(
IntPtrAdd(static_cast<Node*>(left), static_cast<Node*>(right)));
@@ -633,6 +618,10 @@ class V8_EXPORT_PRIVATE CodeAssembler {
return Signed(
IntPtrSub(static_cast<Node*>(left), static_cast<Node*>(right)));
}
+ TNode<IntPtrT> IntPtrMul(TNode<IntPtrT> left, TNode<IntPtrT> right) {
+ return Signed(
+ IntPtrMul(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
TNode<WordT> WordShl(SloppyTNode<WordT> value, int shift);
TNode<WordT> WordShr(SloppyTNode<WordT> value, int shift);
@@ -641,6 +630,37 @@ class V8_EXPORT_PRIVATE CodeAssembler {
}
TNode<Word32T> Word32Shr(SloppyTNode<Word32T> value, int shift);
+ TNode<WordT> WordOr(SloppyTNode<WordT> left, SloppyTNode<WordT> right);
+ TNode<WordT> WordAnd(SloppyTNode<WordT> left, SloppyTNode<WordT> right);
+ TNode<WordT> WordXor(SloppyTNode<WordT> left, SloppyTNode<WordT> right);
+ TNode<WordT> WordShl(SloppyTNode<WordT> left, SloppyTNode<IntegralT> right);
+ TNode<WordT> WordShr(SloppyTNode<WordT> left, SloppyTNode<IntegralT> right);
+ TNode<WordT> WordSar(SloppyTNode<WordT> left, SloppyTNode<IntegralT> right);
+ TNode<Word32T> Word32Or(SloppyTNode<Word32T> left,
+ SloppyTNode<Word32T> right);
+ TNode<Word32T> Word32And(SloppyTNode<Word32T> left,
+ SloppyTNode<Word32T> right);
+ TNode<Word32T> Word32Xor(SloppyTNode<Word32T> left,
+ SloppyTNode<Word32T> right);
+ TNode<Word32T> Word32Shl(SloppyTNode<Word32T> left,
+ SloppyTNode<Word32T> right);
+ TNode<Word32T> Word32Shr(SloppyTNode<Word32T> left,
+ SloppyTNode<Word32T> right);
+ TNode<Word32T> Word32Sar(SloppyTNode<Word32T> left,
+ SloppyTNode<Word32T> right);
+ TNode<Word64T> Word64Or(SloppyTNode<Word64T> left,
+ SloppyTNode<Word64T> right);
+ TNode<Word64T> Word64And(SloppyTNode<Word64T> left,
+ SloppyTNode<Word64T> right);
+ TNode<Word64T> Word64Xor(SloppyTNode<Word64T> left,
+ SloppyTNode<Word64T> right);
+ TNode<Word64T> Word64Shl(SloppyTNode<Word64T> left,
+ SloppyTNode<Word64T> right);
+ TNode<Word64T> Word64Shr(SloppyTNode<Word64T> left,
+ SloppyTNode<Word64T> right);
+ TNode<Word64T> Word64Sar(SloppyTNode<Word64T> left,
+ SloppyTNode<Word64T> right);
+
// Unary
#define DECLARE_CODE_ASSEMBLER_UNARY_OP(name, ResType, ArgType) \
TNode<ResType> name(SloppyTNode<ArgType> a);
@@ -765,7 +785,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// registers except the register used for return value.
Node* CallCFunction1WithCallerSavedRegisters(MachineType return_type,
MachineType arg0_type,
- Node* function, Node* arg0);
+ Node* function, Node* arg0,
+ SaveFPRegsMode mode);
// Call to a C function with two arguments.
Node* CallCFunction2(MachineType return_type, MachineType arg0_type,
@@ -779,12 +800,23 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Call to a C function with three arguments, while saving/restoring caller
// registers except the register used for return value.
- Node* CallCFunction3WithCallerSavedRegisters(MachineType return_type,
- MachineType arg0_type,
- MachineType arg1_type,
- MachineType arg2_type,
- Node* function, Node* arg0,
- Node* arg1, Node* arg2);
+ Node* CallCFunction3WithCallerSavedRegisters(
+ MachineType return_type, MachineType arg0_type, MachineType arg1_type,
+ MachineType arg2_type, Node* function, Node* arg0, Node* arg1, Node* arg2,
+ SaveFPRegsMode mode);
+
+ // Call to a C function with four arguments.
+ Node* CallCFunction4(MachineType return_type, MachineType arg0_type,
+ MachineType arg1_type, MachineType arg2_type,
+ MachineType arg3_type, Node* function, Node* arg0,
+ Node* arg1, Node* arg2, Node* arg3);
+
+ // Call to a C function with five arguments.
+ Node* CallCFunction5(MachineType return_type, MachineType arg0_type,
+ MachineType arg1_type, MachineType arg2_type,
+ MachineType arg3_type, MachineType arg4_type,
+ Node* function, Node* arg0, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4);
// Call to a C function with six arguments.
Node* CallCFunction6(MachineType return_type, MachineType arg0_type,
@@ -969,13 +1001,12 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
// |result_size| specifies the number of results returned by the stub.
// TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
CodeAssemblerState(Isolate* isolate, Zone* zone,
- const CallInterfaceDescriptor& descriptor,
- Code::Flags flags, const char* name,
- size_t result_size = 1);
+ const CallInterfaceDescriptor& descriptor, Code::Kind kind,
+ const char* name, size_t result_size = 1);
// Create with JSCall linkage.
CodeAssemblerState(Isolate* isolate, Zone* zone, int parameter_count,
- Code::Flags flags, const char* name);
+ Code::Kind kind, const char* name);
~CodeAssemblerState();
@@ -993,11 +1024,11 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
friend class CodeAssemblerVariable;
CodeAssemblerState(Isolate* isolate, Zone* zone,
- CallDescriptor* call_descriptor, Code::Flags flags,
+ CallDescriptor* call_descriptor, Code::Kind kind,
const char* name);
std::unique_ptr<RawMachineAssembler> raw_assembler_;
- Code::Flags flags_;
+ Code::Kind kind_;
const char* name_;
bool code_generated_;
ZoneSet<CodeAssemblerVariable::Impl*> variables_;
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 78d66f7b11..e8aa1a4796 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -60,6 +60,7 @@ CodeGenerator::CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
inlined_function_count_(0),
translations_(zone()),
last_lazy_deopt_pc_(0),
+ caller_registers_saved_(false),
jump_tables_(nullptr),
ools_(nullptr),
osr_helper_(osr_helper),
@@ -130,8 +131,16 @@ void CodeGenerator::AssembleCode() {
if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
ProfileEntryHookStub::MaybeCallEntryHookDelayed(tasm(), zone());
}
- // Architecture-specific, linkage-specific prologue.
- info->set_prologue_offset(tasm()->pc_offset());
+
+ // TODO(jupvfranco): This should be the first thing in the code;
+ // otherwise MaybeCallEntryHookDelayed may happen twice (for
+ // optimized and deoptimized code).
+ // We want to bail out only from JS functions, which are the only
+ // ones that are optimized.
+ if (info->IsOptimizing()) {
+ DCHECK(linkage()->GetIncomingDescriptor()->IsJSFunctionCall());
+ BailoutIfDeoptimized();
+ }
// Define deoptimization literals for all inlined functions.
DCHECK_EQ(0u, deoptimization_literals_.size());
@@ -316,7 +325,7 @@ void CodeGenerator::RecordSafepoint(ReferenceMap* references,
for (const InstructionOperand& operand : references->reference_operands()) {
if (operand.IsStackSlot()) {
int index = LocationOperand::cast(operand).index();
- DCHECK(index >= 0);
+ DCHECK_LE(0, index);
// We might index values in the fixed part of the frame (i.e. the
// closure pointer or the context pointer); these are not spill slots
// and therefore don't work with the SafepointTable currently, but
@@ -495,7 +504,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
branch.false_label = &continue_label;
branch.fallthru = true;
// Assemble architecture-specific branch.
- AssembleArchBranch(instr, &branch);
+ AssembleArchDeoptBranch(instr, &branch);
tasm()->bind(&continue_label);
break;
}
@@ -622,7 +631,7 @@ void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
data->SetInliningPositions(*inl_pos);
if (info->is_osr()) {
- DCHECK(osr_pc_offset_ >= 0);
+ DCHECK_LE(0, osr_pc_offset_);
data->SetOsrBytecodeOffset(Smi::FromInt(info_->osr_offset().ToInt()));
data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
} else {
@@ -729,11 +738,11 @@ void CodeGenerator::TranslateStateValueDescriptor(
}
} else if (desc->IsArgumentsElements()) {
if (translation != nullptr) {
- translation->ArgumentsElements(desc->is_rest());
+ translation->ArgumentsElements(desc->arguments_type());
}
} else if (desc->IsArgumentsLength()) {
if (translation != nullptr) {
- translation->ArgumentsLength(desc->is_rest());
+ translation->ArgumentsLength(desc->arguments_type());
}
} else if (desc->IsDuplicate()) {
if (translation != nullptr) {
@@ -1026,6 +1035,10 @@ OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
OutOfLineCode::~OutOfLineCode() {}
+Handle<Object> DeoptimizationLiteral::Reify(Isolate* isolate) const {
+ return object_.is_null() ? isolate->factory()->NewNumber(number_) : object_;
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/code-generator.h
index bedba67ba9..94bcd5ef31 100644
--- a/deps/v8/src/compiler/code-generator.h
+++ b/deps/v8/src/compiler/code-generator.h
@@ -67,9 +67,7 @@ class DeoptimizationLiteral {
bit_cast<uint64_t>(number_) == bit_cast<uint64_t>(other.number_);
}
- Handle<Object> Reify(Isolate* isolate) const {
- return object_.is_null() ? isolate->factory()->NewNumber(number_) : object_;
- }
+ Handle<Object> Reify(Isolate* isolate) const;
private:
Handle<Object> object_;
@@ -157,16 +155,24 @@ class CodeGenerator final : public GapResolver::Assembler {
// ============= Architecture-specific code generation methods. ==============
// ===========================================================================
- CodeGenResult FinalizeAssembleDeoptimizerCall(Address deoptimization_entry);
-
CodeGenResult AssembleArchInstruction(Instruction* instr);
void AssembleArchJump(RpoNumber target);
void AssembleArchBranch(Instruction* instr, BranchInfo* branch);
+
+ // Generates special branch for deoptimization condition.
+ void AssembleArchDeoptBranch(Instruction* instr, BranchInfo* branch);
+
void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
void AssembleArchTrap(Instruction* instr, FlagsCondition condition);
void AssembleArchLookupSwitch(Instruction* instr);
void AssembleArchTableSwitch(Instruction* instr);
+ // When entering code that is marked for deoptimization, rather than
+ // continuing with its execution, we jump to lazily compiled code. We need
+ // to do this because the code has already been deoptimized and needs to be
+ // unlinked from the JS functions referring to it.
+ void BailoutIfDeoptimized();
+
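As a rough conceptual model of what this guard does (plain C++ standing in for the emitted prologue; the flag and entry names are illustrative, not V8's):

```cpp
// Optimized code can be marked dead while an activation of it is still
// reachable. A prologue check diverts such activations to the lazy
// deoptimization entry instead of executing stale code.
struct CodeObject {
  bool marked_for_deoptimization = false;  // illustrative flag
};

using Entry = void (*)();

void EnterOptimized(const CodeObject& code, Entry body, Entry lazy_deopt) {
  if (code.marked_for_deoptimization) {
    lazy_deopt();  // unlink from the owning JSFunction, continue unoptimized
    return;
  }
  body();
}
```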
// Generates an architecture-specific, descriptor-specific prologue
// to set up a stack frame.
void AssembleConstructFrame();
@@ -328,6 +334,19 @@ class CodeGenerator final : public GapResolver::Assembler {
size_t inlined_function_count_;
TranslationBuffer translations_;
int last_lazy_deopt_pc_;
+
+ // kArchCallCFunction can be reached either as:
+ //   kArchCallCFunction;
+ // or as:
+ //   kArchSaveCallerRegisters;
+ //   kArchCallCFunction;
+ //   kArchRestoreCallerRegisters;
+ // The boolean is used to distinguish the two cases. In the latter case, we
+ // also need to decide if FP registers need to be saved, which is controlled
+ // by fp_mode_.
+ bool caller_registers_saved_;
+ SaveFPRegsMode fp_mode_;
+
JumpTable* jump_tables_;
OutOfLineCode* ools_;
base::Optional<OsrHelper> osr_helper_;
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index d92722f701..1693e90ec2 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -11,7 +11,6 @@
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
#include "src/handles-inl.h"
-#include "src/objects-inl.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -37,7 +36,7 @@ BranchHint BranchHintOf(const Operator* const op) {
}
int ValueInputCountOfReturn(Operator const* const op) {
- DCHECK(op->opcode() == IrOpcode::kReturn);
+ DCHECK_EQ(IrOpcode::kReturn, op->opcode());
// Return nodes have a hidden input at index 0 which we ignore in the value
// input count.
return op->ValueInputCount() - 1;
@@ -1115,7 +1114,7 @@ const Operator* CommonOperatorBuilder::Select(MachineRepresentation rep,
const Operator* CommonOperatorBuilder::Phi(MachineRepresentation rep,
int value_input_count) {
- DCHECK(value_input_count > 0); // Disallow empty phis.
+ DCHECK_LT(0, value_input_count); // Disallow empty phis.
#define CACHED_PHI(kRep, kValueInputCount) \
if (MachineRepresentation::kRep == rep && \
kValueInputCount == value_input_count) { \
@@ -1148,7 +1147,7 @@ const Operator* CommonOperatorBuilder::TypeGuard(Type* type) {
}
const Operator* CommonOperatorBuilder::EffectPhi(int effect_input_count) {
- DCHECK(effect_input_count > 0); // Disallow empty effect phis.
+ DCHECK_LT(0, effect_input_count); // Disallow empty effect phis.
switch (effect_input_count) {
#define CACHED_EFFECT_PHI(input_count) \
case input_count: \
@@ -1166,8 +1165,8 @@ const Operator* CommonOperatorBuilder::EffectPhi(int effect_input_count) {
}
const Operator* CommonOperatorBuilder::InductionVariablePhi(int input_count) {
- DCHECK(input_count >= 4); // There must be always the entry, backedge,
- // increment and at least one bound.
+ DCHECK_LE(4, input_count); // There must be always the entry, backedge,
+ // increment and at least one bound.
switch (input_count) {
#define CACHED_INDUCTION_VARIABLE_PHI(input_count) \
case input_count: \
@@ -1235,24 +1234,28 @@ const Operator* CommonOperatorBuilder::TypedStateValues(
TypedStateValueInfo(types, bitmask)); // parameters
}
-const Operator* CommonOperatorBuilder::ArgumentsElementsState(bool is_rest) {
- return new (zone()) Operator1<bool>( // --
+const Operator* CommonOperatorBuilder::ArgumentsElementsState(
+ ArgumentsStateType type) {
+ return new (zone()) Operator1<ArgumentsStateType>( // --
IrOpcode::kArgumentsElementsState, Operator::kPure, // opcode
"ArgumentsElementsState", // name
- 0, 0, 0, 1, 0, 0, is_rest); // counts
+ 0, 0, 0, 1, 0, 0, // counts
+ type); // parameter
}
-const Operator* CommonOperatorBuilder::ArgumentsLengthState(bool is_rest) {
- return new (zone()) Operator1<bool>( // --
+const Operator* CommonOperatorBuilder::ArgumentsLengthState(
+ ArgumentsStateType type) {
+ return new (zone()) Operator1<ArgumentsStateType>( // --
IrOpcode::kArgumentsLengthState, Operator::kPure, // opcode
"ArgumentsLengthState", // name
- 0, 0, 0, 1, 0, 0, is_rest); // counts
+ 0, 0, 0, 1, 0, 0, // counts
+ type); // parameter
}
-bool IsRestOf(Operator const* op) {
+ArgumentsStateType ArgumentsStateTypeOf(Operator const* op) {
DCHECK(op->opcode() == IrOpcode::kArgumentsElementsState ||
op->opcode() == IrOpcode::kArgumentsLengthState);
- return OpParameter<bool>(op);
+ return OpParameter<ArgumentsStateType>(op);
}
const Operator* CommonOperatorBuilder::ObjectState(uint32_t object_id,
@@ -1358,7 +1361,6 @@ const Operator* CommonOperatorBuilder::TailCall(
return new (zone()) TailCallOperator(descriptor);
}
-
const Operator* CommonOperatorBuilder::Projection(size_t index) {
switch (index) {
#define CACHED_PROJECTION(index) \
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 9dfb059518..4f72267617 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -315,10 +315,28 @@ SparseInputMask SparseInputMaskOf(Operator const*);
ZoneVector<MachineType> const* MachineTypesOf(Operator const*)
WARN_UNUSED_RESULT;
-// The ArgumentsElementsState and ArgumentsLengthState can either describe an
-// unmapped arguments backing store or the backing store of the rest parameters.
-// IsRestOf(op) is true in the second case.
-bool IsRestOf(Operator const*);
+// The ArgumentsElementsState and ArgumentsLengthState can describe the layout
+// for backing stores of arguments objects of various types:
+//
+// +------------------------------------+
+// - kUnmappedArguments: | arg0, ... argK-1, argK, ... argN-1 | {length:N}
+// +------------------------------------+
+// +------------------------------------+
+// - kMappedArguments: | hole, ... hole, argK, ... argN-1 | {length:N}
+// +------------------------------------+
+// +------------------+
+// - kRestParameter: | argK, ... argN-1 | {length:N-K}
+// +------------------+
+//
+// Here {K} represents the number of formal parameters of the active function,
+// whereas {N} represents the actual number of arguments passed at runtime.
+// Note that {N < K} can happen and causes {K} to be capped accordingly.
+//
+// Also note that it is possible for an arguments object of {kMappedArguments}
+// type to carry a backing store of {kUnmappedArguments} type when {K == 0}.
+typedef CreateArgumentsType ArgumentsStateType;
+
+ArgumentsStateType ArgumentsStateTypeOf(Operator const*) WARN_UNUSED_RESULT;
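The length arithmetic implied by the three diagrams can be stated directly; a hedged sketch with illustrative helper names:

```cpp
#include <algorithm>
#include <cassert>

// K = formal parameter count, N = actual argument count.
// Unmapped and mapped stores hold all N arguments (a mapped store holes
// out the first min(K, N) slots); a rest store holds only the overflow.
int BackingStoreLength(int K, int N, bool is_rest) {
  return is_rest ? std::max(0, N - K) : N;
}

int HoleCount(int K, int N, bool is_mapped) {
  // With K == 0 a mapped store degenerates to an unmapped one.
  return is_mapped ? std::min(K, N) : 0;
}

int main() {
  assert(BackingStoreLength(2, 5, /*is_rest=*/true) == 3);   // argK..argN-1
  assert(BackingStoreLength(5, 2, /*is_rest=*/true) == 0);   // N < K: capped
  assert(HoleCount(2, 5, /*is_mapped=*/true) == 2);
}
```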
uint32_t ObjectIdOf(Operator const*);
@@ -387,8 +405,8 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* StateValues(int arguments, SparseInputMask bitmask);
const Operator* TypedStateValues(const ZoneVector<MachineType>* types,
SparseInputMask bitmask);
- const Operator* ArgumentsElementsState(bool is_rest);
- const Operator* ArgumentsLengthState(bool is_rest);
+ const Operator* ArgumentsElementsState(ArgumentsStateType type);
+ const Operator* ArgumentsLengthState(ArgumentsStateType type);
const Operator* ObjectState(uint32_t object_id, int pointer_slots);
const Operator* TypedObjectState(uint32_t object_id,
const ZoneVector<MachineType>* types);
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 53e16a2bd2..d886fda97a 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -13,7 +13,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/schedule.h"
-#include "src/objects-inl.h"
+#include "src/factory-inl.h"
namespace v8 {
namespace internal {
@@ -354,7 +354,7 @@ void EffectControlLinearizer::Run() {
} else if (node->opcode() == IrOpcode::kPhi) {
// Just skip phis.
} else if (node->opcode() == IrOpcode::kTerminate) {
- DCHECK(terminate == nullptr);
+ DCHECK_NULL(terminate);
terminate = node;
} else {
break;
@@ -632,9 +632,6 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCompareMaps:
result = LowerCompareMaps(node);
break;
- case IrOpcode::kCheckMapValue:
- LowerCheckMapValue(node, frame_state);
- break;
case IrOpcode::kCheckNumber:
result = LowerCheckNumber(node, frame_state);
break;
@@ -715,12 +712,21 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckedTruncateTaggedToWord32:
result = LowerCheckedTruncateTaggedToWord32(node, frame_state);
break;
+ case IrOpcode::kObjectIsArrayBufferView:
+ result = LowerObjectIsArrayBufferView(node);
+ break;
case IrOpcode::kObjectIsCallable:
result = LowerObjectIsCallable(node);
break;
+ case IrOpcode::kObjectIsConstructor:
+ result = LowerObjectIsConstructor(node);
+ break;
case IrOpcode::kObjectIsDetectableCallable:
result = LowerObjectIsDetectableCallable(node);
break;
+ case IrOpcode::kObjectIsMinusZero:
+ result = LowerObjectIsMinusZero(node);
+ break;
case IrOpcode::kObjectIsNaN:
result = LowerObjectIsNaN(node);
break;
@@ -751,8 +757,14 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kArgumentsLength:
result = LowerArgumentsLength(node);
break;
- case IrOpcode::kNewUnmappedArgumentsElements:
- result = LowerNewUnmappedArgumentsElements(node);
+ case IrOpcode::kNewDoubleElements:
+ result = LowerNewDoubleElements(node);
+ break;
+ case IrOpcode::kNewSmiOrObjectElements:
+ result = LowerNewSmiOrObjectElements(node);
+ break;
+ case IrOpcode::kNewArgumentsElements:
+ result = LowerNewArgumentsElements(node);
break;
case IrOpcode::kArrayBufferWasNeutered:
result = LowerArrayBufferWasNeutered(node);
@@ -817,20 +829,30 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kTransitionElementsKind:
LowerTransitionElementsKind(node);
break;
+ case IrOpcode::kLoadFieldByIndex:
+ result = LowerLoadFieldByIndex(node);
+ break;
case IrOpcode::kLoadTypedElement:
result = LowerLoadTypedElement(node);
break;
case IrOpcode::kStoreTypedElement:
LowerStoreTypedElement(node);
break;
- case IrOpcode::kLookupHashStorageIndex:
- result = LowerLookupHashStorageIndex(node);
+ case IrOpcode::kStoreSignedSmallElement:
+ LowerStoreSignedSmallElement(node);
+ break;
+ case IrOpcode::kFindOrderedHashMapEntry:
+ result = LowerFindOrderedHashMapEntry(node);
+ break;
+ case IrOpcode::kFindOrderedHashMapEntryForInt32Key:
+ result = LowerFindOrderedHashMapEntryForInt32Key(node);
break;
- case IrOpcode::kLoadHashMapValue:
- result = LowerLoadHashMapValue(node);
case IrOpcode::kTransitionAndStoreElement:
LowerTransitionAndStoreElement(node);
break;
+ case IrOpcode::kRuntimeAbort:
+ LowerRuntimeAbort(node);
+ break;
case IrOpcode::kFloat64RoundUp:
if (!LowerFloat64RoundUp(node).To(&result)) {
return false;
@@ -1306,19 +1328,6 @@ Node* EffectControlLinearizer::LowerCompareMaps(Node* node) {
return done.PhiAt(0);
}
-void EffectControlLinearizer::LowerCheckMapValue(Node* node,
- Node* frame_state) {
- Node* value = node->InputAt(0);
- Node* map = node->InputAt(1);
-
- // Load the current map of the {value}.
- Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
-
- // Check if the {value}s map matches the expected {map}.
- Node* check = __ WordEqual(value_map, map);
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, check, frame_state);
-}
-
Node* EffectControlLinearizer::LowerCheckNumber(Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
@@ -1876,6 +1885,31 @@ Node* EffectControlLinearizer::LowerCheckedTruncateTaggedToWord32(
return done.PhiAt(0);
}
+Node* EffectControlLinearizer::LowerObjectIsArrayBufferView(Node* node) {
+ Node* value = node->InputAt(0);
+
+ auto if_smi = __ MakeDeferredLabel();
+ auto done = __ MakeLabel(MachineRepresentation::kBit);
+
+ Node* check = ObjectIsSmi(value);
+ __ GotoIf(check, &if_smi);
+
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+ STATIC_ASSERT(JS_TYPED_ARRAY_TYPE + 1 == JS_DATA_VIEW_TYPE);
+ Node* vfalse = __ Uint32LessThan(
+ __ Int32Sub(value_instance_type, __ Int32Constant(JS_TYPED_ARRAY_TYPE)),
+ __ Int32Constant(2));
+ __ Goto(&done, vfalse);
+
+ __ Bind(&if_smi);
+ __ Goto(&done, __ Int32Constant(0));
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
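The STATIC_ASSERT plus unsigned comparison is the usual single-compare range check for adjacent instance types. A standalone sketch with made-up constants in place of V8's instance-type enum:

```cpp
#include <cassert>
#include <cstdint>

// Made-up stand-ins for V8's instance-type constants; the STATIC_ASSERT
// in the lowering guarantees the two real types are adjacent.
constexpr uint16_t kJSTypedArrayType = 1000;
constexpr uint16_t kJSDataViewType = kJSTypedArrayType + 1;

// One unsigned compare covers a contiguous range: (t - lo) < n holds
// iff lo <= t < lo + n, because values below lo wrap to large unsigned.
bool IsArrayBufferView(uint16_t instance_type) {
  return static_cast<uint16_t>(instance_type - kJSTypedArrayType) < 2;
}

int main() {
  assert(IsArrayBufferView(kJSTypedArrayType));
  assert(IsArrayBufferView(kJSDataViewType));
  assert(!IsArrayBufferView(kJSTypedArrayType - 1));
  assert(!IsArrayBufferView(kJSDataViewType + 1));
}
```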
+
Node* EffectControlLinearizer::LowerObjectIsCallable(Node* node) {
Node* value = node->InputAt(0);
@@ -1900,6 +1934,31 @@ Node* EffectControlLinearizer::LowerObjectIsCallable(Node* node) {
return done.PhiAt(0);
}
+Node* EffectControlLinearizer::LowerObjectIsConstructor(Node* node) {
+ Node* value = node->InputAt(0);
+
+ auto if_smi = __ MakeDeferredLabel();
+ auto done = __ MakeLabel(MachineRepresentation::kBit);
+
+ Node* check = ObjectIsSmi(value);
+ __ GotoIf(check, &if_smi);
+
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_bit_field =
+ __ LoadField(AccessBuilder::ForMapBitField(), value_map);
+ Node* vfalse =
+ __ Word32Equal(__ Int32Constant(1 << Map::kIsConstructor),
+ __ Word32And(value_bit_field,
+ __ Int32Constant(1 << Map::kIsConstructor)));
+ __ Goto(&done, vfalse);
+
+ __ Bind(&if_smi);
+ __ Goto(&done, __ Int32Constant(0));
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
Node* EffectControlLinearizer::LowerObjectIsDetectableCallable(Node* node) {
Node* value = node->InputAt(0);
@@ -1926,6 +1985,31 @@ Node* EffectControlLinearizer::LowerObjectIsDetectableCallable(Node* node) {
return done.PhiAt(0);
}
+Node* EffectControlLinearizer::LowerObjectIsMinusZero(Node* node) {
+ Node* value = node->InputAt(0);
+ Node* zero = __ Int32Constant(0);
+
+ auto done = __ MakeLabel(MachineRepresentation::kBit);
+
+ // Check if {value} is a Smi.
+ __ GotoIf(ObjectIsSmi(value), &done, zero);
+
+ // Check if {value} is a HeapNumber.
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ __ GotoIfNot(__ WordEqual(value_map, __ HeapNumberMapConstant()), &done,
+ zero);
+
+ // Check if {value} contains -0.
+ Node* value_value = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+ __ Goto(&done,
+ __ Float64Equal(
+ __ Float64Div(__ Float64Constant(1.0), value_value),
+ __ Float64Constant(-std::numeric_limits<double>::infinity())));
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
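The heart of this lowering is that -0.0 and +0.0 compare equal under Float64Equal, so the code distinguishes them by dividing 1.0 by the value and testing for -infinity. The same trick in plain C++:

```cpp
#include <cassert>
#include <limits>

// -0.0 == 0.0 under ordinary comparison, so the lowering divides 1.0 by
// the value instead: 1.0 / -0.0 is -infinity, 1.0 / +0.0 is +infinity.
bool IsMinusZero(double value) {
  return 1.0 / value == -std::numeric_limits<double>::infinity();
}

int main() {
  assert(IsMinusZero(-0.0));
  assert(!IsMinusZero(0.0));
  assert(!IsMinusZero(-1.0));
}
```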
+
Node* EffectControlLinearizer::LowerObjectIsNaN(Node* node) {
Node* value = node->InputAt(0);
Node* zero = __ Int32Constant(0);
@@ -2099,7 +2183,7 @@ Node* EffectControlLinearizer::LowerArgumentsLength(Node* node) {
Node* arguments_frame = NodeProperties::GetValueInput(node, 0);
int formal_parameter_count = FormalParameterCountOf(node->op());
bool is_rest_length = IsRestLengthOf(node->op());
- DCHECK(formal_parameter_count >= 0);
+ DCHECK_LE(0, formal_parameter_count);
if (is_rest_length) {
// The ArgumentsLength node is computing the number of rest parameters,
@@ -2169,18 +2253,110 @@ Node* EffectControlLinearizer::LowerArgumentsFrame(Node* node) {
return done.PhiAt(0);
}
-Node* EffectControlLinearizer::LowerNewUnmappedArgumentsElements(Node* node) {
+Node* EffectControlLinearizer::LowerNewDoubleElements(Node* node) {
+ PretenureFlag const pretenure = PretenureFlagOf(node->op());
+ Node* length = node->InputAt(0);
+
+ // Compute the effective size of the backing store.
+ Node* size =
+ __ Int32Add(__ Word32Shl(length, __ Int32Constant(kDoubleSizeLog2)),
+ __ Int32Constant(FixedDoubleArray::kHeaderSize));
+
+ // Allocate the result and initialize the header.
+ Node* result = __ Allocate(pretenure, size);
+ __ StoreField(AccessBuilder::ForMap(), result,
+ __ FixedDoubleArrayMapConstant());
+ __ StoreField(AccessBuilder::ForFixedArrayLength(), result,
+ ChangeInt32ToSmi(length));
+
+ // Initialize the backing store with holes.
+ STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+ Node* limit = ChangeUint32ToUintPtr(length);
+ Node* the_hole =
+ __ LoadField(AccessBuilder::ForHeapNumberValue(), __ TheHoleConstant());
+ auto loop = __ MakeLoopLabel(MachineType::PointerRepresentation());
+ auto done_loop = __ MakeLabel();
+ __ Goto(&loop, __ IntPtrConstant(0));
+ __ Bind(&loop);
+ {
+ // Check if we've initialized everything.
+ Node* index = loop.PhiAt(0);
+ Node* check = __ UintLessThan(index, limit);
+ __ GotoIfNot(check, &done_loop);
+
+ // Storing "the_hole" doesn't need a write barrier.
+ StoreRepresentation rep(MachineRepresentation::kFloat64, kNoWriteBarrier);
+ Node* offset = __ IntAdd(
+ __ WordShl(index, __ IntPtrConstant(kDoubleSizeLog2)),
+ __ IntPtrConstant(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ __ Store(rep, result, offset, the_hole);
+
+ // Advance the {index}.
+ index = __ IntAdd(index, __ IntPtrConstant(1));
+ __ Goto(&loop, index);
+ }
+
+ __ Bind(&done_loop);
+ return result;
+}
+
+Node* EffectControlLinearizer::LowerNewSmiOrObjectElements(Node* node) {
+ PretenureFlag const pretenure = PretenureFlagOf(node->op());
+ Node* length = node->InputAt(0);
+
+ // Compute the effective size of the backing store.
+ Node* size =
+ __ Int32Add(__ Word32Shl(length, __ Int32Constant(kPointerSizeLog2)),
+ __ Int32Constant(FixedArray::kHeaderSize));
+
+ // Allocate the result and initialize the header.
+ Node* result = __ Allocate(pretenure, size);
+ __ StoreField(AccessBuilder::ForMap(), result, __ FixedArrayMapConstant());
+ __ StoreField(AccessBuilder::ForFixedArrayLength(), result,
+ ChangeInt32ToSmi(length));
+
+ // Initialize the backing store with holes.
+ Node* limit = ChangeUint32ToUintPtr(length);
+ Node* the_hole = __ TheHoleConstant();
+ auto loop = __ MakeLoopLabel(MachineType::PointerRepresentation());
+ auto done_loop = __ MakeLabel();
+ __ Goto(&loop, __ IntPtrConstant(0));
+ __ Bind(&loop);
+ {
+ // Check if we've initialized everything.
+ Node* index = loop.PhiAt(0);
+ Node* check = __ UintLessThan(index, limit);
+ __ GotoIfNot(check, &done_loop);
+
+ // Storing "the_hole" doesn't need a write barrier.
+ StoreRepresentation rep(MachineRepresentation::kTagged, kNoWriteBarrier);
+ Node* offset =
+ __ IntAdd(__ WordShl(index, __ IntPtrConstant(kPointerSizeLog2)),
+ __ IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Store(rep, result, offset, the_hole);
+
+ // Advance the {index}.
+ index = __ IntAdd(index, __ IntPtrConstant(1));
+ __ Goto(&loop, index);
+ }
+
+ __ Bind(&done_loop);
+ return result;
+}
+
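Both allocators follow one recipe: compute header-plus-payload size, allocate, stamp map and length, then hole-fill in a loop. A condensed sketch; the header constant is a placeholder for FixedArray::kHeaderSize / FixedDoubleArray::kHeaderSize:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Placeholder for FixedArray::kHeaderSize / FixedDoubleArray::kHeaderSize.
constexpr int32_t kHeaderSize = 16;

// The size computation both lowerings emit: length slots of 2^log2 bytes
// plus the header.
int32_t BackingStoreSize(int32_t length, int32_t element_size_log2) {
  return (length << element_size_log2) + kHeaderSize;
}

// The hole-initialization loop, with the raw store abstracted to a
// vector. As the diff notes, storing the hole needs no write barrier.
void FillWithHole(std::vector<double>& store, double the_hole) {
  for (std::size_t index = 0; index < store.size(); ++index) {
    store[index] = the_hole;
  }
}
```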
+Node* EffectControlLinearizer::LowerNewArgumentsElements(Node* node) {
Node* frame = NodeProperties::GetValueInput(node, 0);
Node* length = NodeProperties::GetValueInput(node, 1);
+ int mapped_count = OpParameter<int>(node);
Callable const callable =
- Builtins::CallableFor(isolate(), Builtins::kNewUnmappedArgumentsElements);
+ Builtins::CallableFor(isolate(), Builtins::kNewArgumentsElements);
Operator::Properties const properties = node->op()->properties();
CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
return __ Call(desc, __ HeapConstant(callable.code()), frame, length,
- __ NoContextConstant());
+ __ SmiConstant(mapped_count), __ NoContextConstant());
}
Node* EffectControlLinearizer::LowerArrayBufferWasNeutered(Node* node) {
@@ -2561,15 +2737,31 @@ Node* EffectControlLinearizer::ChangeInt32ToSmi(Node* value) {
return __ WordShl(value, SmiShiftBitsConstant());
}
-Node* EffectControlLinearizer::ChangeUint32ToSmi(Node* value) {
+Node* EffectControlLinearizer::ChangeIntPtrToInt32(Node* value) {
+ if (machine()->Is64()) {
+ value = __ TruncateInt64ToInt32(value);
+ }
+ return value;
+}
+
+Node* EffectControlLinearizer::ChangeUint32ToUintPtr(Node* value) {
if (machine()->Is64()) {
value = __ ChangeUint32ToUint64(value);
}
+ return value;
+}
+
+Node* EffectControlLinearizer::ChangeUint32ToSmi(Node* value) {
+ value = ChangeUint32ToUintPtr(value);
return __ WordShl(value, SmiShiftBitsConstant());
}
+Node* EffectControlLinearizer::ChangeSmiToIntPtr(Node* value) {
+ return __ WordSar(value, SmiShiftBitsConstant());
+}
+
Node* EffectControlLinearizer::ChangeSmiToInt32(Node* value) {
- value = __ WordSar(value, SmiShiftBitsConstant());
+ value = ChangeSmiToIntPtr(value);
if (machine()->Is64()) {
value = __ TruncateInt64ToInt32(value);
}
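The new ChangeSmiToIntPtr/ChangeUint32ToUintPtr helpers are all about Smi tagging, which on 64-bit targets is a plain shift. A sketch under the assumption of the default 64-bit layout (32-bit payload in the upper word half; the shift amount models SmiShiftBitsConstant()):

```cpp
#include <cassert>
#include <cstdint>

constexpr int kSmiShiftBits = 32;  // assumed 64-bit Smi layout

int64_t ChangeInt32ToSmi(int32_t value) {
  // Shift in the unsigned domain to avoid UB on negative payloads.
  return static_cast<int64_t>(static_cast<uint64_t>(value) << kSmiShiftBits);
}

int32_t ChangeSmiToInt32(int64_t smi) {
  return static_cast<int32_t>(smi >> kSmiShiftBits);  // arithmetic WordSar
}

int main() {
  assert(ChangeSmiToInt32(ChangeInt32ToSmi(-42)) == -42);
  assert(ChangeInt32ToSmi(0) == 0);
}
```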
@@ -2683,81 +2875,43 @@ Node* EffectControlLinearizer::LowerEnsureWritableFastElements(Node* node) {
Node* EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
Node* frame_state) {
- GrowFastElementsFlags flags = GrowFastElementsFlagsOf(node->op());
+ GrowFastElementsMode mode = GrowFastElementsModeOf(node->op());
Node* object = node->InputAt(0);
Node* elements = node->InputAt(1);
Node* index = node->InputAt(2);
- Node* length = node->InputAt(3);
+ Node* elements_length = node->InputAt(3);
auto done = __ MakeLabel(MachineRepresentation::kTagged);
- auto done_grow = __ MakeLabel(MachineRepresentation::kTagged);
auto if_grow = __ MakeDeferredLabel();
auto if_not_grow = __ MakeLabel();
- Node* check0 = (flags & GrowFastElementsFlag::kHoleyElements)
- ? __ Uint32LessThanOrEqual(length, index)
- : __ Word32Equal(length, index);
- __ GotoIfNot(check0, &if_not_grow);
- {
- // Load the length of the {elements} backing store.
- Node* elements_length =
- __ LoadField(AccessBuilder::ForFixedArrayLength(), elements);
- elements_length = ChangeSmiToInt32(elements_length);
-
- // Check if we need to grow the {elements} backing store.
- Node* check1 = __ Uint32LessThan(index, elements_length);
- __ GotoIfNot(check1, &if_grow);
- __ Goto(&done_grow, elements);
-
- __ Bind(&if_grow);
- // We need to grow the {elements} for {object}.
- Operator::Properties properties = Operator::kEliminatable;
- Callable callable =
- (flags & GrowFastElementsFlag::kDoubleElements)
- ? Builtins::CallableFor(isolate(),
- Builtins::kGrowFastDoubleElements)
- : Builtins::CallableFor(isolate(),
- Builtins::kGrowFastSmiOrObjectElements);
- CallDescriptor::Flags call_flags = CallDescriptor::kNoFlags;
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, call_flags,
- properties);
- Node* new_object = __ Call(desc, __ HeapConstant(callable.code()), object,
- ChangeInt32ToSmi(index), __ NoContextConstant());
-
- // Ensure that we were able to grow the {elements}.
- // TODO(turbofan): We use kSmi as reason here similar to Crankshaft,
- // but maybe we should just introduce a reason that makes sense.
- __ DeoptimizeIf(DeoptimizeReason::kSmi, ObjectIsSmi(new_object),
- frame_state);
- __ Goto(&done_grow, new_object);
-
- __ Bind(&done_grow);
+ // Check if we need to grow the {elements} backing store.
+ Node* check = __ Uint32LessThan(index, elements_length);
+ __ GotoIfNot(check, &if_grow);
+ __ Goto(&done, elements);
- // For JSArray {object}s we also need to update the "length".
- if (flags & GrowFastElementsFlag::kArrayObject) {
- // Compute the new {length}.
- Node* object_length =
- ChangeInt32ToSmi(__ Int32Add(index, __ Int32Constant(1)));
+ __ Bind(&if_grow);
+ // We need to grow the {elements} for {object}.
+ Operator::Properties properties = Operator::kEliminatable;
+ Callable callable =
+ (mode == GrowFastElementsMode::kDoubleElements)
+ ? Builtins::CallableFor(isolate(), Builtins::kGrowFastDoubleElements)
+ : Builtins::CallableFor(isolate(),
+ Builtins::kGrowFastSmiOrObjectElements);
+ CallDescriptor::Flags call_flags = CallDescriptor::kNoFlags;
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, call_flags,
+ properties);
+ Node* new_elements = __ Call(desc, __ HeapConstant(callable.code()), object,
+ ChangeInt32ToSmi(index), __ NoContextConstant());
- // Update the "length" property of the {object}.
- __ StoreField(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS), object,
- object_length);
- }
- __ Goto(&done, done_grow.PhiAt(0));
- }
+ // Ensure that we were able to grow the {elements}.
+ // TODO(turbofan): We use kSmi as reason here similar to Crankshaft,
+ // but maybe we should just introduce a reason that makes sense.
+ __ DeoptimizeIf(DeoptimizeReason::kSmi, ObjectIsSmi(new_elements),
+ frame_state);
+ __ Goto(&done, new_elements);
- __ Bind(&if_not_grow);
- {
- // In case of non-holey {elements}, we need to verify that the {index} is
- // in-bounds, otherwise for holey {elements}, the check above already
- // guards the index (and the operator forces {index} to be unsigned).
- if (!(flags & GrowFastElementsFlag::kHoleyElements)) {
- Node* check1 = __ Uint32LessThan(index, length);
- __ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds, check1, frame_state);
- }
- __ Goto(&done, elements);
- }
__ Bind(&done);
return done.PhiAt(0);
}
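The rewrite above collapses MaybeGrowFastElements to a single bounds check plus a deferred call to a growth stub. A sketch of that control flow, with a function pointer standing in for the GrowFastDoubleElements / GrowFastSmiOrObjectElements builtins:

```cpp
#include <cstdint>
#include <vector>

using Elements = std::vector<int32_t>;

// Fast path: the index already fits the backing store, so reuse it.
// Slow path: defer to a growth stub; the real lowering deoptimizes if
// the stub fails instead of returning normally.
Elements* MaybeGrowFastElements(Elements* elements, uint32_t index,
                                Elements* (*grow_stub)(Elements*, uint32_t)) {
  if (index < elements->size()) return elements;
  return grow_stub(elements, index);
}
```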
@@ -2803,6 +2957,107 @@ void EffectControlLinearizer::LowerTransitionElementsKind(Node* node) {
__ Bind(&done);
}
+Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
+ Node* object = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* zero = __ IntPtrConstant(0);
+ Node* one = __ IntPtrConstant(1);
+
+ // Sign-extend the {index} on 64-bit architectures.
+ if (machine()->Is64()) {
+ index = __ ChangeInt32ToInt64(index);
+ }
+
+ auto if_double = __ MakeDeferredLabel();
+ auto done = __ MakeLabel(MachineRepresentation::kTagged);
+
+ // Check if field is a mutable double field.
+ __ GotoIfNot(__ WordEqual(__ WordAnd(index, one), zero), &if_double);
+
+ // The field is a proper Tagged field on {object}. The {index} is shifted
+ // to the left by one in the code below.
+ {
+ // Check if field is in-object or out-of-object.
+ auto if_outofobject = __ MakeLabel();
+ __ GotoIf(__ IntLessThan(index, zero), &if_outofobject);
+
+ // The field is located in the {object} itself.
+ {
+ Node* offset =
+ __ IntAdd(__ WordShl(index, __ IntPtrConstant(kPointerSizeLog2 - 1)),
+ __ IntPtrConstant(JSObject::kHeaderSize - kHeapObjectTag));
+ Node* result = __ Load(MachineType::AnyTagged(), object, offset);
+ __ Goto(&done, result);
+ }
+
+ // The field is located in the properties backing store of {object}.
+ // The {index} is equal to the negated out of property index plus 1.
+ __ Bind(&if_outofobject);
+ {
+ Node* properties =
+ __ LoadField(AccessBuilder::ForJSObjectPropertiesOrHash(), object);
+ Node* offset =
+ __ IntAdd(__ WordShl(__ IntSub(zero, index),
+ __ IntPtrConstant(kPointerSizeLog2 - 1)),
+ __ IntPtrConstant((FixedArray::kHeaderSize - kPointerSize) -
+ kHeapObjectTag));
+ Node* result = __ Load(MachineType::AnyTagged(), properties, offset);
+ __ Goto(&done, result);
+ }
+ }
+
+ // The field is a Double field, either unboxed in the object on 64-bit
+ // architectures, or as MutableHeapNumber.
+ __ Bind(&if_double);
+ {
+ auto done_double = __ MakeLabel(MachineRepresentation::kFloat64);
+
+ index = __ WordSar(index, one);
+
+ // Check if field is in-object or out-of-object.
+ auto if_outofobject = __ MakeLabel();
+ __ GotoIf(__ IntLessThan(index, zero), &if_outofobject);
+
+ // The field is located in the {object} itself.
+ {
+ Node* offset =
+ __ IntAdd(__ WordShl(index, __ IntPtrConstant(kPointerSizeLog2)),
+ __ IntPtrConstant(JSObject::kHeaderSize - kHeapObjectTag));
+ if (FLAG_unbox_double_fields) {
+ Node* result = __ Load(MachineType::Float64(), object, offset);
+ __ Goto(&done_double, result);
+ } else {
+ Node* result = __ Load(MachineType::AnyTagged(), object, offset);
+ result = __ LoadField(AccessBuilder::ForHeapNumberValue(), result);
+ __ Goto(&done_double, result);
+ }
+ }
+
+ __ Bind(&if_outofobject);
+ {
+ Node* properties =
+ __ LoadField(AccessBuilder::ForJSObjectPropertiesOrHash(), object);
+ Node* offset =
+ __ IntAdd(__ WordShl(__ IntSub(zero, index),
+ __ IntPtrConstant(kPointerSizeLog2)),
+ __ IntPtrConstant((FixedArray::kHeaderSize - kPointerSize) -
+ kHeapObjectTag));
+ Node* result = __ Load(MachineType::AnyTagged(), properties, offset);
+ result = __ LoadField(AccessBuilder::ForHeapNumberValue(), result);
+ __ Goto(&done_double, result);
+ }
+
+ __ Bind(&done_double);
+ {
+ Node* result = AllocateHeapNumberWithValue(done_double.PhiAt(0));
+ __ Goto(&done, result);
+ }
+ }
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
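The index decoded here packs three facts, per the comments in the lowering: bit 0 marks a double field, the sign marks out-of-object storage, and out-of-object slots carry a bias of one. A decoding sketch under those assumptions:

```cpp
#include <cassert>
#include <cstdint>

struct FieldLocation {
  bool is_double;      // bit 0 of the encoded index
  bool out_of_object;  // negative slot => properties backing store
  intptr_t slot;       // zero-based slot in the chosen storage
};

FieldLocation DecodeFieldIndex(intptr_t index) {
  FieldLocation loc;
  loc.is_double = (index & 1) != 0;
  intptr_t value = index >> 1;  // arithmetic shift, like WordSar(index, one)
  loc.out_of_object = value < 0;
  loc.slot = loc.out_of_object ? (-value - 1) : value;
  return loc;
}

int main() {
  FieldLocation a = DecodeFieldIndex(4);  // tagged, in-object slot 2
  assert(!a.is_double && !a.out_of_object && a.slot == 2);
  FieldLocation b = DecodeFieldIndex((-3 * 2) | 1);  // double, out-of-object
  assert(b.is_double && b.out_of_object && b.slot == 2);
}
```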
+
Node* EffectControlLinearizer::LowerLoadTypedElement(Node* node) {
ExternalArrayType array_type = ExternalArrayTypeOf(node->op());
Node* buffer = node->InputAt(0);
@@ -2931,57 +3186,64 @@ void EffectControlLinearizer::LowerTransitionAndStoreElement(Node* node) {
}
auto do_store = __ MakeLabel(MachineRepresentation::kWord32);
- Node* check1 = ObjectIsSmi(value);
- __ GotoIf(check1, &do_store, kind);
+ // We can store a smi anywhere.
+ __ GotoIf(ObjectIsSmi(value), &do_store, kind);
+
+ // {value} is a HeapObject.
+ auto transition_smi_array = __ MakeDeferredLabel();
+ auto transition_double_to_fast = __ MakeDeferredLabel();
+ {
+ __ GotoIfNot(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS),
+ &transition_smi_array);
+ __ GotoIfNot(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS), &do_store,
+ kind);
+
+ // We have double elements kind. Only a HeapNumber can be stored
+ // without effecting a transition.
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* heap_number_map = __ HeapNumberMapConstant();
+ Node* check = __ WordEqual(value_map, heap_number_map);
+ __ GotoIfNot(check, &transition_double_to_fast);
+ __ Goto(&do_store, kind);
+ }
+
+ __ Bind(&transition_smi_array); // deferred code.
{
- // {value} is a HeapObject.
- Node* check2 = IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS);
- auto if_array_not_fast_smi = __ MakeLabel();
- __ GotoIf(check2, &if_array_not_fast_smi);
+ // Transition {array} from HOLEY_SMI_ELEMENTS to HOLEY_DOUBLE_ELEMENTS or
+ // to HOLEY_ELEMENTS.
+ auto if_value_not_heap_number = __ MakeLabel();
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* heap_number_map = __ HeapNumberMapConstant();
+ Node* check = __ WordEqual(value_map, heap_number_map);
+ __ GotoIfNot(check, &if_value_not_heap_number);
{
- // Transition {array} from HOLEY_SMI_ELEMENTS to HOLEY_DOUBLE_ELEMENTS or
- // to HOLEY_ELEMENTS.
- Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
- Node* heap_number_map = __ HeapNumberMapConstant();
- Node* check3 = __ WordEqual(value_map, heap_number_map);
- auto if_value_not_heap_number = __ MakeLabel();
- __ GotoIfNot(check3, &if_value_not_heap_number);
- {
- // {value} is a HeapNumber.
- TransitionElementsTo(node, array, HOLEY_SMI_ELEMENTS,
- HOLEY_DOUBLE_ELEMENTS);
- __ Goto(&do_store, __ Int32Constant(HOLEY_DOUBLE_ELEMENTS));
- }
- __ Bind(&if_value_not_heap_number);
- {
- TransitionElementsTo(node, array, HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS);
- __ Goto(&do_store, __ Int32Constant(HOLEY_ELEMENTS));
- }
+ // {value} is a HeapNumber.
+ TransitionElementsTo(node, array, HOLEY_SMI_ELEMENTS,
+ HOLEY_DOUBLE_ELEMENTS);
+ __ Goto(&do_store, __ Int32Constant(HOLEY_DOUBLE_ELEMENTS));
}
- __ Bind(&if_array_not_fast_smi);
+ __ Bind(&if_value_not_heap_number);
{
- Node* check3 = IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS);
- __ GotoIfNot(check3, &do_store, kind);
- // We have double elements kind.
- Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
- Node* heap_number_map = __ HeapNumberMapConstant();
- Node* check4 = __ WordEqual(value_map, heap_number_map);
- __ GotoIf(check4, &do_store, kind);
- // But the value is not a heap number, so we must transition.
- TransitionElementsTo(node, array, HOLEY_DOUBLE_ELEMENTS, HOLEY_ELEMENTS);
+ TransitionElementsTo(node, array, HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS);
__ Goto(&do_store, __ Int32Constant(HOLEY_ELEMENTS));
}
}
+ __ Bind(&transition_double_to_fast); // deferred code.
+ {
+ TransitionElementsTo(node, array, HOLEY_DOUBLE_ELEMENTS, HOLEY_ELEMENTS);
+ __ Goto(&do_store, __ Int32Constant(HOLEY_ELEMENTS));
+ }
+
// Make sure kind is up-to-date.
__ Bind(&do_store);
kind = do_store.PhiAt(0);
Node* elements = __ LoadField(AccessBuilder::ForJSObjectElements(), array);
- Node* check2 = IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS);
auto if_kind_is_double = __ MakeLabel();
auto done = __ MakeLabel();
- __ GotoIf(check2, &if_kind_is_double);
+ __ GotoIf(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS),
+ &if_kind_is_double);
{
// Our ElementsKind is HOLEY_SMI_ELEMENTS or HOLEY_ELEMENTS.
__ StoreElement(AccessBuilder::ForFixedArrayElement(HOLEY_ELEMENTS),
@@ -2991,9 +3253,8 @@ void EffectControlLinearizer::LowerTransitionAndStoreElement(Node* node) {
__ Bind(&if_kind_is_double);
{
// Our ElementsKind is HOLEY_DOUBLE_ELEMENTS.
- Node* check1 = ObjectIsSmi(value);
auto do_double_store = __ MakeLabel();
- __ GotoIfNot(check1, &do_double_store);
+ __ GotoIfNot(ObjectIsSmi(value), &do_double_store);
{
Node* int_value = ChangeSmiToInt32(value);
Node* float_value = __ ChangeInt32ToFloat64(int_value);
@@ -3010,9 +3271,78 @@ void EffectControlLinearizer::LowerTransitionAndStoreElement(Node* node) {
__ Goto(&done);
}
}
+
+ __ Bind(&done);
+}
+
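The restructured transition logic reduces to a small decision table for HeapObject values (Smis were already stored unconditionally above). A condensed sketch with illustrative enum values:

```cpp
// Enum values are illustrative; V8 uses ElementsKind.
enum class Kind { kHoleySmi, kHoley, kHoleyDouble };

// Given the receiver's current kind and an incoming HeapObject value,
// pick the kind to transition to before the store.
Kind KindForStore(Kind current, bool value_is_heap_number) {
  switch (current) {
    case Kind::kHoleySmi:
      // A Smi array must transition to hold any HeapObject.
      return value_is_heap_number ? Kind::kHoleyDouble : Kind::kHoley;
    case Kind::kHoleyDouble:
      // A double array keeps HeapNumbers; anything else forces HOLEY.
      return value_is_heap_number ? Kind::kHoleyDouble : Kind::kHoley;
    case Kind::kHoley:
      return Kind::kHoley;  // already stores anything without transition
  }
  return current;  // unreachable; silences -Wreturn-type
}
```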
+void EffectControlLinearizer::LowerStoreSignedSmallElement(Node* node) {
+ Node* array = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ // Store a signed small in an output array.
+ //
+ // kind = ElementsKind(array)
+ //
+ // -- STORE PHASE ----------------------
+ // if kind == HOLEY_DOUBLE_ELEMENTS {
+ // float_value = convert smi to float
+ // Store array[index] = float_value
+ // } else {
+ // // kind is HOLEY_SMI_ELEMENTS or HOLEY_ELEMENTS
+ // Store array[index] = value
+ // }
+ //
+ Node* map = __ LoadField(AccessBuilder::ForMap(), array);
+ Node* kind;
+ {
+ Node* bit_field2 = __ LoadField(AccessBuilder::ForMapBitField2(), map);
+ Node* mask = __ Int32Constant(Map::ElementsKindBits::kMask);
+ Node* andit = __ Word32And(bit_field2, mask);
+ Node* shift = __ Int32Constant(Map::ElementsKindBits::kShift);
+ kind = __ Word32Shr(andit, shift);
+ }
+
+ Node* elements = __ LoadField(AccessBuilder::ForJSObjectElements(), array);
+ auto if_kind_is_double = __ MakeLabel();
+ auto done = __ MakeLabel();
+ __ GotoIf(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS),
+ &if_kind_is_double);
+ {
+ // Our ElementsKind is HOLEY_SMI_ELEMENTS or HOLEY_ELEMENTS.
+ // In this case, we know our value is a signed small, and we can optimize
+ // the ElementAccess information.
+ ElementAccess access = AccessBuilder::ForFixedArrayElement();
+ access.type = Type::SignedSmall();
+ access.machine_type = MachineType::TaggedSigned();
+ access.write_barrier_kind = kNoWriteBarrier;
+ __ StoreElement(access, elements, index, value);
+ __ Goto(&done);
+ }
+ __ Bind(&if_kind_is_double);
+ {
+ // Our ElementsKind is HOLEY_DOUBLE_ELEMENTS.
+ Node* int_value = ChangeSmiToInt32(value);
+ Node* float_value = __ ChangeInt32ToFloat64(int_value);
+ __ StoreElement(AccessBuilder::ForFixedDoubleArrayElement(), elements,
+ index, float_value);
+ __ Goto(&done);
+ }
+
__ Bind(&done);
}
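A toy model of the STORE PHASE pseudocode in the comment above, with the two backing-store shapes modeled as plain vectors (names are illustrative, not V8's representation):

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// HOLEY_DOUBLE_ELEMENTS keeps raw doubles; the other holey kinds keep
// tagged values, modeled here as int32_t.
struct ToyElements {
  bool is_double_kind = false;
  std::vector<double> doubles;
  std::vector<int32_t> tagged;
};

void StoreSignedSmall(ToyElements& elements, std::size_t index,
                      int32_t value) {
  if (elements.is_double_kind) {
    elements.doubles[index] = static_cast<double>(value);  // convert smi
  } else {
    elements.tagged[index] = value;  // Smi store: no write barrier needed
  }
}
```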
+void EffectControlLinearizer::LowerRuntimeAbort(Node* node) {
+ BailoutReason reason = BailoutReasonOf(node->op());
+ Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
+ Runtime::FunctionId id = Runtime::kAbort;
+ CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+ graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
+ __ Call(desc, __ CEntryStubConstant(1), jsgraph()->SmiConstant(reason),
+ __ ExternalConstant(ExternalReference(id, isolate())),
+ __ Int32Constant(1), __ NoContextConstant());
+}
+
Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundUp(Node* node) {
// Nothing to be done if a fast hardware instruction is available.
if (machine()->Float64RoundUp().IsSupported()) {
@@ -3339,13 +3669,13 @@ Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundTruncate(Node* node) {
return Just(done.PhiAt(0));
}
-Node* EffectControlLinearizer::LowerLookupHashStorageIndex(Node* node) {
+Node* EffectControlLinearizer::LowerFindOrderedHashMapEntry(Node* node) {
Node* table = NodeProperties::GetValueInput(node, 0);
Node* key = NodeProperties::GetValueInput(node, 1);
{
Callable const callable =
- Builtins::CallableFor(isolate(), Builtins::kMapLookupHashIndex);
+ Builtins::CallableFor(isolate(), Builtins::kFindOrderedHashMapEntry);
Operator::Properties const properties = node->op()->properties();
CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -3356,10 +3686,93 @@ Node* EffectControlLinearizer::LowerLookupHashStorageIndex(Node* node) {
}
}
-Node* EffectControlLinearizer::LowerLoadHashMapValue(Node* node) {
+Node* EffectControlLinearizer::ComputeIntegerHash(Node* value) {
+ // See v8::internal::ComputeIntegerHash()
+ value = __ Int32Add(__ Word32Xor(value, __ Int32Constant(0xffffffff)),
+ __ Word32Shl(value, __ Int32Constant(15)));
+ value = __ Word32Xor(value, __ Word32Shr(value, __ Int32Constant(12)));
+ value = __ Int32Add(value, __ Word32Shl(value, __ Int32Constant(2)));
+ value = __ Word32Xor(value, __ Word32Shr(value, __ Int32Constant(4)));
+ value = __ Int32Mul(value, __ Int32Constant(2057));
+ value = __ Word32Xor(value, __ Word32Shr(value, __ Int32Constant(16)));
+ value = __ Word32And(value, __ Int32Constant(0x3fffffff));
+ return value;
+}
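Written back out as ordinary C++, the graph built above reads as follows, a scalar transcription that should match the v8::internal::ComputeIntegerHash() the comment cites:

```cpp
#include <cstdint>
#include <cstdio>

uint32_t ComputeIntegerHash(uint32_t value) {
  value = (value ^ 0xffffffffu) + (value << 15);
  value = value ^ (value >> 12);
  value = value + (value << 2);
  value = value ^ (value >> 4);
  value = value * 2057;
  value = value ^ (value >> 16);
  return value & 0x3fffffffu;  // keep the result in Smi range
}

int main() {
  std::printf("%u\n", ComputeIntegerHash(42));
}
```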
+
+Node* EffectControlLinearizer::LowerFindOrderedHashMapEntryForInt32Key(
+ Node* node) {
Node* table = NodeProperties::GetValueInput(node, 0);
- Node* index = NodeProperties::GetValueInput(node, 1);
- return __ LoadElement(AccessBuilder::ForFixedArrayElement(), table, index);
+ Node* key = NodeProperties::GetValueInput(node, 1);
+
+ // Compute the integer hash code.
+ Node* hash = ChangeUint32ToUintPtr(ComputeIntegerHash(key));
+
+ Node* number_of_buckets = ChangeSmiToIntPtr(__ LoadField(
+ AccessBuilder::ForOrderedHashTableBaseNumberOfBuckets(), table));
+ hash = __ WordAnd(hash, __ IntSub(number_of_buckets, __ IntPtrConstant(1)));
+ Node* first_entry = ChangeSmiToIntPtr(__ Load(
+ MachineType::TaggedSigned(), table,
+ __ IntAdd(__ WordShl(hash, __ IntPtrConstant(kPointerSizeLog2)),
+ __ IntPtrConstant(OrderedHashMap::kHashTableStartOffset -
+ kHeapObjectTag))));
+
+ auto loop = __ MakeLoopLabel(MachineType::PointerRepresentation());
+ auto done = __ MakeLabel(MachineRepresentation::kWord32);
+ __ Goto(&loop, first_entry);
+ __ Bind(&loop);
+ {
+ Node* entry = loop.PhiAt(0);
+ Node* check =
+ __ WordEqual(entry, __ IntPtrConstant(OrderedHashMap::kNotFound));
+ __ GotoIf(check, &done, __ Int32Constant(-1));
+ entry = __ IntAdd(
+ __ IntMul(entry, __ IntPtrConstant(OrderedHashMap::kEntrySize)),
+ number_of_buckets);
+
+ Node* candidate_key = __ Load(
+ MachineType::AnyTagged(), table,
+ __ IntAdd(__ WordShl(entry, __ IntPtrConstant(kPointerSizeLog2)),
+ __ IntPtrConstant(OrderedHashMap::kHashTableStartOffset -
+ kHeapObjectTag)));
+
+ auto if_match = __ MakeLabel();
+ auto if_notmatch = __ MakeLabel();
+ auto if_notsmi = __ MakeDeferredLabel();
+ __ GotoIfNot(ObjectIsSmi(candidate_key), &if_notsmi);
+ __ Branch(__ Word32Equal(ChangeSmiToInt32(candidate_key), key), &if_match,
+ &if_notmatch);
+
+ __ Bind(&if_notsmi);
+ __ GotoIfNot(
+ __ WordEqual(__ LoadField(AccessBuilder::ForMap(), candidate_key),
+ __ HeapNumberMapConstant()),
+ &if_notmatch);
+ __ Branch(__ Float64Equal(__ LoadField(AccessBuilder::ForHeapNumberValue(),
+ candidate_key),
+ __ ChangeInt32ToFloat64(key)),
+ &if_match, &if_notmatch);
+
+ __ Bind(&if_match);
+ {
+ Node* index = ChangeIntPtrToInt32(entry);
+ __ Goto(&done, index);
+ }
+
+ __ Bind(&if_notmatch);
+ {
+ Node* next_entry = ChangeSmiToIntPtr(__ Load(
+ MachineType::TaggedSigned(), table,
+ __ IntAdd(
+ __ WordShl(entry, __ IntPtrConstant(kPointerSizeLog2)),
+ __ IntPtrConstant(OrderedHashMap::kHashTableStartOffset +
+ OrderedHashMap::kChainOffset * kPointerSize -
+ kHeapObjectTag))));
+ __ Goto(&loop, next_entry);
+ }
+ }
+
+ __ Bind(&done);
+ return done.PhiAt(0);
}
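The probe loop walks bucket chains in the OrderedHashMap's backing store. A toy model with the packed FixedArray layout split into two vectors for clarity (the real table also compares HeapNumber keys, omitted here):

```cpp
#include <cstdint>
#include <utility>
#include <vector>

struct ToyOrderedHashMap {
  static constexpr intptr_t kNotFound = -1;
  std::vector<intptr_t> buckets;                      // power-of-two size
  std::vector<std::pair<int32_t, intptr_t>> entries;  // (key, chain link)

  intptr_t FindEntry(int32_t key, uint32_t hash) const {
    // Mask the hash down to a bucket, then follow that bucket's chain.
    intptr_t entry = buckets[hash & (buckets.size() - 1)];
    while (entry != kNotFound) {
      if (entries[entry].first == key) return entry;  // match
      entry = entries[entry].second;                  // next in chain
    }
    return kNotFound;
  }
};
```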
#undef __
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index cf6e134b70..e17f097e9e 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -56,7 +56,6 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerCheckInternalizedString(Node* node, Node* frame_state);
Node* LowerCheckMaps(Node* node, Node* frame_state);
Node* LowerCompareMaps(Node* node);
- void LowerCheckMapValue(Node* node, Node* frame_state);
Node* LowerCheckNumber(Node* node, Node* frame_state);
Node* LowerCheckReceiver(Node* node, Node* frame_state);
Node* LowerCheckString(Node* node, Node* frame_state);
@@ -85,8 +84,11 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerTruncateTaggedToFloat64(Node* node);
Node* LowerTruncateTaggedToWord32(Node* node);
Node* LowerCheckedTruncateTaggedToWord32(Node* node, Node* frame_state);
+ Node* LowerObjectIsArrayBufferView(Node* node);
Node* LowerObjectIsCallable(Node* node);
+ Node* LowerObjectIsConstructor(Node* node);
Node* LowerObjectIsDetectableCallable(Node* node);
+ Node* LowerObjectIsMinusZero(Node* node);
Node* LowerObjectIsNaN(Node* node);
Node* LowerObjectIsNonCallable(Node* node);
Node* LowerObjectIsNumber(Node* node);
@@ -97,7 +99,9 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerObjectIsUndetectable(Node* node);
Node* LowerArgumentsFrame(Node* node);
Node* LowerArgumentsLength(Node* node);
- Node* LowerNewUnmappedArgumentsElements(Node* node);
+ Node* LowerNewDoubleElements(Node* node);
+ Node* LowerNewSmiOrObjectElements(Node* node);
+ Node* LowerNewArgumentsElements(Node* node);
Node* LowerArrayBufferWasNeutered(Node* node);
Node* LowerStringCharAt(Node* node);
Node* LowerStringCharCodeAt(Node* node);
@@ -119,11 +123,14 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerEnsureWritableFastElements(Node* node);
Node* LowerMaybeGrowFastElements(Node* node, Node* frame_state);
void LowerTransitionElementsKind(Node* node);
+ Node* LowerLoadFieldByIndex(Node* node);
Node* LowerLoadTypedElement(Node* node);
void LowerStoreTypedElement(Node* node);
- Node* LowerLookupHashStorageIndex(Node* node);
- Node* LowerLoadHashMapValue(Node* node);
+ void LowerStoreSignedSmallElement(Node* node);
+ Node* LowerFindOrderedHashMapEntry(Node* node);
+ Node* LowerFindOrderedHashMapEntryForInt32Key(Node* node);
void LowerTransitionAndStoreElement(Node* node);
+ void LowerRuntimeAbort(Node* node);
// Lowering of optional operators.
Maybe<Node*> LowerFloat64RoundUp(Node* node);
@@ -138,11 +145,15 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* value,
Node* frame_state);
Node* BuildFloat64RoundDown(Node* value);
+ Node* ComputeIntegerHash(Node* value);
Node* LowerStringComparison(Callable const& callable, Node* node);
Node* IsElementsKindGreaterThan(Node* kind, ElementsKind reference_kind);
Node* ChangeInt32ToSmi(Node* value);
+ Node* ChangeIntPtrToInt32(Node* value);
+ Node* ChangeUint32ToUintPtr(Node* value);
Node* ChangeUint32ToSmi(Node* value);
+ Node* ChangeSmiToIntPtr(Node* value);
Node* ChangeSmiToInt32(Node* value);
Node* ObjectIsSmi(Node* value);
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index 83e3178173..aa2a1b2f3a 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -1,14 +1,12 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
+// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/escape-analysis-reducer.h"
#include "src/compiler/all-nodes.h"
-#include "src/compiler/js-graph.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
-#include "src/counters.h"
#include "src/frame-constants.h"
namespace v8 {
@@ -24,111 +22,33 @@ namespace compiler {
#define TRACE(...)
#endif // DEBUG
-EscapeAnalysisReducer::EscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph,
- EscapeAnalysis* escape_analysis,
- Zone* zone)
+EscapeAnalysisReducer::EscapeAnalysisReducer(
+ Editor* editor, JSGraph* jsgraph, EscapeAnalysisResult analysis_result,
+ Zone* zone)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
- escape_analysis_(escape_analysis),
- zone_(zone),
- fully_reduced_(static_cast<int>(jsgraph->graph()->NodeCount() * 2), zone),
- exists_virtual_allocate_(escape_analysis->ExistsVirtualAllocate()) {}
+ analysis_result_(analysis_result),
+ object_id_cache_(zone),
+ node_cache_(jsgraph->graph(), zone),
+ arguments_elements_(zone),
+ zone_(zone) {}
-Reduction EscapeAnalysisReducer::ReduceNode(Node* node) {
- if (node->id() < static_cast<NodeId>(fully_reduced_.length()) &&
- fully_reduced_.Contains(node->id())) {
- return NoChange();
- }
-
- switch (node->opcode()) {
- case IrOpcode::kLoadField:
- case IrOpcode::kLoadElement:
- return ReduceLoad(node);
- case IrOpcode::kStoreField:
- case IrOpcode::kStoreElement:
- return ReduceStore(node);
- case IrOpcode::kCheckMaps:
- return ReduceCheckMaps(node);
- case IrOpcode::kAllocate:
- return ReduceAllocate(node);
- case IrOpcode::kFinishRegion:
- return ReduceFinishRegion(node);
- case IrOpcode::kReferenceEqual:
- return ReduceReferenceEqual(node);
- case IrOpcode::kObjectIsSmi:
- return ReduceObjectIsSmi(node);
- // FrameStates and Value nodes are preprocessed here,
- // and visited via ReduceFrameStateUses from their user nodes.
- case IrOpcode::kFrameState:
- case IrOpcode::kStateValues: {
- if (node->id() >= static_cast<NodeId>(fully_reduced_.length()) ||
- fully_reduced_.Contains(node->id())) {
- break;
- }
- bool depends_on_object_state = false;
- for (Node* input : node->inputs()) {
- switch (input->opcode()) {
- case IrOpcode::kAllocate:
- case IrOpcode::kFinishRegion:
- depends_on_object_state =
- depends_on_object_state || escape_analysis()->IsVirtual(input);
- break;
- case IrOpcode::kFrameState:
- case IrOpcode::kStateValues:
- depends_on_object_state =
- depends_on_object_state ||
- input->id() >= static_cast<NodeId>(fully_reduced_.length()) ||
- !fully_reduced_.Contains(input->id());
- break;
- default:
- break;
- }
- }
- if (!depends_on_object_state) {
- fully_reduced_.Add(node->id());
- }
- return NoChange();
- }
- case IrOpcode::kNewUnmappedArgumentsElements:
- arguments_elements_.insert(node);
- break;
- default:
- // TODO(sigurds): Change this to GetFrameStateInputCount once
- // it is working. For now we use EffectInputCount > 0 to determine
- // whether a node might have a frame state input.
- if (exists_virtual_allocate_ && node->op()->EffectInputCount() > 0) {
- return ReduceFrameStateUses(node);
- }
- break;
- }
- return NoChange();
-}
-
-Reduction EscapeAnalysisReducer::Reduce(Node* node) {
- Reduction reduction = ReduceNode(node);
- if (reduction.Changed() && node != reduction.replacement()) {
- escape_analysis()->SetReplacement(node, reduction.replacement());
- }
- return reduction;
-}
-
-namespace {
-
-Node* MaybeGuard(JSGraph* jsgraph, Zone* zone, Node* original,
- Node* replacement) {
+Node* EscapeAnalysisReducer::MaybeGuard(Node* original, Node* replacement) {
// We might need to guard the replacement if the type of the {replacement}
// node is not in a sub-type relation to the type of the {original} node.
Type* const replacement_type = NodeProperties::GetType(replacement);
Type* const original_type = NodeProperties::GetType(original);
if (!replacement_type->Is(original_type)) {
Node* const control = NodeProperties::GetControlInput(original);
- replacement = jsgraph->graph()->NewNode(
- jsgraph->common()->TypeGuard(original_type), replacement, control);
+ replacement = jsgraph()->graph()->NewNode(
+ jsgraph()->common()->TypeGuard(original_type), replacement, control);
NodeProperties::SetType(replacement, original_type);
}
return replacement;
}
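The guard above exists to preserve typing invariants: if the replacement's type is not a subtype of the original's, a TypeGuard node pins the original type onto the replacement. A minimal standalone sketch of the same check, using a hypothetical two-level type lattice in place of V8's Type class (all names below are illustrative, not V8 API):

#include <iostream>
#include <string>

// Hypothetical two-level lattice: "Any" is the top type, all others are leaves.
struct ToyType {
  std::string name;
  bool IsAny() const { return name == "Any"; }
};

// Subtyping in this toy lattice: reflexivity, plus everything is in "Any".
bool Is(const ToyType& a, const ToyType& b) {
  return b.IsAny() || a.name == b.name;
}

struct ToyNode {
  std::string op;
  ToyType type;
};

// Same shape as MaybeGuard: wrap the replacement in a guard carrying the
// original's type whenever the subtype relation does not hold.
ToyNode MaybeGuard(const ToyNode& original, ToyNode replacement) {
  if (!Is(replacement.type, original.type)) {
    replacement = ToyNode{"TypeGuard(" + replacement.op + ")", original.type};
  }
  return replacement;
}

int main() {
  ToyNode original{"LoadField", {"SignedSmall"}};
  ToyNode phi{"Phi", {"Any"}};
  ToyNode guarded = MaybeGuard(original, phi);
  std::cout << guarded.op << " : " << guarded.type.name << "\n";
  // Prints: TypeGuard(Phi) : SignedSmall
}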
+namespace {
+
Node* SkipTypeGuards(Node* node) {
while (node->opcode() == IrOpcode::kTypeGuard) {
node = NodeProperties::GetValueInput(node, 0);
@@ -138,280 +58,181 @@ Node* SkipTypeGuards(Node* node) {
} // namespace
-Reduction EscapeAnalysisReducer::ReduceLoad(Node* node) {
- DCHECK(node->opcode() == IrOpcode::kLoadField ||
- node->opcode() == IrOpcode::kLoadElement);
- if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
- fully_reduced_.Add(node->id());
- }
- if (escape_analysis()->IsVirtual(
- SkipTypeGuards(NodeProperties::GetValueInput(node, 0)))) {
- if (Node* rep = escape_analysis()->GetReplacement(node)) {
- TRACE("Replaced #%d (%s) with #%d (%s)\n", node->id(),
- node->op()->mnemonic(), rep->id(), rep->op()->mnemonic());
- rep = MaybeGuard(jsgraph(), zone(), node, rep);
- ReplaceWithValue(node, rep);
- return Replace(rep);
- }
- }
- return NoChange();
-}
-
-
-Reduction EscapeAnalysisReducer::ReduceStore(Node* node) {
- DCHECK(node->opcode() == IrOpcode::kStoreField ||
- node->opcode() == IrOpcode::kStoreElement);
- if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
- fully_reduced_.Add(node->id());
- }
- if (escape_analysis()->IsVirtual(
- SkipTypeGuards(NodeProperties::GetValueInput(node, 0)))) {
- TRACE("Removed #%d (%s) from effect chain\n", node->id(),
- node->op()->mnemonic());
- RelaxEffectsAndControls(node);
- return Changed(node);
- }
- return NoChange();
-}
-
-Reduction EscapeAnalysisReducer::ReduceCheckMaps(Node* node) {
- DCHECK(node->opcode() == IrOpcode::kCheckMaps);
- if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
- fully_reduced_.Add(node->id());
+Node* EscapeAnalysisReducer::ObjectIdNode(const VirtualObject* vobject) {
+ VirtualObject::Id id = vobject->id();
+ if (id >= object_id_cache_.size()) object_id_cache_.resize(id + 1);
+ if (!object_id_cache_[id]) {
+ Node* node = jsgraph()->graph()->NewNode(jsgraph()->common()->ObjectId(id));
+ NodeProperties::SetType(node, Type::Object());
+ object_id_cache_[id] = node;
}
- if (escape_analysis()->IsVirtual(
- SkipTypeGuards(NodeProperties::GetValueInput(node, 0))) &&
- !escape_analysis()->IsEscaped(node)) {
- TRACE("Removed #%d (%s) from effect chain\n", node->id(),
- node->op()->mnemonic());
- RelaxEffectsAndControls(node);
- return Changed(node);
- }
- return NoChange();
+ return object_id_cache_[id];
}
-Reduction EscapeAnalysisReducer::ReduceAllocate(Node* node) {
- DCHECK_EQ(node->opcode(), IrOpcode::kAllocate);
- if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
- fully_reduced_.Add(node->id());
- }
- if (escape_analysis()->IsVirtual(node)) {
+Reduction EscapeAnalysisReducer::Reduce(Node* node) {
+ if (Node* replacement = analysis_result().GetReplacementOf(node)) {
+ DCHECK(node->opcode() != IrOpcode::kAllocate &&
+ node->opcode() != IrOpcode::kFinishRegion);
+ DCHECK_NE(replacement, node);
+ if (replacement != jsgraph()->Dead()) {
+ replacement = MaybeGuard(node, replacement);
+ }
RelaxEffectsAndControls(node);
- TRACE("Removed allocate #%d from effect chain\n", node->id());
- return Changed(node);
+ return Replace(replacement);
}
- return NoChange();
-}
-
-Reduction EscapeAnalysisReducer::ReduceFinishRegion(Node* node) {
- DCHECK_EQ(node->opcode(), IrOpcode::kFinishRegion);
- Node* effect = NodeProperties::GetEffectInput(node, 0);
- if (effect->opcode() == IrOpcode::kBeginRegion) {
- // We only add it now to remove empty Begin/Finish region pairs
- // in the process.
- if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
- fully_reduced_.Add(node->id());
+ switch (node->opcode()) {
+ case IrOpcode::kAllocate: {
+ const VirtualObject* vobject = analysis_result().GetVirtualObject(node);
+ if (vobject && !vobject->HasEscaped()) {
+ RelaxEffectsAndControls(node);
+ }
+ return NoChange();
}
- RelaxEffectsAndControls(effect);
- RelaxEffectsAndControls(node);
-#ifdef DEBUG
- if (FLAG_trace_turbo_escape) {
- PrintF("Removed region #%d / #%d from effect chain,", effect->id(),
- node->id());
- PrintF(" %d user(s) of #%d remain(s):", node->UseCount(), node->id());
- for (Edge edge : node->use_edges()) {
- PrintF(" #%d", edge.from()->id());
+ case IrOpcode::kFinishRegion: {
+ Node* effect = NodeProperties::GetEffectInput(node, 0);
+ if (effect->opcode() == IrOpcode::kBeginRegion) {
+ RelaxEffectsAndControls(effect);
+ RelaxEffectsAndControls(node);
}
- PrintF("\n");
+ return NoChange();
}
-#endif // DEBUG
- return Changed(node);
- }
- return NoChange();
-}
-
-
-Reduction EscapeAnalysisReducer::ReduceReferenceEqual(Node* node) {
- DCHECK_EQ(node->opcode(), IrOpcode::kReferenceEqual);
- Node* left = SkipTypeGuards(NodeProperties::GetValueInput(node, 0));
- Node* right = SkipTypeGuards(NodeProperties::GetValueInput(node, 1));
- if (escape_analysis()->IsVirtual(left)) {
- if (escape_analysis()->IsVirtual(right) &&
- escape_analysis()->CompareVirtualObjects(left, right)) {
- ReplaceWithValue(node, jsgraph()->TrueConstant());
- TRACE("Replaced ref eq #%d with true\n", node->id());
- return Replace(jsgraph()->TrueConstant());
+ case IrOpcode::kNewArgumentsElements:
+ arguments_elements_.insert(node);
+ return NoChange();
+ default: {
+ // TODO(sigurds): Change this to GetFrameStateInputCount once
+ // it is working. For now we use EffectInputCount > 0 to determine
+ // whether a node might have a frame state input.
+ if (node->op()->EffectInputCount() > 0) {
+ ReduceFrameStateInputs(node);
+ }
+ return NoChange();
}
- // Right-hand side is not a virtual object, or a different one.
- ReplaceWithValue(node, jsgraph()->FalseConstant());
- TRACE("Replaced ref eq #%d with false\n", node->id());
- return Replace(jsgraph()->FalseConstant());
- } else if (escape_analysis()->IsVirtual(right)) {
- // Left-hand side is not a virtual object.
- ReplaceWithValue(node, jsgraph()->FalseConstant());
- TRACE("Replaced ref eq #%d with false\n", node->id());
- return Replace(jsgraph()->FalseConstant());
}
- return NoChange();
}
-
-Reduction EscapeAnalysisReducer::ReduceObjectIsSmi(Node* node) {
- DCHECK_EQ(node->opcode(), IrOpcode::kObjectIsSmi);
- Node* input = SkipTypeGuards(NodeProperties::GetValueInput(node, 0));
- if (escape_analysis()->IsVirtual(input)) {
- ReplaceWithValue(node, jsgraph()->FalseConstant());
- TRACE("Replaced ObjectIsSmi #%d with false\n", node->id());
- return Replace(jsgraph()->FalseConstant());
+// While doing DFS on the FrameState tree, we have to recognize duplicate
+// occurrences of virtual objects.
+class Deduplicator {
+ public:
+ explicit Deduplicator(Zone* zone) : is_duplicate_(zone) {}
+ bool SeenBefore(const VirtualObject* vobject) {
+ VirtualObject::Id id = vobject->id();
+ if (id >= is_duplicate_.size()) {
+ is_duplicate_.resize(id + 1);
+ }
+ bool is_duplicate = is_duplicate_[id];
+ is_duplicate_[id] = true;
+ return is_duplicate;
}
- return NoChange();
-}
+ private:
+ ZoneVector<bool> is_duplicate_;
+};
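The Deduplicator is a grow-on-demand bitmap over virtual-object ids: the first query for an id answers false and marks it, every later query answers true. A self-contained analogue, with std::vector<bool> standing in for ZoneVector<bool>:

#include <cstddef>
#include <iostream>
#include <vector>

// Grow-on-demand bitmap: the first SeenBefore(id) returns false and marks
// the id, all later calls for the same id return true.
class IdDeduplicator {
 public:
  bool SeenBefore(std::size_t id) {
    if (id >= seen_.size()) seen_.resize(id + 1, false);
    bool was_seen = seen_[id];
    seen_[id] = true;
    return was_seen;
  }

 private:
  std::vector<bool> seen_;
};

int main() {
  IdDeduplicator dedup;
  std::cout << dedup.SeenBefore(3);          // 0: first occurrence
  std::cout << dedup.SeenBefore(3);          // 1: duplicate
  std::cout << dedup.SeenBefore(7) << "\n";  // 0: different id
}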
-Reduction EscapeAnalysisReducer::ReduceFrameStateUses(Node* node) {
+void EscapeAnalysisReducer::ReduceFrameStateInputs(Node* node) {
DCHECK_GE(node->op()->EffectInputCount(), 1);
- if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
- fully_reduced_.Add(node->id());
- }
- bool changed = false;
for (int i = 0; i < node->InputCount(); ++i) {
Node* input = node->InputAt(i);
if (input->opcode() == IrOpcode::kFrameState) {
- if (Node* ret = ReduceDeoptState(input, node, false)) {
+ Deduplicator deduplicator(zone());
+ if (Node* ret = ReduceDeoptState(input, node, &deduplicator)) {
node->ReplaceInput(i, ret);
- changed = true;
}
}
}
- if (changed) {
- return Changed(node);
- }
- return NoChange();
}
-
-// Returns the clone if it duplicated the node, and null otherwise.
Node* EscapeAnalysisReducer::ReduceDeoptState(Node* node, Node* effect,
- bool multiple_users) {
- DCHECK(node->opcode() == IrOpcode::kFrameState ||
- node->opcode() == IrOpcode::kStateValues);
- if (node->id() < static_cast<NodeId>(fully_reduced_.length()) &&
- fully_reduced_.Contains(node->id())) {
- return nullptr;
- }
- TRACE("Reducing %s %d\n", node->op()->mnemonic(), node->id());
- Node* clone = nullptr;
- bool node_multiused = node->UseCount() > 1;
- bool multiple_users_rec = multiple_users || node_multiused;
- for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
- Node* input = NodeProperties::GetValueInput(node, i);
- if (input->opcode() == IrOpcode::kStateValues) {
- if (Node* ret = ReduceDeoptState(input, effect, multiple_users_rec)) {
- if (node_multiused || (multiple_users && !clone)) {
- TRACE(" Cloning #%d", node->id());
- node = clone = jsgraph()->graph()->CloneNode(node);
- TRACE(" to #%d\n", node->id());
- node_multiused = false;
- }
- NodeProperties::ReplaceValueInput(node, ret, i);
- }
- } else {
- if (Node* ret = ReduceStateValueInput(node, i, effect, node_multiused,
- clone, multiple_users)) {
- DCHECK_NULL(clone);
- node_multiused = false; // Don't clone anymore.
- node = clone = ret;
- }
- }
- }
+ Deduplicator* deduplicator) {
if (node->opcode() == IrOpcode::kFrameState) {
- Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
- if (outer_frame_state->opcode() == IrOpcode::kFrameState) {
- if (Node* ret =
- ReduceDeoptState(outer_frame_state, effect, multiple_users_rec)) {
- if (node_multiused || (multiple_users && !clone)) {
- TRACE(" Cloning #%d", node->id());
- node = clone = jsgraph()->graph()->CloneNode(node);
- TRACE(" to #%d\n", node->id());
- }
- NodeProperties::ReplaceFrameStateInput(node, ret);
- }
+ NodeHashCache::Constructor new_node(&node_cache_, node);
+ // This input order is important to match the DFS traversal used in the
+ // instruction selector. Otherwise, the instruction selector might find a
+ // duplicate node before the original one.
+ for (int input_id : {kFrameStateOuterStateInput, kFrameStateFunctionInput,
+ kFrameStateParametersInput, kFrameStateContextInput,
+ kFrameStateLocalsInput, kFrameStateStackInput}) {
+ Node* input = node->InputAt(input_id);
+ new_node.ReplaceInput(ReduceDeoptState(input, effect, deduplicator),
+ input_id);
}
- }
- if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
- fully_reduced_.Add(node->id());
- }
- return clone;
-}
-
-
-// Returns the clone if it duplicated the node, and null otherwise.
-Node* EscapeAnalysisReducer::ReduceStateValueInput(Node* node, int node_index,
- Node* effect,
- bool node_multiused,
- bool already_cloned,
- bool multiple_users) {
- Node* input = SkipTypeGuards(NodeProperties::GetValueInput(node, node_index));
- if (node->id() < static_cast<NodeId>(fully_reduced_.length()) &&
- fully_reduced_.Contains(node->id())) {
- return nullptr;
- }
- TRACE("Reducing State Input #%d (%s)\n", input->id(),
- input->op()->mnemonic());
- Node* clone = nullptr;
- if (input->opcode() == IrOpcode::kFinishRegion ||
- input->opcode() == IrOpcode::kAllocate) {
- if (escape_analysis()->IsVirtual(input)) {
- if (escape_analysis()->IsCyclicObjectState(effect, input)) {
- // TODO(mstarzinger): Represent cyclic object states differently to
- // ensure the scheduler can properly handle such object states.
- compilation_failed_ = true;
- return nullptr;
- }
- if (Node* object_state =
- escape_analysis()->GetOrCreateObjectState(effect, input)) {
- if (node_multiused || (multiple_users && !already_cloned)) {
- TRACE("Cloning #%d", node->id());
- node = clone = jsgraph()->graph()->CloneNode(node);
- TRACE(" to #%d\n", node->id());
- node_multiused = false;
- already_cloned = true;
+ return new_node.Get();
+ } else if (node->opcode() == IrOpcode::kStateValues) {
+ NodeHashCache::Constructor new_node(&node_cache_, node);
+ for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
+ Node* input = NodeProperties::GetValueInput(node, i);
+ new_node.ReplaceValueInput(ReduceDeoptState(input, effect, deduplicator),
+ i);
+ }
+ return new_node.Get();
+ } else if (const VirtualObject* vobject =
+ analysis_result().GetVirtualObject(SkipTypeGuards(node))) {
+ if (vobject->HasEscaped()) return node;
+ if (deduplicator->SeenBefore(vobject)) {
+ return ObjectIdNode(vobject);
+ } else {
+ std::vector<Node*> inputs;
+ for (int offset = 0; offset < vobject->size(); offset += kPointerSize) {
+ Node* field =
+ analysis_result().GetVirtualObjectField(vobject, offset, effect);
+ CHECK_NOT_NULL(field);
+ if (field != jsgraph()->Dead()) {
+ inputs.push_back(ReduceDeoptState(field, effect, deduplicator));
}
- NodeProperties::ReplaceValueInput(node, object_state, node_index);
- TRACE("Replaced state #%d input #%d with object state #%d\n",
- node->id(), input->id(), object_state->id());
- } else {
- TRACE("No object state replacement for #%d at effect #%d available.\n",
- input->id(), effect->id());
- UNREACHABLE();
}
+ int num_inputs = static_cast<int>(inputs.size());
+ NodeHashCache::Constructor new_node(
+ &node_cache_,
+ jsgraph()->common()->ObjectState(vobject->id(), num_inputs),
+ num_inputs, &inputs.front(), NodeProperties::GetType(node));
+ return new_node.Get();
}
+ } else {
+ return node;
}
- return clone;
}
-
void EscapeAnalysisReducer::VerifyReplacement() const {
-#ifdef DEBUG
AllNodes all(zone(), jsgraph()->graph());
for (Node* node : all.reachable) {
if (node->opcode() == IrOpcode::kAllocate) {
- CHECK(!escape_analysis_->IsVirtual(node));
+ if (const VirtualObject* vobject =
+ analysis_result().GetVirtualObject(node)) {
+ if (!vobject->HasEscaped()) {
+ V8_Fatal(__FILE__, __LINE__,
+ "Escape analysis failed to remove node %s#%d\n",
+ node->op()->mnemonic(), node->id());
+ }
+ }
}
}
-#endif // DEBUG
}
void EscapeAnalysisReducer::Finalize() {
for (Node* node : arguments_elements_) {
- DCHECK(node->opcode() == IrOpcode::kNewUnmappedArgumentsElements);
+ DCHECK_EQ(IrOpcode::kNewArgumentsElements, node->opcode());
+ int mapped_count = OpParameter<int>(node);
Node* arguments_frame = NodeProperties::GetValueInput(node, 0);
if (arguments_frame->opcode() != IrOpcode::kArgumentsFrame) continue;
Node* arguments_length = NodeProperties::GetValueInput(node, 1);
if (arguments_length->opcode() != IrOpcode::kArgumentsLength) continue;
+ // If mapped arguments are specified, then their number is always equal to
+    // the number of formal parameters. This allows us to use just the
+    // three-value {ArgumentsStateType} enum because the deoptimizer can
+    // reconstruct the value of {mapped_count} from the number of formal
+    // parameters.
+ DCHECK_IMPLIES(
+ mapped_count != 0,
+ mapped_count == FormalParameterCountOf(arguments_length->op()));
+ ArgumentsStateType type = IsRestLengthOf(arguments_length->op())
+ ? ArgumentsStateType::kRestParameter
+ : (mapped_count == 0)
+ ? ArgumentsStateType::kUnmappedArguments
+ : ArgumentsStateType::kMappedArguments;
+
Node* arguments_length_state = nullptr;
for (Edge edge : arguments_length->use_edges()) {
Node* use = edge.from();
@@ -422,8 +243,7 @@ void EscapeAnalysisReducer::Finalize() {
case IrOpcode::kTypedStateValues:
if (!arguments_length_state) {
arguments_length_state = jsgraph()->graph()->NewNode(
- jsgraph()->common()->ArgumentsLengthState(
- IsRestLengthOf(arguments_length->op())));
+ jsgraph()->common()->ArgumentsLengthState(type));
NodeProperties::SetType(arguments_length_state,
Type::OtherInternal());
}
@@ -450,7 +270,11 @@ void EscapeAnalysisReducer::Finalize() {
case IrOpcode::kTypedObjectState:
break;
case IrOpcode::kLoadElement:
- loads.push_back(use);
+ if (mapped_count == 0) {
+ loads.push_back(use);
+ } else {
+ escaping_use = true;
+ }
break;
case IrOpcode::kLoadField:
if (FieldAccessOf(use->op()).offset == FixedArray::kLengthOffset) {
@@ -469,8 +293,7 @@ void EscapeAnalysisReducer::Finalize() {
}
if (!escaping_use) {
Node* arguments_elements_state = jsgraph()->graph()->NewNode(
- jsgraph()->common()->ArgumentsElementsState(
- IsRestLengthOf(arguments_length->op())));
+ jsgraph()->common()->ArgumentsElementsState(type));
NodeProperties::SetType(arguments_elements_state, Type::OtherInternal());
ReplaceWithValue(node, arguments_elements_state);
@@ -517,6 +340,88 @@ void EscapeAnalysisReducer::Finalize() {
}
}
+Node* NodeHashCache::Query(Node* node) {
+ auto it = cache_.find(node);
+ if (it != cache_.end()) {
+ return *it;
+ } else {
+ return nullptr;
+ }
+}
+
+NodeHashCache::Constructor::Constructor(NodeHashCache* cache,
+ const Operator* op, int input_count,
+ Node** inputs, Type* type)
+ : node_cache_(cache), from_(nullptr) {
+ if (node_cache_->temp_nodes_.size() > 0) {
+ tmp_ = node_cache_->temp_nodes_.back();
+ node_cache_->temp_nodes_.pop_back();
+ int tmp_input_count = tmp_->InputCount();
+ if (input_count <= tmp_input_count) {
+ tmp_->TrimInputCount(input_count);
+ }
+ for (int i = 0; i < input_count; ++i) {
+ if (i < tmp_input_count) {
+ tmp_->ReplaceInput(i, inputs[i]);
+ } else {
+ tmp_->AppendInput(node_cache_->graph_->zone(), inputs[i]);
+ }
+ }
+ NodeProperties::ChangeOp(tmp_, op);
+ } else {
+ tmp_ = node_cache_->graph_->NewNode(op, input_count, inputs);
+ }
+ NodeProperties::SetType(tmp_, type);
+}
+
+Node* NodeHashCache::Constructor::Get() {
+ DCHECK(tmp_ || from_);
+ Node* node;
+ if (!tmp_) {
+ node = node_cache_->Query(from_);
+ if (!node) node = from_;
+ } else {
+ node = node_cache_->Query(tmp_);
+ if (node) {
+ node_cache_->temp_nodes_.push_back(tmp_);
+ } else {
+ node = tmp_;
+ node_cache_->Insert(node);
+ }
+ }
+ tmp_ = from_ = nullptr;
+ return node;
+}
+
+Node* NodeHashCache::Constructor::MutableNode() {
+ DCHECK(tmp_ || from_);
+ if (!tmp_) {
+ if (node_cache_->temp_nodes_.empty()) {
+ tmp_ = node_cache_->graph_->CloneNode(from_);
+ } else {
+ tmp_ = node_cache_->temp_nodes_.back();
+ node_cache_->temp_nodes_.pop_back();
+ int from_input_count = from_->InputCount();
+ int tmp_input_count = tmp_->InputCount();
+ if (from_input_count <= tmp_input_count) {
+ tmp_->TrimInputCount(from_input_count);
+ }
+ for (int i = 0; i < from_input_count; ++i) {
+ if (i < tmp_input_count) {
+ tmp_->ReplaceInput(i, from_->InputAt(i));
+ } else {
+ tmp_->AppendInput(node_cache_->graph_->zone(), from_->InputAt(i));
+ }
+ }
+ NodeProperties::SetType(tmp_, NodeProperties::GetType(from_));
+ NodeProperties::ChangeOp(tmp_, from_->op());
+ }
+ }
+ return tmp_;
+}
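MutableNode prefers recycling a scratch node from temp_nodes_ over allocating a fresh clone, and Get() returns the scratch node to that pool whenever the cache already holds an equal node. The recycling itself is an ordinary free-list pool, roughly as below (illustrative types only):

#include <iostream>
#include <memory>
#include <vector>

struct Scratch {
  std::vector<int> inputs;
};

// Minimal free-list pool: Acquire() prefers a recycled object over a fresh
// allocation, Release() returns one for later reuse (cf. temp_nodes_).
class ScratchPool {
 public:
  Scratch* Acquire() {
    if (!free_.empty()) {
      Scratch* recycled = free_.back();
      free_.pop_back();
      return recycled;
    }
    owned_.push_back(std::make_unique<Scratch>());
    return owned_.back().get();
  }
  void Release(Scratch* scratch) { free_.push_back(scratch); }

 private:
  std::vector<std::unique_ptr<Scratch>> owned_;  // Owns all storage.
  std::vector<Scratch*> free_;                   // Currently unused objects.
};

int main() {
  ScratchPool pool;
  Scratch* first = pool.Acquire();
  pool.Release(first);
  Scratch* second = pool.Acquire();        // Reuses the released storage.
  std::cout << (first == second) << "\n";  // Prints: 1
}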
+
+#undef TRACE
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.h b/deps/v8/src/compiler/escape-analysis-reducer.h
index 9bbabeb221..b89d4d03e8 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.h
+++ b/deps/v8/src/compiler/escape-analysis-reducer.h
@@ -1,4 +1,4 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
+// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -6,7 +6,6 @@
#define V8_COMPILER_ESCAPE_ANALYSIS_REDUCER_H_
#include "src/base/compiler-specific.h"
-#include "src/bit-vector.h"
#include "src/compiler/escape-analysis.h"
#include "src/compiler/graph-reducer.h"
#include "src/globals.h"
@@ -15,55 +14,101 @@ namespace v8 {
namespace internal {
namespace compiler {
-// Forward declarations.
+class Deduplicator;
class JSGraph;
+// Perform hash-consing when creating or mutating nodes. Used to avoid duplicate
+// nodes when creating ObjectState, StateValues and FrameState nodes.
+class NodeHashCache {
+ public:
+ NodeHashCache(Graph* graph, Zone* zone)
+ : graph_(graph), cache_(zone), temp_nodes_(zone) {}
+
+ // Handle to a conceptually new mutable node. Tries to re-use existing nodes
+ // and to recycle memory if possible.
+ class Constructor {
+ public:
+ // Construct a new node as a clone of [from].
+ Constructor(NodeHashCache* cache, Node* from)
+ : node_cache_(cache), from_(from), tmp_(nullptr) {}
+ // Construct a new node from scratch.
+ Constructor(NodeHashCache* cache, const Operator* op, int input_count,
+ Node** inputs, Type* type);
+
+ // Modify the new node.
+ void ReplaceValueInput(Node* input, int i) {
+ if (!tmp_ && input == NodeProperties::GetValueInput(from_, i)) return;
+ Node* node = MutableNode();
+ NodeProperties::ReplaceValueInput(node, input, i);
+ }
+ void ReplaceInput(Node* input, int i) {
+ if (!tmp_ && input == from_->InputAt(i)) return;
+ Node* node = MutableNode();
+ node->ReplaceInput(i, input);
+ }
+
+ // Obtain the mutated node or a cached copy. Invalidates the [Constructor].
+ Node* Get();
+
+ private:
+ Node* MutableNode();
+
+ NodeHashCache* node_cache_;
+ // Original node, copied on write.
+ Node* from_;
+ // Temporary node used for mutations, can be recycled if cache is hit.
+ Node* tmp_;
+ };
+
+ private:
+ Node* Query(Node* node);
+ void Insert(Node* node) { cache_.insert(node); }
+
+ Graph* graph_;
+ struct NodeEquals {
+ bool operator()(Node* a, Node* b) const {
+ return NodeProperties::Equals(a, b);
+ }
+ };
+ struct NodeHashCode {
+ size_t operator()(Node* n) const { return NodeProperties::HashCode(n); }
+ };
+ ZoneUnorderedSet<Node*, NodeHashCode, NodeEquals> cache_;
+ // Unused nodes whose memory can be recycled.
+ ZoneVector<Node*> temp_nodes_;
+};
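The cache keys nodes on their operator plus inputs, so structurally equal nodes collapse to a single representative. A self-contained sketch of the same hash/equality split, with ExprHash and ExprEquals playing the roles of NodeHashCode and NodeEquals over a toy Expr type (not V8's Node):

#include <cstddef>
#include <functional>
#include <iostream>
#include <string>
#include <unordered_set>
#include <vector>

// Toy expression: inputs are already interned, so pointer identity suffices.
struct Expr {
  std::string op;
  std::vector<const Expr*> inputs;
};

struct ExprHash {
  std::size_t operator()(const Expr* e) const {
    std::size_t h = std::hash<std::string>()(e->op);
    for (const Expr* input : e->inputs) {
      h = h * 31 + std::hash<const Expr*>()(input);
    }
    return h;
  }
};

struct ExprEquals {
  bool operator()(const Expr* a, const Expr* b) const {
    return a->op == b->op && a->inputs == b->inputs;
  }
};

// Return the canonical representative, inserting the node on first sight.
const Expr* Intern(
    std::unordered_set<const Expr*, ExprHash, ExprEquals>* cache,
    const Expr* e) {
  return *cache->insert(e).first;
}

int main() {
  std::unordered_set<const Expr*, ExprHash, ExprEquals> cache;
  Expr a{"Add", {}};
  Expr b{"Add", {}};  // Structurally equal to a.
  std::cout << (Intern(&cache, &a) == Intern(&cache, &b)) << "\n";  // 1
}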
+
+// Modify the graph according to the information computed in the previous phase.
class V8_EXPORT_PRIVATE EscapeAnalysisReducer final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
EscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph,
- EscapeAnalysis* escape_analysis, Zone* zone);
+ EscapeAnalysisResult analysis_result, Zone* zone);
+ Reduction Reduce(Node* node) override;
const char* reducer_name() const override { return "EscapeAnalysisReducer"; }
-
- Reduction Reduce(Node* node) final;
-
void Finalize() override;
// Verifies that all virtual allocation nodes have been dealt with. Run it
- // after this reducer has been applied. Has no effect in release mode.
+ // after this reducer has been applied.
void VerifyReplacement() const;
- bool compilation_failed() const { return compilation_failed_; }
-
private:
- Reduction ReduceNode(Node* node);
- Reduction ReduceLoad(Node* node);
- Reduction ReduceStore(Node* node);
- Reduction ReduceCheckMaps(Node* node);
- Reduction ReduceAllocate(Node* node);
- Reduction ReduceFinishRegion(Node* node);
- Reduction ReduceReferenceEqual(Node* node);
- Reduction ReduceObjectIsSmi(Node* node);
- Reduction ReduceFrameStateUses(Node* node);
- Node* ReduceDeoptState(Node* node, Node* effect, bool multiple_users);
- Node* ReduceStateValueInput(Node* node, int node_index, Node* effect,
- bool node_multiused, bool already_cloned,
- bool multiple_users);
+ void ReduceFrameStateInputs(Node* node);
+ Node* ReduceDeoptState(Node* node, Node* effect, Deduplicator* deduplicator);
+ Node* ObjectIdNode(const VirtualObject* vobject);
+ Node* MaybeGuard(Node* original, Node* replacement);
JSGraph* jsgraph() const { return jsgraph_; }
- EscapeAnalysis* escape_analysis() const { return escape_analysis_; }
+ EscapeAnalysisResult analysis_result() const { return analysis_result_; }
Zone* zone() const { return zone_; }
JSGraph* const jsgraph_;
- EscapeAnalysis* escape_analysis_;
+ EscapeAnalysisResult analysis_result_;
+ ZoneVector<Node*> object_id_cache_;
+ NodeHashCache node_cache_;
+ ZoneSet<Node*> arguments_elements_;
Zone* const zone_;
- // This bit vector marks nodes we already processed (allocs, loads, stores)
- // and nodes that do not need a visit from ReduceDeoptState etc.
- BitVector fully_reduced_;
- bool exists_virtual_allocate_;
- std::set<Node*> arguments_elements_;
- bool compilation_failed_ = false;
DISALLOW_COPY_AND_ASSIGN(EscapeAnalysisReducer);
};
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index 1b34d2cf53..ab2b06a952 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -1,31 +1,15 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
+// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/escape-analysis.h"
-#include <limits>
-
-#include "src/base/flags.h"
#include "src/bootstrapper.h"
-#include "src/compilation-dependencies.h"
-#include "src/compiler/common-operator.h"
-#include "src/compiler/graph-reducer.h"
-#include "src/compiler/js-operator.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties.h"
-#include "src/compiler/node.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
-#include "src/compiler/type-cache.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-typedef NodeId Alias;
+#include "src/zone/zone-list-inl.h" // TODO(mstarzinger): Fix zone-handle-set.h instead!
#ifdef DEBUG
#define TRACE(...) \
@@ -36,1760 +20,779 @@ typedef NodeId Alias;
#define TRACE(...)
#endif
-// EscapeStatusAnalysis determines for each allocation whether it escapes.
-class EscapeStatusAnalysis : public ZoneObject {
- public:
- enum Status {
- kUnknown = 0u,
- kTracked = 1u << 0,
- kEscaped = 1u << 1,
- kOnStack = 1u << 2,
- kVisited = 1u << 3,
-  // A node is dangling if it is a load of some kind and does not have
-  // an effect successor.
- kDanglingComputed = 1u << 4,
- kDangling = 1u << 5,
-  // A node is an effect branch point if it has more than 2 non-dangling
- // effect successors.
- kBranchPointComputed = 1u << 6,
- kBranchPoint = 1u << 7,
- kInQueue = 1u << 8
- };
- typedef base::Flags<Status, uint16_t> StatusFlags;
-
- void RunStatusAnalysis();
-
- bool IsVirtual(Node* node);
- bool IsEscaped(Node* node);
- bool IsAllocation(Node* node);
-
- bool IsInQueue(NodeId id);
- void SetInQueue(NodeId id, bool on_stack);
-
- void DebugPrint();
-
- EscapeStatusAnalysis(EscapeAnalysis* object_analysis, Graph* graph,
- Zone* zone);
- void EnqueueForStatusAnalysis(Node* node);
- bool SetEscaped(Node* node);
- bool IsEffectBranchPoint(Node* node);
- bool IsDanglingEffectNode(Node* node);
- void ResizeStatusVector();
- size_t GetStatusVectorSize();
- bool IsVirtual(NodeId id);
-
- Graph* graph() const { return graph_; }
- void AssignAliases();
- Alias GetAlias(NodeId id) const { return aliases_[id]; }
- const ZoneVector<Alias>& GetAliasMap() const { return aliases_; }
- Alias AliasCount() const { return next_free_alias_; }
- static const Alias kNotReachable;
- static const Alias kUntrackable;
-
- bool IsNotReachable(Node* node);
-
- private:
- void Process(Node* node);
- void ProcessAllocate(Node* node);
- void ProcessFinishRegion(Node* node);
- void ProcessStoreField(Node* node);
- void ProcessStoreElement(Node* node);
- bool CheckUsesForEscape(Node* node, bool phi_escaping = false) {
- return CheckUsesForEscape(node, node, phi_escaping);
- }
- bool CheckUsesForEscape(Node* node, Node* rep, bool phi_escaping = false);
- void RevisitUses(Node* node);
- void RevisitInputs(Node* node);
-
- Alias NextAlias() { return next_free_alias_++; }
-
- bool HasEntry(Node* node);
-
- bool IsAllocationPhi(Node* node);
-
- ZoneVector<Node*> stack_;
- EscapeAnalysis* object_analysis_;
- Graph* const graph_;
- ZoneVector<StatusFlags> status_;
- Alias next_free_alias_;
- ZoneVector<Node*> status_stack_;
- ZoneVector<Alias> aliases_;
-
- DISALLOW_COPY_AND_ASSIGN(EscapeStatusAnalysis);
-};
-
-DEFINE_OPERATORS_FOR_FLAGS(EscapeStatusAnalysis::StatusFlags)
-
-const Alias EscapeStatusAnalysis::kNotReachable =
- std::numeric_limits<Alias>::max();
-const Alias EscapeStatusAnalysis::kUntrackable =
- std::numeric_limits<Alias>::max() - 1;
-
-namespace impl {
+namespace v8 {
+namespace internal {
+namespace compiler {
-class VirtualObject : public ZoneObject {
+template <class T>
+class Sidetable {
public:
- enum Status {
- kInitial = 0,
- kTracked = 1u << 0,
- kInitialized = 1u << 1,
- kCopyRequired = 1u << 2,
- };
- typedef base::Flags<Status, unsigned char> StatusFlags;
-
- VirtualObject(NodeId id, VirtualState* owner, Zone* zone)
- : id_(id),
- status_(kInitial),
- fields_(zone),
- phi_(zone),
- object_state_(nullptr),
- owner_(owner) {}
-
- VirtualObject(VirtualState* owner, const VirtualObject& other)
- : id_(other.id_),
- status_(other.status_ & ~kCopyRequired),
- fields_(other.fields_),
- phi_(other.phi_),
- object_state_(other.object_state_),
- owner_(owner) {}
-
- VirtualObject(NodeId id, VirtualState* owner, Zone* zone, size_t field_number,
- bool initialized)
- : id_(id),
- status_(kTracked | (initialized ? kInitialized : kInitial)),
- fields_(zone),
- phi_(zone),
- object_state_(nullptr),
- owner_(owner) {
- fields_.resize(field_number);
- phi_.resize(field_number, false);
- }
-
- Node* GetField(size_t offset) { return fields_[offset]; }
-
- bool IsCreatedPhi(size_t offset) { return phi_[offset]; }
-
- void SetField(size_t offset, Node* node, bool created_phi = false) {
- TRACE(" VirtualObject(%p)[%zu] changes from #%i to #%i\n",
- static_cast<void*>(this), offset,
- fields_[offset] ? fields_[offset]->id() : -1, node ? node->id() : -1);
- fields_[offset] = node;
- phi_[offset] = created_phi;
- }
- bool IsTracked() const { return status_ & kTracked; }
- bool IsInitialized() const { return status_ & kInitialized; }
- bool SetInitialized() { return status_ |= kInitialized; }
- VirtualState* owner() const { return owner_; }
-
- Node** fields_array() { return &fields_.front(); }
- size_t field_count() { return fields_.size(); }
- bool ResizeFields(size_t field_count) {
- if (field_count > fields_.size()) {
- fields_.resize(field_count);
- phi_.resize(field_count);
- return true;
- }
- return false;
- }
- void ClearAllFields() {
- for (size_t i = 0; i < fields_.size(); ++i) {
- fields_[i] = nullptr;
- phi_[i] = false;
+ explicit Sidetable(Zone* zone) : map_(zone) {}
+ T& operator[](const Node* node) {
+ NodeId id = node->id();
+ if (id >= map_.size()) {
+ map_.resize(id + 1);
}
+ return map_[id];
}
- bool AllFieldsClear() {
- for (size_t i = 0; i < fields_.size(); ++i) {
- if (fields_[i] != nullptr) {
- return false;
- }
- }
- return true;
- }
- bool UpdateFrom(const VirtualObject& other);
- bool MergeFrom(MergeCache* cache, Node* at, Graph* graph,
- CommonOperatorBuilder* common, bool initialMerge);
- void SetObjectState(Node* node) { object_state_ = node; }
- Node* GetObjectState() const { return object_state_; }
- bool IsCopyRequired() const { return status_ & kCopyRequired; }
- void SetCopyRequired() { status_ |= kCopyRequired; }
- bool NeedCopyForModification() {
- if (!IsCopyRequired() || !IsInitialized()) {
- return false;
- }
- return true;
- }
-
- NodeId id() const { return id_; }
- void id(NodeId id) { id_ = id; }
private:
- bool MergeFields(size_t i, Node* at, MergeCache* cache, Graph* graph,
- CommonOperatorBuilder* common);
-
- NodeId id_;
- StatusFlags status_;
- ZoneVector<Node*> fields_;
- ZoneVector<bool> phi_;
- Node* object_state_;
- VirtualState* owner_;
-
- DISALLOW_COPY_AND_ASSIGN(VirtualObject);
+ ZoneVector<T> map_;
};
-DEFINE_OPERATORS_FOR_FLAGS(VirtualObject::StatusFlags)
-
-bool VirtualObject::UpdateFrom(const VirtualObject& other) {
- TRACE("%p.UpdateFrom(%p)\n", static_cast<void*>(this),
- static_cast<const void*>(&other));
- bool changed = status_ != other.status_;
- status_ = other.status_;
- phi_ = other.phi_;
- if (fields_.size() != other.fields_.size()) {
- fields_ = other.fields_;
- return true;
- }
- for (size_t i = 0; i < fields_.size(); ++i) {
- if (fields_[i] != other.fields_[i]) {
- changed = true;
- fields_[i] = other.fields_[i];
- }
- }
- return changed;
-}
-
-class VirtualState : public ZoneObject {
+template <class T>
+class SparseSidetable {
public:
- VirtualState(Node* owner, Zone* zone, size_t size)
- : info_(size, nullptr, zone),
- initialized_(static_cast<int>(size), zone),
- owner_(owner) {}
-
- VirtualState(Node* owner, const VirtualState& state)
- : info_(state.info_.size(), nullptr, state.info_.get_allocator().zone()),
- initialized_(state.initialized_.length(),
- state.info_.get_allocator().zone()),
- owner_(owner) {
- for (size_t i = 0; i < info_.size(); ++i) {
- if (state.info_[i]) {
- info_[i] = state.info_[i];
- }
+ explicit SparseSidetable(Zone* zone, T def_value = T())
+ : def_value_(std::move(def_value)), map_(zone) {}
+ void Set(const Node* node, T value) {
+ auto iter = map_.find(node->id());
+ if (iter != map_.end()) {
+ iter->second = std::move(value);
+ } else if (value != def_value_) {
+ map_.insert(iter, std::make_pair(node->id(), std::move(value)));
}
}
-
- VirtualObject* VirtualObjectFromAlias(size_t alias);
- void SetVirtualObject(Alias alias, VirtualObject* state);
- bool UpdateFrom(VirtualState* state, Zone* zone);
- bool MergeFrom(MergeCache* cache, Zone* zone, Graph* graph,
- CommonOperatorBuilder* common, Node* at);
- size_t size() const { return info_.size(); }
- Node* owner() const { return owner_; }
- VirtualObject* Copy(VirtualObject* obj, Alias alias);
- void SetCopyRequired() {
- for (VirtualObject* obj : info_) {
- if (obj) obj->SetCopyRequired();
- }
+ const T& Get(const Node* node) const {
+ auto iter = map_.find(node->id());
+ return iter != map_.end() ? iter->second : def_value_;
}
private:
- ZoneVector<VirtualObject*> info_;
- BitVector initialized_;
- Node* owner_;
-
- DISALLOW_COPY_AND_ASSIGN(VirtualState);
+ T def_value_;
+ ZoneUnorderedMap<NodeId, T> map_;
};
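Both side tables attach analysis state to nodes without storing it in the nodes themselves: Sidetable is a grow-on-demand dense vector indexed by NodeId, SparseSidetable a hash map with an implicit default value. A standalone contrast of the two, simplified in that the reducer's version additionally avoids storing values equal to the default:

#include <cstddef>
#include <iostream>
#include <unordered_map>
#include <vector>

// Dense: grow-on-demand vector, cheap when most ids receive an entry.
template <class T>
class DenseTable {
 public:
  T& operator[](std::size_t id) {
    if (id >= map_.size()) map_.resize(id + 1);
    return map_[id];
  }

 private:
  std::vector<T> map_;
};

// Sparse: hash map with an implicit default, cheap when few ids receive one.
template <class T>
class SparseTable {
 public:
  explicit SparseTable(T def = T()) : def_(std::move(def)) {}
  void Set(std::size_t id, T value) { map_[id] = std::move(value); }
  const T& Get(std::size_t id) const {
    auto it = map_.find(id);
    return it != map_.end() ? it->second : def_;
  }

 private:
  T def_;
  std::unordered_map<std::size_t, T> map_;
};

int main() {
  DenseTable<int> dense;
  dense[5] = 42;
  SparseTable<int> sparse(-1);
  sparse.Set(100000, 7);
  std::cout << dense[5] << " " << sparse.Get(100000) << " " << sparse.Get(3)
            << "\n";  // Prints: 42 7 -1
}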
-class MergeCache : public ZoneObject {
+// Keeps track of the changes to the current node during reduction.
+// Encapsulates the current state of the IR graph and the reducer state like
+// side-tables. All access to the IR and the reducer state should happen through
+// a ReduceScope to ensure that changes and dependencies are tracked and all
+// necessary node revisitations happen.
+class ReduceScope {
public:
- explicit MergeCache(Zone* zone)
- : states_(zone), objects_(zone), fields_(zone) {
- states_.reserve(5);
- objects_.reserve(5);
- fields_.reserve(5);
- }
- ZoneVector<VirtualState*>& states() { return states_; }
- ZoneVector<VirtualObject*>& objects() { return objects_; }
- ZoneVector<Node*>& fields() { return fields_; }
- void Clear() {
- states_.clear();
- objects_.clear();
- fields_.clear();
- }
- size_t LoadVirtualObjectsFromStatesFor(Alias alias);
- void LoadVirtualObjectsForFieldsFrom(VirtualState* state,
- const ZoneVector<Alias>& aliases);
- Node* GetFields(size_t pos);
+ typedef EffectGraphReducer::Reduction Reduction;
+ explicit ReduceScope(Node* node, Reduction* reduction)
+ : current_node_(node), reduction_(reduction) {}
- private:
- ZoneVector<VirtualState*> states_;
- ZoneVector<VirtualObject*> objects_;
- ZoneVector<Node*> fields_;
+ protected:
+ Node* current_node() const { return current_node_; }
+ Reduction* reduction() { return reduction_; }
- DISALLOW_COPY_AND_ASSIGN(MergeCache);
+ private:
+ Node* current_node_;
+ Reduction* reduction_;
};
-size_t MergeCache::LoadVirtualObjectsFromStatesFor(Alias alias) {
- objects_.clear();
- DCHECK_GT(states_.size(), 0u);
- size_t min = std::numeric_limits<size_t>::max();
- for (VirtualState* state : states_) {
- if (VirtualObject* obj = state->VirtualObjectFromAlias(alias)) {
- objects_.push_back(obj);
- min = std::min(obj->field_count(), min);
- }
- }
- return min;
-}
-
-void MergeCache::LoadVirtualObjectsForFieldsFrom(
- VirtualState* state, const ZoneVector<Alias>& aliases) {
- objects_.clear();
- size_t max_alias = state->size();
- for (Node* field : fields_) {
- Alias alias = aliases[field->id()];
- if (alias >= max_alias) continue;
- if (VirtualObject* obj = state->VirtualObjectFromAlias(alias)) {
- objects_.push_back(obj);
- }
- }
-}
-
-Node* MergeCache::GetFields(size_t pos) {
- fields_.clear();
- Node* rep = pos >= objects_.front()->field_count()
- ? nullptr
- : objects_.front()->GetField(pos);
- for (VirtualObject* obj : objects_) {
- if (pos >= obj->field_count()) continue;
- Node* field = obj->GetField(pos);
- if (field) {
- fields_.push_back(field);
- }
- if (field != rep) {
- rep = nullptr;
- }
- }
- return rep;
-}
-
-VirtualObject* VirtualState::Copy(VirtualObject* obj, Alias alias) {
- if (obj->owner() == this) return obj;
- VirtualObject* new_obj =
- new (info_.get_allocator().zone()) VirtualObject(this, *obj);
- TRACE("At state %p, alias @%d (#%d), copying virtual object from %p to %p\n",
- static_cast<void*>(this), alias, obj->id(), static_cast<void*>(obj),
- static_cast<void*>(new_obj));
- info_[alias] = new_obj;
- return new_obj;
-}
-
-VirtualObject* VirtualState::VirtualObjectFromAlias(size_t alias) {
- return info_[alias];
-}
-
-void VirtualState::SetVirtualObject(Alias alias, VirtualObject* obj) {
- info_[alias] = obj;
- if (obj) initialized_.Add(alias);
-}
-
-bool VirtualState::UpdateFrom(VirtualState* from, Zone* zone) {
- if (from == this) return false;
- bool changed = false;
- for (Alias alias = 0; alias < size(); ++alias) {
- VirtualObject* ls = VirtualObjectFromAlias(alias);
- VirtualObject* rs = from->VirtualObjectFromAlias(alias);
-
- if (ls == rs || rs == nullptr) continue;
-
- if (ls == nullptr) {
- ls = new (zone) VirtualObject(this, *rs);
- SetVirtualObject(alias, ls);
- changed = true;
- continue;
- }
-
- TRACE(" Updating fields of @%d\n", alias);
-
- changed = ls->UpdateFrom(*rs) || changed;
- }
- return false;
-}
-
-namespace {
-
-bool IsEquivalentPhi(Node* node1, Node* node2) {
- if (node1 == node2) return true;
- if (node1->opcode() != IrOpcode::kPhi || node2->opcode() != IrOpcode::kPhi ||
- node1->op()->ValueInputCount() != node2->op()->ValueInputCount()) {
- return false;
- }
- for (int i = 0; i < node1->op()->ValueInputCount(); ++i) {
- Node* input1 = NodeProperties::GetValueInput(node1, i);
- Node* input2 = NodeProperties::GetValueInput(node2, i);
- if (!IsEquivalentPhi(input1, input2)) {
- return false;
- }
- }
- return true;
-}
+// A VariableTracker object keeps track of the values of variables at all points
+// of the effect chain and introduces new phi nodes when necessary.
+// Initially and by default, variables are mapped to nullptr, which means that
+// the variable allocation point does not dominate the current point on the
+// effect chain. We map variables that represent uninitialized memory to the
+// Dead node to ensure it is not read.
+// Unmapped values are impossible by construction: a PersistentMap that does
+// not contain an element is indistinguishable from one that maps it to the
+// default element.
+class VariableTracker {
+ private:
+ // The state of all variables at one point in the effect chain.
+ class State {
+ typedef PersistentMap<Variable, Node*> Map;
+
+ public:
+ explicit State(Zone* zone) : map_(zone) {}
+ Node* Get(Variable var) const {
+ CHECK(var != Variable::Invalid());
+ return map_.Get(var);
+ }
+ void Set(Variable var, Node* node) {
+ CHECK(var != Variable::Invalid());
+ return map_.Set(var, node);
+ }
+ Map::iterator begin() const { return map_.begin(); }
+ Map::iterator end() const { return map_.end(); }
+ bool operator!=(const State& other) const { return map_ != other.map_; }
+
+ private:
+ Map map_;
+ };
-} // namespace
+ public:
+ VariableTracker(JSGraph* graph, EffectGraphReducer* reducer, Zone* zone);
+ Variable NewVariable() { return Variable(next_variable_++); }
+ Node* Get(Variable var, Node* effect) { return table_.Get(effect).Get(var); }
+ Zone* zone() { return zone_; }
+
+ class Scope : public ReduceScope {
+ public:
+ Scope(VariableTracker* tracker, Node* node, Reduction* reduction);
+ ~Scope();
+ Maybe<Node*> Get(Variable var) {
+ Node* node = current_state_.Get(var);
+ if (node && node->opcode() == IrOpcode::kDead) {
+ // TODO(tebbi): We use {Dead} as a sentinel for uninitialized memory.
+ // Reading uninitialized memory can only happen in unreachable code. In
+ // this case, we have to mark the object as escaping to avoid dead nodes
+ // in the graph. This is a workaround that should be removed once we can
+ // handle dead nodes everywhere.
+ return Nothing<Node*>();
+ }
+ return Just(node);
+ }
+ void Set(Variable var, Node* node) { current_state_.Set(var, node); }
+
+ private:
+ VariableTracker* states_;
+ State current_state_;
+ };
-bool VirtualObject::MergeFields(size_t i, Node* at, MergeCache* cache,
- Graph* graph, CommonOperatorBuilder* common) {
- bool changed = false;
- int value_input_count = static_cast<int>(cache->fields().size());
- Node* rep = GetField(i);
- if (!rep || !IsCreatedPhi(i)) {
- for (Node* input : cache->fields()) {
- CHECK_NOT_NULL(input);
- CHECK(!input->IsDead());
- }
- Node* control = NodeProperties::GetControlInput(at);
- cache->fields().push_back(control);
- Node* phi = graph->NewNode(
- common->Phi(MachineRepresentation::kTagged, value_input_count),
- value_input_count + 1, &cache->fields().front());
- NodeProperties::SetType(phi, Type::Any());
- SetField(i, phi, true);
+ private:
+ State MergeInputs(Node* effect_phi);
+ Zone* zone_;
+ JSGraph* graph_;
+ SparseSidetable<State> table_;
+ ZoneVector<Node*> buffer_;
+ EffectGraphReducer* reducer_;
+ int next_variable_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(VariableTracker);
+};
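At an EffectPhi, MergeInputs has to reconcile the variable states flowing in from each predecessor, introducing a Phi where they disagree and dropping variables whose allocation does not dominate the merge. A toy merge over two predecessor states, with strings standing in for nodes:

#include <iostream>
#include <map>
#include <string>

using Variable = int;
using Value = std::string;  // Stands in for Node*.
using State = std::map<Variable, Value>;

// Reconcile the variable states of two predecessors at a merge point:
// agreement keeps the value, disagreement introduces a phi, and a variable
// missing on either side stays unmapped (its allocation does not dominate).
State MergeStates(const State& left, const State& right) {
  State result;
  for (const auto& entry : left) {
    auto it = right.find(entry.first);
    if (it == right.end()) continue;
    result[entry.first] =
        (it->second == entry.second)
            ? entry.second
            : "Phi(" + entry.second + ", " + it->second + ")";
  }
  return result;
}

int main() {
  State then_state = {{0, "x"}, {1, "y"}};
  State else_state = {{0, "x"}, {1, "z"}};
  for (const auto& entry : MergeStates(then_state, else_state)) {
    std::cout << entry.first << " -> " << entry.second << "\n";
  }
  // Prints: 0 -> x
  //         1 -> Phi(y, z)
}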
-#ifdef DEBUG
- if (FLAG_trace_turbo_escape) {
- PrintF(" Creating Phi #%d as merge of", phi->id());
- for (int i = 0; i < value_input_count; i++) {
- PrintF(" #%d (%s)", cache->fields()[i]->id(),
- cache->fields()[i]->op()->mnemonic());
- }
- PrintF("\n");
- }
-#endif
- changed = true;
- } else {
- DCHECK(rep->opcode() == IrOpcode::kPhi);
- for (int n = 0; n < value_input_count; ++n) {
- Node* old = NodeProperties::GetValueInput(rep, n);
- if (old != cache->fields()[n]) {
- changed = true;
- NodeProperties::ReplaceValueInput(rep, cache->fields()[n], n);
+// Encapsulates the current state of the escape analysis reducer to preserve
+// invariants regarding changes and re-visitation.
+class EscapeAnalysisTracker : public ZoneObject {
+ public:
+ EscapeAnalysisTracker(JSGraph* jsgraph, EffectGraphReducer* reducer,
+ Zone* zone)
+ : virtual_objects_(zone),
+ replacements_(zone),
+ variable_states_(jsgraph, reducer, zone),
+ jsgraph_(jsgraph),
+ zone_(zone) {}
+
+ class Scope : public VariableTracker::Scope {
+ public:
+ Scope(EffectGraphReducer* reducer, EscapeAnalysisTracker* tracker,
+ Node* node, Reduction* reduction)
+ : VariableTracker::Scope(&tracker->variable_states_, node, reduction),
+ tracker_(tracker),
+ reducer_(reducer) {}
+ const VirtualObject* GetVirtualObject(Node* node) {
+ VirtualObject* vobject = tracker_->virtual_objects_.Get(node);
+ if (vobject) vobject->AddDependency(current_node());
+ return vobject;
+ }
+ // Create or retrieve a virtual object for the current node.
+ const VirtualObject* InitVirtualObject(int size) {
+ DCHECK_EQ(IrOpcode::kAllocate, current_node()->opcode());
+ VirtualObject* vobject = tracker_->virtual_objects_.Get(current_node());
+ if (vobject) {
+ CHECK(vobject->size() == size);
+ } else {
+ vobject = tracker_->NewVirtualObject(size);
}
+ if (vobject) vobject->AddDependency(current_node());
+ vobject_ = vobject;
+ return vobject;
}
- }
- return changed;
-}
-bool VirtualObject::MergeFrom(MergeCache* cache, Node* at, Graph* graph,
- CommonOperatorBuilder* common,
- bool initialMerge) {
- DCHECK(at->opcode() == IrOpcode::kEffectPhi ||
- at->opcode() == IrOpcode::kPhi);
- bool changed = false;
- for (size_t i = 0; i < field_count(); ++i) {
- if (!initialMerge && GetField(i) == nullptr) continue;
- Node* field = cache->GetFields(i);
- if (field && !IsCreatedPhi(i)) {
- changed = changed || GetField(i) != field;
- SetField(i, field);
- TRACE(" Field %zu agree on rep #%d\n", i, field->id());
- } else {
- size_t arity = at->opcode() == IrOpcode::kEffectPhi
- ? at->op()->EffectInputCount()
- : at->op()->ValueInputCount();
- if (cache->fields().size() == arity) {
- changed = MergeFields(i, at, cache, graph, common) || changed;
- } else {
- if (GetField(i) != nullptr) {
- TRACE(" Field %zu cleared\n", i);
- changed = true;
- }
- SetField(i, nullptr);
- }
+ void SetVirtualObject(Node* object) {
+ vobject_ = tracker_->virtual_objects_.Get(object);
}
- }
- return changed;
-}
-bool VirtualState::MergeFrom(MergeCache* cache, Zone* zone, Graph* graph,
- CommonOperatorBuilder* common, Node* at) {
- DCHECK_GT(cache->states().size(), 0u);
- bool changed = false;
- for (Alias alias = 0; alias < size(); ++alias) {
- cache->objects().clear();
- VirtualObject* mergeObject = VirtualObjectFromAlias(alias);
- bool copy_merge_object = false;
- size_t fields = std::numeric_limits<size_t>::max();
- for (VirtualState* state : cache->states()) {
- if (VirtualObject* obj = state->VirtualObjectFromAlias(alias)) {
- cache->objects().push_back(obj);
- if (mergeObject == obj) {
- copy_merge_object = true;
- }
- fields = std::min(obj->field_count(), fields);
+ void SetEscaped(Node* node) {
+ if (VirtualObject* object = tracker_->virtual_objects_.Get(node)) {
+ if (object->HasEscaped()) return;
+ TRACE("Setting %s#%d to escaped because of use by %s#%d\n",
+ node->op()->mnemonic(), node->id(),
+ current_node()->op()->mnemonic(), current_node()->id());
+ object->SetEscaped();
+ object->RevisitDependants(reducer_);
}
}
- if (cache->objects().size() == cache->states().size() &&
- (mergeObject || !initialized_.Contains(alias))) {
- bool initialMerge = false;
- if (!mergeObject) {
- initialMerge = true;
- VirtualObject* obj = new (zone)
- VirtualObject(cache->objects().front()->id(), this, zone, fields,
- cache->objects().front()->IsInitialized());
- SetVirtualObject(alias, obj);
- mergeObject = obj;
- changed = true;
- } else if (copy_merge_object) {
- VirtualObject* obj = new (zone) VirtualObject(this, *mergeObject);
- SetVirtualObject(alias, obj);
- mergeObject = obj;
- changed = true;
- } else {
- changed = mergeObject->ResizeFields(fields) || changed;
- }
-#ifdef DEBUG
- if (FLAG_trace_turbo_escape) {
- PrintF(" Alias @%d, merging into %p virtual objects", alias,
- static_cast<void*>(mergeObject));
- for (size_t i = 0; i < cache->objects().size(); i++) {
- PrintF(" %p", static_cast<void*>(cache->objects()[i]));
- }
- PrintF("\n");
- }
-#endif // DEBUG
- changed =
- mergeObject->MergeFrom(cache, at, graph, common, initialMerge) ||
- changed;
- } else {
- if (mergeObject) {
- TRACE(" Alias %d, virtual object removed\n", alias);
- changed = true;
- }
- SetVirtualObject(alias, nullptr);
+ // The inputs of the current node have to be accessed through the scope to
+ // ensure that they respect the node replacements.
+ Node* ValueInput(int i) {
+ return tracker_->ResolveReplacement(
+ NodeProperties::GetValueInput(current_node(), i));
}
- }
- return changed;
-}
-
-} // namespace impl
-using namespace impl;
-
-EscapeStatusAnalysis::EscapeStatusAnalysis(EscapeAnalysis* object_analysis,
- Graph* graph, Zone* zone)
- : stack_(zone),
- object_analysis_(object_analysis),
- graph_(graph),
- status_(zone),
- next_free_alias_(0),
- status_stack_(zone),
- aliases_(zone) {}
-
-bool EscapeStatusAnalysis::HasEntry(Node* node) {
- return status_[node->id()] & (kTracked | kEscaped);
-}
-
-bool EscapeStatusAnalysis::IsVirtual(Node* node) {
- return IsVirtual(node->id());
-}
-
-bool EscapeStatusAnalysis::IsVirtual(NodeId id) {
- return (status_[id] & kTracked) && !(status_[id] & kEscaped);
-}
-
-bool EscapeStatusAnalysis::IsEscaped(Node* node) {
- return status_[node->id()] & kEscaped;
-}
-
-bool EscapeStatusAnalysis::IsAllocation(Node* node) {
- return node->opcode() == IrOpcode::kAllocate ||
- node->opcode() == IrOpcode::kFinishRegion;
-}
-
-bool EscapeStatusAnalysis::SetEscaped(Node* node) {
- bool changed = !(status_[node->id()] & kEscaped);
- status_[node->id()] |= kEscaped | kTracked;
- return changed;
-}
-
-bool EscapeStatusAnalysis::IsInQueue(NodeId id) {
- return status_[id] & kInQueue;
-}
-
-void EscapeStatusAnalysis::SetInQueue(NodeId id, bool on_stack) {
- if (on_stack) {
- status_[id] |= kInQueue;
- } else {
- status_[id] &= ~kInQueue;
- }
-}
-
-void EscapeStatusAnalysis::ResizeStatusVector() {
- if (status_.size() <= graph()->NodeCount()) {
- status_.resize(graph()->NodeCount() * 1.1, kUnknown);
- }
-}
-
-size_t EscapeStatusAnalysis::GetStatusVectorSize() { return status_.size(); }
-
-void EscapeStatusAnalysis::RunStatusAnalysis() {
- // TODO(tebbi): This checks for faulty VirtualObject states, which can happen
- // due to bug https://bugs.chromium.org/p/v8/issues/detail?id=6302. As a
- // workaround, we set everything to escaped if such a faulty state was
- // detected.
- bool all_objects_complete = object_analysis_->AllObjectsComplete();
- ResizeStatusVector();
- while (!status_stack_.empty()) {
- Node* node = status_stack_.back();
- status_stack_.pop_back();
- status_[node->id()] &= ~kOnStack;
- Process(node);
- status_[node->id()] |= kVisited;
- if (!all_objects_complete) SetEscaped(node);
- }
-}
-
-void EscapeStatusAnalysis::EnqueueForStatusAnalysis(Node* node) {
- DCHECK_NOT_NULL(node);
- if (!(status_[node->id()] & kOnStack)) {
- status_stack_.push_back(node);
- status_[node->id()] |= kOnStack;
- }
-}
-
-void EscapeStatusAnalysis::RevisitInputs(Node* node) {
- for (Edge edge : node->input_edges()) {
- Node* input = edge.to();
- if (!(status_[input->id()] & kOnStack)) {
- status_stack_.push_back(input);
- status_[input->id()] |= kOnStack;
+ Node* ContextInput() {
+ return tracker_->ResolveReplacement(
+ NodeProperties::GetContextInput(current_node()));
}
- }
-}
-void EscapeStatusAnalysis::RevisitUses(Node* node) {
- for (Edge edge : node->use_edges()) {
- Node* use = edge.from();
- if (!(status_[use->id()] & kOnStack) && !IsNotReachable(use)) {
- status_stack_.push_back(use);
- status_[use->id()] |= kOnStack;
+ void SetReplacement(Node* replacement) {
+ replacement_ = replacement;
+ vobject_ =
+ replacement ? tracker_->virtual_objects_.Get(replacement) : nullptr;
+ TRACE("Set %s#%d as replacement.\n", replacement->op()->mnemonic(),
+ replacement->id());
}
- }
-}
-void EscapeStatusAnalysis::Process(Node* node) {
- switch (node->opcode()) {
- case IrOpcode::kAllocate:
- ProcessAllocate(node);
- break;
- case IrOpcode::kFinishRegion:
- ProcessFinishRegion(node);
- break;
- case IrOpcode::kStoreField:
- ProcessStoreField(node);
- break;
- case IrOpcode::kStoreElement:
- ProcessStoreElement(node);
- break;
- case IrOpcode::kLoadField:
- case IrOpcode::kLoadElement: {
- if (Node* rep = object_analysis_->GetReplacement(node)) {
- if (IsAllocation(rep) && CheckUsesForEscape(node, rep)) {
- RevisitInputs(rep);
- RevisitUses(rep);
- }
- } else {
- Node* from = NodeProperties::GetValueInput(node, 0);
- from = object_analysis_->ResolveReplacement(from);
- if (SetEscaped(from)) {
- TRACE("Setting #%d (%s) to escaped because of unresolved load #%i\n",
- from->id(), from->op()->mnemonic(), node->id());
- RevisitInputs(from);
- RevisitUses(from);
- }
+ void MarkForDeletion() { SetReplacement(tracker_->jsgraph_->Dead()); }
+
+ ~Scope() {
+ if (replacement_ != tracker_->replacements_[current_node()] ||
+ vobject_ != tracker_->virtual_objects_.Get(current_node())) {
+ reduction()->set_value_changed();
}
- RevisitUses(node);
- break;
+ tracker_->replacements_[current_node()] = replacement_;
+ tracker_->virtual_objects_.Set(current_node(), vobject_);
}
- case IrOpcode::kPhi:
- if (!HasEntry(node)) {
- status_[node->id()] |= kTracked;
- RevisitUses(node);
- }
- if (!IsAllocationPhi(node) && SetEscaped(node)) {
- RevisitInputs(node);
- RevisitUses(node);
- }
- CheckUsesForEscape(node);
- default:
- break;
- }
-}
-
-bool EscapeStatusAnalysis::IsAllocationPhi(Node* node) {
- for (Edge edge : node->input_edges()) {
- Node* input = edge.to();
- if (input->opcode() == IrOpcode::kPhi && !IsEscaped(input)) continue;
- if (IsAllocation(input)) continue;
- return false;
- }
- return true;
-}
-
-void EscapeStatusAnalysis::ProcessStoreField(Node* node) {
- DCHECK_EQ(node->opcode(), IrOpcode::kStoreField);
- Node* to = NodeProperties::GetValueInput(node, 0);
- Node* val = NodeProperties::GetValueInput(node, 1);
- if ((IsEscaped(to) || !IsAllocation(to)) && SetEscaped(val)) {
- RevisitUses(val);
- RevisitInputs(val);
- TRACE("Setting #%d (%s) to escaped because of store to field of #%d\n",
- val->id(), val->op()->mnemonic(), to->id());
- }
-}
-
-void EscapeStatusAnalysis::ProcessStoreElement(Node* node) {
- DCHECK_EQ(node->opcode(), IrOpcode::kStoreElement);
- Node* to = NodeProperties::GetValueInput(node, 0);
- Node* val = NodeProperties::GetValueInput(node, 2);
- if ((IsEscaped(to) || !IsAllocation(to)) && SetEscaped(val)) {
- RevisitUses(val);
- RevisitInputs(val);
- TRACE("Setting #%d (%s) to escaped because of store to field of #%d\n",
- val->id(), val->op()->mnemonic(), to->id());
- }
-}
-void EscapeStatusAnalysis::ProcessAllocate(Node* node) {
- DCHECK_EQ(node->opcode(), IrOpcode::kAllocate);
- if (!HasEntry(node)) {
- status_[node->id()] |= kTracked;
- TRACE("Created status entry for node #%d (%s)\n", node->id(),
- node->op()->mnemonic());
- NumberMatcher size(node->InputAt(0));
- DCHECK(node->InputAt(0)->opcode() != IrOpcode::kInt32Constant &&
- node->InputAt(0)->opcode() != IrOpcode::kInt64Constant &&
- node->InputAt(0)->opcode() != IrOpcode::kFloat32Constant &&
- node->InputAt(0)->opcode() != IrOpcode::kFloat64Constant);
- RevisitUses(node);
- if (!size.HasValue() && SetEscaped(node)) {
- TRACE("Setting #%d to escaped because of non-const alloc\n", node->id());
- // This node is already known to escape, uses do not have to be checked
- // for escape.
- return;
- }
- }
- if (CheckUsesForEscape(node, true)) {
- RevisitUses(node);
- }
-}
+ private:
+ EscapeAnalysisTracker* tracker_;
+ EffectGraphReducer* reducer_;
+ VirtualObject* vobject_ = nullptr;
+ Node* replacement_ = nullptr;
+ };
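SetEscaped above is a monotone fact: once an object escapes it never becomes virtual again, and every dependant recorded via AddDependency is re-enqueued so the analysis runs to a fixpoint. A bare-bones worklist sketch of that driver; in the reducer a revisited dependant re-runs its reduction, whereas here, for brevity, revisiting simply marks the dependant as escaped too:

#include <deque>
#include <iostream>
#include <vector>

// Monotone escapedness with a worklist: marking an object re-enqueues its
// dependants, and processing repeats until the list drains (a fixpoint).
struct ObjectInfo {
  bool escaped = false;
  std::vector<int> dependants;  // Ids revisited when this object escapes.
};

int main() {
  std::vector<ObjectInfo> objects(3);
  objects[0].dependants = {1};
  objects[1].dependants = {2};

  std::deque<int> worklist = {0};  // Object 0 was just observed to escape.
  while (!worklist.empty()) {
    int id = worklist.front();
    worklist.pop_front();
    if (objects[id].escaped) continue;  // Monotone: never processed twice.
    objects[id].escaped = true;
    for (int dep : objects[id].dependants) worklist.push_back(dep);
  }
  std::cout << objects[2].escaped << "\n";  // Prints: 1
}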
-bool EscapeStatusAnalysis::CheckUsesForEscape(Node* uses, Node* rep,
- bool phi_escaping) {
- for (Edge edge : uses->use_edges()) {
- Node* use = edge.from();
- if (IsNotReachable(use)) continue;
- if (edge.index() >= use->op()->ValueInputCount() +
- OperatorProperties::GetContextInputCount(use->op()))
- continue;
- switch (use->opcode()) {
- case IrOpcode::kPhi:
- if (phi_escaping && SetEscaped(rep)) {
- TRACE(
- "Setting #%d (%s) to escaped because of use by phi node "
- "#%d (%s)\n",
- rep->id(), rep->op()->mnemonic(), use->id(),
- use->op()->mnemonic());
- return true;
- }
- // Fallthrough.
- case IrOpcode::kStoreField:
- case IrOpcode::kLoadField:
- case IrOpcode::kStoreElement:
- case IrOpcode::kLoadElement:
- case IrOpcode::kFrameState:
- case IrOpcode::kStateValues:
- case IrOpcode::kReferenceEqual:
- case IrOpcode::kFinishRegion:
- case IrOpcode::kCheckMaps:
- if (IsEscaped(use) && SetEscaped(rep)) {
- TRACE(
- "Setting #%d (%s) to escaped because of use by escaping node "
- "#%d (%s)\n",
- rep->id(), rep->op()->mnemonic(), use->id(),
- use->op()->mnemonic());
- return true;
- }
- break;
- case IrOpcode::kObjectIsSmi:
- if (!IsAllocation(rep) && SetEscaped(rep)) {
- TRACE("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
- rep->id(), rep->op()->mnemonic(), use->id(),
- use->op()->mnemonic());
- return true;
- }
- break;
- case IrOpcode::kSelect:
- // TODO(mstarzinger): The following list of operators will eventually be
- // handled by the EscapeAnalysisReducer (similar to ObjectIsSmi).
- case IrOpcode::kConvertTaggedHoleToUndefined:
- case IrOpcode::kStringEqual:
- case IrOpcode::kStringLessThan:
- case IrOpcode::kStringLessThanOrEqual:
- case IrOpcode::kTypeGuard:
- case IrOpcode::kPlainPrimitiveToNumber:
- case IrOpcode::kPlainPrimitiveToWord32:
- case IrOpcode::kPlainPrimitiveToFloat64:
- case IrOpcode::kStringCharAt:
- case IrOpcode::kStringCharCodeAt:
- case IrOpcode::kSeqStringCharCodeAt:
- case IrOpcode::kStringIndexOf:
- case IrOpcode::kStringToLowerCaseIntl:
- case IrOpcode::kStringToUpperCaseIntl:
- case IrOpcode::kObjectIsCallable:
- case IrOpcode::kObjectIsDetectableCallable:
- case IrOpcode::kObjectIsNaN:
- case IrOpcode::kObjectIsNonCallable:
- case IrOpcode::kObjectIsNumber:
- case IrOpcode::kObjectIsReceiver:
- case IrOpcode::kObjectIsString:
- case IrOpcode::kObjectIsSymbol:
- case IrOpcode::kObjectIsUndetectable:
- case IrOpcode::kNumberLessThan:
- case IrOpcode::kNumberLessThanOrEqual:
- case IrOpcode::kNumberEqual:
-#define CASE(opcode) case IrOpcode::k##opcode:
- SIMPLIFIED_NUMBER_BINOP_LIST(CASE)
- SIMPLIFIED_NUMBER_UNOP_LIST(CASE)
-#undef CASE
- if (SetEscaped(rep)) {
- TRACE("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
- rep->id(), rep->op()->mnemonic(), use->id(),
- use->op()->mnemonic());
- return true;
- }
- break;
- default:
- DCHECK(use->op()->EffectInputCount() > 0 ||
- uses->op()->EffectInputCount() == 0 ||
- IrOpcode::IsJsOpcode(use->opcode()));
- if (SetEscaped(rep)) {
- TRACE("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
- rep->id(), rep->op()->mnemonic(), use->id(),
- use->op()->mnemonic());
- return true;
- }
+ Node* GetReplacementOf(Node* node) { return replacements_[node]; }
+ Node* ResolveReplacement(Node* node) {
+ if (Node* replacement = GetReplacementOf(node)) {
+ // Replacements cannot have replacements. This is important to ensure
+ // re-visitation: If a replacement is replaced, then all nodes accessing
+ // the replacement have to be updated.
+ DCHECK_NULL(GetReplacementOf(replacement));
+ return replacement;
}
+ return node;
}
- return false;
-}
-void EscapeStatusAnalysis::ProcessFinishRegion(Node* node) {
- DCHECK_EQ(node->opcode(), IrOpcode::kFinishRegion);
- if (!HasEntry(node)) {
- status_[node->id()] |= kTracked;
- RevisitUses(node);
- }
- if (CheckUsesForEscape(node, true)) {
- RevisitInputs(node);
- RevisitUses(node);
- }
-}
+ private:
+ friend class EscapeAnalysisResult;
+ static const size_t kMaxTrackedObjects = 100;
-void EscapeStatusAnalysis::DebugPrint() {
- for (NodeId id = 0; id < status_.size(); id++) {
- if (status_[id] & kTracked) {
- PrintF("Node #%d is %s\n", id,
- (status_[id] & kEscaped) ? "escaping" : "virtual");
- }
+ VirtualObject* NewVirtualObject(int size) {
+ if (next_object_id_ >= kMaxTrackedObjects) return nullptr;
+ return new (zone_)
+ VirtualObject(&variable_states_, next_object_id_++, size);
}
-}
-
-EscapeAnalysis::EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common,
- Zone* zone)
- : zone_(zone),
- slot_not_analyzed_(graph->NewNode(common->NumberConstant(0x1c0debad))),
- common_(common),
- status_analysis_(new (zone) EscapeStatusAnalysis(this, graph, zone)),
- virtual_states_(zone),
- replacements_(zone),
- cycle_detection_(zone),
- cache_(nullptr) {
- // Type slot_not_analyzed_ manually.
- double v = OpParameter<double>(slot_not_analyzed_);
- NodeProperties::SetType(slot_not_analyzed_, Type::Range(v, v, zone));
-}
-EscapeAnalysis::~EscapeAnalysis() {}
+ SparseSidetable<VirtualObject*> virtual_objects_;
+ Sidetable<Node*> replacements_;
+ VariableTracker variable_states_;
+ VirtualObject::Id next_object_id_ = 0;
+ JSGraph* const jsgraph_;
+ Zone* const zone_;
-bool EscapeAnalysis::Run() {
- replacements_.resize(graph()->NodeCount());
- status_analysis_->AssignAliases();
- if (status_analysis_->AliasCount() > 0) {
- cache_ = new (zone()) MergeCache(zone());
- replacements_.resize(graph()->NodeCount());
- status_analysis_->ResizeStatusVector();
- RunObjectAnalysis();
- status_analysis_->RunStatusAnalysis();
- return true;
- } else {
- return false;
- }
-}
+ DISALLOW_COPY_AND_ASSIGN(EscapeAnalysisTracker);
+};
-void EscapeStatusAnalysis::AssignAliases() {
- size_t max_size = 1024;
- size_t min_size = 32;
- size_t stack_size =
- std::min(std::max(graph()->NodeCount() / 5, min_size), max_size);
- stack_.reserve(stack_size);
- ResizeStatusVector();
- stack_.push_back(graph()->end());
- CHECK_LT(graph()->NodeCount(), kUntrackable);
- aliases_.resize(graph()->NodeCount(), kNotReachable);
- aliases_[graph()->end()->id()] = kUntrackable;
- status_stack_.reserve(8);
- TRACE("Discovering trackable nodes");
+EffectGraphReducer::EffectGraphReducer(
+ Graph* graph, std::function<void(Node*, Reduction*)> reduce, Zone* zone)
+ : graph_(graph),
+ state_(graph, kNumStates),
+ revisit_(zone),
+ stack_(zone),
+ reduce_(reduce) {}
+
+void EffectGraphReducer::ReduceFrom(Node* node) {
+ // Perform DFS and eagerly trigger revisitation as soon as possible.
+ // A stack element {node, i} indicates that input i of node should be visited
+ // next.
+ DCHECK(stack_.empty());
+ stack_.push({node, 0});
while (!stack_.empty()) {
- Node* node = stack_.back();
- stack_.pop_back();
- switch (node->opcode()) {
- case IrOpcode::kAllocate:
- if (aliases_[node->id()] >= kUntrackable) {
- aliases_[node->id()] = NextAlias();
- TRACE(" @%d:%s#%u", aliases_[node->id()], node->op()->mnemonic(),
- node->id());
- EnqueueForStatusAnalysis(node);
+ Node* current = stack_.top().node;
+ int& input_index = stack_.top().input_index;
+ if (input_index < current->InputCount()) {
+ Node* input = current->InputAt(input_index);
+ input_index++;
+ switch (state_.Get(input)) {
+ case State::kVisited:
+ // The input is already reduced.
+ break;
+ case State::kOnStack:
+ // The input is on the DFS stack right now, so it will be revisited
+ // later anyway.
+ break;
+ case State::kUnvisited:
+ case State::kRevisit: {
+ state_.Set(input, State::kOnStack);
+ stack_.push({input, 0});
+ break;
}
- break;
- case IrOpcode::kFinishRegion: {
- Node* allocate = NodeProperties::GetValueInput(node, 0);
- DCHECK_NOT_NULL(allocate);
- if (allocate->opcode() == IrOpcode::kAllocate) {
- if (aliases_[allocate->id()] >= kUntrackable) {
- if (aliases_[allocate->id()] == kNotReachable) {
- stack_.push_back(allocate);
- }
- aliases_[allocate->id()] = NextAlias();
- TRACE(" @%d:%s#%u", aliases_[allocate->id()],
- allocate->op()->mnemonic(), allocate->id());
- EnqueueForStatusAnalysis(allocate);
- }
- aliases_[node->id()] = aliases_[allocate->id()];
- TRACE(" @%d:%s#%u", aliases_[node->id()], node->op()->mnemonic(),
- node->id());
+ }
+ } else {
+ stack_.pop();
+ Reduction reduction;
+ reduce_(current, &reduction);
+ for (Edge edge : current->use_edges()) {
+ // Mark uses for revisitation.
+ Node* use = edge.from();
+ if (NodeProperties::IsEffectEdge(edge)) {
+ if (reduction.effect_changed()) Revisit(use);
+ } else {
+ if (reduction.value_changed()) Revisit(use);
}
- break;
}
- default:
- DCHECK_EQ(aliases_[node->id()], kUntrackable);
- break;
- }
- for (Edge edge : node->input_edges()) {
- Node* input = edge.to();
- if (aliases_[input->id()] == kNotReachable) {
- stack_.push_back(input);
- aliases_[input->id()] = kUntrackable;
+ state_.Set(current, State::kVisited);
+ // Process the revisitation buffer immediately. This improves performance
+ // of escape analysis. Using a stack for {revisit_} reverses the order in
+ // which the revisitation happens. This also seems to improve performance.
+ while (!revisit_.empty()) {
+ Node* revisit = revisit_.top();
+ if (state_.Get(revisit) == State::kRevisit) {
+ state_.Set(revisit, State::kOnStack);
+ stack_.push({revisit, 0});
+ }
+ revisit_.pop();
}
}
}
- TRACE("\n");
}
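The traversal above is easier to study in isolation. The following minimal, self-contained sketch models the same fixed-point discipline: visit inputs first, reduce a node once all of its inputs are done, then eagerly drain the revisit buffer. The value/effect distinction is collapsed into a single "changed" bit for brevity; SimpleNode and ReduceToFixedPoint are invented for the illustration and are not part of V8.

#include <cstddef>
#include <functional>
#include <stack>
#include <utility>
#include <vector>

struct SimpleNode {
  std::vector<SimpleNode*> inputs;
  std::vector<SimpleNode*> uses;
  int state = 0;  // 0 = unvisited, 1 = revisit, 2 = on stack, 3 = visited
};

void ReduceToFixedPoint(SimpleNode* root,
                        const std::function<bool(SimpleNode*)>& reduce) {
  std::stack<std::pair<SimpleNode*, size_t>> stack;
  std::stack<SimpleNode*> revisit;
  root->state = 2;
  stack.push({root, 0});
  while (!stack.empty()) {
    SimpleNode* node = stack.top().first;
    size_t& input_index = stack.top().second;
    if (input_index < node->inputs.size()) {
      // DFS: make sure every input is reduced before the node itself.
      SimpleNode* input = node->inputs[input_index++];
      if (input->state == 0 || input->state == 1) {
        input->state = 2;
        stack.push({input, 0});
      }
    } else {
      stack.pop();
      node->state = 3;
      if (reduce(node)) {
        // The node changed, so every already-reduced use must be re-examined.
        for (SimpleNode* use : node->uses) {
          if (use->state == 3) {
            use->state = 1;
            revisit.push(use);
          }
        }
      }
      // Drain the revisit buffer immediately, as the reducer above does.
      while (!revisit.empty()) {
        SimpleNode* r = revisit.top();
        revisit.pop();
        if (r->state == 1) {
          r->state = 2;
          stack.push({r, 0});
        }
      }
    }
  }
}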
-bool EscapeStatusAnalysis::IsNotReachable(Node* node) {
- if (node->id() >= aliases_.size()) {
- return false;
+void EffectGraphReducer::Revisit(Node* node) {
+ if (state_.Get(node) == State::kVisited) {
+ TRACE(" Queueing for revisit: %s#%d\n", node->op()->mnemonic(),
+ node->id());
+ state_.Set(node, State::kRevisit);
+ revisit_.push(node);
}
- return aliases_[node->id()] == kNotReachable;
}
-bool EscapeAnalysis::AllObjectsComplete() {
- for (VirtualState* state : virtual_states_) {
- if (state) {
- for (size_t i = 0; i < state->size(); ++i) {
- if (VirtualObject* object = state->VirtualObjectFromAlias(i)) {
- if (!object->AllFieldsClear()) {
- for (size_t i = 0; i < object->field_count(); ++i) {
- if (object->GetField(i) == nullptr) {
- return false;
- }
- }
- }
+VariableTracker::VariableTracker(JSGraph* graph, EffectGraphReducer* reducer,
+ Zone* zone)
+ : zone_(zone),
+ graph_(graph),
+ table_(zone, State(zone)),
+ buffer_(zone),
+ reducer_(reducer) {}
+
+VariableTracker::Scope::Scope(VariableTracker* states, Node* node,
+ Reduction* reduction)
+ : ReduceScope(node, reduction),
+ states_(states),
+ current_state_(states->zone_) {
+ switch (node->opcode()) {
+ case IrOpcode::kEffectPhi:
+ current_state_ = states_->MergeInputs(node);
+ break;
+ default:
+ int effect_inputs = node->op()->EffectInputCount();
+ if (effect_inputs == 1) {
+ current_state_ =
+ states_->table_.Get(NodeProperties::GetEffectInput(node, 0));
+ } else {
+ DCHECK_EQ(0, effect_inputs);
+ }
+ }
+}
+
+VariableTracker::Scope::~Scope() {
+ if (!reduction()->effect_changed() &&
+ states_->table_.Get(current_node()) != current_state_) {
+ reduction()->set_effect_changed();
+ }
+ states_->table_.Set(current_node(), current_state_);
+}
+
+VariableTracker::State VariableTracker::MergeInputs(Node* effect_phi) {
+ // A variable that is mapped to [nullptr] was not assigned a value on every
+ // execution path to the current effect phi. Relying on the invariant that
+ // every variable is initialized (at least with a sentinel like the Dead
+ // node), this means that the variable initialization does not dominate the
+ // current point. So for loop effect phis, we can keep nullptr for a variable
+ // as long as the first input of the loop has nullptr for this variable. For
+ // non-loop effect phis, we can even keep it nullptr as long as any input has
+ // nullptr.
+ DCHECK_EQ(IrOpcode::kEffectPhi, effect_phi->opcode());
+ int arity = effect_phi->op()->EffectInputCount();
+ Node* control = NodeProperties::GetControlInput(effect_phi, 0);
+ TRACE("control: %s#%d\n", control->op()->mnemonic(), control->id());
+ bool is_loop = control->opcode() == IrOpcode::kLoop;
+ buffer_.reserve(arity + 1);
+
+ State first_input = table_.Get(NodeProperties::GetEffectInput(effect_phi, 0));
+ State result = first_input;
+ for (std::pair<Variable, Node*> var_value : first_input) {
+ if (Node* value = var_value.second) {
+ Variable var = var_value.first;
+ TRACE("var %i:\n", var.id_);
+ buffer_.clear();
+ buffer_.push_back(value);
+ bool identical_inputs = true;
+ int num_defined_inputs = 1;
+ TRACE(" input 0: %s#%d\n", value->op()->mnemonic(), value->id());
+ for (int i = 1; i < arity; ++i) {
+ Node* next_value =
+ table_.Get(NodeProperties::GetEffectInput(effect_phi, i)).Get(var);
+ if (next_value != value) identical_inputs = false;
+ if (next_value != nullptr) {
+ num_defined_inputs++;
+ TRACE(" input %i: %s#%d\n", i, next_value->op()->mnemonic(),
+ next_value->id());
+ } else {
+ TRACE(" input %i: nullptr\n", i);
}
+ buffer_.push_back(next_value);
}
- }
- }
- return true;
-}
-void EscapeAnalysis::RunObjectAnalysis() {
- virtual_states_.resize(graph()->NodeCount());
- ZoneDeque<Node*> queue(zone());
- queue.push_back(graph()->start());
- ZoneVector<Node*> danglers(zone());
- while (!queue.empty()) {
- Node* node = queue.back();
- queue.pop_back();
- status_analysis_->SetInQueue(node->id(), false);
- if (Process(node)) {
- for (Edge edge : node->use_edges()) {
- Node* use = edge.from();
- if (status_analysis_->IsNotReachable(use)) {
- continue;
+ Node* old_value = table_.Get(effect_phi).Get(var);
+ if (old_value) {
+ TRACE(" old: %s#%d\n", old_value->op()->mnemonic(), old_value->id());
+ } else {
+ TRACE(" old: nullptr\n");
+ }
+ // Reuse a previously created phi node if possible.
+ if (old_value && old_value->opcode() == IrOpcode::kPhi &&
+ NodeProperties::GetControlInput(old_value, 0) == control) {
+ // Since a phi node can never dominate its control node,
+ // [old_value] cannot originate from the inputs. Thus [old_value]
+ // must have been created by a previous reduction of this [effect_phi].
+ for (int i = 0; i < arity; ++i) {
+ NodeProperties::ReplaceValueInput(
+ old_value, buffer_[i] ? buffer_[i] : graph_->Dead(), i);
+ // This change cannot affect the rest of the reducer, so there is no
+ // need to trigger additional revisitations.
}
- if (NodeProperties::IsEffectEdge(edge)) {
- // Iteration order: depth first, but delay phis.
-          // We need DFS to avoid some duplication of VirtualStates and
- // VirtualObjects, and we want to delay phis to improve performance.
- if (use->opcode() == IrOpcode::kEffectPhi) {
- if (!status_analysis_->IsInQueue(use->id())) {
- status_analysis_->SetInQueue(use->id(), true);
- queue.push_front(use);
- }
- } else if ((use->opcode() != IrOpcode::kLoadField &&
- use->opcode() != IrOpcode::kLoadElement) ||
- !status_analysis_->IsDanglingEffectNode(use)) {
- if (!status_analysis_->IsInQueue(use->id())) {
- status_analysis_->SetInQueue(use->id(), true);
- queue.push_back(use);
- }
+ result.Set(var, old_value);
+ } else {
+ if (num_defined_inputs == 1 && is_loop) {
+ // For loop effect phis, the variable initialization dominates iff it
+ // dominates the first input.
+ DCHECK_EQ(2, arity);
+ DCHECK_EQ(value, buffer_[0]);
+ result.Set(var, value);
+ } else if (num_defined_inputs < arity) {
+ // If the variable is undefined on some input of this non-loop effect
+ // phi, then its initialization does not dominate this point.
+ result.Set(var, nullptr);
+ } else {
+ DCHECK_EQ(num_defined_inputs, arity);
+ // We only create a phi if the values are different.
+ if (identical_inputs) {
+ result.Set(var, value);
} else {
- danglers.push_back(use);
+ TRACE("Creating new phi\n");
+ buffer_.push_back(control);
+ Node* phi = graph_->graph()->NewNode(
+ graph_->common()->Phi(MachineRepresentation::kTagged, arity),
+ arity + 1, &buffer_.front());
+ // TODO(tebbi): Computing precise types here is tricky, because of
+ // the necessary revisitations. If we really need this, we should
+ // probably do it afterwards.
+ NodeProperties::SetType(phi, Type::Any());
+ reducer_->AddRoot(phi);
+ result.Set(var, phi);
}
}
}
- // Danglers need to be processed immediately, even if they are
- // on the stack. Since they do not have effect outputs,
- // we don't have to track whether they are on the stack.
- queue.insert(queue.end(), danglers.begin(), danglers.end());
- danglers.clear();
- }
- }
-
#ifdef DEBUG
- if (FLAG_trace_turbo_escape) {
- DebugPrint();
- }
-#endif
-}
-
-bool EscapeStatusAnalysis::IsDanglingEffectNode(Node* node) {
- if (status_[node->id()] & kDanglingComputed) {
- return status_[node->id()] & kDangling;
- }
- if (node->op()->EffectInputCount() == 0 ||
- node->op()->EffectOutputCount() == 0 ||
- (node->op()->EffectInputCount() == 1 &&
- NodeProperties::GetEffectInput(node)->opcode() == IrOpcode::kStart)) {
-    // The start node is used as a sentinel for nodes that are in general
-    // effectful, but for which the analysis has determined that they do not
-    // produce effects in this instance. We don't consider these nodes dangling.
- status_[node->id()] |= kDanglingComputed;
- return false;
- }
- for (Edge edge : node->use_edges()) {
- Node* use = edge.from();
- if (aliases_[use->id()] == kNotReachable) continue;
- if (NodeProperties::IsEffectEdge(edge)) {
- status_[node->id()] |= kDanglingComputed;
- return false;
- }
- }
- status_[node->id()] |= kDanglingComputed | kDangling;
- return true;
-}
-
-bool EscapeStatusAnalysis::IsEffectBranchPoint(Node* node) {
- if (status_[node->id()] & kBranchPointComputed) {
- return status_[node->id()] & kBranchPoint;
- }
- int count = 0;
- for (Edge edge : node->use_edges()) {
- Node* use = edge.from();
- if (aliases_[use->id()] == kNotReachable) continue;
- if (NodeProperties::IsEffectEdge(edge)) {
- if ((use->opcode() == IrOpcode::kLoadField ||
- use->opcode() == IrOpcode::kLoadElement ||
- use->opcode() == IrOpcode::kLoad) &&
- IsDanglingEffectNode(use))
- continue;
- if (++count > 1) {
- status_[node->id()] |= kBranchPointComputed | kBranchPoint;
- return true;
+ if (Node* result_node = result.Get(var)) {
+ TRACE(" result: %s#%d\n", result_node->op()->mnemonic(),
+ result_node->id());
+ } else {
+ TRACE(" result: nullptr\n");
}
+#endif
}
}
- status_[node->id()] |= kBranchPointComputed;
- return false;
+ return result;
}
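The per-variable decision above boils down to four cases. As a compact restatement, here is an invented, self-contained simplification (Node is left opaque, make_phi stands in for building the phi through graph()->NewNode(), and the phi-reuse optimization is omitted); this is not the V8 implementation itself:

#include <cstddef>
#include <functional>
#include <vector>

struct Node;  // opaque stand-in for a compiler IR node

Node* MergeVariable(const std::vector<Node*>& inputs, bool is_loop,
                    const std::function<Node*(const std::vector<Node*>&)>&
                        make_phi) {
  Node* first = inputs.front();
  bool identical = true;
  size_t num_defined = 0;
  for (Node* n : inputs) {
    if (n != first) identical = false;
    if (n != nullptr) ++num_defined;
  }
  if (first != nullptr && identical) return first;  // same value on all paths
  if (is_loop && first != nullptr && num_defined == 1)
    return first;  // the initialization dominates the loop header
  if (num_defined < inputs.size())
    return nullptr;  // undefined on some path: initialization cannot dominate
  return make_phi(inputs);  // defined everywhere, but with different values
}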
namespace {
-bool HasFrameStateInput(const Operator* op) {
- if (op->opcode() == IrOpcode::kCall ||
- op->opcode() == IrOpcode::kCallWithCallerSavedRegisters ||
- op->opcode() == IrOpcode::kTailCall) {
- const CallDescriptor* d = CallDescriptorOf(op);
- return d->NeedsFrameState();
- } else {
- return OperatorProperties::HasFrameStateInput(op);
- }
-}
-
-} // namespace
-
-bool EscapeAnalysis::Process(Node* node) {
- switch (node->opcode()) {
- case IrOpcode::kAllocate:
- ProcessAllocation(node);
- break;
- case IrOpcode::kBeginRegion:
- ForwardVirtualState(node);
+int OffsetOfFieldAccess(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kLoadField ||
+ op->opcode() == IrOpcode::kStoreField);
+ FieldAccess access = FieldAccessOf(op);
+ return access.offset;
+}
+
+Maybe<int> OffsetOfElementsAccess(const Operator* op, Node* index_node) {
+ DCHECK(op->opcode() == IrOpcode::kLoadElement ||
+ op->opcode() == IrOpcode::kStoreElement);
+ Type* index_type = NodeProperties::GetType(index_node);
+ if (!index_type->Is(Type::Number())) return Nothing<int>();
+ double max = index_type->Max();
+ double min = index_type->Min();
+ int index = static_cast<int>(min);
+ if (!(index == min && index == max)) return Nothing<int>();
+ ElementAccess access = ElementAccessOf(op);
+ DCHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
+ kPointerSizeLog2);
+ return Just(access.header_size + (index << ElementSizeLog2Of(
+ access.machine_type.representation())));
+}
+
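To make the offset arithmetic concrete (numbers chosen for illustration): with a 16-byte header and tagged 8-byte elements on a 64-bit build, a load at constant index 2 resolves to 16 + (2 << 3) = 32, and VirtualObject::FieldAt later maps that byte offset to field 32 / kPointerSize = 4. If the index type is not a singleton range (min != max), the function returns Nothing<int>() and the caller treats the object as escaping.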
+Node* LowerCompareMapsWithoutLoad(Node* checked_map,
+ ZoneHandleSet<Map> const& checked_against,
+ JSGraph* jsgraph) {
+ Node* true_node = jsgraph->TrueConstant();
+ Node* false_node = jsgraph->FalseConstant();
+ Node* replacement = false_node;
+ for (Handle<Map> map : checked_against) {
+ Node* map_node = jsgraph->HeapConstant(map);
+ // We cannot create a HeapConstant type here as we are off-thread.
+ NodeProperties::SetType(map_node, Type::Internal());
+ Node* comparison = jsgraph->graph()->NewNode(
+ jsgraph->simplified()->ReferenceEqual(), checked_map, map_node);
+ NodeProperties::SetType(comparison, Type::Boolean());
+ if (replacement == false_node) {
+ replacement = comparison;
+ } else {
+ replacement = jsgraph->graph()->NewNode(
+ jsgraph->common()->Select(MachineRepresentation::kTaggedPointer),
+ comparison, true_node, replacement);
+ NodeProperties::SetType(replacement, Type::Boolean());
+ }
+ }
+ return replacement;
+}
+
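The shape of the lowered comparison is worth spelling out: for checked_against = {M1, M2} the loop yields Select(ReferenceEqual(map, M2), true, ReferenceEqual(map, M1)), i.e. a chain of selects that ORs the per-map comparisons together; a single map degenerates to one ReferenceEqual and an empty set to the false constant. No memory load is needed because the map value itself comes from the tracked virtual object.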
+void ReduceNode(const Operator* op, EscapeAnalysisTracker::Scope* current,
+ JSGraph* jsgraph) {
+ switch (op->opcode()) {
+ case IrOpcode::kAllocate: {
+ NumberMatcher size(current->ValueInput(0));
+ if (!size.HasValue()) break;
+ int size_int = static_cast<int>(size.Value());
+ if (size_int != size.Value()) break;
+ if (const VirtualObject* vobject = current->InitVirtualObject(size_int)) {
+ // Initialize with dead nodes as a sentinel for uninitialized memory.
+ for (Variable field : *vobject) {
+ current->Set(field, jsgraph->Dead());
+ }
+ }
break;
+ }
case IrOpcode::kFinishRegion:
- ProcessFinishRegion(node);
- break;
- case IrOpcode::kStoreField:
- ProcessStoreField(node);
- break;
- case IrOpcode::kLoadField:
- ProcessLoadField(node);
- break;
- case IrOpcode::kStoreElement:
- ProcessStoreElement(node);
- break;
- case IrOpcode::kLoadElement:
- ProcessLoadElement(node);
- break;
- case IrOpcode::kCheckMaps:
- ProcessCheckMaps(node);
+ current->SetVirtualObject(current->ValueInput(0));
break;
- case IrOpcode::kStart:
- ProcessStart(node);
- break;
- case IrOpcode::kEffectPhi:
- return ProcessEffectPhi(node);
- break;
- default:
- if (node->op()->EffectInputCount() > 0) {
- ForwardVirtualState(node);
+ case IrOpcode::kStoreField: {
+ Node* object = current->ValueInput(0);
+ Node* value = current->ValueInput(1);
+ const VirtualObject* vobject = current->GetVirtualObject(object);
+ Variable var;
+ if (vobject && !vobject->HasEscaped() &&
+ vobject->FieldAt(OffsetOfFieldAccess(op)).To(&var)) {
+ current->Set(var, value);
+ current->MarkForDeletion();
+ } else {
+ current->SetEscaped(object);
+ current->SetEscaped(value);
}
- ProcessAllocationUsers(node);
break;
- }
- if (HasFrameStateInput(node->op())) {
- virtual_states_[node->id()]->SetCopyRequired();
- }
- return true;
-}
-
-void EscapeAnalysis::ProcessAllocationUsers(Node* node) {
- for (Edge edge : node->input_edges()) {
- Node* input = edge.to();
- Node* use = edge.from();
- if (edge.index() >= use->op()->ValueInputCount() +
- OperatorProperties::GetContextInputCount(use->op()))
- continue;
- switch (node->opcode()) {
- case IrOpcode::kStoreField:
- case IrOpcode::kLoadField:
- case IrOpcode::kStoreElement:
- case IrOpcode::kLoadElement:
- case IrOpcode::kFrameState:
- case IrOpcode::kStateValues:
- case IrOpcode::kReferenceEqual:
- case IrOpcode::kFinishRegion:
- case IrOpcode::kObjectIsSmi:
- break;
- case IrOpcode::kCheckMaps: {
- CheckMapsParameters params = CheckMapsParametersOf(node->op());
- if (params.flags() == CheckMapsFlag::kNone) break;
- } // Fallthrough.
- default:
- VirtualState* state = virtual_states_[node->id()];
- if (VirtualObject* obj =
- GetVirtualObject(state, ResolveReplacement(input))) {
- if (!obj->AllFieldsClear()) {
- obj = CopyForModificationAt(obj, state, node);
- obj->ClearAllFields();
- TRACE("Cleared all fields of @%d:#%d\n",
- status_analysis_->GetAlias(obj->id()), obj->id());
- }
- }
- break;
}
- }
-}
-
-VirtualState* EscapeAnalysis::CopyForModificationAt(VirtualState* state,
- Node* node) {
- if (state->owner() != node) {
- VirtualState* new_state = new (zone()) VirtualState(node, *state);
- virtual_states_[node->id()] = new_state;
- TRACE("Copying virtual state %p to new state %p at node %s#%d\n",
- static_cast<void*>(state), static_cast<void*>(new_state),
- node->op()->mnemonic(), node->id());
- return new_state;
- }
- return state;
-}
-
-VirtualObject* EscapeAnalysis::CopyForModificationAt(VirtualObject* obj,
- VirtualState* state,
- Node* node) {
- if (obj->NeedCopyForModification()) {
- state = CopyForModificationAt(state, node);
- // TODO(tebbi): this copies the complete virtual state. Replace with a more
- // precise analysis of which objects are actually affected by the change.
- Alias changed_alias = status_analysis_->GetAlias(obj->id());
- for (Alias alias = 0; alias < state->size(); ++alias) {
- if (VirtualObject* next_obj = state->VirtualObjectFromAlias(alias)) {
- if (alias != changed_alias && next_obj->NeedCopyForModification()) {
- state->Copy(next_obj, alias);
- }
+ case IrOpcode::kStoreElement: {
+ Node* object = current->ValueInput(0);
+ Node* index = current->ValueInput(1);
+ Node* value = current->ValueInput(2);
+ const VirtualObject* vobject = current->GetVirtualObject(object);
+ int offset;
+ Variable var;
+ if (vobject && !vobject->HasEscaped() &&
+ OffsetOfElementsAccess(op, index).To(&offset) &&
+ vobject->FieldAt(offset).To(&var)) {
+ current->Set(var, value);
+ current->MarkForDeletion();
+ } else {
+ current->SetEscaped(value);
+ current->SetEscaped(object);
}
+ break;
}
- return state->Copy(obj, changed_alias);
- }
- return obj;
-}
-
-void EscapeAnalysis::ForwardVirtualState(Node* node) {
- DCHECK_EQ(node->op()->EffectInputCount(), 1);
-#ifdef DEBUG
- if (node->opcode() != IrOpcode::kLoadField &&
- node->opcode() != IrOpcode::kLoadElement &&
- node->opcode() != IrOpcode::kLoad &&
- status_analysis_->IsDanglingEffectNode(node)) {
-    PrintF("Dangling effect node: #%d (%s)\n", node->id(),
- node->op()->mnemonic());
- UNREACHABLE();
- }
-#endif // DEBUG
- Node* effect = NodeProperties::GetEffectInput(node);
- DCHECK_NOT_NULL(virtual_states_[effect->id()]);
- if (virtual_states_[node->id()]) {
- TRACE("Updating virtual state %p at %s#%d from virtual state %p at %s#%d\n",
- static_cast<void*>(virtual_states_[node->id()]),
- node->op()->mnemonic(), node->id(),
- static_cast<void*>(virtual_states_[effect->id()]),
- effect->op()->mnemonic(), effect->id());
- virtual_states_[node->id()]->UpdateFrom(virtual_states_[effect->id()],
- zone());
- } else {
- virtual_states_[node->id()] = virtual_states_[effect->id()];
- TRACE("Forwarding object state %p from %s#%d to %s#%d",
- static_cast<void*>(virtual_states_[effect->id()]),
- effect->op()->mnemonic(), effect->id(), node->op()->mnemonic(),
- node->id());
- if (status_analysis_->IsEffectBranchPoint(effect)) {
- virtual_states_[node->id()]->SetCopyRequired();
- TRACE(", effect input %s#%d is branch point", effect->op()->mnemonic(),
- effect->id());
- }
- TRACE("\n");
- }
-}
-
-void EscapeAnalysis::ProcessStart(Node* node) {
- DCHECK_EQ(node->opcode(), IrOpcode::kStart);
- virtual_states_[node->id()] =
- new (zone()) VirtualState(node, zone(), status_analysis_->AliasCount());
-}
-
-bool EscapeAnalysis::ProcessEffectPhi(Node* node) {
- DCHECK_EQ(node->opcode(), IrOpcode::kEffectPhi);
- bool changed = false;
-
- VirtualState* mergeState = virtual_states_[node->id()];
- if (!mergeState) {
- mergeState =
- new (zone()) VirtualState(node, zone(), status_analysis_->AliasCount());
- virtual_states_[node->id()] = mergeState;
- changed = true;
- TRACE("Effect Phi #%d got new virtual state %p.\n", node->id(),
- static_cast<void*>(mergeState));
- }
-
- cache_->Clear();
-
- TRACE("At Effect Phi #%d, merging states into %p:", node->id(),
- static_cast<void*>(mergeState));
-
- for (int i = 0; i < node->op()->EffectInputCount(); ++i) {
- Node* input = NodeProperties::GetEffectInput(node, i);
- VirtualState* state = virtual_states_[input->id()];
- if (state) {
- cache_->states().push_back(state);
- if (state == mergeState) {
- mergeState = new (zone())
- VirtualState(node, zone(), status_analysis_->AliasCount());
- virtual_states_[node->id()] = mergeState;
- changed = true;
+ case IrOpcode::kLoadField: {
+ Node* object = current->ValueInput(0);
+ const VirtualObject* vobject = current->GetVirtualObject(object);
+ Variable var;
+ Node* value;
+ if (vobject && !vobject->HasEscaped() &&
+ vobject->FieldAt(OffsetOfFieldAccess(op)).To(&var) &&
+ current->Get(var).To(&value)) {
+ current->SetReplacement(value);
+ } else {
+ current->SetEscaped(object);
}
+ break;
}
- TRACE(" %p (from %d %s)", static_cast<void*>(state), input->id(),
- input->op()->mnemonic());
- }
- TRACE("\n");
-
- if (cache_->states().size() == 0) {
- return changed;
- }
-
- changed =
- mergeState->MergeFrom(cache_, zone(), graph(), common(), node) || changed;
-
- TRACE("Merge %s the node.\n", changed ? "changed" : "did not change");
-
- if (changed) {
- status_analysis_->ResizeStatusVector();
- }
- return changed;
-}
-
-void EscapeAnalysis::ProcessAllocation(Node* node) {
- DCHECK_EQ(node->opcode(), IrOpcode::kAllocate);
- ForwardVirtualState(node);
- VirtualState* state = virtual_states_[node->id()];
- Alias alias = status_analysis_->GetAlias(node->id());
-
- // Check if we have already processed this node.
- if (state->VirtualObjectFromAlias(alias)) {
- return;
- }
-
- if (state->owner()->opcode() == IrOpcode::kEffectPhi) {
- state = CopyForModificationAt(state, node);
- }
-
- NumberMatcher size(node->InputAt(0));
- DCHECK(node->InputAt(0)->opcode() != IrOpcode::kInt32Constant &&
- node->InputAt(0)->opcode() != IrOpcode::kInt64Constant &&
- node->InputAt(0)->opcode() != IrOpcode::kFloat32Constant &&
- node->InputAt(0)->opcode() != IrOpcode::kFloat64Constant);
- if (size.HasValue()) {
- VirtualObject* obj = new (zone()) VirtualObject(
- node->id(), state, zone(), size.Value() / kPointerSize, false);
- state->SetVirtualObject(alias, obj);
- } else {
- state->SetVirtualObject(
- alias, new (zone()) VirtualObject(node->id(), state, zone()));
- }
-}
-
-void EscapeAnalysis::ProcessFinishRegion(Node* node) {
- DCHECK_EQ(node->opcode(), IrOpcode::kFinishRegion);
- ForwardVirtualState(node);
- Node* allocation = NodeProperties::GetValueInput(node, 0);
- if (allocation->opcode() == IrOpcode::kAllocate) {
- VirtualState* state = virtual_states_[node->id()];
- VirtualObject* obj =
- state->VirtualObjectFromAlias(status_analysis_->GetAlias(node->id()));
- DCHECK_NOT_NULL(obj);
- obj->SetInitialized();
- }
-}
-
-Node* EscapeAnalysis::replacement(Node* node) {
- if (node->id() >= replacements_.size()) return nullptr;
- return replacements_[node->id()];
-}
-
-bool EscapeAnalysis::SetReplacement(Node* node, Node* rep) {
- bool changed = replacements_[node->id()] != rep;
- replacements_[node->id()] = rep;
- return changed;
-}
-
-bool EscapeAnalysis::UpdateReplacement(VirtualState* state, Node* node,
- Node* rep) {
- if (SetReplacement(node, rep)) {
- if (rep) {
- TRACE("Replacement of #%d is #%d (%s)\n", node->id(), rep->id(),
- rep->op()->mnemonic());
- } else {
- TRACE("Replacement of #%d cleared\n", node->id());
- }
- return true;
- }
- return false;
-}
-
-Node* EscapeAnalysis::ResolveReplacement(Node* node) {
- while (replacement(node)) {
- node = replacement(node);
- }
- return node;
-}
-
-Node* EscapeAnalysis::GetReplacement(Node* node) {
- Node* result = nullptr;
- while (replacement(node)) {
- node = result = replacement(node);
- }
- return result;
-}
-
-bool EscapeAnalysis::IsVirtual(Node* node) {
- if (node->id() >= status_analysis_->GetStatusVectorSize()) {
- return false;
- }
- return status_analysis_->IsVirtual(node);
-}
-
-bool EscapeAnalysis::IsEscaped(Node* node) {
- if (node->id() >= status_analysis_->GetStatusVectorSize()) {
- return false;
- }
- return status_analysis_->IsEscaped(node);
-}
-
-bool EscapeAnalysis::CompareVirtualObjects(Node* left, Node* right) {
- DCHECK(IsVirtual(left) && IsVirtual(right));
- left = ResolveReplacement(left);
- right = ResolveReplacement(right);
- if (IsEquivalentPhi(left, right)) {
- return true;
- }
- return false;
-}
-
-namespace {
-
-#ifdef DEBUG
-bool IsOffsetForFieldAccessCorrect(const FieldAccess& access) {
-#if V8_TARGET_LITTLE_ENDIAN
- return (access.offset % kPointerSize) == 0;
-#else
- return ((access.offset +
- (1 << ElementSizeLog2Of(access.machine_type.representation()))) %
- kPointerSize) == 0;
-#endif
-}
-#endif
-
-int OffsetForFieldAccess(Node* node) {
- FieldAccess access = FieldAccessOf(node->op());
- DCHECK(IsOffsetForFieldAccessCorrect(access));
- return access.offset / kPointerSize;
-}
-
-int OffsetForElementAccess(Node* node, int index) {
- ElementAccess access = ElementAccessOf(node->op());
- DCHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
- kPointerSizeLog2);
- DCHECK_EQ(access.header_size % kPointerSize, 0);
- return access.header_size / kPointerSize + index;
-}
-
-} // namespace
-
-void EscapeAnalysis::ProcessLoadField(Node* node) {
- DCHECK_EQ(node->opcode(), IrOpcode::kLoadField);
- ForwardVirtualState(node);
- Node* from = ResolveReplacement(NodeProperties::GetValueInput(node, 0));
- VirtualState* state = virtual_states_[node->id()];
- if (VirtualObject* object = GetVirtualObject(state, from)) {
- if (!object->IsTracked()) return;
- int offset = OffsetForFieldAccess(node);
- if (static_cast<size_t>(offset) >= object->field_count()) {
- // We have a load from a field that is not inside the {object}. This
- // can only happen with conflicting type feedback and for dead {node}s.
- // For now, we just mark the {object} as escaping.
- // TODO(turbofan): Consider introducing an Undefined or None operator
- // that we can replace this load with, since we know it's dead code.
- if (status_analysis_->SetEscaped(from)) {
- TRACE(
- "Setting #%d (%s) to escaped because load field #%d from "
- "offset %d outside of object\n",
- from->id(), from->op()->mnemonic(), node->id(), offset);
+ case IrOpcode::kLoadElement: {
+ Node* object = current->ValueInput(0);
+ Node* index = current->ValueInput(1);
+ const VirtualObject* vobject = current->GetVirtualObject(object);
+ int offset;
+ Variable var;
+ Node* value;
+ if (vobject && !vobject->HasEscaped() &&
+ OffsetOfElementsAccess(op, index).To(&offset) &&
+ vobject->FieldAt(offset).To(&var) && current->Get(var).To(&value)) {
+ current->SetReplacement(value);
+ } else {
+ current->SetEscaped(object);
}
- return;
+ break;
}
- Node* value = object->GetField(offset);
- if (value) {
- value = ResolveReplacement(value);
+ case IrOpcode::kTypeGuard: {
+ // The type-guard is re-introduced in the final reducer if the types
+ // don't match.
+ current->SetReplacement(current->ValueInput(0));
+ break;
}
- // Record that the load has this alias.
- UpdateReplacement(state, node, value);
- } else {
- UpdateReplacement(state, node, nullptr);
- }
-}
-
-void EscapeAnalysis::ProcessCheckMaps(Node* node) {
- DCHECK_EQ(node->opcode(), IrOpcode::kCheckMaps);
- ForwardVirtualState(node);
- Node* checked = ResolveReplacement(NodeProperties::GetValueInput(node, 0));
- if (FLAG_turbo_experimental) {
- VirtualState* state = virtual_states_[node->id()];
- if (VirtualObject* object = GetVirtualObject(state, checked)) {
- if (!object->IsTracked()) {
- if (status_analysis_->SetEscaped(node)) {
- TRACE(
- "Setting #%d (%s) to escaped because checked object #%i is not "
- "tracked\n",
- node->id(), node->op()->mnemonic(), object->id());
+ case IrOpcode::kReferenceEqual: {
+ Node* left = current->ValueInput(0);
+ Node* right = current->ValueInput(1);
+ const VirtualObject* left_object = current->GetVirtualObject(left);
+ const VirtualObject* right_object = current->GetVirtualObject(right);
+ Node* replacement = nullptr;
+ if (left_object && !left_object->HasEscaped()) {
+ if (right_object && !right_object->HasEscaped() &&
+ left_object->id() == right_object->id()) {
+ replacement = jsgraph->TrueConstant();
+ } else {
+ replacement = jsgraph->FalseConstant();
}
- return;
- }
- CheckMapsParameters params = CheckMapsParametersOf(node->op());
-
- Node* value = object->GetField(HeapObject::kMapOffset / kPointerSize);
- if (value) {
- value = ResolveReplacement(value);
- // TODO(tebbi): We want to extend this beyond constant folding with a
- // CheckMapsValue operator that takes the load-eliminated map value as
- // input.
- if (value->opcode() == IrOpcode::kHeapConstant &&
- params.maps().contains(ZoneHandleSet<Map>(bit_cast<Handle<Map>>(
- OpParameter<Handle<HeapObject>>(value))))) {
- TRACE("CheckMaps #%i seems to be redundant (until now).\n",
- node->id());
- return;
+ } else if (right_object && !right_object->HasEscaped()) {
+ replacement = jsgraph->FalseConstant();
+ }
+ if (replacement) {
+ // TODO(tebbi) This is a workaround for uninhabited types. If we
+ // replaced a value of uninhabited type with a constant, we would
+ // widen the type of the node. This could produce inconsistent
+ // types (which might confuse representation selection). We get
+ // around this by refusing to constant-fold and escape-analyze
+ // if the type is not inhabited.
+ if (NodeProperties::GetType(left)->IsInhabited() &&
+ NodeProperties::GetType(right)->IsInhabited()) {
+ current->SetReplacement(replacement);
+ } else {
+ current->SetEscaped(left);
+ current->SetEscaped(right);
}
}
+ break;
}
- }
- if (status_analysis_->SetEscaped(node)) {
- TRACE("Setting #%d (%s) to escaped (checking #%i)\n", node->id(),
- node->op()->mnemonic(), checked->id());
- }
-}
-
-void EscapeAnalysis::ProcessLoadElement(Node* node) {
- DCHECK_EQ(node->opcode(), IrOpcode::kLoadElement);
- ForwardVirtualState(node);
- Node* from = ResolveReplacement(NodeProperties::GetValueInput(node, 0));
- VirtualState* state = virtual_states_[node->id()];
- Node* index_node = node->InputAt(1);
- NumberMatcher index(index_node);
- DCHECK(index_node->opcode() != IrOpcode::kInt32Constant &&
- index_node->opcode() != IrOpcode::kInt64Constant &&
- index_node->opcode() != IrOpcode::kFloat32Constant &&
- index_node->opcode() != IrOpcode::kFloat64Constant);
- if (index.HasValue()) {
- if (VirtualObject* object = GetVirtualObject(state, from)) {
- if (!object->IsTracked()) return;
- int offset = OffsetForElementAccess(node, index.Value());
- if (static_cast<size_t>(offset) >= object->field_count()) return;
- Node* value = object->GetField(offset);
- if (value) {
- value = ResolveReplacement(value);
+ case IrOpcode::kCheckMaps: {
+ CheckMapsParameters params = CheckMapsParametersOf(op);
+ Node* checked = current->ValueInput(0);
+ const VirtualObject* vobject = current->GetVirtualObject(checked);
+ Variable map_field;
+ Node* map;
+ if (vobject && !vobject->HasEscaped() &&
+ vobject->FieldAt(HeapObject::kMapOffset).To(&map_field) &&
+ current->Get(map_field).To(&map)) {
+ if (map) {
+ Type* const map_type = NodeProperties::GetType(map);
+ if (map_type->IsHeapConstant() &&
+ params.maps().contains(ZoneHandleSet<Map>(bit_cast<Handle<Map>>(
+ map_type->AsHeapConstant()->Value())))) {
+ current->MarkForDeletion();
+ break;
+ }
+ } else {
+ // If the variable has no value, we have not reached the fixed-point
+ // yet.
+ break;
+ }
}
- // Record that the load has this alias.
- UpdateReplacement(state, node, value);
- } else {
- UpdateReplacement(state, node, nullptr);
- }
- } else {
- // We have a load from a non-const index, cannot eliminate object.
- if (status_analysis_->SetEscaped(from)) {
- TRACE(
- "Setting #%d (%s) to escaped because load element #%d from non-const "
- "index #%d (%s)\n",
- from->id(), from->op()->mnemonic(), node->id(), index_node->id(),
- index_node->op()->mnemonic());
+ current->SetEscaped(checked);
+ break;
}
- }
-}
-
-void EscapeAnalysis::ProcessStoreField(Node* node) {
- DCHECK_EQ(node->opcode(), IrOpcode::kStoreField);
- ForwardVirtualState(node);
- Node* to = ResolveReplacement(NodeProperties::GetValueInput(node, 0));
- VirtualState* state = virtual_states_[node->id()];
- if (VirtualObject* object = GetVirtualObject(state, to)) {
- if (!object->IsTracked()) return;
- int offset = OffsetForFieldAccess(node);
- if (static_cast<size_t>(offset) >= object->field_count()) {
- // We have a store to a field that is not inside the {object}. This
- // can only happen with conflicting type feedback and for dead {node}s.
- // For now, we just mark the {object} as escaping.
- // TODO(turbofan): Consider just eliminating the store in the reducer
- // pass, as it's dead code anyways.
- if (status_analysis_->SetEscaped(to)) {
- TRACE(
- "Setting #%d (%s) to escaped because store field #%d to "
- "offset %d outside of object\n",
- to->id(), to->op()->mnemonic(), node->id(), offset);
+ case IrOpcode::kCompareMaps: {
+ Node* object = current->ValueInput(0);
+ const VirtualObject* vobject = current->GetVirtualObject(object);
+ Variable map_field;
+ Node* object_map;
+ if (vobject && !vobject->HasEscaped() &&
+ vobject->FieldAt(HeapObject::kMapOffset).To(&map_field) &&
+ current->Get(map_field).To(&object_map)) {
+ if (object_map) {
+ current->SetReplacement(LowerCompareMapsWithoutLoad(
+ object_map, CompareMapsParametersOf(op), jsgraph));
+ break;
+ } else {
+ // If the variable has no value, we have not reached the fixed-point
+ // yet.
+ break;
+ }
}
- return;
- }
- Node* val = ResolveReplacement(NodeProperties::GetValueInput(node, 1));
-    // TODO(mstarzinger): The following is a workaround to avoid tracking some
-    // well-known raw fields.
- // fields which are hard-coded in {TranslatedState::MaterializeAt} as well.
- if (val->opcode() == IrOpcode::kInt32Constant ||
- val->opcode() == IrOpcode::kInt64Constant) {
- DCHECK(FieldAccessOf(node->op()).offset == Name::kHashFieldOffset);
- val = slot_not_analyzed_;
- }
- object = CopyForModificationAt(object, state, node);
- if (object->GetField(offset) != val) {
- object->SetField(offset, val);
+ current->SetEscaped(object);
+ break;
}
- }
-}
-
-void EscapeAnalysis::ProcessStoreElement(Node* node) {
- DCHECK_EQ(node->opcode(), IrOpcode::kStoreElement);
- ForwardVirtualState(node);
- Node* to = ResolveReplacement(NodeProperties::GetValueInput(node, 0));
- Node* index_node = node->InputAt(1);
- NumberMatcher index(index_node);
- DCHECK(index_node->opcode() != IrOpcode::kInt32Constant &&
- index_node->opcode() != IrOpcode::kInt64Constant &&
- index_node->opcode() != IrOpcode::kFloat32Constant &&
- index_node->opcode() != IrOpcode::kFloat64Constant);
- VirtualState* state = virtual_states_[node->id()];
- if (index.HasValue()) {
- if (VirtualObject* object = GetVirtualObject(state, to)) {
- if (!object->IsTracked()) return;
- int offset = OffsetForElementAccess(node, index.Value());
- if (static_cast<size_t>(offset) >= object->field_count()) return;
- Node* val = ResolveReplacement(NodeProperties::GetValueInput(node, 2));
- object = CopyForModificationAt(object, state, node);
- if (object->GetField(offset) != val) {
- object->SetField(offset, val);
+ case IrOpcode::kCheckHeapObject: {
+ Node* checked = current->ValueInput(0);
+ switch (checked->opcode()) {
+ case IrOpcode::kAllocate:
+ case IrOpcode::kFinishRegion:
+ case IrOpcode::kHeapConstant:
+ current->SetReplacement(checked);
+ break;
+ default:
+ current->SetEscaped(checked);
+ break;
}
+ break;
}
- } else {
- // We have a store to a non-const index, cannot eliminate object.
- if (status_analysis_->SetEscaped(to)) {
- TRACE(
- "Setting #%d (%s) to escaped because store element #%d to non-const "
- "index #%d (%s)\n",
- to->id(), to->op()->mnemonic(), node->id(), index_node->id(),
- index_node->op()->mnemonic());
- }
- if (VirtualObject* object = GetVirtualObject(state, to)) {
- if (!object->IsTracked()) return;
- object = CopyForModificationAt(object, state, node);
- if (!object->AllFieldsClear()) {
- object->ClearAllFields();
- TRACE("Cleared all fields of @%d:#%d\n",
- status_analysis_->GetAlias(object->id()), object->id());
+ case IrOpcode::kMapGuard: {
+ Node* object = current->ValueInput(0);
+ const VirtualObject* vobject = current->GetVirtualObject(object);
+ if (vobject && !vobject->HasEscaped()) {
+ current->MarkForDeletion();
}
+ break;
}
- }
-}
-
-Node* EscapeAnalysis::GetOrCreateObjectState(Node* effect, Node* node) {
- if ((node->opcode() == IrOpcode::kFinishRegion ||
- node->opcode() == IrOpcode::kAllocate) &&
- IsVirtual(node)) {
- if (VirtualObject* vobj = GetVirtualObject(virtual_states_[effect->id()],
- ResolveReplacement(node))) {
- if (Node* object_state = vobj->GetObjectState()) {
- return object_state;
- } else {
- cache_->fields().clear();
- for (size_t i = 0; i < vobj->field_count(); ++i) {
- if (Node* field = vobj->GetField(i)) {
- cache_->fields().push_back(ResolveReplacement(field));
- } else {
- return nullptr;
- }
- }
- int input_count = static_cast<int>(cache_->fields().size());
- Node* new_object_state =
- graph()->NewNode(common()->ObjectState(vobj->id(), input_count),
- input_count, &cache_->fields().front());
- NodeProperties::SetType(new_object_state, Type::OtherInternal());
- vobj->SetObjectState(new_object_state);
- TRACE(
- "Creating object state #%d for vobj %p (from node #%d) at effect "
- "#%d\n",
- new_object_state->id(), static_cast<void*>(vobj), node->id(),
- effect->id());
- // Now fix uses of other objects.
- for (size_t i = 0; i < vobj->field_count(); ++i) {
- if (Node* field = vobj->GetField(i)) {
- if (Node* field_object_state =
- GetOrCreateObjectState(effect, field)) {
- NodeProperties::ReplaceValueInput(
- new_object_state, field_object_state, static_cast<int>(i));
- }
- }
- }
- return new_object_state;
+ case IrOpcode::kStateValues:
+ case IrOpcode::kFrameState:
+ // These uses are always safe.
+ break;
+ default: {
+ // For unknown nodes, treat all value inputs as escaping.
+ int value_input_count = op->ValueInputCount();
+ for (int i = 0; i < value_input_count; ++i) {
+ Node* input = current->ValueInput(i);
+ current->SetEscaped(input);
+ }
+ if (OperatorProperties::HasContextInput(op)) {
+ current->SetEscaped(current->ContextInput());
}
+ break;
}
}
- return nullptr;
}
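Taken together, these cases implement scalar replacement. For an invented sequence a = Allocate(16); StoreField[+8](a, v); x = LoadField[+8](a) with no other uses of a: the kAllocate case creates a two-field virtual object initialized with Dead sentinels, the kStoreField case writes v into the field's Variable and marks the store for deletion, and the kLoadField case sets the load's replacement to v. Nothing forces the object to escape, so the allocation itself becomes dead and can be dropped by the subsequent reduction pass.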
-bool EscapeAnalysis::IsCyclicObjectState(Node* effect, Node* node) {
- if ((node->opcode() == IrOpcode::kFinishRegion ||
- node->opcode() == IrOpcode::kAllocate) &&
- IsVirtual(node)) {
- if (VirtualObject* vobj = GetVirtualObject(virtual_states_[effect->id()],
- ResolveReplacement(node))) {
- if (cycle_detection_.find(vobj) != cycle_detection_.end()) return true;
- cycle_detection_.insert(vobj);
- bool cycle_detected = false;
- for (size_t i = 0; i < vobj->field_count(); ++i) {
- if (Node* field = vobj->GetField(i)) {
- if (IsCyclicObjectState(effect, field)) cycle_detected = true;
- }
- }
- cycle_detection_.erase(vobj);
- return cycle_detected;
- }
- }
- return false;
+} // namespace
+
+void EscapeAnalysis::Reduce(Node* node, Reduction* reduction) {
+ const Operator* op = node->op();
+ TRACE("Reducing %s#%d\n", op->mnemonic(), node->id());
+
+ EscapeAnalysisTracker::Scope current(this, tracker_, node, reduction);
+ ReduceNode(op, &current, jsgraph());
}
-void EscapeAnalysis::DebugPrintState(VirtualState* state) {
- PrintF("Dumping virtual state %p\n", static_cast<void*>(state));
- for (Alias alias = 0; alias < status_analysis_->AliasCount(); ++alias) {
- if (VirtualObject* object = state->VirtualObjectFromAlias(alias)) {
- PrintF(" Alias @%d: Object #%d with %zu fields\n", alias, object->id(),
- object->field_count());
- for (size_t i = 0; i < object->field_count(); ++i) {
- if (Node* f = object->GetField(i)) {
- PrintF(" Field %zu = #%d (%s)\n", i, f->id(), f->op()->mnemonic());
- }
- }
- }
- }
+EscapeAnalysis::EscapeAnalysis(JSGraph* jsgraph, Zone* zone)
+ : EffectGraphReducer(
+ jsgraph->graph(),
+ [this](Node* node, Reduction* reduction) { Reduce(node, reduction); },
+ zone),
+ tracker_(new (zone) EscapeAnalysisTracker(jsgraph, this, zone)),
+ jsgraph_(jsgraph) {}
+
+Node* EscapeAnalysisResult::GetReplacementOf(Node* node) {
+ return tracker_->GetReplacementOf(node);
}
-void EscapeAnalysis::DebugPrint() {
- ZoneVector<VirtualState*> object_states(zone());
- for (NodeId id = 0; id < virtual_states_.size(); id++) {
- if (VirtualState* states = virtual_states_[id]) {
- if (std::find(object_states.begin(), object_states.end(), states) ==
- object_states.end()) {
- object_states.push_back(states);
- }
- }
- }
- for (size_t n = 0; n < object_states.size(); n++) {
- DebugPrintState(object_states[n]);
- }
+Node* EscapeAnalysisResult::GetVirtualObjectField(const VirtualObject* vobject,
+ int field, Node* effect) {
+ return tracker_->variable_states_.Get(vobject->FieldAt(field).FromJust(),
+ effect);
}
-VirtualObject* EscapeAnalysis::GetVirtualObject(VirtualState* state,
- Node* node) {
- if (node->id() >= status_analysis_->GetAliasMap().size()) return nullptr;
- Alias alias = status_analysis_->GetAlias(node->id());
- if (alias >= state->size()) return nullptr;
- return state->VirtualObjectFromAlias(alias);
+const VirtualObject* EscapeAnalysisResult::GetVirtualObject(Node* node) {
+ return tracker_->virtual_objects_.Get(node);
}
-bool EscapeAnalysis::ExistsVirtualAllocate() {
- for (size_t id = 0; id < status_analysis_->GetAliasMap().size(); ++id) {
- Alias alias = status_analysis_->GetAlias(static_cast<NodeId>(id));
- if (alias < EscapeStatusAnalysis::kUntrackable) {
- if (status_analysis_->IsVirtual(static_cast<int>(id))) {
- return true;
- }
- }
+VirtualObject::VirtualObject(VariableTracker* var_states, VirtualObject::Id id,
+ int size)
+ : Dependable(var_states->zone()), id_(id), fields_(var_states->zone()) {
+ DCHECK_EQ(0, size % kPointerSize);
+ TRACE("Creating VirtualObject id:%d size:%d\n", id, size);
+ int num_fields = size / kPointerSize;
+ fields_.reserve(num_fields);
+ for (int i = 0; i < num_fields; ++i) {
+ fields_.push_back(var_states->NewVariable());
}
- return false;
}
-Graph* EscapeAnalysis::graph() const { return status_analysis_->graph(); }
-
#undef TRACE
} // namespace compiler
diff --git a/deps/v8/src/compiler/escape-analysis.h b/deps/v8/src/compiler/escape-analysis.h
index d8c654f521..504729bc81 100644
--- a/deps/v8/src/compiler/escape-analysis.h
+++ b/deps/v8/src/compiler/escape-analysis.h
@@ -1,90 +1,187 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
+// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_ESCAPE_ANALYSIS_H_
#define V8_COMPILER_ESCAPE_ANALYSIS_H_
-#include "src/compiler/graph.h"
+#include "src/base/functional.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/persistent-map.h"
#include "src/globals.h"
+#include "src/objects/name.h"
namespace v8 {
namespace internal {
namespace compiler {
-// Forward declarations.
class CommonOperatorBuilder;
-class EscapeStatusAnalysis;
-namespace impl {
-class MergeCache;
-class VirtualState;
-class VirtualObject;
-}; // namespace impl
-
-// EscapeObjectAnalysis simulates stores to determine values of loads if
-// an object is virtual and eliminated.
-class V8_EXPORT_PRIVATE EscapeAnalysis {
+class VariableTracker;
+class EscapeAnalysisTracker;
+
+// {EffectGraphReducer} reduces up to a fixed point. It distinguishes changes to
+// the effect output of a node from changes to the value output to reduce the
+// number of revisitations.
+class EffectGraphReducer {
+ public:
+ class Reduction {
+ public:
+ bool value_changed() const { return value_changed_; }
+ void set_value_changed() { value_changed_ = true; }
+ bool effect_changed() const { return effect_changed_; }
+ void set_effect_changed() { effect_changed_ = true; }
+
+ private:
+ bool value_changed_ = false;
+ bool effect_changed_ = false;
+ };
+
+ EffectGraphReducer(Graph* graph,
+ std::function<void(Node*, Reduction*)> reduce, Zone* zone);
+
+ void ReduceGraph() { ReduceFrom(graph_->end()); }
+
+ // Mark node for revisitation.
+ void Revisit(Node* node);
+
+ // Add a new root node to start reduction from. This is useful if the reducer
+ // adds nodes that are not yet reachable, but should already be considered
+ // part of the graph.
+ void AddRoot(Node* node) {
+ DCHECK_EQ(State::kUnvisited, state_.Get(node));
+ state_.Set(node, State::kRevisit);
+ revisit_.push(node);
+ }
+
+ bool Complete() { return stack_.empty() && revisit_.empty(); }
+
+ private:
+ struct NodeState {
+ Node* node;
+ int input_index;
+ };
+ void ReduceFrom(Node* node);
+ enum class State : uint8_t { kUnvisited = 0, kRevisit, kOnStack, kVisited };
+ const uint8_t kNumStates = static_cast<uint8_t>(State::kVisited) + 1;
+ Graph* graph_;
+ NodeMarker<State> state_;
+ ZoneStack<Node*> revisit_;
+ ZoneStack<NodeState> stack_;
+ std::function<void(Node*, Reduction*)> reduce_;
+};
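Given this interface, a client wires up a reducer roughly as follows. The sketch only relies on the constructor, ReduceGraph() and Complete() declared above; the callback body is elided:

// Hypothetical usage sketch, not code from this patch.
void RunReducer(Graph* graph, Zone* zone) {
  EffectGraphReducer reducer(
      graph,
      [](Node* node, EffectGraphReducer::Reduction* reduction) {
        // Inspect {node} and report what changed, so that only the
        // affected value uses or effect uses are queued for revisitation.
        // reduction->set_value_changed();
        // reduction->set_effect_changed();
      },
      zone);
  reducer.ReduceGraph();
  DCHECK(reducer.Complete());
}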
+
+// A variable is an abstract storage location, which is lowered to SSA values
+// and phi nodes by {VariableTracker}.
+class Variable {
+ public:
+ Variable() : id_(kInvalid) {}
+ bool operator==(Variable other) const { return id_ == other.id_; }
+ bool operator!=(Variable other) const { return id_ != other.id_; }
+ bool operator<(Variable other) const { return id_ < other.id_; }
+ static Variable Invalid() { return Variable(kInvalid); }
+ friend V8_INLINE size_t hash_value(Variable v) {
+ return base::hash_value(v.id_);
+ }
+ friend std::ostream& operator<<(std::ostream& os, Variable var) {
+ return os << var.id_;
+ }
+
+ private:
+ typedef int Id;
+ explicit Variable(Id id) : id_(id) {}
+ Id id_;
+ static const Id kInvalid = -1;
+
+ friend class VariableTracker;
+};
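Concretely: a 16-byte virtual object on a 64-bit target owns two Variables, one per word. Every tracked store becomes an assignment to the corresponding Variable, and VariableTracker::MergeInputs materializes a phi node wherever different assigned values reach a control-flow join.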
+
+// An object that can track the nodes in the graph whose current reduction
+// depends on the value of the object.
+class Dependable : public ZoneObject {
public:
- EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common, Zone* zone);
- ~EscapeAnalysis();
-
- bool Run();
-
- Node* GetReplacement(Node* node);
- Node* ResolveReplacement(Node* node);
- bool IsVirtual(Node* node);
- bool IsEscaped(Node* node);
- bool CompareVirtualObjects(Node* left, Node* right);
- Node* GetOrCreateObjectState(Node* effect, Node* node);
- bool IsCyclicObjectState(Node* effect, Node* node);
- bool ExistsVirtualAllocate();
- bool SetReplacement(Node* node, Node* rep);
- bool AllObjectsComplete();
+ explicit Dependable(Zone* zone) : dependants_(zone) {}
+ void AddDependency(Node* node) { dependants_.push_back(node); }
+ void RevisitDependants(EffectGraphReducer* reducer) {
+ for (Node* node : dependants_) {
+ reducer->Revisit(node);
+ }
+ dependants_.clear();
+ }
+
+ private:
+ ZoneVector<Node*> dependants_;
+};
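The intended protocol, as far as it can be read off this interface: a node whose reduction consults an object's state registers itself via AddDependency(), and when that state later changes (for instance, when a VirtualObject escapes), RevisitDependants() re-queues exactly those dependent nodes in the reducer.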
+
+// A virtual object represents an allocation site and tracks the Variables
+// associated with its fields as well as its global escape status.
+class VirtualObject : public Dependable {
+ public:
+ typedef uint32_t Id;
+ typedef ZoneVector<Variable>::const_iterator const_iterator;
+ VirtualObject(VariableTracker* var_states, Id id, int size);
+ Maybe<Variable> FieldAt(int offset) const {
+ if (offset % kPointerSize != 0) {
+ // We do not support fields that are not word-aligned. Bail out by
+ // treating the object as escaping. This can only happen for
+      // {Name::kHashFieldOffset} on 64-bit big-endian architectures.
+ DCHECK_EQ(Name::kHashFieldOffset, offset);
+ return Nothing<Variable>();
+ }
+ CHECK(!HasEscaped());
+ if (offset >= size()) {
+ // TODO(tebbi): Reading out-of-bounds can only happen in unreachable
+ // code. In this case, we have to mark the object as escaping to avoid
+ // dead nodes in the graph. This is a workaround that should be removed
+ // once we can handle dead nodes everywhere.
+ return Nothing<Variable>();
+ }
+ return Just(fields_.at(offset / kPointerSize));
+ }
+ Id id() const { return id_; }
+ int size() const { return static_cast<int>(kPointerSize * fields_.size()); }
+ // Escaped might mean that the object escaped to untracked memory or that it
+ // is used in an operation that requires materialization.
+ void SetEscaped() { escaped_ = true; }
+ bool HasEscaped() const { return escaped_; }
+ const_iterator begin() const { return fields_.begin(); }
+ const_iterator end() const { return fields_.end(); }
+
+ private:
+ bool escaped_ = false;
+ Id id_;
+ ZoneVector<Variable> fields_;
+};
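The Maybe<Variable> result encodes the bail-out protocol seen throughout the reducer cases above. A small sketch of the expected calling pattern (helper name invented):

bool TryGetFieldVariable(const VirtualObject* vobject, int offset,
                         Variable* var_out) {
  // Check the escape status first: FieldAt() CHECKs !HasEscaped().
  if (vobject == nullptr || vobject->HasEscaped()) return false;
  // Nothing<Variable>() signals an untrackable access (unaligned or
  // out-of-bounds offset); callers then mark the object as escaping.
  return vobject->FieldAt(offset).To(var_out);
}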
+
+class EscapeAnalysisResult {
+ public:
+ explicit EscapeAnalysisResult(EscapeAnalysisTracker* tracker)
+ : tracker_(tracker) {}
+
+ const VirtualObject* GetVirtualObject(Node* node);
+ Node* GetVirtualObjectField(const VirtualObject* vobject, int field,
+ Node* effect);
+ Node* GetReplacementOf(Node* node);
+
+ private:
+ EscapeAnalysisTracker* tracker_;
+};
+
+class V8_EXPORT_PRIVATE EscapeAnalysis final
+ : public NON_EXPORTED_BASE(EffectGraphReducer) {
+ public:
+ EscapeAnalysis(JSGraph* jsgraph, Zone* zone);
+
+ EscapeAnalysisResult analysis_result() {
+ DCHECK(Complete());
+ return EscapeAnalysisResult(tracker_);
+ }
private:
- void RunObjectAnalysis();
- bool Process(Node* node);
- void ProcessLoadField(Node* node);
- void ProcessStoreField(Node* node);
- void ProcessLoadElement(Node* node);
- void ProcessStoreElement(Node* node);
- void ProcessCheckMaps(Node* node);
- void ProcessAllocationUsers(Node* node);
- void ProcessAllocation(Node* node);
- void ProcessFinishRegion(Node* node);
- void ProcessCall(Node* node);
- void ProcessStart(Node* node);
- bool ProcessEffectPhi(Node* node);
-
- void ForwardVirtualState(Node* node);
- impl::VirtualState* CopyForModificationAt(impl::VirtualState* state,
- Node* node);
- impl::VirtualObject* CopyForModificationAt(impl::VirtualObject* obj,
- impl::VirtualState* state,
- Node* node);
-
- Node* replacement(Node* node);
- bool UpdateReplacement(impl::VirtualState* state, Node* node, Node* rep);
-
- impl::VirtualObject* GetVirtualObject(impl::VirtualState* state, Node* node);
-
- void DebugPrint();
- void DebugPrintState(impl::VirtualState* state);
-
- Graph* graph() const;
- Zone* zone() const { return zone_; }
- CommonOperatorBuilder* common() const { return common_; }
-
- Zone* const zone_;
- Node* const slot_not_analyzed_;
- CommonOperatorBuilder* const common_;
- EscapeStatusAnalysis* status_analysis_;
- ZoneVector<impl::VirtualState*> virtual_states_;
- ZoneVector<Node*> replacements_;
- ZoneSet<impl::VirtualObject*> cycle_detection_;
- impl::MergeCache* cache_;
-
- DISALLOW_COPY_AND_ASSIGN(EscapeAnalysis);
+ void Reduce(Node* node, Reduction* reduction);
+ JSGraph* jsgraph() { return jsgraph_; }
+ EscapeAnalysisTracker* tracker_;
+ JSGraph* jsgraph_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/frame.h b/deps/v8/src/compiler/frame.h
index 4ea19f77e4..fe8008913d 100644
--- a/deps/v8/src/compiler/frame.h
+++ b/deps/v8/src/compiler/frame.h
@@ -85,12 +85,12 @@ class Frame : public ZoneObject {
inline int GetSpillSlotCount() const { return spill_slot_count_; }
void SetAllocatedRegisters(BitVector* regs) {
- DCHECK(allocated_registers_ == nullptr);
+ DCHECK_NULL(allocated_registers_);
allocated_registers_ = regs;
}
void SetAllocatedDoubleRegisters(BitVector* regs) {
- DCHECK(allocated_double_registers_ == nullptr);
+ DCHECK_NULL(allocated_double_registers_);
allocated_double_registers_ = regs;
}
@@ -169,12 +169,12 @@ class FrameOffset {
inline int offset() { return offset_ & ~1; }
inline static FrameOffset FromStackPointer(int offset) {
- DCHECK((offset & 1) == 0);
+ DCHECK_EQ(0, offset & 1);
return FrameOffset(offset | kFromSp);
}
inline static FrameOffset FromFramePointer(int offset) {
- DCHECK((offset & 1) == 0);
+ DCHECK_EQ(0, offset & 1);
return FrameOffset(offset | kFromFp);
}
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index 041b2eefd2..b99f4da060 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -6,7 +6,6 @@
#include "src/code-factory.h"
#include "src/compiler/linkage.h"
-#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index a09a705477..dac2bc52bd 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -37,10 +37,12 @@ namespace compiler {
V(WordAnd) \
V(Word32Or) \
V(Word32And) \
+ V(Word32Xor) \
V(Word32Shr) \
V(Word32Shl) \
V(IntAdd) \
V(IntSub) \
+ V(IntMul) \
V(IntLessThan) \
V(UintLessThan) \
V(Int32Add) \
@@ -52,6 +54,7 @@ namespace compiler {
V(Int32LessThan) \
V(Float64Add) \
V(Float64Sub) \
+ V(Float64Div) \
V(Float64Mod) \
V(Float64Equal) \
V(Float64LessThan) \
@@ -77,13 +80,14 @@ namespace compiler {
V(UndefinedConstant) \
V(TheHoleConstant) \
V(FixedArrayMapConstant) \
+ V(FixedDoubleArrayMapConstant) \
V(ToNumberBuiltinConstant) \
V(AllocateInNewSpaceStubConstant) \
V(AllocateInOldSpaceStubConstant)
class GraphAssembler;
-enum class GraphAssemblerLabelType { kDeferred, kNonDeferred };
+enum class GraphAssemblerLabelType { kDeferred, kNonDeferred, kLoop };
// Label with statically known count of incoming branches and phis.
template <size_t VarCount>
@@ -92,9 +96,8 @@ class GraphAssemblerLabel {
Node* PhiAt(size_t index);
template <typename... Reps>
- explicit GraphAssemblerLabel(GraphAssemblerLabelType is_deferred,
- Reps... reps)
- : is_deferred_(is_deferred == GraphAssemblerLabelType::kDeferred) {
+ explicit GraphAssemblerLabel(GraphAssemblerLabelType type, Reps... reps)
+ : type_(type) {
STATIC_ASSERT(VarCount == sizeof...(reps));
MachineRepresentation reps_array[] = {MachineRepresentation::kNone,
reps...};
@@ -113,10 +116,13 @@ class GraphAssemblerLabel {
is_bound_ = true;
}
bool IsBound() const { return is_bound_; }
- bool IsDeferred() const { return is_deferred_; }
+ bool IsDeferred() const {
+ return type_ == GraphAssemblerLabelType::kDeferred;
+ }
+ bool IsLoop() const { return type_ == GraphAssemblerLabelType::kLoop; }
bool is_bound_ = false;
- bool is_deferred_;
+ GraphAssemblerLabelType const type_;
size_t merged_count_ = 0;
Node* effect_;
Node* control_;
@@ -133,8 +139,8 @@ class GraphAssembler {
// Create label.
template <typename... Reps>
static GraphAssemblerLabel<sizeof...(Reps)> MakeLabelFor(
- GraphAssemblerLabelType is_deferred, Reps... reps) {
- return GraphAssemblerLabel<sizeof...(Reps)>(is_deferred, reps...);
+ GraphAssemblerLabelType type, Reps... reps) {
+ return GraphAssemblerLabel<sizeof...(Reps)>(type, reps...);
}
// Convenience wrapper for creating non-deferred labels.
@@ -143,6 +149,12 @@ class GraphAssembler {
return MakeLabelFor(GraphAssemblerLabelType::kNonDeferred, reps...);
}
+ // Convenience wrapper for creating loop labels.
+ template <typename... Reps>
+ static GraphAssemblerLabel<sizeof...(Reps)> MakeLoopLabel(Reps... reps) {
+ return MakeLabelFor(GraphAssemblerLabelType::kLoop, reps...);
+ }
+
// Convenience wrapper for creating deferred labels.
template <typename... Reps>
static GraphAssemblerLabel<sizeof...(Reps)> MakeDeferredLabel(Reps... reps) {
@@ -263,48 +275,71 @@ Node* GraphAssemblerLabel<VarCount>::PhiAt(size_t index) {
template <typename... Vars>
void GraphAssembler::MergeState(GraphAssemblerLabel<sizeof...(Vars)>* label,
Vars... vars) {
- DCHECK(!label->IsBound());
-
int merged_count = static_cast<int>(label->merged_count_);
Node* var_array[] = {nullptr, vars...};
- if (merged_count == 0) {
- // Just set the control, effect and variables directly.
- label->control_ = current_control_;
- label->effect_ = current_effect_;
- for (size_t i = 0; i < sizeof...(vars); i++) {
- label->bindings_[i] = var_array[i + 1];
- }
- } else if (merged_count == 1) {
- // Create merge, effect phi and a phi for each variable.
- label->control_ =
- graph()->NewNode(common()->Merge(2), label->control_, current_control_);
- label->effect_ = graph()->NewNode(common()->EffectPhi(2), label->effect_,
- current_effect_, label->control_);
- for (size_t i = 0; i < sizeof...(vars); i++) {
- label->bindings_[i] = graph()->NewNode(
- common()->Phi(label->representations_[i], 2), label->bindings_[i],
- var_array[i + 1], label->control_);
+ if (label->IsLoop()) {
+ if (merged_count == 0) {
+ DCHECK(!label->IsBound());
+ label->control_ = graph()->NewNode(common()->Loop(2), current_control_,
+ current_control_);
+ label->effect_ = graph()->NewNode(common()->EffectPhi(2), current_effect_,
+ current_effect_, label->control_);
+ for (size_t i = 0; i < sizeof...(vars); i++) {
+ label->bindings_[i] = graph()->NewNode(
+ common()->Phi(label->representations_[i], 2), var_array[i + 1],
+ var_array[i + 1], label->control_);
+ }
+ } else {
+ DCHECK(label->IsBound());
+ DCHECK_EQ(1, merged_count);
+ label->control_->ReplaceInput(1, current_control_);
+ label->effect_->ReplaceInput(1, current_effect_);
+ for (size_t i = 0; i < sizeof...(vars); i++) {
+ label->bindings_[i]->ReplaceInput(1, var_array[i + 1]);
+ }
}
} else {
- // Append to the merge, effect phi and phis.
- DCHECK_EQ(IrOpcode::kMerge, label->control_->opcode());
- label->control_->AppendInput(graph()->zone(), current_control_);
- NodeProperties::ChangeOp(label->control_,
- common()->Merge(merged_count + 1));
-
- DCHECK_EQ(IrOpcode::kEffectPhi, label->effect_->opcode());
- label->effect_->ReplaceInput(merged_count, current_effect_);
- label->effect_->AppendInput(graph()->zone(), label->control_);
- NodeProperties::ChangeOp(label->effect_,
- common()->EffectPhi(merged_count + 1));
-
- for (size_t i = 0; i < sizeof...(vars); i++) {
- DCHECK_EQ(IrOpcode::kPhi, label->bindings_[i]->opcode());
- label->bindings_[i]->ReplaceInput(merged_count, var_array[i + 1]);
- label->bindings_[i]->AppendInput(graph()->zone(), label->control_);
- NodeProperties::ChangeOp(
- label->bindings_[i],
- common()->Phi(label->representations_[i], merged_count + 1));
+ DCHECK(!label->IsBound());
+ if (merged_count == 0) {
+ // Just set the control, effect and variables directly.
+ label->control_ = current_control_;
+ label->effect_ = current_effect_;
+ for (size_t i = 0; i < sizeof...(vars); i++) {
+ label->bindings_[i] = var_array[i + 1];
+ }
+ } else if (merged_count == 1) {
+ // Create merge, effect phi and a phi for each variable.
+ label->control_ = graph()->NewNode(common()->Merge(2), label->control_,
+ current_control_);
+ label->effect_ = graph()->NewNode(common()->EffectPhi(2), label->effect_,
+ current_effect_, label->control_);
+ for (size_t i = 0; i < sizeof...(vars); i++) {
+ label->bindings_[i] = graph()->NewNode(
+ common()->Phi(label->representations_[i], 2), label->bindings_[i],
+ var_array[i + 1], label->control_);
+ }
+ } else {
+ // Append to the merge, effect phi and phis.
+ DCHECK_EQ(IrOpcode::kMerge, label->control_->opcode());
+ label->control_->AppendInput(graph()->zone(), current_control_);
+ NodeProperties::ChangeOp(label->control_,
+ common()->Merge(merged_count + 1));
+
+ DCHECK_EQ(IrOpcode::kEffectPhi, label->effect_->opcode());
+ label->effect_->ReplaceInput(merged_count, current_effect_);
+ label->effect_->AppendInput(graph()->zone(), label->control_);
+ NodeProperties::ChangeOp(label->effect_,
+ common()->EffectPhi(merged_count + 1));
+
+ for (size_t i = 0; i < sizeof...(vars); i++) {
+ DCHECK_EQ(IrOpcode::kPhi, label->bindings_[i]->opcode());
+ label->bindings_[i]->ReplaceInput(merged_count, var_array[i + 1]);
+ label->bindings_[i]->AppendInput(graph()->zone(), label->control_);
+ NodeProperties::ChangeOp(
+ label->bindings_[i],
+ common()->Phi(label->representations_[i], merged_count + 1));
+ }
}
}
label->merged_count_++;
@@ -312,9 +347,9 @@ void GraphAssembler::MergeState(GraphAssemblerLabel<sizeof...(Vars)>* label,
template <size_t VarCount>
void GraphAssembler::Bind(GraphAssemblerLabel<VarCount>* label) {
- DCHECK(current_control_ == nullptr);
- DCHECK(current_effect_ == nullptr);
- DCHECK(label->merged_count_ > 0);
+ DCHECK_NULL(current_control_);
+ DCHECK_NULL(current_effect_);
+ DCHECK_LT(0, label->merged_count_);
current_control_ = label->control_;
current_effect_ = label->effect_;
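The loop-label path in MergeState above is a two-step protocol: the first merge eagerly builds the Loop, EffectPhi and value phis with the back-edge input temporarily duplicating the entry input, and the merge arriving from the back edge then patches input 1 in place via ReplaceInput. A standalone sketch of that patching, not V8 code:

#include <cassert>
#include <cstdio>

struct PhiSketch {
  int inputs[2];
  void ReplaceInput(int index, int value) { inputs[index] = value; }
};

int main() {
  int entry_value = 10;
  // First merge on a loop label: both phi inputs start as the entry value.
  PhiSketch phi{{entry_value, entry_value}};
  // ... the loop body computes the next iteration's value ...
  int back_edge_value = phi.inputs[0] + 1;
  // Second merge (the back edge): patch input 1 in place, like
  // label->bindings_[i]->ReplaceInput(1, var_array[i + 1]) above.
  phi.ReplaceInput(1, back_edge_value);
  assert(phi.inputs[1] == 11);
  std::printf("phi inputs: %d, %d\n", phi.inputs[0], phi.inputs[1]);
}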
diff --git a/deps/v8/src/compiler/graph-reducer.cc b/deps/v8/src/compiler/graph-reducer.cc
index faf01e9d9e..8631810ebd 100644
--- a/deps/v8/src/compiler/graph-reducer.cc
+++ b/deps/v8/src/compiler/graph-reducer.cc
@@ -122,7 +122,7 @@ Reduction GraphReducer::Reduce(Node* const node) {
void GraphReducer::ReduceTop() {
NodeState& entry = stack_.top();
Node* node = entry.node;
- DCHECK(state_.Get(node) == State::kOnStack);
+ DCHECK_EQ(State::kOnStack, state_.Get(node));
if (node->IsDead()) return Pop(); // Node was killed while on stack.
@@ -269,7 +269,7 @@ void GraphReducer::Pop() {
void GraphReducer::Push(Node* const node) {
- DCHECK(state_.Get(node) != State::kOnStack);
+ DCHECK_NE(State::kOnStack, state_.Get(node));
state_.Set(node, State::kOnStack);
stack_.push({node, 0});
}
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index 0b8933c626..8c771c2af6 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -22,7 +22,7 @@
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
#include "src/interpreter/bytecodes.h"
-#include "src/objects-inl.h"
+#include "src/objects/script-inl.h"
#include "src/ostreams.h"
namespace v8 {
@@ -46,8 +46,9 @@ std::unique_ptr<char[]> GetVisualizerLogFileName(CompilationInfo* info,
}
EmbeddedVector<char, 256> source_file(0);
bool source_available = false;
- if (FLAG_trace_file_names && !info->script().is_null()) {
- Object* source_name = info->script()->name();
+ if (FLAG_trace_file_names && info->has_shared_info() &&
+ info->shared_info()->script()->IsScript()) {
+ Object* source_name = Script::cast(info->shared_info()->script())->name();
if (source_name->IsString()) {
String* str = String::cast(source_name);
if (str->length() > 0) {
@@ -291,7 +292,7 @@ class GraphC1Visualizer {
visualizer_->indent_--;
visualizer_->PrintIndent();
visualizer_->os_ << "end_" << name_ << "\n";
- DCHECK(visualizer_->indent_ >= 0);
+ DCHECK_LE(0, visualizer_->indent_);
}
private:
@@ -355,8 +356,9 @@ void GraphC1Visualizer::PrintCompilation(const CompilationInfo* info) {
PrintStringProperty("name", name.get());
PrintStringProperty("method", "stub");
}
- PrintLongProperty("date",
- static_cast<int64_t>(base::OS::TimeCurrentMillis()));
+ PrintLongProperty(
+ "date",
+ static_cast<int64_t>(V8::GetCurrentPlatform()->CurrentClockTimeMillis()));
}
@@ -716,6 +718,90 @@ std::ostream& operator<<(std::ostream& os, const AsRPO& ar) {
}
return os;
}
+
+namespace {
+
+void PrintIndent(std::ostream& os, int indent) {
+ os << " ";
+ for (int i = 0; i < indent; i++) {
+ os << ". ";
+ }
+}
+
+void PrintScheduledNode(std::ostream& os, int indent, Node* n) {
+ PrintIndent(os, indent);
+ os << "#" << n->id() << ":" << *n->op() << "(";
+ // Print the inputs.
+ int j = 0;
+ for (Node* const i : n->inputs()) {
+ if (j++ > 0) os << ", ";
+ os << "#" << SafeId(i) << ":" << SafeMnemonic(i);
+ }
+ os << ")";
+ // Print the node type, if any.
+ if (NodeProperties::IsTyped(n)) {
+ os << " [Type: ";
+ NodeProperties::GetType(n)->PrintTo(os);
+ os << "]";
+ }
+}
+
+void PrintScheduledGraph(std::ostream& os, const Schedule* schedule) {
+ const BasicBlockVector* rpo = schedule->rpo_order();
+ for (size_t i = 0; i < rpo->size(); i++) {
+ BasicBlock* current = (*rpo)[i];
+ int indent = current->loop_depth();
+
+ os << " + Block B" << current->rpo_number() << " (pred:";
+ for (BasicBlock* predecessor : current->predecessors()) {
+ os << " B" << predecessor->rpo_number();
+ }
+ if (current->IsLoopHeader()) {
+ os << ", loop until B" << current->loop_end()->rpo_number();
+ } else if (current->loop_header()) {
+ os << ", in loop B" << current->loop_header()->rpo_number();
+ }
+ os << ")" << std::endl;
+
+ for (BasicBlock::const_iterator i = current->begin(); i != current->end();
+ ++i) {
+ Node* node = *i;
+ PrintScheduledNode(os, indent, node);
+ os << std::endl;
+ }
+
+ if (current->SuccessorCount() > 0) {
+ if (current->control_input() != nullptr) {
+ PrintScheduledNode(os, indent, current->control_input());
+ } else {
+ PrintIndent(os, indent);
+ os << "Goto";
+ }
+ os << " ->";
+
+ bool is_first = true;
+ for (BasicBlock* successor : current->successors()) {
+ if (is_first) {
+ is_first = false;
+ } else {
+ os << ",";
+ }
+ os << " B" << successor->rpo_number();
+ }
+ os << std::endl;
+ } else {
+ DCHECK_NULL(current->control_input());
+ }
+ }
+}
+
+} // namespace
+
+std::ostream& operator<<(std::ostream& os, const AsScheduledGraph& scheduled) {
+ PrintScheduledGraph(os, scheduled.schedule);
+ return os;
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
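For orientation, the scheduled-graph printer added above emits one header per RPO block followed by the block's nodes and terminator; a hypothetical fragment (node ids, operators and exact spacing are made up) looks roughly like:

  + Block B0 (pred:)
     #0:Start()
     #3:Int32Constant[42]()
     Goto -> B1
  + Block B1 (pred: B0)
     #5:Return(#3:Int32Constant, #0:Start, #0:Start)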
diff --git a/deps/v8/src/compiler/graph-visualizer.h b/deps/v8/src/compiler/graph-visualizer.h
index 356dd5e017..4b1c535549 100644
--- a/deps/v8/src/compiler/graph-visualizer.h
+++ b/deps/v8/src/compiler/graph-visualizer.h
@@ -48,7 +48,12 @@ struct AsC1VCompilation {
const CompilationInfo* info_;
};
+struct AsScheduledGraph {
+ explicit AsScheduledGraph(const Schedule* schedule) : schedule(schedule) {}
+ const Schedule* schedule;
+};
+std::ostream& operator<<(std::ostream& os, const AsScheduledGraph& scheduled);
struct AsC1V {
AsC1V(const char* phase, const Schedule* schedule,
const SourcePositionTable* positions = nullptr,
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index a574769cd4..dcb8184e55 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -4,6 +4,8 @@
#include "src/compiler/code-generator.h"
+#include "src/assembler-inl.h"
+#include "src/callable.h"
#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
@@ -11,6 +13,7 @@
#include "src/compiler/osr.h"
#include "src/frame-constants.h"
#include "src/frames.h"
+#include "src/heap/heap-inl.h"
#include "src/ia32/assembler-ia32.h"
#include "src/ia32/macro-assembler-ia32.h"
@@ -41,10 +44,10 @@ class IA32OperandConverter : public InstructionOperandConverter {
Operand ToOperand(InstructionOperand* op, int extra = 0) {
if (op->IsRegister()) {
- DCHECK(extra == 0);
+ DCHECK_EQ(0, extra);
return Operand(ToRegister(op));
} else if (op->IsFPRegister()) {
- DCHECK(extra == 0);
+ DCHECK_EQ(0, extra);
return Operand(ToDoubleRegister(op));
}
DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
@@ -57,11 +60,6 @@ class IA32OperandConverter : public InstructionOperandConverter {
offset.offset() + extra);
}
- Operand OffsetOperand(InstructionOperand* op, int offset) {
- DCHECK(op->IsFPStackSlot());
- return ToOperand(op, offset);
- }
-
Immediate ToImmediate(InstructionOperand* operand) {
Constant constant = ToConstant(operand);
if (constant.type() == Constant::kInt32 &&
@@ -253,6 +251,24 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
mode_(mode),
zone_(gen->zone()) {}
+ void SaveRegisters(RegList registers) {
+ DCHECK_LT(0, NumRegs(registers));
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ __ push(Register::from_code(i));
+ }
+ }
+ }
+
+ void RestoreRegisters(RegList registers) {
+ DCHECK_LT(0, NumRegs(registers));
+ for (int i = Register::kNumRegisters - 1; i >= 0; --i) {
+ if ((registers >> i) & 1u) {
+ __ pop(Register::from_code(i));
+ }
+ }
+ }
+
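SaveRegisters walks the RegList bitmask from low register codes to high and pushes each set register; RestoreRegisters walks it in the opposite direction so the pops unwind the stack in exactly the reverse order. A standalone sketch of that symmetry, not V8 code:

#include <cassert>
#include <vector>

using RegList = unsigned;
const int kNumRegisters = 8;  // hypothetical register count

int main() {
  RegList registers = 0b00100110;  // registers 1, 2 and 5 are live
  std::vector<int> stack;
  for (int i = 0; i < kNumRegisters; ++i)       // SaveRegisters: ascending
    if ((registers >> i) & 1u) stack.push_back(i);
  for (int i = kNumRegisters - 1; i >= 0; --i)  // RestoreRegisters: descending
    if ((registers >> i) & 1u) {
      assert(stack.back() == i);  // each pop matches its push
      stack.pop_back();
    }
  assert(stack.empty());
}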
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
@@ -260,15 +276,20 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ CheckPageFlag(value_, scratch0_,
MemoryChunk::kPointersToHereAreInterestingMask, zero,
exit());
+ __ lea(scratch1_, operand_);
RememberedSetAction const remembered_set_action =
mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
: OMIT_REMEMBERED_SET;
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- __ lea(scratch1_, operand_);
+#ifdef V8_CSA_WRITE_BARRIER
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode);
+#else
__ CallStubDelayed(
new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode));
+#endif
}
private:
@@ -329,7 +350,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
RelocInfo::Mode rmode_buffer) \
: OutOfLineCode(gen), \
result_(result), \
- buffer_reg_({-1}), \
+ buffer_reg_(no_reg), \
buffer_int_(buffer), \
index1_(index1), \
index2_(index2), \
@@ -433,7 +454,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
RelocInfo::Mode rmode_buffer) \
: OutOfLineCode(gen), \
result_(result), \
- buffer_reg_({-1}), \
+ buffer_reg_(no_reg), \
buffer_int_(buffer), \
index1_(index1), \
index2_(index2), \
@@ -443,7 +464,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
\
void Generate() final { \
Label oob; \
- bool need_cache = !result_.is(index1_); \
+ bool need_cache = result_ != index1_; \
if (need_cache) __ push(index1_); \
__ lea(index1_, Operand(index1_, index2_)); \
__ cmp(index1_, Immediate(reinterpret_cast<Address>(length_), \
@@ -536,7 +557,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
XMMRegister value, RelocInfo::Mode rmode_length, \
RelocInfo::Mode rmode_buffer) \
: OutOfLineCode(gen), \
- buffer_reg_({-1}), \
+ buffer_reg_(no_reg), \
buffer_int_(buffer), \
index1_(index1), \
index2_(index2), \
@@ -635,7 +656,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Value value, RelocInfo::Mode rmode_length, \
RelocInfo::Mode rmode_buffer) \
: OutOfLineCode(gen), \
- buffer_reg_({-1}), \
+ buffer_reg_(no_reg), \
buffer_int_(buffer), \
index1_(index1), \
index2_(index2), \
@@ -899,6 +920,31 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
first_unused_stack_slot);
}
+// Check if the code object is marked for deoptimization. If it is, then it
+// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
+// to:
+// 1. load the address of the current instruction;
+// 2. read from memory the word that contains the marked-for-deoptimization
+//    bit, which can be found in the first set of flags
+//    ({kKindSpecificFlags1Offset});
+// 3. test kMarkedForDeoptimizationBit in that word; and
+// 4. if the bit is set, jump to the builtin.
+void CodeGenerator::BailoutIfDeoptimized() {
+ Label current;
+ __ call(&current);
+ int pc = __ pc_offset();
+ __ bind(&current);
+ // In order to get the address of the current instruction, we first need
+ // to use a call and then use a pop, thus pushing the return address to
+ // the stack and then popping it into the register.
+ __ pop(ecx);
+ int offset = Code::kKindSpecificFlags1Offset - (Code::kHeaderSize + pc);
+ __ test(Operand(ecx, offset),
+ Immediate(1 << Code::kMarkedForDeoptimizationBit));
+ Handle<Code> code = isolate()->builtins()->builtin_handle(
+ Builtins::kCompileLazyDeoptimizedCode);
+ __ j(not_zero, code, RelocInfo::CODE_TARGET);
+}
+
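The offset computed above works because the popped return address points at byte pc of the instruction stream, and the instruction stream itself starts kHeaderSize bytes into the Code object, so the flags word sits at a fixed displacement from the register. A standalone sketch of that arithmetic with made-up constants, not V8 code:

#include <cassert>
#include <cstdint>

int main() {
  const int kHeaderSize = 96;                // hypothetical header size
  const int kKindSpecificFlags1Offset = 56;  // hypothetical flags offset
  uint8_t code_object[4096] = {};
  int pc = 5;  // __ pc_offset() right after the call

  uint8_t* instruction_start = code_object + kHeaderSize;
  uint8_t* ecx = instruction_start + pc;  // value the pop leaves in ecx
  int offset = kKindSpecificFlags1Offset - (kHeaderSize + pc);
  // Operand(ecx, offset) therefore addresses the flags word of the
  // enclosing Code object, wherever the code happens to be placed.
  assert(ecx + offset == code_object + kKindSpecificFlags1Offset);
}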
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -967,13 +1013,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchSaveCallerRegisters: {
+ fp_mode_ =
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
+ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// kReturnRegister0 should have been saved before entering the stub.
- __ PushCallerSaved(kSaveFPRegs, kReturnRegister0);
+ int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
+ DCHECK_EQ(0, bytes % kPointerSize);
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ DCHECK(!caller_registers_saved_);
+ caller_registers_saved_ = true;
break;
}
case kArchRestoreCallerRegisters: {
+ DCHECK(fp_mode_ ==
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
+ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// Don't overwrite the returned value.
- __ PopCallerSaved(kSaveFPRegs, kReturnRegister0);
+ int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(-(bytes / kPointerSize));
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ DCHECK(caller_registers_saved_);
+ caller_registers_saved_ = false;
break;
}
case kArchPrepareTailCall:
@@ -989,7 +1050,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ CallCFunction(func, num_parameters);
}
frame_access_state()->SetFrameAccessToDefault();
+ // Ideally, we should decrement SP delta to match the change of stack
+ // pointer in CallCFunction. However, for certain architectures (e.g.
+ // ARM), there may be a stricter alignment requirement, causing the old SP
+ // to be saved on the stack. In those cases, we cannot calculate the SP
+ // delta statically.
frame_access_state()->ClearSPDelta();
+ if (caller_registers_saved_) {
+ // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
+ // Here, we assume the sequence to be:
+ // kArchSaveCallerRegisters;
+ // kArchCallCFunction;
+ // kArchRestoreCallerRegisters;
+ int bytes =
+ __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ }
break;
}
case kArchJmp:
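The SP-delta bookkeeping across these three cases assumes the sequence kArchSaveCallerRegisters; kArchCallCFunction; kArchRestoreCallerRegisters: the call clears the delta because CallCFunction may realign the stack, and the delta is then re-synced to the bytes the save pushed so that the restore brings it back to zero. A standalone sketch, not V8 code:

#include <cassert>

int main() {
  const int kPointerSize = 4;  // ia32
  int sp_delta = 0;

  int saved_bytes = 7 * kPointerSize;      // what PushCallerSaved would return
  sp_delta += saved_bytes / kPointerSize;  // kArchSaveCallerRegisters

  sp_delta = 0;                            // kArchCallCFunction: SP unknown
  sp_delta += saved_bytes / kPointerSize;  // re-sync: registers still pushed

  sp_delta -= saved_bytes / kPointerSize;  // kArchRestoreCallerRegisters
  assert(sp_delta == 0);  // mirrors the DCHECKs in the cases above
}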
@@ -1007,7 +1083,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchDebugAbort:
- DCHECK(i.InputRegister(0).is(edx));
+ DCHECK(i.InputRegister(0) == edx);
if (!frame_access_state()->has_frame()) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
@@ -1082,12 +1158,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchStackSlot: {
FrameOffset offset =
frame_access_state()->GetFrameOffset(i.InputInt32(0));
- Register base;
- if (offset.from_stack_pointer()) {
- base = esp;
- } else {
- base = ebp;
- }
+ Register base = offset.from_stack_pointer() ? esp : ebp;
__ lea(i.OutputRegister(), Operand(base, offset.offset()));
break;
}
@@ -1141,7 +1212,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kIeee754Float64Pow: {
// TODO(bmeurer): Improve integration of the stub.
- if (!i.InputDoubleRegister(1).is(xmm2)) {
+ if (i.InputDoubleRegister(1) != xmm2) {
__ movaps(xmm2, i.InputDoubleRegister(0));
__ movaps(xmm1, i.InputDoubleRegister(1));
} else {
@@ -1813,7 +1884,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// in these cases are faster based on measurements.
if (mode == kMode_MI) {
__ Move(i.OutputRegister(), Immediate(i.InputInt32(0)));
- } else if (i.InputRegister(0).is(i.OutputRegister())) {
+ } else if (i.InputRegister(0) == i.OutputRegister()) {
if (mode == kMode_MRI) {
int32_t constant_summand = i.InputInt32(1);
if (constant_summand > 0) {
@@ -1822,7 +1893,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sub(i.OutputRegister(), Immediate(-constant_summand));
}
} else if (mode == kMode_MR1) {
- if (i.InputRegister(1).is(i.OutputRegister())) {
+ if (i.InputRegister(1) == i.OutputRegister()) {
__ shl(i.OutputRegister(), 1);
} else {
__ add(i.OutputRegister(), i.InputRegister(1));
@@ -1837,7 +1908,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ lea(i.OutputRegister(), i.MemoryOperand());
}
} else if (mode == kMode_MR1 &&
- i.InputRegister(1).is(i.OutputRegister())) {
+ i.InputRegister(1) == i.OutputRegister()) {
__ add(i.OutputRegister(), i.InputRegister(0));
} else {
__ lea(i.OutputRegister(), i.MemoryOperand());
@@ -1929,7 +2000,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32I32x4Neg: {
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(0);
- Register ireg = {dst.code()};
+ Register ireg = Register::from_code(dst.code());
if (src.is_reg(ireg)) {
__ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ Psignd(dst, kScratchDoubleReg);
@@ -2386,6 +2457,10 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ jmp(flabel);
}
+void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
+ BranchInfo* branch) {
+ AssembleArchBranch(instr, branch);
+}
void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
@@ -2427,7 +2502,10 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
- __ Ret();
+ CallDescriptor* descriptor = gen_->linkage()->GetIncomingDescriptor();
+ size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
+ // Use ecx as a scratch register; we return immediately anyway.
+ __ Ret(static_cast<int>(pop_size), ecx);
} else {
gen_->AssembleSourcePosition(instr_);
__ Call(__ isolate()->builtins()->builtin_handle(trap_id),
@@ -2788,7 +2866,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
__ Ret(static_cast<int>(pop_size), ecx);
} else {
Register pop_reg = g.ToRegister(pop);
- Register scratch_reg = pop_reg.is(ecx) ? edx : ecx;
+ Register scratch_reg = pop_reg == ecx ? edx : ecx;
__ pop(scratch_reg);
__ lea(esp, Operand(esp, pop_reg, times_4, static_cast<int>(pop_size)));
__ jmp(scratch_reg);
@@ -2858,7 +2936,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else {
DCHECK(destination->IsFPStackSlot());
Operand dst0 = g.ToOperand(destination);
- Operand dst1 = g.OffsetOperand(destination, kPointerSize);
+ Operand dst1 = g.ToOperand(destination, kPointerSize);
__ Move(dst0, Immediate(lower));
__ Move(dst1, Immediate(upper));
}
@@ -2985,8 +3063,8 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ movsd(kScratchDoubleReg, dst0); // Save dst in scratch register.
__ push(src0); // Then use stack to copy src to destination.
__ pop(dst0);
- __ push(g.OffsetOperand(source, kPointerSize));
- __ pop(g.OffsetOperand(destination, kPointerSize));
+ __ push(g.ToOperand(source, kPointerSize));
+ __ pop(g.ToOperand(destination, kPointerSize));
__ movsd(src0, kScratchDoubleReg);
} else if (rep == MachineRepresentation::kFloat32) {
__ movss(kScratchDoubleReg, dst0); // Save dst in scratch register.
@@ -2998,12 +3076,12 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ movups(kScratchDoubleReg, dst0); // Save dst in scratch register.
__ push(src0); // Then use stack to copy src to destination.
__ pop(dst0);
- __ push(g.OffsetOperand(source, kPointerSize));
- __ pop(g.OffsetOperand(destination, kPointerSize));
- __ push(g.OffsetOperand(source, 2 * kPointerSize));
- __ pop(g.OffsetOperand(destination, 2 * kPointerSize));
- __ push(g.OffsetOperand(source, 3 * kPointerSize));
- __ pop(g.OffsetOperand(destination, 3 * kPointerSize));
+ __ push(g.ToOperand(source, kPointerSize));
+ __ pop(g.ToOperand(destination, kPointerSize));
+ __ push(g.ToOperand(source, 2 * kPointerSize));
+ __ pop(g.ToOperand(destination, 2 * kPointerSize));
+ __ push(g.ToOperand(source, 3 * kPointerSize));
+ __ pop(g.ToOperand(destination, 3 * kPointerSize));
__ movups(src0, kScratchDoubleReg);
}
} else {
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index 2c0f8a4eb3..eb7a7d7cd5 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -1166,7 +1166,7 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
InstructionCode opcode, Node* left,
InstructionOperand right,
FlagsContinuation* cont) {
- DCHECK(left->opcode() == IrOpcode::kLoad);
+ DCHECK_EQ(IrOpcode::kLoad, left->opcode());
IA32OperandGenerator g(selector);
size_t input_count = 0;
InstructionOperand inputs[6];
diff --git a/deps/v8/src/compiler/instruction-scheduler.cc b/deps/v8/src/compiler/instruction-scheduler.cc
index 9431687002..666fa60d95 100644
--- a/deps/v8/src/compiler/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/instruction-scheduler.cc
@@ -90,10 +90,10 @@ InstructionScheduler::InstructionScheduler(Zone* zone,
void InstructionScheduler::StartBlock(RpoNumber rpo) {
DCHECK(graph_.empty());
- DCHECK(last_side_effect_instr_ == nullptr);
+ DCHECK_NULL(last_side_effect_instr_);
DCHECK(pending_loads_.empty());
- DCHECK(last_live_in_reg_marker_ == nullptr);
- DCHECK(last_deopt_or_trap_ == nullptr);
+ DCHECK_NULL(last_live_in_reg_marker_);
+ DCHECK_NULL(last_deopt_or_trap_);
DCHECK(operands_map_.empty());
sequence()->StartBlock(rpo);
}
@@ -402,7 +402,7 @@ void InstructionScheduler::ComputeTotalLatencies() {
int max_latency = 0;
for (ScheduleGraphNode* successor : node->successors()) {
- DCHECK(successor->total_latency() != -1);
+ DCHECK_NE(-1, successor->total_latency());
if (successor->total_latency() > max_latency) {
max_latency = successor->total_latency();
}
diff --git a/deps/v8/src/compiler/instruction-scheduler.h b/deps/v8/src/compiler/instruction-scheduler.h
index db2894a92a..3d7b88f8b6 100644
--- a/deps/v8/src/compiler/instruction-scheduler.h
+++ b/deps/v8/src/compiler/instruction-scheduler.h
@@ -58,7 +58,7 @@ class InstructionScheduler final : public ZoneObject {
// Record that we have scheduled one of the predecessors of this node.
void DropUnscheduledPredecessor() {
- DCHECK(unscheduled_predecessors_count_ > 0);
+ DCHECK_LT(0, unscheduled_predecessors_count_);
unscheduled_predecessors_count_--;
}
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index 7bb62fd253..4e07049de7 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -15,7 +15,6 @@
#include "src/compiler/schedule.h"
#include "src/compiler/state-values-utils.h"
#include "src/deoptimizer.h"
-#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -530,7 +529,7 @@ size_t InstructionSelector::AddOperandToStateValueDescriptor(
switch (input->opcode()) {
case IrOpcode::kArgumentsElementsState: {
- values->PushArgumentsElements(IsRestOf(input->op()));
+ values->PushArgumentsElements(ArgumentsStateTypeOf(input->op()));
// The elements backing store of an arguments object participates in the
// duplicate object counting, but can itself never appear duplicated.
DCHECK_EQ(StateObjectDeduplicator::kNotDuplicated,
@@ -539,7 +538,7 @@ size_t InstructionSelector::AddOperandToStateValueDescriptor(
return 0;
}
case IrOpcode::kArgumentsLengthState: {
- values->PushArgumentsLength(IsRestOf(input->op()));
+ values->PushArgumentsLength(ArgumentsStateTypeOf(input->op()));
return 0;
}
case IrOpcode::kObjectState: {
@@ -549,7 +548,7 @@ size_t InstructionSelector::AddOperandToStateValueDescriptor(
case IrOpcode::kObjectId: {
size_t id = deduplicator->GetObjectId(input);
if (id == StateObjectDeduplicator::kNotDuplicated) {
- DCHECK(input->opcode() == IrOpcode::kTypedObjectState);
+ DCHECK_EQ(IrOpcode::kTypedObjectState, input->opcode());
size_t entries = 0;
id = deduplicator->InsertObject(input);
StateValueList* nested = values->PushRecursiveField(zone, id);
@@ -707,7 +706,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
buffer->output_nodes.resize(buffer->descriptor->ReturnCount(), nullptr);
for (Edge const edge : call->use_edges()) {
if (!NodeProperties::IsValueEdge(edge)) continue;
- DCHECK(edge.from()->opcode() == IrOpcode::kProjection);
+ DCHECK_EQ(IrOpcode::kProjection, edge.from()->opcode());
size_t const index = ProjectionIndexOf(edge.from()->op());
DCHECK_LT(index, buffer->output_nodes.size());
DCHECK(!buffer->output_nodes[index]);
@@ -820,7 +819,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
bool call_tail = (flags & kCallTail) != 0;
for (size_t index = 0; index < input_count; ++iter, ++index) {
DCHECK(iter != call->inputs().end());
- DCHECK((*iter)->op()->opcode() != IrOpcode::kFrameState);
+ DCHECK_NE(IrOpcode::kFrameState, (*iter)->op()->opcode());
if (index == 0) continue; // The first argument (callee) is already done.
LinkageLocation location = buffer->descriptor->GetInputLocation(index);
@@ -863,7 +862,9 @@ bool InstructionSelector::IsSourcePositionUsed(Node* node) {
node->opcode() == IrOpcode::kCall ||
node->opcode() == IrOpcode::kCallWithCallerSavedRegisters ||
node->opcode() == IrOpcode::kTrapIf ||
- node->opcode() == IrOpcode::kTrapUnless);
+ node->opcode() == IrOpcode::kTrapUnless ||
+ node->opcode() == IrOpcode::kProtectedLoad ||
+ node->opcode() == IrOpcode::kProtectedStore);
}
void InstructionSelector::VisitBlock(BasicBlock* block) {
@@ -2474,6 +2475,7 @@ void InstructionSelector::VisitOsrValue(Node* node) {
void InstructionSelector::VisitPhi(Node* node) {
const int input_count = node->op()->ValueInputCount();
+ DCHECK_EQ(input_count, current_block_->PredecessorCount());
PhiInstruction* phi = new (instruction_zone())
PhiInstruction(instruction_zone(), GetVirtualRegister(node),
static_cast<size_t>(input_count));
@@ -2512,7 +2514,7 @@ void InstructionSelector::VisitProjection(Node* node) {
if (ProjectionIndexOf(node->op()) == 0u) {
Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
} else {
- DCHECK(ProjectionIndexOf(node->op()) == 1u);
+ DCHECK_EQ(1u, ProjectionIndexOf(node->op()));
MarkAsUsed(value);
}
break;
@@ -2598,10 +2600,13 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
void InstructionSelector::VisitCallWithCallerSavedRegisters(
Node* node, BasicBlock* handler) {
OperandGenerator g(this);
-
- Emit(kArchSaveCallerRegisters, g.NoOutput());
+ const auto fp_mode = CallDescriptorOf(node->op())->get_save_fp_mode();
+ Emit(kArchSaveCallerRegisters | MiscField::encode(static_cast<int>(fp_mode)),
+ g.NoOutput());
VisitCall(node, handler);
- Emit(kArchRestoreCallerRegisters, g.NoOutput());
+ Emit(kArchRestoreCallerRegisters |
+ MiscField::encode(static_cast<int>(fp_mode)),
+ g.NoOutput());
}
void InstructionSelector::VisitTailCall(Node* node) {
@@ -2790,7 +2795,7 @@ bool InstructionSelector::CanProduceSignalingNaN(Node* node) {
FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
Node* state) {
- DCHECK(state->opcode() == IrOpcode::kFrameState);
+ DCHECK_EQ(IrOpcode::kFrameState, state->opcode());
DCHECK_EQ(kFrameStateInputCount, state->InputCount());
FrameStateInfo state_info = OpParameter<FrameStateInfo>(state);
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
index 29ec556353..b1b322e1ee 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/instruction.cc
@@ -861,7 +861,7 @@ int InstructionSequence::AddInstruction(Instruction* instr) {
instr->set_block(current_block_);
instructions_.push_back(instr);
if (instr->NeedsReferenceMap()) {
- DCHECK(instr->reference_map() == nullptr);
+ DCHECK_NULL(instr->reference_map());
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
reference_map->set_instruction_position(index);
instr->set_reference_map(reference_map);
@@ -993,7 +993,7 @@ const RegisterConfiguration*
const RegisterConfiguration*
InstructionSequence::RegisterConfigurationForTesting() {
- DCHECK(registerConfigurationForTesting_ != nullptr);
+ DCHECK_NOT_NULL(registerConfigurationForTesting_);
return registerConfigurationForTesting_;
}
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index d69826c255..72a8b0b06f 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -19,13 +19,11 @@
#include "src/globals.h"
#include "src/macro-assembler.h"
#include "src/register-configuration.h"
+#include "src/source-position.h"
#include "src/zone/zone-allocator.h"
namespace v8 {
namespace internal {
-
-class SourcePosition;
-
namespace compiler {
class Schedule;
@@ -775,7 +773,7 @@ class ReferenceMap final : public ZoneObject {
int instruction_position() const { return instruction_position_; }
void set_instruction_position(int pos) {
- DCHECK(instruction_position_ == -1);
+ DCHECK_EQ(-1, instruction_position_);
instruction_position_ = pos;
}
@@ -846,7 +844,7 @@ class V8_EXPORT_PRIVATE Instruction final {
size_t output_count, InstructionOperand* outputs,
size_t input_count, InstructionOperand* inputs,
size_t temp_count, InstructionOperand* temps) {
- DCHECK(opcode >= 0);
+ DCHECK_LE(0, opcode);
DCHECK(output_count == 0 || outputs != nullptr);
DCHECK(input_count == 0 || inputs != nullptr);
DCHECK(temp_count == 0 || temps != nullptr);
@@ -1123,16 +1121,16 @@ class StateValueDescriptor {
StateValueDescriptor()
: kind_(StateValueKind::kPlain), type_(MachineType::AnyTagged()) {}
- static StateValueDescriptor ArgumentsElements(bool is_rest) {
+ static StateValueDescriptor ArgumentsElements(ArgumentsStateType type) {
StateValueDescriptor descr(StateValueKind::kArgumentsElements,
MachineType::AnyTagged());
- descr.is_rest_ = is_rest;
+ descr.args_type_ = type;
return descr;
}
- static StateValueDescriptor ArgumentsLength(bool is_rest) {
+ static StateValueDescriptor ArgumentsLength(ArgumentsStateType type) {
StateValueDescriptor descr(StateValueKind::kArgumentsLength,
MachineType::AnyTagged());
- descr.is_rest_ = is_rest;
+ descr.args_type_ = type;
return descr;
}
static StateValueDescriptor Plain(MachineType type) {
@@ -1171,10 +1169,10 @@ class StateValueDescriptor {
kind_ == StateValueKind::kNested);
return id_;
}
- int is_rest() const {
+ ArgumentsStateType arguments_type() const {
DCHECK(kind_ == StateValueKind::kArgumentsElements ||
kind_ == StateValueKind::kArgumentsLength);
- return is_rest_;
+ return args_type_;
}
private:
@@ -1185,7 +1183,7 @@ class StateValueDescriptor {
MachineType type_;
union {
size_t id_;
- bool is_rest_;
+ ArgumentsStateType args_type_;
};
};
@@ -1245,11 +1243,11 @@ class StateValueList {
nested_.push_back(nested);
return nested;
}
- void PushArgumentsElements(bool is_rest) {
- fields_.push_back(StateValueDescriptor::ArgumentsElements(is_rest));
+ void PushArgumentsElements(ArgumentsStateType type) {
+ fields_.push_back(StateValueDescriptor::ArgumentsElements(type));
}
- void PushArgumentsLength(bool is_rest) {
- fields_.push_back(StateValueDescriptor::ArgumentsLength(is_rest));
+ void PushArgumentsLength(ArgumentsStateType type) {
+ fields_.push_back(StateValueDescriptor::ArgumentsLength(type));
}
void PushDuplicate(size_t id) {
fields_.push_back(StateValueDescriptor::Duplicate(id));
@@ -1367,15 +1365,15 @@ class V8_EXPORT_PRIVATE InstructionBlock final
// Instruction indexes (used by the register allocator).
int first_instruction_index() const {
- DCHECK(code_start_ >= 0);
- DCHECK(code_end_ > 0);
- DCHECK(code_end_ >= code_start_);
+ DCHECK_LE(0, code_start_);
+ DCHECK_LT(0, code_end_);
+ DCHECK_GE(code_end_, code_start_);
return code_start_;
}
int last_instruction_index() const {
- DCHECK(code_start_ >= 0);
- DCHECK(code_end_ > 0);
- DCHECK(code_end_ >= code_start_);
+ DCHECK_LE(0, code_start_);
+ DCHECK_LT(0, code_end_);
+ DCHECK_GE(code_end_, code_start_);
return code_end_ - 1;
}
@@ -1530,8 +1528,8 @@ class V8_EXPORT_PRIVATE InstructionSequence final
}
Instruction* InstructionAt(int index) const {
- DCHECK(index >= 0);
- DCHECK(index < static_cast<int>(instructions_.size()));
+ DCHECK_LE(0, index);
+ DCHECK_GT(instructions_.size(), index);
return instructions_[index];
}
@@ -1546,7 +1544,7 @@ class V8_EXPORT_PRIVATE InstructionSequence final
int AddConstant(int virtual_register, Constant constant) {
// TODO(titzer): allow RPO numbers as constants?
- DCHECK(constant.type() != Constant::kRpoNumber);
+ DCHECK_NE(Constant::kRpoNumber, constant.type());
DCHECK(virtual_register >= 0 && virtual_register < next_virtual_register_);
DCHECK(constants_.find(virtual_register) == constants_.end());
constants_.insert(std::make_pair(virtual_register, constant));
@@ -1578,8 +1576,8 @@ class V8_EXPORT_PRIVATE InstructionSequence final
return Constant(op->inline_value());
case ImmediateOperand::INDEXED: {
int index = op->indexed_value();
- DCHECK(index >= 0);
- DCHECK(index < static_cast<int>(immediates_.size()));
+ DCHECK_LE(0, index);
+ DCHECK_GT(immediates_.size(), index);
return immediates_[index];
}
}
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 19db874ca6..4710f35dcc 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/compiler/int64-lowering.h"
+
#include "src/compiler/common-operator.h"
#include "src/compiler/diamond.h"
#include "src/compiler/graph.h"
@@ -10,11 +11,8 @@
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
-
#include "src/compiler/node.h"
#include "src/compiler/wasm-compiler.h"
-#include "src/objects-inl.h"
-#include "src/wasm/wasm-module.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -148,7 +146,7 @@ void Int64Lowering::LowerNode(Node* node) {
if (node->opcode() == IrOpcode::kLoad) {
rep = LoadRepresentationOf(node->op()).representation();
} else {
- DCHECK(node->opcode() == IrOpcode::kUnalignedLoad);
+ DCHECK_EQ(IrOpcode::kUnalignedLoad, node->opcode());
rep = UnalignedLoadRepresentationOf(node->op()).representation();
}
@@ -163,7 +161,7 @@ void Int64Lowering::LowerNode(Node* node) {
if (node->opcode() == IrOpcode::kLoad) {
load_op = machine()->Load(MachineType::Int32());
} else {
- DCHECK(node->opcode() == IrOpcode::kUnalignedLoad);
+ DCHECK_EQ(IrOpcode::kUnalignedLoad, node->opcode());
load_op = machine()->UnalignedLoad(MachineType::Int32());
}
@@ -193,7 +191,7 @@ void Int64Lowering::LowerNode(Node* node) {
if (node->opcode() == IrOpcode::kStore) {
rep = StoreRepresentationOf(node->op()).representation();
} else {
- DCHECK(node->opcode() == IrOpcode::kUnalignedStore);
+ DCHECK_EQ(IrOpcode::kUnalignedStore, node->opcode());
rep = UnalignedStoreRepresentationOf(node->op());
}
@@ -218,7 +216,7 @@ void Int64Lowering::LowerNode(Node* node) {
store_op = machine()->Store(StoreRepresentation(
MachineRepresentation::kWord32, write_barrier_kind));
} else {
- DCHECK(node->opcode() == IrOpcode::kUnalignedStore);
+ DCHECK_EQ(IrOpcode::kUnalignedStore, node->opcode());
store_op = machine()->UnalignedStore(MachineRepresentation::kWord32);
}
@@ -257,7 +255,7 @@ void Int64Lowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kParameter: {
- DCHECK(node->InputCount() == 1);
+ DCHECK_EQ(1, node->InputCount());
// Only exchange the node if the parameter count actually changed. We do
// not even have to do the default lowering because the start node,
// the only input of a parameter node, only changes if the parameter count
@@ -265,7 +263,17 @@ void Int64Lowering::LowerNode(Node* node) {
if (GetParameterCountAfterLowering(signature()) !=
static_cast<int>(signature()->parameter_count())) {
int old_index = ParameterIndexOf(node->op());
+ // TODO(wasm): Make this part not wasm specific.
+ // Prevent special lowering of the WasmContext parameter.
+ if (old_index == kWasmContextParameterIndex) {
+ DefaultLowering(node);
+ break;
+ }
+ // Adjust old_index to be compliant with the signature.
+ --old_index;
int new_index = GetParameterIndexAfterLowering(signature(), old_index);
+ // Adjust new_index to consider the WasmContext parameter.
+ ++new_index;
NodeProperties::ChangeOp(node, common()->Parameter(new_index));
Node* high_node = nullptr;
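The +/-1 dance above keeps the WasmContext (node parameter 0) out of the lowering: old_index is first made signature-relative, re-indexed to account for earlier i64 parameters splitting into two i32 slots, and then shifted back to leave room for the context. A standalone worked example for a hypothetical signature (i64, i32), not V8 code:

#include <cassert>
#include <vector>

const int kWasmContextParameterIndex = 0;

int GetParameterIndexAfterLowering(const std::vector<bool>& is_64bit,
                                   int old_index) {
  int result = old_index;
  for (int i = 0; i < old_index; i++)
    if (is_64bit[i]) result++;  // each earlier i64 now takes two slots
  return result;
}

int main() {
  std::vector<bool> is_64bit = {true, false};  // signature (i64, i32)
  int old_index = 2;  // node index of the i32 parameter (context is 0)
  assert(old_index != kWasmContextParameterIndex);
  --old_index;  // make it signature-relative: 1
  int new_index = GetParameterIndexAfterLowering(is_64bit, old_index);  // 2
  ++new_index;  // re-add the WasmContext slot
  assert(new_index == 3);  // the i32 parameter moved from node index 2 to 3
}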
@@ -313,7 +321,7 @@ void Int64Lowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kWord64And: {
- DCHECK(node->InputCount() == 2);
+ DCHECK_EQ(2, node->InputCount());
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
@@ -327,14 +335,14 @@ void Int64Lowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kTruncateInt64ToInt32: {
- DCHECK(node->InputCount() == 1);
+ DCHECK_EQ(1, node->InputCount());
Node* input = node->InputAt(0);
ReplaceNode(node, GetReplacementLow(input), nullptr);
node->NullAllInputs();
break;
}
case IrOpcode::kInt64Add: {
- DCHECK(node->InputCount() == 2);
+ DCHECK_EQ(2, node->InputCount());
Node* right = node->InputAt(1);
node->ReplaceInput(1, GetReplacementLow(right));
@@ -354,7 +362,7 @@ void Int64Lowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kInt64Sub: {
- DCHECK(node->InputCount() == 2);
+ DCHECK_EQ(2, node->InputCount());
Node* right = node->InputAt(1);
node->ReplaceInput(1, GetReplacementLow(right));
@@ -374,7 +382,7 @@ void Int64Lowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kInt64Mul: {
- DCHECK(node->InputCount() == 2);
+ DCHECK_EQ(2, node->InputCount());
Node* right = node->InputAt(1);
node->ReplaceInput(1, GetReplacementLow(right));
@@ -394,7 +402,7 @@ void Int64Lowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kWord64Or: {
- DCHECK(node->InputCount() == 2);
+ DCHECK_EQ(2, node->InputCount());
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
@@ -408,7 +416,7 @@ void Int64Lowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kWord64Xor: {
- DCHECK(node->InputCount() == 2);
+ DCHECK_EQ(2, node->InputCount());
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
@@ -424,7 +432,7 @@ void Int64Lowering::LowerNode(Node* node) {
case IrOpcode::kWord64Shl: {
// TODO(turbofan): if the shift count >= 32, then we can set the low word
// of the output to 0 and just calculate the high word.
- DCHECK(node->InputCount() == 2);
+ DCHECK_EQ(2, node->InputCount());
Node* shift = node->InputAt(1);
if (HasReplacementLow(shift)) {
// We do not have to care about the high word replacement, because
@@ -448,7 +456,7 @@ void Int64Lowering::LowerNode(Node* node) {
case IrOpcode::kWord64Shr: {
// TODO(turbofan): if the shift count >= 32, then we can set the low word
// of the output to 0 and just calculate the high word.
- DCHECK(node->InputCount() == 2);
+ DCHECK_EQ(2, node->InputCount());
Node* shift = node->InputAt(1);
if (HasReplacementLow(shift)) {
// We do not have to care about the high word replacement, because
@@ -472,7 +480,7 @@ void Int64Lowering::LowerNode(Node* node) {
case IrOpcode::kWord64Sar: {
// TODO(turbofan): if the shift count >= 32, then we can set the low word
// of the output to 0 and just calculate the high word.
- DCHECK(node->InputCount() == 2);
+ DCHECK_EQ(2, node->InputCount());
Node* shift = node->InputAt(1);
if (HasReplacementLow(shift)) {
// We do not have to care about the high word replacement, because
@@ -494,7 +502,7 @@ void Int64Lowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kWord64Equal: {
- DCHECK(node->InputCount() == 2);
+ DCHECK_EQ(2, node->InputCount());
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
@@ -533,7 +541,7 @@ void Int64Lowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kChangeInt32ToInt64: {
- DCHECK(node->InputCount() == 1);
+ DCHECK_EQ(1, node->InputCount());
Node* input = node->InputAt(0);
if (HasReplacementLow(input)) {
input = GetReplacementLow(input);
@@ -547,7 +555,7 @@ void Int64Lowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kChangeUint32ToUint64: {
- DCHECK(node->InputCount() == 1);
+ DCHECK_EQ(1, node->InputCount());
Node* input = node->InputAt(0);
if (HasReplacementLow(input)) {
input = GetReplacementLow(input);
@@ -557,7 +565,7 @@ void Int64Lowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kBitcastInt64ToFloat64: {
- DCHECK(node->InputCount() == 1);
+ DCHECK_EQ(1, node->InputCount());
Node* input = node->InputAt(0);
Node* stack_slot = graph()->NewNode(
machine()->StackSlot(MachineRepresentation::kWord64));
@@ -589,7 +597,7 @@ void Int64Lowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kBitcastFloat64ToInt64: {
- DCHECK(node->InputCount() == 1);
+ DCHECK_EQ(1, node->InputCount());
Node* input = node->InputAt(0);
if (HasReplacementLow(input)) {
input = GetReplacementLow(input);
@@ -618,7 +626,7 @@ void Int64Lowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kWord64Ror: {
- DCHECK(node->InputCount() == 2);
+ DCHECK_EQ(2, node->InputCount());
Node* input = node->InputAt(0);
Node* shift = HasReplacementLow(node->InputAt(1))
? GetReplacementLow(node->InputAt(1))
@@ -726,7 +734,7 @@ void Int64Lowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kWord64Clz: {
- DCHECK(node->InputCount() == 1);
+ DCHECK_EQ(1, node->InputCount());
Node* input = node->InputAt(0);
Diamond d(
graph(), common(),
@@ -744,7 +752,7 @@ void Int64Lowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kWord64Ctz: {
- DCHECK(node->InputCount() == 1);
+ DCHECK_EQ(1, node->InputCount());
DCHECK(machine()->Word32Ctz().IsSupported());
Node* input = node->InputAt(0);
Diamond d(
@@ -763,7 +771,7 @@ void Int64Lowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kWord64Popcnt: {
- DCHECK(node->InputCount() == 1);
+ DCHECK_EQ(1, node->InputCount());
Node* input = node->InputAt(0);
// We assume that a Word64Popcnt node only has been created if
// Word32Popcnt is actually supported.
@@ -820,7 +828,7 @@ void Int64Lowering::LowerNode(Node* node) {
void Int64Lowering::LowerComparison(Node* node, const Operator* high_word_op,
const Operator* low_word_op) {
- DCHECK(node->InputCount() == 2);
+ DCHECK_EQ(2, node->InputCount());
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
Node* replacement = graph()->NewNode(
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
index ed0fa321a4..597587f80b 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.cc
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -608,7 +608,7 @@ Reduction JSBuiltinReducer::ReduceTypedArrayIteratorNext(
graph()->NewNode(javascript()->CreateKeyValueArray(), index,
value, context, etrue2);
} else {
- DCHECK(kind == IterationKind::kValues);
+ DCHECK_EQ(IterationKind::kValues, kind);
vtrue2 = value;
}
}
@@ -663,6 +663,77 @@ Reduction JSBuiltinReducer::ReduceTypedArrayIteratorNext(
return Replace(value);
}
+// ES #sec-get-%typedarray%.prototype-@@tostringtag
+Reduction JSBuiltinReducer::ReduceTypedArrayToStringTag(Node* node) {
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ NodeVector values(graph()->zone());
+ NodeVector effects(graph()->zone());
+ NodeVector controls(graph()->zone());
+
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
+ control =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ values.push_back(jsgraph()->UndefinedConstant());
+ effects.push_back(effect);
+ controls.push_back(graph()->NewNode(common()->IfTrue(), control));
+
+ control = graph()->NewNode(common()->IfFalse(), control);
+ Node* receiver_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, effect, control);
+ Node* receiver_bit_field2 = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapBitField2()), receiver_map,
+ effect, control);
+ Node* receiver_elements_kind = graph()->NewNode(
+ simplified()->NumberShiftRightLogical(),
+ graph()->NewNode(simplified()->NumberBitwiseAnd(), receiver_bit_field2,
+ jsgraph()->Constant(Map::ElementsKindBits::kMask)),
+ jsgraph()->Constant(Map::ElementsKindBits::kShift));
+
+ // Offset the elements kind by FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND,
+ // so that the branch cascade below is turned into a simple table
+ // switch by the ControlFlowOptimizer later.
+ receiver_elements_kind = graph()->NewNode(
+ simplified()->NumberSubtract(), receiver_elements_kind,
+ jsgraph()->Constant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND));
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ do { \
+ Node* check = graph()->NewNode( \
+ simplified()->NumberEqual(), receiver_elements_kind, \
+ jsgraph()->Constant(TYPE##_ELEMENTS - \
+ FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND)); \
+ control = graph()->NewNode(common()->Branch(), check, control); \
+ values.push_back(jsgraph()->HeapConstant( \
+ factory()->InternalizeUtf8String(#Type "Array"))); \
+ effects.push_back(effect); \
+ controls.push_back(graph()->NewNode(common()->IfTrue(), control)); \
+ control = graph()->NewNode(common()->IfFalse(), control); \
+ } while (false);
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ values.push_back(jsgraph()->UndefinedConstant());
+ effects.push_back(effect);
+ controls.push_back(control);
+
+ int const count = static_cast<int>(controls.size());
+ control = graph()->NewNode(common()->Merge(count), count, &controls.front());
+ effects.push_back(control);
+ effect =
+ graph()->NewNode(common()->EffectPhi(count), count + 1, &effects.front());
+ values.push_back(control);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, count),
+ count + 1, &values.front());
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
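Biasing the elements kind by FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND makes the equality checks compare against 0, 1, 2, ..., which is the shape the ControlFlowOptimizer can collapse into a table switch. The same trick in plain C++, with made-up kind values, not V8 code:

#include <cstdio>

enum ElementsKind { FIRST_TYPED = 10, INT8 = 10, UINT8 = 11, INT16 = 12 };

const char* ToStringTag(int elements_kind) {
  int biased = elements_kind - FIRST_TYPED;  // 0-based, table-switch friendly
  switch (biased) {
    case INT8 - FIRST_TYPED:  return "Int8Array";
    case UINT8 - FIRST_TYPED: return "Uint8Array";
    case INT16 - FIRST_TYPED: return "Int16Array";
    default:                  return "undefined";  // not a typed array
  }
}

int main() { std::printf("%s\n", ToStringTag(UINT8)); }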
Reduction JSBuiltinReducer::ReduceArrayIteratorNext(Node* node) {
Handle<Map> receiver_map;
if (GetMapWitness(node).ToHandle(&receiver_map)) {
@@ -930,12 +1001,11 @@ Reduction JSBuiltinReducer::ReduceArrayPop(Node* node) {
// ES6 section 22.1.3.18 Array.prototype.push ( )
Reduction JSBuiltinReducer::ReduceArrayPush(Node* node) {
- // We need exactly target, receiver and value parameters.
- if (node->op()->ValueInputCount() != 3) return NoChange();
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ int const num_values = node->op()->ValueInputCount() - 2;
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- Node* value = NodeProperties::GetValueInput(node, 2);
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
@@ -945,6 +1015,12 @@ Reduction JSBuiltinReducer::ReduceArrayPush(Node* node) {
// TODO(turbofan): Relax this to deal with multiple {receiver} maps.
Handle<Map> receiver_map = receiver_maps[0];
if (CanInlineArrayResizeOperation(receiver_map)) {
+ // Collect the value inputs to push.
+ std::vector<Node*> values(num_values);
+ for (int i = 0; i < num_values; ++i) {
+ values[i] = NodeProperties::GetValueInput(node, 2 + i);
+ }
+
// Install code dependencies on the {receiver} prototype maps and the
// global array protector cell.
dependencies()->AssumePropertyCell(factory()->array_protector());
@@ -966,22 +1042,24 @@ Reduction JSBuiltinReducer::ReduceArrayPush(Node* node) {
}
}
- // TODO(turbofan): Perform type checks on the {value}. We are not guaranteed
- // to learn from these checks in case they fail, as the witness (i.e. the
- // map check from the LoadIC for a.push) might not be executed in baseline
- // code (after we stored the value in the builtin and thereby changed the
- // elements kind of a) before be decide to optimize this function again. We
- // currently don't have a proper way to deal with this; the proper solution
- // here is to learn on deopt, i.e. disable Array.prototype.push inlining
- // for this function.
- if (IsSmiElementsKind(receiver_map->elements_kind())) {
- value = effect =
- graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
- } else if (IsDoubleElementsKind(receiver_map->elements_kind())) {
- value = effect =
- graph()->NewNode(simplified()->CheckNumber(), value, effect, control);
- // Make sure we do not store signaling NaNs into double arrays.
- value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
+ // TODO(turbofan): Perform type checks on the {values}. We are not
+ // guaranteed to learn from these checks in case they fail, as the witness
+ // (i.e. the map check from the LoadIC for a.push) might not be executed in
+ // baseline code (after we stored the value in the builtin and thereby
+ // changed the elements kind of a) before we decide to optimize this
+ // function again. We currently don't have a proper way to deal with this;
+ // the proper solution here is to learn on deopt, i.e. disable
+ // Array.prototype.push inlining for this function.
+ for (auto& value : values) {
+ if (IsSmiElementsKind(receiver_map->elements_kind())) {
+ value = effect =
+ graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
+ } else if (IsDoubleElementsKind(receiver_map->elements_kind())) {
+ value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
+ effect, control);
+ // Make sure we do not store signaling NaNs into double arrays.
+ value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
+ }
}
// Load the "length" property of the {receiver}.
@@ -989,33 +1067,54 @@ Reduction JSBuiltinReducer::ReduceArrayPush(Node* node) {
simplified()->LoadField(
AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
receiver, effect, control);
+ Node* value = length;
- // Load the elements backing store of the {receiver}.
- Node* elements = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
- effect, control);
-
- // TODO(turbofan): Check if we need to grow the {elements} backing store.
- // This will deopt if we cannot grow the array further, and we currently
- // don't necessarily learn from it. See the comment on the value type check
- // above.
- GrowFastElementsFlags flags = GrowFastElementsFlag::kArrayObject;
- if (IsDoubleElementsKind(receiver_map->elements_kind())) {
- flags |= GrowFastElementsFlag::kDoubleElements;
- }
- elements = effect =
- graph()->NewNode(simplified()->MaybeGrowFastElements(flags), receiver,
- elements, length, length, effect, control);
+ // Check if we have any {values} to push.
+ if (num_values > 0) {
+ // Compute the resulting "length" of the {receiver}.
+ Node* new_length = value = graph()->NewNode(
+ simplified()->NumberAdd(), length, jsgraph()->Constant(num_values));
- // Append the value to the {elements}.
- effect = graph()->NewNode(
- simplified()->StoreElement(
- AccessBuilder::ForFixedArrayElement(receiver_map->elements_kind())),
- elements, length, value, effect, control);
+ // Load the elements backing store of the {receiver}.
+ Node* elements = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ receiver, effect, control);
+ Node* elements_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+ elements, effect, control);
+
+ // TODO(turbofan): Check if we need to grow the {elements} backing store.
+ // This will deopt if we cannot grow the array further, and we currently
+ // don't necessarily learn from it. See the comment on the value type
+ // check above.
+ GrowFastElementsMode mode =
+ IsDoubleElementsKind(receiver_map->elements_kind())
+ ? GrowFastElementsMode::kDoubleElements
+ : GrowFastElementsMode::kSmiOrObjectElements;
+ elements = effect = graph()->NewNode(
+ simplified()->MaybeGrowFastElements(mode), receiver, elements,
+ graph()->NewNode(simplified()->NumberAdd(), length,
+ jsgraph()->Constant(num_values - 1)),
+ elements_length, effect, control);
+
+ // Update the JSArray::length field. Since this is observable,
+ // there must be no other check after this.
+ effect = graph()->NewNode(
+ simplified()->StoreField(
+ AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
+ receiver, new_length, effect, control);
- // Return the new length of the {receiver}.
- value = graph()->NewNode(simplified()->NumberAdd(), length,
- jsgraph()->OneConstant());
+ // Append the {values} to the {elements}.
+ for (int i = 0; i < num_values; ++i) {
+ Node* value = values[i];
+ Node* index = graph()->NewNode(simplified()->NumberAdd(), length,
+ jsgraph()->Constant(i));
+ effect = graph()->NewNode(
+ simplified()->StoreElement(AccessBuilder::ForFixedArrayElement(
+ receiver_map->elements_kind())),
+ elements, index, value, effect, control);
+ }
+ }
ReplaceWithValue(node, value, effect, control);
return Replace(value);
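
The reworked fast path above has a deliberate ordering: growing the backing store is the only step that can deoptimize, so it happens before the JSArray length store, and the element stores that follow the (observable) length update cannot fail. A minimal stand-alone C++ sketch of that ordering, with a plain vector standing in for V8's elements backing store (all names here are illustrative, not V8 API):

    #include <cstddef>
    #include <initializer_list>
    #include <vector>

    struct FastArray {
      std::vector<double> elements;  // backing store ("elements")
      std::size_t length = 0;        // observable "length" property
    };

    std::size_t PushAll(FastArray& a, std::initializer_list<double> values) {
      std::size_t num_values = values.size();
      // 1. Grow first: the only step that can fail, mirroring the
      //    deoptimizing MaybeGrowFastElements in the reduction above.
      if (a.length + num_values > a.elements.size()) {
        a.elements.resize(a.length + num_values);
      }
      // 2. Update the observable length; no failing check may follow.
      std::size_t old_length = a.length;
      a.length += num_values;
      // 3. Append the values: plain stores that cannot fail.
      std::size_t i = old_length;
      for (double v : values) a.elements[i++] = v;
      return a.length;  // Array.prototype.push returns the new length
    }
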
@@ -1728,14 +1827,14 @@ Reduction JSBuiltinReducer::ReduceMapGet(Node* node) {
if (!HasInstanceTypeWitness(receiver, effect, JS_MAP_TYPE)) return NoChange();
- Node* storage = effect = graph()->NewNode(
+ Node* table = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSCollectionTable()), receiver,
effect, control);
- Node* index = effect = graph()->NewNode(
- simplified()->LookupHashStorageIndex(), storage, key, effect, control);
+ Node* entry = effect = graph()->NewNode(
+ simplified()->FindOrderedHashMapEntry(), table, key, effect, control);
- Node* check = graph()->NewNode(simplified()->NumberEqual(), index,
+ Node* check = graph()->NewNode(simplified()->NumberEqual(), entry,
jsgraph()->MinusOneConstant());
Node* branch = graph()->NewNode(common()->Branch(), check, control);
@@ -1749,8 +1848,8 @@ Reduction JSBuiltinReducer::ReduceMapGet(Node* node) {
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
Node* efalse = effect;
Node* vfalse = efalse = graph()->NewNode(
- simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()), storage,
- index, efalse, if_false);
+ simplified()->LoadElement(AccessBuilder::ForOrderedHashMapEntryValue()),
+ table, entry, efalse, if_false);
control = graph()->NewNode(common()->Merge(2), if_true, if_false);
Node* value = graph()->NewNode(
@@ -1771,28 +1870,16 @@ Reduction JSBuiltinReducer::ReduceMapHas(Node* node) {
if (!HasInstanceTypeWitness(receiver, effect, JS_MAP_TYPE)) return NoChange();
- Node* storage = effect = graph()->NewNode(
+ Node* table = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSCollectionTable()), receiver,
effect, control);
Node* index = effect = graph()->NewNode(
- simplified()->LookupHashStorageIndex(), storage, key, effect, control);
+ simplified()->FindOrderedHashMapEntry(), table, key, effect, control);
- Node* check = graph()->NewNode(simplified()->NumberEqual(), index,
+ Node* value = graph()->NewNode(simplified()->NumberEqual(), index,
jsgraph()->MinusOneConstant());
- Node* branch = graph()->NewNode(common()->Branch(), check, control);
-
- // Key not found.
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue = jsgraph()->FalseConstant();
-
- // Key found.
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = jsgraph()->TrueConstant();
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* value = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
+ value = graph()->NewNode(simplified()->BooleanNot(), value);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
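
With FindOrderedHashMapEntry returning -1 exactly when the key is absent, ReduceMapHas no longer needs the Branch/Merge/Phi diamond and collapses to a branch-free boolean. The same shape in plain C++ (a sketch; the table type and entry index are stand-ins):

    #include <string>
    #include <unordered_map>

    using Table = std::unordered_map<std::string, int>;

    // Stand-in for FindOrderedHashMapEntry: -1 signals "key not found".
    long FindEntry(const Table& table, const std::string& key) {
      return table.count(key) ? 0 /* some entry index */ : -1;
    }

    // Before: Branch on (entry == -1), then Merge/Phi over true/false.
    // After:  has = BooleanNot(NumberEqual(entry, -1)) -- no control flow.
    bool MapHas(const Table& table, const std::string& key) {
      return !(FindEntry(table, key) == -1);
    }
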
@@ -2397,6 +2484,52 @@ Reduction JSBuiltinReducer::ReduceObjectCreate(Node* node) {
return Replace(value);
}
+// ES #sec-object.is
+Reduction JSBuiltinReducer::ReduceObjectIs(Node* node) {
+ // TODO(turbofan): At some point we should probably introduce a new
+ // SameValue simplified operator (and also a StrictEqual simplified
+ // operator) and create unified handling in SimplifiedLowering.
+ JSCallReduction r(node);
+ if (r.GetJSCallArity() == 2 && r.left() == r.right()) {
+ // Object.is(x,x) => #true
+ Node* value = jsgraph()->TrueConstant();
+ return Replace(value);
+ } else if (r.InputsMatchTwo(Type::Unique(), Type::Unique())) {
+ // Object.is(x:Unique,y:Unique) => ReferenceEqual(x,y)
+ Node* left = r.GetJSCallInput(0);
+ Node* right = r.GetJSCallInput(1);
+ Node* value = graph()->NewNode(simplified()->ReferenceEqual(), left, right);
+ return Replace(value);
+ } else if (r.InputsMatchTwo(Type::MinusZero(), Type::Any())) {
+ // Object.is(x:MinusZero,y) => ObjectIsMinusZero(y)
+ Node* input = r.GetJSCallInput(1);
+ Node* value = graph()->NewNode(simplified()->ObjectIsMinusZero(), input);
+ return Replace(value);
+ } else if (r.InputsMatchTwo(Type::Any(), Type::MinusZero())) {
+ // Object.is(x,y:MinusZero) => ObjectIsMinusZero(x)
+ Node* input = r.GetJSCallInput(0);
+ Node* value = graph()->NewNode(simplified()->ObjectIsMinusZero(), input);
+ return Replace(value);
+ } else if (r.InputsMatchTwo(Type::NaN(), Type::Any())) {
+ // Object.is(x:NaN,y) => ObjectIsNaN(y)
+ Node* input = r.GetJSCallInput(1);
+ Node* value = graph()->NewNode(simplified()->ObjectIsNaN(), input);
+ return Replace(value);
+ } else if (r.InputsMatchTwo(Type::Any(), Type::NaN())) {
+ // Object.is(x,y:NaN) => ObjectIsNaN(x)
+ Node* input = r.GetJSCallInput(0);
+ Node* value = graph()->NewNode(simplified()->ObjectIsNaN(), input);
+ return Replace(value);
+ } else if (r.InputsMatchTwo(Type::String(), Type::String())) {
+ // Object.is(x:String,y:String) => StringEqual(x,y)
+ Node* left = r.GetJSCallInput(0);
+ Node* right = r.GetJSCallInput(1);
+ Node* value = graph()->NewNode(simplified()->StringEqual(), left, right);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
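
The new cases in ReduceObjectIs above follow the SameValue algorithm, which differs from strict equality only for NaN (equal to itself) and signed zeros (+0 and -0 are distinct); that is why one-sided NaN or MinusZero type information suffices to collapse the whole call into a single ObjectIsNaN or ObjectIsMinusZero check. A minimal numeric SameValue sketched in C++ from the spec semantics (not V8 code):

    #include <cmath>

    // SameValue for doubles (numeric cases of ES 7.2.10):
    // - NaN equals itself,
    // - +0 and -0 are distinct,
    // - everything else compares like ===.
    bool SameValueNumber(double x, double y) {
      if (std::isnan(x) && std::isnan(y)) return true;  // Object.is(NaN, NaN)
      if (x == 0.0 && y == 0.0)                         // both are some zero
        return std::signbit(x) == std::signbit(y);      // +0 vs. -0 differ
      return x == y;
    }
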
// ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits )
Reduction JSBuiltinReducer::ReduceStringFromCharCode(Node* node) {
JSCallReduction r(node);
@@ -2807,6 +2940,17 @@ Reduction JSBuiltinReducer::ReduceStringToUpperCaseIntl(Node* node) {
return NoChange();
}
+Reduction JSBuiltinReducer::ReduceArrayBufferIsView(Node* node) {
+ Node* value = node->op()->ValueInputCount() >= 3
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ RelaxEffectsAndControls(node);
+ node->ReplaceInput(0, value);
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->ObjectIsArrayBufferView());
+ return Changed(node);
+}
+
Reduction JSBuiltinReducer::ReduceArrayBufferViewAccessor(
Node* node, InstanceType instance_type, FieldAccess const& access) {
Node* receiver = NodeProperties::GetValueInput(node, 1);
@@ -3017,6 +3161,9 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
case kObjectCreate:
reduction = ReduceObjectCreate(node);
break;
+ case kObjectIs:
+ reduction = ReduceObjectIs(node);
+ break;
case kSetEntries:
return ReduceCollectionIterator(
node, JS_SET_TYPE, Context::SET_KEY_VALUE_ITERATOR_MAP_INDEX);
@@ -3048,6 +3195,8 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
return ReduceStringToLowerCaseIntl(node);
case kStringToUpperCaseIntl:
return ReduceStringToUpperCaseIntl(node);
+ case kArrayBufferIsView:
+ return ReduceArrayBufferIsView(node);
case kDataViewByteLength:
return ReduceArrayBufferViewAccessor(
node, JS_DATA_VIEW_TYPE,
@@ -3073,6 +3222,8 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
return ReduceTypedArrayIterator(node, IterationKind::kKeys);
case kTypedArrayValues:
return ReduceTypedArrayIterator(node, IterationKind::kValues);
+ case kTypedArrayToStringTag:
+ return ReduceTypedArrayToStringTag(node);
default:
break;
}
diff --git a/deps/v8/src/compiler/js-builtin-reducer.h b/deps/v8/src/compiler/js-builtin-reducer.h
index 2f11dce302..d5bddcede5 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.h
+++ b/deps/v8/src/compiler/js-builtin-reducer.h
@@ -39,6 +39,8 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
Reduction Reduce(Node* node) final;
private:
+ enum class ArrayIteratorKind { kArray, kTypedArray };
+
Reduction ReduceArrayIterator(Node* node, IterationKind kind);
Reduction ReduceTypedArrayIterator(Node* node, IterationKind kind);
Reduction ReduceArrayIterator(Handle<Map> receiver_map, Node* node,
@@ -49,6 +51,7 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
IterationKind kind);
Reduction ReduceTypedArrayIteratorNext(Handle<Map> iterator_map, Node* node,
IterationKind kind);
+ Reduction ReduceTypedArrayToStringTag(Node* node);
Reduction ReduceArrayIsArray(Node* node);
Reduction ReduceArrayPop(Node* node);
Reduction ReduceArrayPush(Node* node);
@@ -108,6 +111,7 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
Reduction ReduceNumberIsSafeInteger(Node* node);
Reduction ReduceNumberParseInt(Node* node);
Reduction ReduceObjectCreate(Node* node);
+ Reduction ReduceObjectIs(Node* node);
Reduction ReduceStringCharAt(Node* node);
Reduction ReduceStringCharCodeAt(Node* node);
Reduction ReduceStringConcat(Node* node);
@@ -117,6 +121,7 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
Reduction ReduceStringIteratorNext(Node* node);
Reduction ReduceStringToLowerCaseIntl(Node* node);
Reduction ReduceStringToUpperCaseIntl(Node* node);
+ Reduction ReduceArrayBufferIsView(Node* node);
Reduction ReduceArrayBufferViewAccessor(Node* node,
InstanceType instance_type,
FieldAccess const& access);
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index a807965618..d8fcf4553a 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -4,6 +4,7 @@
#include "src/compiler/js-call-reducer.h"
+#include "src/api.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/compilation-dependencies.h"
@@ -20,6 +21,62 @@ namespace v8 {
namespace internal {
namespace compiler {
+namespace {
+
+bool CanBePrimitive(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kJSCreate:
+ case IrOpcode::kJSCreateArguments:
+ case IrOpcode::kJSCreateArray:
+ case IrOpcode::kJSCreateClosure:
+ case IrOpcode::kJSCreateEmptyLiteralArray:
+ case IrOpcode::kJSCreateEmptyLiteralObject:
+ case IrOpcode::kJSCreateIterResultObject:
+ case IrOpcode::kJSCreateKeyValueArray:
+ case IrOpcode::kJSCreateLiteralArray:
+ case IrOpcode::kJSCreateLiteralObject:
+ case IrOpcode::kJSCreateLiteralRegExp:
+ case IrOpcode::kJSConstructForwardVarargs:
+ case IrOpcode::kJSConstruct:
+ case IrOpcode::kJSConstructWithArrayLike:
+ case IrOpcode::kJSConstructWithSpread:
+ case IrOpcode::kJSConvertReceiver:
+ case IrOpcode::kJSGetSuperConstructor:
+ case IrOpcode::kJSToObject:
+ return false;
+ case IrOpcode::kHeapConstant: {
+ Handle<HeapObject> value = HeapObjectMatcher(node).Value();
+ return value->IsPrimitive();
+ }
+ default:
+ return true;
+ }
+}
+
+bool CanBeNullOrUndefined(Node* node) {
+ if (CanBePrimitive(node)) {
+ switch (node->opcode()) {
+ case IrOpcode::kJSToBoolean:
+ case IrOpcode::kJSToInteger:
+ case IrOpcode::kJSToLength:
+ case IrOpcode::kJSToName:
+ case IrOpcode::kJSToNumber:
+ case IrOpcode::kJSToString:
+ return false;
+ case IrOpcode::kHeapConstant: {
+ Handle<HeapObject> value = HeapObjectMatcher(node).Value();
+ Isolate* const isolate = value->GetIsolate();
+ return value->IsNullOrUndefined(isolate);
+ }
+ default:
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace
+
Reduction JSCallReducer::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kJSConstruct:
@@ -104,43 +161,30 @@ Reduction JSCallReducer::ReduceNumberConstructor(Node* node) {
return Changed(node);
}
-namespace {
-
-bool CanBeNullOrUndefined(Node* node) {
- switch (node->opcode()) {
- case IrOpcode::kJSCreate:
- case IrOpcode::kJSCreateArguments:
- case IrOpcode::kJSCreateArray:
- case IrOpcode::kJSCreateClosure:
- case IrOpcode::kJSCreateIterResultObject:
- case IrOpcode::kJSCreateKeyValueArray:
- case IrOpcode::kJSCreateLiteralArray:
- case IrOpcode::kJSCreateLiteralObject:
- case IrOpcode::kJSCreateLiteralRegExp:
- case IrOpcode::kJSConstruct:
- case IrOpcode::kJSConstructForwardVarargs:
- case IrOpcode::kJSConstructWithSpread:
- case IrOpcode::kJSConvertReceiver:
- case IrOpcode::kJSToBoolean:
- case IrOpcode::kJSToInteger:
- case IrOpcode::kJSToLength:
- case IrOpcode::kJSToName:
- case IrOpcode::kJSToNumber:
- case IrOpcode::kJSToObject:
- case IrOpcode::kJSToString:
- return false;
- case IrOpcode::kHeapConstant: {
- Handle<HeapObject> value = HeapObjectMatcher(node).Value();
- Isolate* const isolate = value->GetIsolate();
- return value->IsNull(isolate) || value->IsUndefined(isolate);
+// ES section #sec-object-constructor
+Reduction JSCallReducer::ReduceObjectConstructor(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.arity() < 3) return NoChange();
+ Node* value = (p.arity() >= 3) ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+
+ // We can fold away the Object(x) call if |x| is definitely not a primitive.
+ if (CanBePrimitive(value)) {
+ if (!CanBeNullOrUndefined(value)) {
+ // Turn the {node} into a {JSToObject} call if we know that
+ // the {value} cannot be null or undefined.
+ NodeProperties::ReplaceValueInputs(node, value);
+ NodeProperties::ChangeOp(node, javascript()->ToObject());
+ return Changed(node);
}
- default:
- return true;
+ } else {
+ ReplaceWithValue(node, value);
+ return Replace(value);
}
+ return NoChange();
}
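
Taken together with the CanBePrimitive/CanBeNullOrUndefined helpers above, ReduceObjectConstructor implements a three-way decision: a proven non-primitive is returned as-is, a proven non-null/undefined value lowers to JSToObject, and anything else is left alone. As a stand-alone sketch over precomputed predicate results (the enum and names are illustrative):

    // Both predicates are conservative: "true" means "cannot rule it out".
    enum class Fold { kKeepValue, kToObject, kNone };

    Fold ClassifyObjectCall(bool can_be_primitive,
                            bool can_be_null_or_undefined) {
      if (!can_be_primitive) return Fold::kKeepValue;  // Object(x) == x
      if (!can_be_null_or_undefined)
        return Fold::kToObject;  // lower the call to JSToObject
      return Fold::kNone;        // might be null/undefined: keep the call
    }
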
-} // namespace
-
// ES6 section 19.2.3.1 Function.prototype.apply ( thisArg, argArray )
Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
@@ -411,55 +455,44 @@ Reduction JSCallReducer::ReduceObjectPrototypeHasOwnProperty(Node* node) {
// | +-+
// | |
// | JSToObject
- // | ^ ^
- // | | |
- // | | +--- JSForInPrepare
- // | | ^ ^
- // | | | |
- // | | +----+ +------+
- // | | | |
- // | | Projection[0] Projection[1]
- // | | (cache_type) (cache_array)
- // | | ^ ^
- // | | | |
- // | | +--------+ |
- // | | | |
- // | CheckMapValue |
- // | ^ |
- // | : +------------------+
- // | : |
- // | LoadElement
- // | ^
- // +----+ |
- // | |
+ // | ^
+ // | |
+ // | JSForInNext
+ // | ^
+ // +----+ |
+ // | |
// JSCall[hasOwnProperty]
- // We can constant-fold the {node} to True in this case, and insert a
- // (potentially redundant) CheckMapValue to guard the fact that the
- // {receiver} map didn't change since the initial CheckMapValue, which
- // was inserted by the BytecodeGraphBuilder for the ForInNext bytecode.
+ // We can constant-fold the {node} to True in this case, and insert
+ // a (potentially redundant) map check to guard the fact that the
+ // {receiver} map didn't change since the dominating JSForInNext. This
+ // map check is only necessary when TurboFan cannot prove that there
+ // is no observable side effect between the {JSForInNext} and the
+ // {JSCall} to Object.prototype.hasOwnProperty.
//
// Also note that it's safe to look through the {JSToObject}, since the
// Object.prototype.hasOwnProperty does an implicit ToObject anyway, and
// these operations are not observable.
- if (name->opcode() == IrOpcode::kLoadElement) {
- Node* cache_array = NodeProperties::GetValueInput(name, 0);
- Node* check_map = NodeProperties::GetEffectInput(name);
- if (cache_array->opcode() == IrOpcode::kProjection &&
- ProjectionIndexOf(cache_array->op()) == 1 &&
- check_map->opcode() == IrOpcode::kCheckMapValue) {
- Node* prepare = NodeProperties::GetValueInput(cache_array, 0);
- Node* object = NodeProperties::GetValueInput(check_map, 0);
- Node* cache_type = NodeProperties::GetValueInput(check_map, 1);
- if (cache_type->opcode() == IrOpcode::kProjection &&
- prepare->opcode() == IrOpcode::kJSForInPrepare &&
- ProjectionIndexOf(cache_type->op()) == 0 &&
- NodeProperties::GetValueInput(cache_type, 0) == prepare &&
- (object == receiver ||
- (object->opcode() == IrOpcode::kJSToObject &&
- receiver == NodeProperties::GetValueInput(object, 0)))) {
- effect = graph()->NewNode(check_map->op(), object, cache_type, effect,
- control);
+ if (name->opcode() == IrOpcode::kJSForInNext) {
+ ForInMode const mode = ForInModeOf(name->op());
+ if (mode != ForInMode::kGeneric) {
+ Node* object = NodeProperties::GetValueInput(name, 0);
+ Node* cache_type = NodeProperties::GetValueInput(name, 2);
+ if (object->opcode() == IrOpcode::kJSToObject) {
+ object = NodeProperties::GetValueInput(object, 0);
+ }
+ if (object == receiver) {
+ // No need to repeat the map check if we can prove that there's no
+ // observable side effect between {effect} and {name}.
+ if (!NodeProperties::NoObservableSideEffectBetween(effect, name)) {
+ Node* receiver_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, effect, control);
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
+ receiver_map, cache_type);
+ effect =
+ graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ }
Node* value = jsgraph()->TrueConstant();
ReplaceWithValue(node, value, effect, control);
return Replace(value);
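
Both this reduction and ReduceCallOrConstructWithArrayLikeOrSpread further down now defer to NodeProperties::NoObservableSideEffectBetween; the open-coded loop removed from the latter shows the intended semantics. A hedged sketch of that walk over stand-in graph types (not V8's actual classes):

    // Minimal stand-ins for the graph types used below (illustrative).
    struct Op {
      int effect_input_count;
      bool no_write;  // corresponds to Operator::kNoWrite
    };
    struct Node {
      Op op;
      Node* effect_input;  // single effect predecessor, if any
    };

    // Walk the effect chain from {start} back to {target} (which is
    // assumed to dominate it): the region has no observable side effect
    // iff every intervening node is a single-effect, non-writing operator.
    bool NoObservableSideEffectBetween(Node* start, Node* target) {
      for (Node* effect = start; effect != target;
           effect = effect->effect_input) {
        if (effect->op.effect_input_count != 1 || !effect->op.no_write) {
          return false;  // a write or effect merge may clobber the witness
        }
      }
      return true;
    }
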
@@ -592,13 +625,26 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
if (result != NodeProperties::kReliableReceiverMaps) {
return NoChange();
}
- if (receiver_maps.size() != 1) return NoChange();
- Handle<Map> receiver_map(receiver_maps[0]);
- ElementsKind kind = receiver_map->elements_kind();
- // TODO(danno): Handle double packed elements
- if (!IsFastElementsKind(kind) || IsDoubleElementsKind(kind) ||
- !CanInlineArrayIteratingBuiltin(receiver_map)) {
- return NoChange();
+ if (receiver_maps.size() == 0) return NoChange();
+
+ ElementsKind kind = IsDoubleElementsKind(receiver_maps[0]->elements_kind())
+ ? PACKED_DOUBLE_ELEMENTS
+ : PACKED_ELEMENTS;
+ for (Handle<Map> receiver_map : receiver_maps) {
+ ElementsKind next_kind = receiver_map->elements_kind();
+ if (!CanInlineArrayIteratingBuiltin(receiver_map)) {
+ return NoChange();
+ }
+ if (!IsFastElementsKind(next_kind) ||
+ (IsDoubleElementsKind(next_kind) && IsHoleyElementsKind(next_kind))) {
+ return NoChange();
+ }
+ if (IsDoubleElementsKind(kind) != IsDoubleElementsKind(next_kind)) {
+ return NoChange();
+ }
+ if (IsHoleyElementsKind(next_kind)) {
+ kind = HOLEY_ELEMENTS;
+ }
}
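
The loop above unifies the elements kinds of all inferred receiver maps: every map must be inlinable and fast, holey double kinds bail out, double and tagged storage may not mix, and any holey map widens the result to a holey kind. The same rule as a stand-alone sketch (the enum is an illustrative subset of V8's ElementsKind, and the per-map inlinability check is omitted):

    #include <optional>
    #include <vector>

    enum ElementsKind {
      PACKED_SMI_ELEMENTS, HOLEY_SMI_ELEMENTS,
      PACKED_ELEMENTS, HOLEY_ELEMENTS,
      PACKED_DOUBLE_ELEMENTS, HOLEY_DOUBLE_ELEMENTS,
    };
    bool IsDouble(ElementsKind k) {
      return k == PACKED_DOUBLE_ELEMENTS || k == HOLEY_DOUBLE_ELEMENTS;
    }
    bool IsHoley(ElementsKind k) {
      return k == HOLEY_SMI_ELEMENTS || k == HOLEY_ELEMENTS ||
             k == HOLEY_DOUBLE_ELEMENTS;
    }

    // Unify all receiver map kinds, or bail out (nullopt == NoChange).
    std::optional<ElementsKind> UnifyForEachKinds(
        const std::vector<ElementsKind>& kinds) {
      if (kinds.empty()) return std::nullopt;
      ElementsKind result =
          IsDouble(kinds[0]) ? PACKED_DOUBLE_ELEMENTS : PACKED_ELEMENTS;
      for (ElementsKind k : kinds) {
        if (IsDouble(k) && IsHoley(k)) return std::nullopt;  // holey double
        if (IsDouble(k) != IsDouble(result)) return std::nullopt;  // mixed
        if (IsHoley(k)) result = HOLEY_ELEMENTS;  // widen to holey
      }
      return result;
    }
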
// Install code dependencies on the {receiver} prototype maps and the
@@ -607,7 +653,7 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
Node* k = jsgraph()->ZeroConstant();
- Node* original_length = graph()->NewNode(
+ Node* original_length = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS)),
receiver, effect, control);
@@ -626,7 +672,8 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::LAZY);
Node* check_throw = check_fail = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kThrowCalledNonCallable), fncallback,
+ javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
+ jsgraph()->Constant(MessageTemplate::kCalledNonCallable), fncallback,
context, check_frame_state, effect, check_fail);
control = graph()->NewNode(common()->IfTrue(), check_branch);
@@ -659,18 +706,13 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
// Make sure the map hasn't changed during the iteration
- Node* orig_map = jsgraph()->HeapConstant(receiver_map);
- Node* array_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- receiver, effect, control);
- Node* check_map =
- graph()->NewNode(simplified()->ReferenceEqual(), array_map, orig_map);
- effect =
- graph()->NewNode(simplified()->CheckIf(), check_map, effect, control);
+ effect = graph()->NewNode(
+ simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
+ effect, control);
// Make sure that the access is still in bounds, since the callback could have
// changed the array's size.
- Node* length = graph()->NewNode(
+ Node* length = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS)),
receiver, effect, control);
k = effect =
@@ -679,12 +721,12 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
// Reload the elements pointer before calling the callback, since the previous
// callback might have resized the array causing the elements buffer to be
// re-allocated.
- Node* elements = graph()->NewNode(
+ Node* elements = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
effect, control);
- Node* element = graph()->NewNode(
- simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()),
+ Node* element = effect = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(kind)),
elements, k, effect, control);
Node* next_k =
@@ -705,6 +747,12 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
hole_true = graph()->NewNode(common()->IfTrue(), branch);
hole_false = graph()->NewNode(common()->IfFalse(), branch);
control = hole_false;
+
+ // The contract is that we don't leak "the hole" into "user JavaScript",
+ // so we must rename the {element} here to explicitly exclude "the hole"
+ // from the type of {element}.
+ element = graph()->NewNode(common()->TypeGuard(Type::NonInternal()),
+ element, control);
}
frame_state = CreateJavaScriptBuiltinContinuationFrameState(
@@ -758,9 +806,9 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
control = if_false;
effect = eloop;
- // The above %ThrowCalledNonCallable runtime call is an unconditional
- // throw, making it impossible to return a successful completion in this
- // case. We simply connect the successful completion to the graph end.
+ // The above %ThrowTypeError runtime call is an unconditional throw, making
+ // it impossible to return a successful completion in this case. We simply
+ // connect the successful completion to the graph end.
Node* terminate =
graph()->NewNode(common()->Throw(), check_throw, check_fail);
NodeProperties::MergeControlToEnd(graph(), common(), terminate);
@@ -793,48 +841,49 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
if (result != NodeProperties::kReliableReceiverMaps) {
return NoChange();
}
- if (receiver_maps.size() != 1) return NoChange();
- Handle<Map> receiver_map(receiver_maps[0]);
- ElementsKind kind = receiver_map->elements_kind();
- // TODO(danno): Handle holey Smi and Object fast elements kinds and double
- // packed.
- if (!IsFastPackedElementsKind(kind) || IsDoubleElementsKind(kind)) {
+
+ // Ensure that any changes to the Array species constructor cause deopt.
+ if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
+
+ if (receiver_maps.size() == 0) return NoChange();
+
+ const ElementsKind kind = receiver_maps[0]->elements_kind();
+
+ // TODO(danno): Handle holey elements kinds.
+ if (!IsFastPackedElementsKind(kind)) {
return NoChange();
}
- // We want the input to be a generic Array.
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!CanInlineArrayIteratingBuiltin(receiver_map)) {
+ return NoChange();
+ }
+ // We can handle different maps, as long as their elements kind are the
+ // same.
+ if (receiver_map->elements_kind() != kind) {
+ return NoChange();
+ }
+ }
+
+ dependencies()->AssumePropertyCell(factory()->species_protector());
+
Handle<JSFunction> handle_constructor(
JSFunction::cast(
native_context()->GetInitialJSArrayMap(kind)->GetConstructor()),
isolate());
Node* array_constructor = jsgraph()->HeapConstant(handle_constructor);
- if (receiver_map->prototype() !=
- native_context()->get(Context::INITIAL_ARRAY_PROTOTYPE_INDEX)) {
- return NoChange();
- }
-
- // And ensure that any changes to the Array species constructor cause deopt.
- if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
- dependencies()->AssumePropertyCell(factory()->species_protector());
Node* k = jsgraph()->ZeroConstant();
- Node* orig_map = jsgraph()->HeapConstant(receiver_map);
// Make sure the map hasn't changed before we construct the output array.
- {
- Node* array_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- receiver, effect, control);
- Node* check_map =
- graph()->NewNode(simplified()->ReferenceEqual(), array_map, orig_map);
- effect =
- graph()->NewNode(simplified()->CheckIf(), check_map, effect, control);
- }
+ effect = graph()->NewNode(
+ simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
+ effect, control);
- Node* original_length = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS)),
- receiver, effect, control);
+ Node* original_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
+ effect, control);
// This array should be HOLEY_SMI_ELEMENTS because of the non-zero length.
// Even though {JSCreateArray} is not marked as {kNoThrow}, we can elide the
@@ -859,7 +908,8 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::LAZY);
Node* check_throw = check_fail = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kThrowCalledNonCallable), fncallback,
+ javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
+ jsgraph()->Constant(MessageTemplate::kCalledNonCallable), fncallback,
context, check_frame_state, effect, check_fail);
control = graph()->NewNode(common()->IfTrue(), check_branch);
@@ -892,31 +942,27 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
// Make sure the map hasn't changed during the iteration
- Node* array_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- receiver, effect, control);
- Node* check_map =
- graph()->NewNode(simplified()->ReferenceEqual(), array_map, orig_map);
- effect =
- graph()->NewNode(simplified()->CheckIf(), check_map, effect, control);
+ effect = graph()->NewNode(
+ simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
+ effect, control);
// Make sure that the access is still in bounds, since the callback could have
// changed the array's size.
- Node* length = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS)),
- receiver, effect, control);
+ Node* length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
+ effect, control);
k = effect =
graph()->NewNode(simplified()->CheckBounds(), k, length, effect, control);
// Reload the elements pointer before calling the callback, since the previous
// callback might have resized the array causing the elements buffer to be
// re-allocated.
- Node* elements = graph()->NewNode(
+ Node* elements = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
effect, control);
- Node* element = graph()->NewNode(
- simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()),
+ Node* element = effect = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(kind)),
elements, k, effect, control);
Node* next_k =
@@ -972,9 +1018,9 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
control = if_false;
effect = eloop;
- // The above %ThrowCalledNonCallable runtime call is an unconditional
- // throw, making it impossible to return a successful completion in this
- // case. We simply connect the successful completion to the graph end.
+ // The above %ThrowTypeError runtime call is an unconditional throw, making
+ // it impossible to return a successful completion in this case. We simply
+ // connect the successful completion to the graph end.
Node* terminate =
graph()->NewNode(common()->Throw(), check_throw, check_fail);
NodeProperties::MergeControlToEnd(graph(), common(), terminate);
@@ -1187,12 +1233,9 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
// TODO(turbofan): Further relax this constraint.
if (formal_parameter_count != 0) {
Node* effect = NodeProperties::GetEffectInput(node);
- while (effect != arguments_list) {
- if (effect->op()->EffectInputCount() != 1 ||
- !(effect->op()->properties() & Operator::kNoWrite)) {
- return NoChange();
- }
- effect = NodeProperties::GetEffectInput(effect);
+ if (!NodeProperties::NoObservableSideEffectBetween(effect,
+ arguments_list)) {
+ return NoChange();
}
}
} else if (type == CreateArgumentsType::kRestParameter) {
@@ -1255,6 +1298,56 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
} else {
NodeProperties::ChangeOp(
node, javascript()->Construct(arity + 2, frequency, feedback));
+ Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Check whether the given new target value is a constructor function. The
+ // replacement {JSConstruct} operator only checks the passed target value
+ // but relies on the new target value to be implicitly valid.
+ Node* check =
+ graph()->NewNode(simplified()->ObjectIsConstructor(), new_target);
+ Node* check_branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ Node* check_fail = graph()->NewNode(common()->IfFalse(), check_branch);
+ Node* check_throw = check_fail =
+ graph()->NewNode(javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
+ jsgraph()->Constant(MessageTemplate::kNotConstructor),
+ new_target, context, frame_state, effect, check_fail);
+ control = graph()->NewNode(common()->IfTrue(), check_branch);
+ NodeProperties::ReplaceControlInput(node, control);
+
+ // Rewire potential exception edges.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ // Create appropriate {IfException} and {IfSuccess} nodes.
+ Node* if_exception =
+ graph()->NewNode(common()->IfException(), check_throw, check_fail);
+ check_fail = graph()->NewNode(common()->IfSuccess(), check_fail);
+
+ // Join the exception edges.
+ Node* merge =
+ graph()->NewNode(common()->Merge(2), if_exception, on_exception);
+ Node* ephi = graph()->NewNode(common()->EffectPhi(2), if_exception,
+ on_exception, merge);
+ Node* phi =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ if_exception, on_exception, merge);
+ ReplaceWithValue(on_exception, phi, ephi, merge);
+ merge->ReplaceInput(1, on_exception);
+ ephi->ReplaceInput(1, on_exception);
+ phi->ReplaceInput(1, on_exception);
+ }
+
+ // The above %ThrowTypeError runtime call is an unconditional throw, making
+ // it impossible to return a successful completion in this case. We simply
+ // connect the successful completion to the graph end.
+ Node* terminate =
+ graph()->NewNode(common()->Throw(), check_throw, check_fail);
+ NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+
Reduction const reduction = ReduceJSConstruct(node);
return reduction.Changed() ? reduction : Changed(node);
}
@@ -1326,6 +1419,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return ReduceFunctionPrototypeHasInstance(node);
case Builtins::kNumberConstructor:
return ReduceNumberConstructor(node);
+ case Builtins::kObjectConstructor:
+ return ReduceObjectConstructor(node);
case Builtins::kObjectGetPrototypeOf:
return ReduceObjectGetPrototypeOf(node);
case Builtins::kObjectPrototypeGetProto:
@@ -1562,6 +1657,27 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
NodeProperties::ChangeOp(node, javascript()->CreateArray(arity, site));
return Changed(node);
}
+
+ // Check for the ObjectConstructor.
+ if (*function == function->native_context()->object_function()) {
+ // If no value is passed, we can immediately lower to a simple
+ // JSCreate and don't need to do any massaging of the {node}.
+ if (arity == 0) {
+ NodeProperties::ChangeOp(node, javascript()->Create());
+ return Changed(node);
+ }
+
+ // Otherwise we can only lower to JSCreate if we know that
+ // the value parameter is ignored, which is only the case if
+ // the {new_target} and {target} are definitely not identical.
+ HeapObjectMatcher mnew_target(new_target);
+ if (mnew_target.HasValue() && *mnew_target.Value() != *function) {
+ // Drop the value inputs.
+ for (int i = arity; i > 0; --i) node->RemoveInput(i);
+ NodeProperties::ChangeOp(node, javascript()->Create());
+ return Changed(node);
+ }
+ }
}
// TODO(bmeurer): Also support optimizing bound functions and proxies here.
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index d5c9907c83..3fce912fde 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -60,6 +60,7 @@ class JSCallReducer final : public AdvancedReducer {
Reduction ReduceFunctionPrototypeApply(Node* node);
Reduction ReduceFunctionPrototypeCall(Node* node);
Reduction ReduceFunctionPrototypeHasInstance(Node* node);
+ Reduction ReduceObjectConstructor(Node* node);
Reduction ReduceObjectGetPrototype(Node* node, Node* object);
Reduction ReduceObjectGetPrototypeOf(Node* node);
Reduction ReduceObjectPrototypeGetProto(Node* node);
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index e682490386..95d32bc3fd 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -10,8 +10,7 @@
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
-#include "src/contexts.h"
-#include "src/objects-inl.h"
+#include "src/contexts-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 76b9a79aa0..bd4f1069ab 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -214,6 +214,8 @@ Reduction JSCreateLowering::Reduce(Node* node) {
return ReduceJSCreateArguments(node);
case IrOpcode::kJSCreateArray:
return ReduceJSCreateArray(node);
+ case IrOpcode::kJSCreateClosure:
+ return ReduceJSCreateClosure(node);
case IrOpcode::kJSCreateIterResultObject:
return ReduceJSCreateIterResultObject(node);
case IrOpcode::kJSCreateKeyValueArray:
@@ -253,13 +255,14 @@ Reduction JSCreateLowering::ReduceJSCreate(Node* node) {
Node* const control = NodeProperties::GetControlInput(node);
// Extract constructor and original constructor function.
if (target_type->IsHeapConstant() && new_target_type->IsHeapConstant() &&
+ target_type->AsHeapConstant()->Value()->IsJSFunction() &&
new_target_type->AsHeapConstant()->Value()->IsJSFunction()) {
Handle<JSFunction> constructor =
Handle<JSFunction>::cast(target_type->AsHeapConstant()->Value());
+ if (!constructor->IsConstructor()) return NoChange();
Handle<JSFunction> original_constructor =
Handle<JSFunction>::cast(new_target_type->AsHeapConstant()->Value());
- DCHECK(constructor->IsConstructor());
- DCHECK(original_constructor->IsConstructor());
+ if (!original_constructor->IsConstructor()) return NoChange();
// Check if we can inline the allocation.
if (IsAllocationInlineable(constructor, original_constructor)) {
@@ -311,47 +314,38 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
case CreateArgumentsType::kMappedArguments: {
// TODO(mstarzinger): Duplicate parameters are not handled yet.
if (shared->has_duplicate_parameters()) return NoChange();
- // If there is no aliasing, the arguments object elements are not
- // special in any way, we can just return an unmapped backing store.
- if (shared->internal_formal_parameter_count() == 0) {
- Node* const callee = NodeProperties::GetValueInput(node, 0);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* const arguments_frame =
- graph()->NewNode(simplified()->ArgumentsFrame());
- Node* const arguments_length = graph()->NewNode(
- simplified()->ArgumentsLength(0, false), arguments_frame);
- // Allocate the elements backing store.
- Node* const elements = effect =
- graph()->NewNode(simplified()->NewUnmappedArgumentsElements(),
- arguments_frame, arguments_length, effect);
- // Load the arguments object map.
- Node* const arguments_map = jsgraph()->HeapConstant(
- handle(native_context()->sloppy_arguments_map(), isolate()));
- // Actually allocate and initialize the arguments object.
- AllocationBuilder a(jsgraph(), effect, control);
- Node* properties = jsgraph()->EmptyFixedArrayConstant();
- STATIC_ASSERT(JSSloppyArgumentsObject::kSize == 5 * kPointerSize);
- a.Allocate(JSSloppyArgumentsObject::kSize);
- a.Store(AccessBuilder::ForMap(), arguments_map);
- a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
- a.Store(AccessBuilder::ForJSObjectElements(), elements);
- a.Store(AccessBuilder::ForArgumentsLength(), arguments_length);
- a.Store(AccessBuilder::ForArgumentsCallee(), callee);
- RelaxControls(node);
- a.FinishAndChange(node);
- } else {
- Callable callable = Builtins::CallableFor(
- isolate(), Builtins::kFastNewSloppyArguments);
- Operator::Properties properties = node->op()->properties();
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNoFlags, properties);
- const Operator* new_op = common()->Call(desc);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- node->InsertInput(graph()->zone(), 0, stub_code);
- node->RemoveInput(3); // Remove the frame state.
- NodeProperties::ChangeOp(node, new_op);
- }
+ Node* const callee = NodeProperties::GetValueInput(node, 0);
+ Node* const context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* const arguments_frame =
+ graph()->NewNode(simplified()->ArgumentsFrame());
+ Node* const arguments_length = graph()->NewNode(
+ simplified()->ArgumentsLength(
+ shared->internal_formal_parameter_count(), false),
+ arguments_frame);
+ // Allocate the elements backing store.
+ bool has_aliased_arguments = false;
+ Node* const elements = effect = AllocateAliasedArguments(
+ effect, control, context, arguments_frame, arguments_length, shared,
+ &has_aliased_arguments);
+ // Load the arguments object map.
+ Node* const arguments_map = jsgraph()->HeapConstant(
+ handle(has_aliased_arguments
+ ? native_context()->fast_aliased_arguments_map()
+ : native_context()->sloppy_arguments_map(),
+ isolate()));
+ // Actually allocate and initialize the arguments object.
+ AllocationBuilder a(jsgraph(), effect, control);
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+ STATIC_ASSERT(JSSloppyArgumentsObject::kSize == 5 * kPointerSize);
+ a.Allocate(JSSloppyArgumentsObject::kSize);
+ a.Store(AccessBuilder::ForMap(), arguments_map);
+ a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForArgumentsLength(), arguments_length);
+ a.Store(AccessBuilder::ForArgumentsCallee(), callee);
+ RelaxControls(node);
+ a.FinishAndChange(node);
return Changed(node);
}
case CreateArgumentsType::kUnmappedArguments: {
@@ -364,7 +358,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
arguments_frame);
// Allocate the elements backing store.
Node* const elements = effect =
- graph()->NewNode(simplified()->NewUnmappedArgumentsElements(),
+ graph()->NewNode(simplified()->NewArgumentsElements(0),
arguments_frame, arguments_length, effect);
// Load the arguments object map.
Node* const arguments_map = jsgraph()->HeapConstant(
@@ -386,15 +380,15 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* const arguments_frame =
graph()->NewNode(simplified()->ArgumentsFrame());
- int formal_parameter_count = shared->internal_formal_parameter_count();
Node* const rest_length = graph()->NewNode(
- simplified()->ArgumentsLength(formal_parameter_count, true),
+ simplified()->ArgumentsLength(
+ shared->internal_formal_parameter_count(), true),
arguments_frame);
- // Allocate the elements backing store. Since
- // NewUnmappedArgumentsElements copies from the end of the arguments
- // adapter frame, this is a suffix of the actual arguments.
+ // Allocate the elements backing store. Since NewArgumentsElements
+ // copies from the end of the arguments adapter frame, this is a suffix
+ // of the actual arguments.
Node* const elements = effect =
- graph()->NewNode(simplified()->NewUnmappedArgumentsElements(),
+ graph()->NewNode(simplified()->NewArgumentsElements(0),
arguments_frame, rest_length, effect);
// Load the JSArray object map.
Node* const jsarray_map = jsgraph()->HeapConstant(handle(
@@ -586,29 +580,73 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
return NoChange();
}
+// Constructs an array with a variable {length} when no upper bound
+// is known for the capacity.
+Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
+ Handle<Map> initial_map,
+ PretenureFlag pretenure) {
+ DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Constructing an Array via new Array(N), where N is an unsigned
+ // integer, always creates a holey backing store.
+ if (!IsHoleyElementsKind(initial_map->elements_kind())) {
+ initial_map = Map::AsElementsKind(
+ initial_map, GetHoleyElementsKind(initial_map->elements_kind()));
+ }
+
+ // Check that the {length} is an unsigned integer in the valid range.
+ // This has to be kept in sync with src/runtime/runtime-array.cc,
+ // where this limit is protected.
+ length = effect = graph()->NewNode(
+ simplified()->CheckBounds(), length,
+ jsgraph()->Constant(JSArray::kInitialMaxFastElementArray), effect,
+ control);
+
+ // Construct elements and properties for the resulting JSArray.
+ Node* elements = effect =
+ graph()->NewNode(IsDoubleElementsKind(initial_map->elements_kind())
+ ? simplified()->NewDoubleElements(pretenure)
+ : simplified()->NewSmiOrObjectElements(pretenure),
+ length, effect, control);
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+
+ // Perform the allocation of the actual JSArray object.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.Allocate(initial_map->instance_size(), pretenure);
+ a.Store(AccessBuilder::ForMap(), initial_map);
+ a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForJSArrayLength(initial_map->elements_kind()),
+ length);
+ for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
+ a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
+ jsgraph()->UndefinedConstant());
+ }
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+}
+
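
This first overload encodes two constraints worth spelling out: the result is always holey, because new Array(N) leaves all N slots unassigned, and N is bounds-checked against JSArray::kInitialMaxFastElementArray so oversized allocations keep taking the generic path. A rough C++ analogue of the guard (the limit value is illustrative, not V8's constant):

    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    constexpr std::uint32_t kMaxFastArrayLength = 64 * 1024;  // illustrative

    // new Array(n): "deoptimize" (here: throw) past the fast-array limit,
    // otherwise allocate a holey backing store of exactly n slots.
    std::vector<double> NewHoleyArray(std::uint32_t n) {
      if (n > kMaxFastArrayLength) {
        throw std::out_of_range("CheckBounds failed: would deoptimize");
      }
      return std::vector<double>(n);  // each slot starts out as "the hole"
    }
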
+// Constructs an array with a variable {length} when an actual
+// upper bound is known for the {capacity}.
Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
int capacity,
- Handle<AllocationSite> site) {
+ Handle<Map> initial_map,
+ PretenureFlag pretenure) {
DCHECK(node->opcode() == IrOpcode::kJSCreateArray ||
node->opcode() == IrOpcode::kJSCreateEmptyLiteralArray);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- // Extract transition and tenuring feedback from the {site} and add
- // appropriate code dependencies on the {site} if deoptimization is
- // enabled.
- PretenureFlag pretenure = site->GetPretenureMode();
- ElementsKind elements_kind = site->GetElementsKind();
- DCHECK(IsFastElementsKind(elements_kind));
- if (NodeProperties::GetType(length)->Max() > 0) {
+ // Determine the appropriate elements kind.
+ ElementsKind elements_kind = initial_map->elements_kind();
+ if (NodeProperties::GetType(length)->Max() > 0.0) {
elements_kind = GetHoleyElementsKind(elements_kind);
+ initial_map = Map::AsElementsKind(initial_map, elements_kind);
}
- dependencies()->AssumeTenuringDecision(site);
- dependencies()->AssumeTransitionStable(site);
-
- // Retrieve the initial map for the array.
- Node* js_array_map = jsgraph()->HeapConstant(
- handle(native_context()->GetInitialJSArrayMap(elements_kind), isolate()));
+ DCHECK(IsFastElementsKind(elements_kind));
// Setup elements and properties.
Node* elements;
@@ -622,11 +660,15 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
// Perform the allocation of the actual JSArray object.
AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(JSArray::kSize, pretenure);
- a.Store(AccessBuilder::ForMap(), js_array_map);
+ a.Allocate(initial_map->instance_size(), pretenure);
+ a.Store(AccessBuilder::ForMap(), initial_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
a.Store(AccessBuilder::ForJSObjectElements(), elements);
a.Store(AccessBuilder::ForJSArrayLength(elements_kind), length);
+ for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
+ a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
+ jsgraph()->UndefinedConstant());
+ }
RelaxControls(node);
a.FinishAndChange(node);
return Changed(node);
@@ -634,19 +676,15 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
Reduction JSCreateLowering::ReduceNewArray(Node* node,
std::vector<Node*> values,
- Handle<AllocationSite> site) {
+ Handle<Map> initial_map,
+ PretenureFlag pretenure) {
DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- // Extract transition and tenuring feedback from the {site} and add
- // appropriate code dependencies on the {site} if deoptimization is
- // enabled.
- PretenureFlag pretenure = site->GetPretenureMode();
- ElementsKind elements_kind = site->GetElementsKind();
+ // Determine the appropriate elements kind.
+ ElementsKind elements_kind = initial_map->elements_kind();
DCHECK(IsFastElementsKind(elements_kind));
- dependencies()->AssumeTenuringDecision(site);
- dependencies()->AssumeTransitionStable(site);
// Check {values} based on the {elements_kind}. These checks are guarded
// by the {elements_kind} feedback on the {site}, so it's safe to just
@@ -669,10 +707,6 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node,
}
}
- // Retrieve the initial map for the array.
- Node* js_array_map = jsgraph()->HeapConstant(
- handle(native_context()->GetInitialJSArrayMap(elements_kind), isolate()));
-
// Setup elements, properties and length.
Node* elements = effect =
AllocateElements(effect, control, elements_kind, values, pretenure);
@@ -681,11 +715,15 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node,
// Perform the allocation of the actual JSArray object.
AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(JSArray::kSize, pretenure);
- a.Store(AccessBuilder::ForMap(), js_array_map);
+ a.Allocate(initial_map->instance_size(), pretenure);
+ a.Store(AccessBuilder::ForMap(), initial_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
a.Store(AccessBuilder::ForJSObjectElements(), elements);
a.Store(AccessBuilder::ForJSArrayLength(elements_kind), length);
+ for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
+ a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
+ jsgraph()->UndefinedConstant());
+ }
RelaxControls(node);
a.FinishAndChange(node);
return Changed(node);
@@ -756,52 +794,186 @@ Reduction JSCreateLowering::ReduceNewArrayToStubCall(
Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
+ int const arity = static_cast<int>(p.arity());
+ Handle<AllocationSite> const site = p.site();
+ PretenureFlag pretenure = NOT_TENURED;
+ Handle<JSFunction> constructor(native_context()->array_function(), isolate());
Node* target = NodeProperties::GetValueInput(node, 0);
Node* new_target = NodeProperties::GetValueInput(node, 1);
+ Type* new_target_type = (target == new_target)
+ ? Type::HeapConstant(constructor, zone())
+ : NodeProperties::GetType(new_target);
- // TODO(bmeurer): Optimize the subclassing case.
- if (target != new_target) return NoChange();
+ // Extract original constructor function.
+ if (new_target_type->IsHeapConstant() &&
+ new_target_type->AsHeapConstant()->Value()->IsJSFunction()) {
+ Handle<JSFunction> original_constructor =
+ Handle<JSFunction>::cast(new_target_type->AsHeapConstant()->Value());
+ DCHECK(constructor->IsConstructor());
+ DCHECK(original_constructor->IsConstructor());
- // Check if we have a feedback {site} on the {node}.
- Handle<AllocationSite> site = p.site();
- if (!site.is_null()) {
- // Attempt to inline calls to the Array constructor for the relevant cases
- // where either no arguments are provided, or exactly one unsigned number
- // argument is given.
- if (site->CanInlineCall()) {
- if (p.arity() == 0) {
+ // Check if we can inline the allocation.
+ if (IsAllocationInlineable(constructor, original_constructor)) {
+ // Force completion of inobject slack tracking before
+ // generating code to finalize the instance size.
+ original_constructor->CompleteInobjectSlackTrackingIfActive();
+ Handle<Map> initial_map(original_constructor->initial_map(), isolate());
+
+ // Add a dependency on the {initial_map} to make sure that this code is
+ // deoptimized whenever the {initial_map} changes.
+ dependencies()->AssumeInitialMapCantChange(initial_map);
+
+ // Tells whether we are protected by either the {site} or a
+ // protector cell to do certain speculative optimizations.
+ bool can_inline_call = false;
+
+ // Check if we have a feedback {site} on the {node}.
+ if (!site.is_null()) {
+ ElementsKind elements_kind = site->GetElementsKind();
+ if (initial_map->elements_kind() != elements_kind) {
+ initial_map = Map::AsElementsKind(initial_map, elements_kind);
+ }
+ can_inline_call = site->CanInlineCall();
+ pretenure = site->GetPretenureMode();
+
+ dependencies()->AssumeTransitionStable(site);
+ dependencies()->AssumeTenuringDecision(site);
+ } else {
+ can_inline_call = isolate()->IsArrayConstructorIntact();
+ }
+
+ if (arity == 0) {
Node* length = jsgraph()->ZeroConstant();
int capacity = JSArray::kPreallocatedArrayElements;
- return ReduceNewArray(node, length, capacity, site);
- } else if (p.arity() == 1) {
+ return ReduceNewArray(node, length, capacity, initial_map, pretenure);
+ } else if (arity == 1) {
Node* length = NodeProperties::GetValueInput(node, 2);
Type* length_type = NodeProperties::GetType(length);
if (!length_type->Maybe(Type::Number())) {
// Handle the single argument case, where we know that the value
// cannot be a valid Array length.
- return ReduceNewArray(node, {length}, site);
+ ElementsKind elements_kind = initial_map->elements_kind();
+ elements_kind = GetMoreGeneralElementsKind(
+ elements_kind, IsHoleyElementsKind(elements_kind)
+ ? HOLEY_ELEMENTS
+ : PACKED_ELEMENTS);
+ initial_map = Map::AsElementsKind(initial_map, elements_kind);
+ return ReduceNewArray(node, std::vector<Node*>{length}, initial_map,
+ pretenure);
}
if (length_type->Is(Type::SignedSmall()) && length_type->Min() >= 0 &&
length_type->Max() <= kElementLoopUnrollLimit &&
length_type->Min() == length_type->Max()) {
int capacity = static_cast<int>(length_type->Max());
- return ReduceNewArray(node, length, capacity, site);
+ return ReduceNewArray(node, length, capacity, initial_map, pretenure);
}
- } else if (p.arity() <= JSArray::kInitialMaxFastElementArray) {
+ if (length_type->Maybe(Type::UnsignedSmall()) && can_inline_call) {
+ return ReduceNewArray(node, length, initial_map, pretenure);
+ }
+ } else if (arity <= JSArray::kInitialMaxFastElementArray) {
+ // Gather the values to store into the newly created array.
+ bool values_all_smis = true, values_all_numbers = true,
+ values_any_nonnumber = false;
std::vector<Node*> values;
values.reserve(p.arity());
- for (size_t i = 0; i < p.arity(); ++i) {
- values.push_back(
- NodeProperties::GetValueInput(node, static_cast<int>(2 + i)));
+ for (int i = 0; i < arity; ++i) {
+ Node* value = NodeProperties::GetValueInput(node, 2 + i);
+ Type* value_type = NodeProperties::GetType(value);
+ if (!value_type->Is(Type::SignedSmall())) {
+ values_all_smis = false;
+ }
+ if (!value_type->Is(Type::Number())) {
+ values_all_numbers = false;
+ }
+ if (!value_type->Maybe(Type::Number())) {
+ values_any_nonnumber = true;
+ }
+ values.push_back(value);
}
- return ReduceNewArray(node, values, site);
+
+ // Try to figure out the ideal elements kind statically.
+ ElementsKind elements_kind = initial_map->elements_kind();
+ if (values_all_smis) {
+ // Smis can be stored with any elements kind.
+ } else if (values_all_numbers) {
+ elements_kind = GetMoreGeneralElementsKind(
+ elements_kind, IsHoleyElementsKind(elements_kind)
+ ? HOLEY_DOUBLE_ELEMENTS
+ : PACKED_DOUBLE_ELEMENTS);
+ } else if (values_any_nonnumber) {
+ elements_kind = GetMoreGeneralElementsKind(
+ elements_kind, IsHoleyElementsKind(elements_kind)
+ ? HOLEY_ELEMENTS
+ : PACKED_ELEMENTS);
+ } else if (!can_inline_call) {
+ // We have some crazy combination of types for the {values} where
+ // there's no clear decision on the elements kind statically. And
+ // we don't have a protection against deoptimization loops for the
+ // checks that are introduced in the call to ReduceNewArray, so
+ // we cannot inline this invocation of the Array constructor here.
+ return NoChange();
+ }
+ initial_map = Map::AsElementsKind(initial_map, elements_kind);
+
+ return ReduceNewArray(node, values, initial_map, pretenure);
}
}
}
+ // TODO(bmeurer): Optimize the subclassing case.
+ if (target != new_target) return NoChange();
+
return ReduceNewArrayToStubCall(node, site);
}
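
For arity > 1, the classification above derives the widest elements kind implied by the statically known value types, and bails out on an inconclusive mix unless the array protector still permits speculation. The decision as a stand-alone sketch over precomputed type facts (all names illustrative):

    enum class KindChoice { kKeep, kDouble, kTagged, kBailOut };

    KindChoice ChooseElementsKind(bool all_smis, bool all_numbers,
                                  bool any_nonnumber, bool can_inline_call) {
      if (all_smis) return KindChoice::kKeep;        // Smis fit any kind
      if (all_numbers) return KindChoice::kDouble;   // *_DOUBLE_ELEMENTS
      if (any_nonnumber) return KindChoice::kTagged; // *_ELEMENTS
      // Mixed/unknown types: proceed only under the protector-cell guard,
      // since the checks in ReduceNewArray could otherwise deopt-loop.
      return can_inline_call ? KindChoice::kKeep : KindChoice::kBailOut;
    }
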
+Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateClosure, node->opcode());
+ CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
+ Handle<SharedFunctionInfo> shared = p.shared_info();
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+
+ // Use inline allocation of closures only for instantiation sites that have
+ // seen more than one instantiation; this simplifies the generated code and
+ // also serves as a heuristic for which allocation sites benefit from it.
+ FeedbackSlot slot(FeedbackVector::ToSlot(p.feedback().index()));
+ Handle<Cell> vector_cell(Cell::cast(p.feedback().vector()->Get(slot)));
+ if (vector_cell->map() == isolate()->heap()->many_closures_cell_map()) {
+ Handle<Map> function_map(
+ Map::cast(native_context()->get(shared->function_map_index())));
+ Node* lazy_compile_builtin = jsgraph()->HeapConstant(
+ handle(isolate()->builtins()->builtin(Builtins::kCompileLazy)));
+ DCHECK(!function_map->IsInobjectSlackTrackingInProgress());
+ DCHECK(!function_map->is_dictionary_map());
+
+ // Emit code to allocate the JSFunction instance.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.Allocate(function_map->instance_size());
+ a.Store(AccessBuilder::ForMap(), function_map);
+ a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSObjectElements(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSFunctionPrototypeOrInitialMap(),
+ jsgraph()->TheHoleConstant());
+ a.Store(AccessBuilder::ForJSFunctionSharedFunctionInfo(), shared);
+ a.Store(AccessBuilder::ForJSFunctionContext(), context);
+ a.Store(AccessBuilder::ForJSFunctionFeedbackVector(), vector_cell);
+ a.Store(AccessBuilder::ForJSFunctionCode(), lazy_compile_builtin);
+ STATIC_ASSERT(JSFunction::kSize == 8 * kPointerSize);
+ for (int i = 0; i < function_map->GetInObjectProperties(); i++) {
+ a.Store(AccessBuilder::ForJSObjectInObjectProperty(function_map, i),
+ jsgraph()->UndefinedConstant());
+ }
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+ }
+
+ return NoChange();
+}
+
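The closure allocation above writes exactly the eight fixed JSFunction header slots, which the STATIC_ASSERT pins at eight words; any in-object properties beyond them are initialized to undefined by the loop. A compile-time sketch of that layout, using an illustrative struct rather than V8's real object definition:

    // Mirrors the eight pointer-size stores above, in order; an exposition
    // aid only, not V8's actual JSFunction layout.
    struct ToyJSFunctionLayout {
      void* map;
      void* properties_or_hash;
      void* elements;
      void* prototype_or_initial_map;
      void* shared_function_info;
      void* context;
      void* feedback_vector;
      void* code;
    };
    static_assert(sizeof(ToyJSFunctionLayout) == 8 * sizeof(void*),
                  "matches STATIC_ASSERT(JSFunction::kSize == 8 * kPointerSize)");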
Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateIterResultObject, node->opcode());
Node* value = NodeProperties::GetValueInput(node, 0);
@@ -863,41 +1035,41 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralArrayOrObject(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- Handle<FeedbackVector> feedback_vector;
- if (GetSpecializationFeedbackVector(node).ToHandle(&feedback_vector)) {
- FeedbackSlot slot(FeedbackVector::ToSlot(p.index()));
- Handle<Object> literal(feedback_vector->Get(slot), isolate());
- if (literal->IsAllocationSite()) {
- Handle<AllocationSite> site = Handle<AllocationSite>::cast(literal);
- Handle<JSObject> boilerplate(site->boilerplate(), isolate());
- int max_properties = kMaxFastLiteralProperties;
- if (IsFastLiteral(boilerplate, kMaxFastLiteralDepth, &max_properties)) {
- AllocationSiteUsageContext site_context(isolate(), site, false);
- site_context.EnterNewScope();
- Node* value = effect =
- AllocateFastLiteral(effect, control, boilerplate, &site_context);
- site_context.ExitScope(site, boilerplate);
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
+ Handle<Object> feedback(p.feedback().vector()->Get(p.feedback().slot()),
+ isolate());
+ if (feedback->IsAllocationSite()) {
+ Handle<AllocationSite> site = Handle<AllocationSite>::cast(feedback);
+ Handle<JSObject> boilerplate(site->boilerplate(), isolate());
+ int max_properties = kMaxFastLiteralProperties;
+ if (IsFastLiteral(boilerplate, kMaxFastLiteralDepth, &max_properties)) {
+ AllocationSiteUsageContext site_context(isolate(), site, false);
+ site_context.EnterNewScope();
+ Node* value = effect =
+ AllocateFastLiteral(effect, control, boilerplate, &site_context);
+ site_context.ExitScope(site, boilerplate);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
}
}
return NoChange();
}
Reduction JSCreateLowering::ReduceJSCreateEmptyLiteralArray(Node* node) {
- DCHECK_EQ(node->opcode(), IrOpcode::kJSCreateEmptyLiteralArray);
- int literal_index = OpParameter<int>(node);
- Handle<FeedbackVector> feedback_vector;
- if (GetSpecializationFeedbackVector(node).ToHandle(&feedback_vector)) {
- FeedbackSlot slot(FeedbackVector::ToSlot(literal_index));
- Handle<Object> raw_site(feedback_vector->Get(slot), isolate());
- if (raw_site->IsAllocationSite()) {
- Handle<AllocationSite> site = Handle<AllocationSite>::cast(raw_site);
- DCHECK(!site->PointsToLiteral());
- Node* length = jsgraph()->ZeroConstant();
- return ReduceNewArray(node, length, 0, site);
- }
+ DCHECK_EQ(IrOpcode::kJSCreateEmptyLiteralArray, node->opcode());
+ FeedbackParameter const& p = FeedbackParameterOf(node->op());
+ Handle<Object> feedback(p.feedback().vector()->Get(p.feedback().slot()),
+ isolate());
+ if (feedback->IsAllocationSite()) {
+ Handle<AllocationSite> site = Handle<AllocationSite>::cast(feedback);
+ DCHECK(!site->PointsToLiteral());
+ Handle<Map> const initial_map(
+ native_context()->GetInitialJSArrayMap(site->GetElementsKind()),
+ isolate());
+ PretenureFlag const pretenure = site->GetPretenureMode();
+ dependencies()->AssumeTransitionStable(site);
+ dependencies()->AssumeTenuringDecision(site);
+ Node* length = jsgraph()->ZeroConstant();
+ return ReduceNewArray(node, length, 0, initial_map, pretenure);
}
return NoChange();
}
@@ -934,21 +1106,18 @@ Reduction JSCreateLowering::ReduceJSCreateEmptyLiteralObject(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateLiteralRegExp(Node* node) {
- DCHECK(node->opcode() == IrOpcode::kJSCreateLiteralRegExp);
+ DCHECK_EQ(IrOpcode::kJSCreateLiteralRegExp, node->opcode());
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- Handle<FeedbackVector> feedback_vector;
- if (GetSpecializationFeedbackVector(node).ToHandle(&feedback_vector)) {
- FeedbackSlot slot(FeedbackVector::ToSlot(p.index()));
- Handle<Object> maybe_boilerplate(feedback_vector->Get(slot), isolate());
- if (maybe_boilerplate->IsJSRegExp()) {
- Node* value = effect = AllocateLiteralRegExp(
- effect, control, Handle<JSRegExp>::cast(maybe_boilerplate));
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
+ Handle<Object> feedback(p.feedback().vector()->Get(p.feedback().slot()),
+ isolate());
+ if (feedback->IsJSRegExp()) {
+ Handle<JSRegExp> boilerplate = Handle<JSRegExp>::cast(feedback);
+ Node* value = effect = AllocateLiteralRegExp(effect, control, boilerplate);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
}
return NoChange();
}
@@ -1201,6 +1370,53 @@ Node* JSCreateLowering::AllocateAliasedArguments(
return a.Finish();
}
+// Helper that allocates a FixedArray serving as a parameter map for values
+// unknown at compile time; the true {arguments_length} and {arguments_frame}
+// values can only be determined dynamically at run time and are provided.
+// Serves as backing store for JSCreateArguments nodes.
+Node* JSCreateLowering::AllocateAliasedArguments(
+ Node* effect, Node* control, Node* context, Node* arguments_frame,
+ Node* arguments_length, Handle<SharedFunctionInfo> shared,
+ bool* has_aliased_arguments) {
+ // If there is no aliasing, the arguments object elements are not
+ // special in any way, so we can just return an unmapped backing store.
+ int parameter_count = shared->internal_formal_parameter_count();
+ if (parameter_count == 0) {
+ return graph()->NewNode(simplified()->NewArgumentsElements(0),
+ arguments_frame, arguments_length, effect);
+ }
+
+ // From here on we are going to allocate a mapped (aka. aliased) elements
+ // backing store. We do not statically know how many arguments exist, but
+ // dynamically selecting the hole for some of the "mapped" elements allows
+ // using a static shape for the parameter map.
+ int mapped_count = parameter_count;
+ *has_aliased_arguments = true;
+
+ // The unmapped argument values are stored yet another indirection away and
+ // then linked into the parameter map below, whereas mapped argument values
+ // (i.e. the first {mapped_count} elements) are replaced with a hole instead.
+ Node* arguments =
+ graph()->NewNode(simplified()->NewArgumentsElements(mapped_count),
+ arguments_frame, arguments_length, effect);
+
+ // Actually allocate the backing store.
+ AllocationBuilder a(jsgraph(), arguments, control);
+ a.AllocateArray(mapped_count + 2, factory()->sloppy_arguments_elements_map());
+ a.Store(AccessBuilder::ForFixedArraySlot(0), context);
+ a.Store(AccessBuilder::ForFixedArraySlot(1), arguments);
+ for (int i = 0; i < mapped_count; ++i) {
+ int idx = Context::MIN_CONTEXT_SLOTS + parameter_count - 1 - i;
+ Node* value = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged),
+ graph()->NewNode(simplified()->NumberLessThan(), jsgraph()->Constant(i),
+ arguments_length),
+ jsgraph()->Constant(idx), jsgraph()->TheHoleConstant());
+ a.Store(AccessBuilder::ForFixedArraySlot(i + 2), value);
+ }
+ return a.Finish();
+}
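To make the slot arithmetic concrete, here is a runnable sketch of the parameter map for a function declaring three parameters but called with only two arguments. kMinContextSlots is a stand-in value, not V8's actual Context::MIN_CONTEXT_SLOTS:

    #include <cstdio>

    int main() {
      const int kMinContextSlots = 4;  // stand-in, for illustration only
      const int parameter_count = 3;   // mapped_count == parameter_count above
      const int arguments_length = 2;  // known only at run time in real code
      // Slot 0 holds the context, slot 1 the unmapped arguments store.
      for (int i = 0; i < parameter_count; ++i) {
        int idx = kMinContextSlots + parameter_count - 1 - i;
        if (i < arguments_length) {
          std::printf("elements[%d] -> context slot %d (aliased)\n", i + 2, idx);
        } else {
          std::printf("elements[%d] -> the_hole (argument not passed)\n", i + 2);
        }
      }
    }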
+
Node* JSCreateLowering::AllocateElements(Node* effect, Node* control,
ElementsKind elements_kind,
int capacity,
@@ -1467,30 +1683,6 @@ Node* JSCreateLowering::AllocateLiteralRegExp(Node* effect, Node* control,
return builder.Finish();
}
-MaybeHandle<FeedbackVector> JSCreateLowering::GetSpecializationFeedbackVector(
- Node* node) {
- Node* const closure = NodeProperties::GetValueInput(node, 0);
- switch (closure->opcode()) {
- case IrOpcode::kHeapConstant: {
- Handle<HeapObject> object = OpParameter<Handle<HeapObject>>(closure);
- return handle(Handle<JSFunction>::cast(object)->feedback_vector());
- }
- case IrOpcode::kParameter: {
- int const index = ParameterIndexOf(closure->op());
- // The closure is always the last parameter to a JavaScript function, and
- // {Parameter} indices start at -1, so value outputs of {Start} look like
- // this: closure, receiver, param0, ..., paramN, context.
- if (index == -1) {
- return feedback_vector_;
- }
- break;
- }
- default:
- break;
- }
- return MaybeHandle<FeedbackVector>();
-}
-
Factory* JSCreateLowering::factory() const { return isolate()->factory(); }
Graph* JSCreateLowering::graph() const { return jsgraph()->graph(); }
diff --git a/deps/v8/src/compiler/js-create-lowering.h b/deps/v8/src/compiler/js-create-lowering.h
index 02be94fcfa..00c2ba573c 100644
--- a/deps/v8/src/compiler/js-create-lowering.h
+++ b/deps/v8/src/compiler/js-create-lowering.h
@@ -34,12 +34,10 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
public:
JSCreateLowering(Editor* editor, CompilationDependencies* dependencies,
JSGraph* jsgraph,
- MaybeHandle<FeedbackVector> feedback_vector,
Handle<Context> native_context, Zone* zone)
: AdvancedReducer(editor),
dependencies_(dependencies),
jsgraph_(jsgraph),
- feedback_vector_(feedback_vector),
native_context_(native_context),
zone_(zone) {}
~JSCreateLowering() final {}
@@ -52,6 +50,7 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
Reduction ReduceJSCreate(Node* node);
Reduction ReduceJSCreateArguments(Node* node);
Reduction ReduceJSCreateArray(Node* node);
+ Reduction ReduceJSCreateClosure(Node* node);
Reduction ReduceJSCreateIterResultObject(Node* node);
Reduction ReduceJSCreateKeyValueArray(Node* node);
Reduction ReduceJSCreateLiteralArrayOrObject(Node* node);
@@ -63,10 +62,12 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
Reduction ReduceJSCreateCatchContext(Node* node);
Reduction ReduceJSCreateBlockContext(Node* node);
Reduction ReduceJSCreateGeneratorObject(Node* node);
+ Reduction ReduceNewArray(Node* node, Node* length, Handle<Map> initial_map,
+ PretenureFlag pretenure);
Reduction ReduceNewArray(Node* node, Node* length, int capacity,
- Handle<AllocationSite> site);
+ Handle<Map> initial_map, PretenureFlag pretenure);
Reduction ReduceNewArray(Node* node, std::vector<Node*> values,
- Handle<AllocationSite> site);
+ Handle<Map> initial_map, PretenureFlag pretenure);
Node* AllocateArguments(Node* effect, Node* control, Node* frame_state);
Node* AllocateRestArguments(Node* effect, Node* control, Node* frame_state,
@@ -74,6 +75,10 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
Node* AllocateAliasedArguments(Node* effect, Node* control, Node* frame_state,
Node* context, Handle<SharedFunctionInfo>,
bool* has_aliased_arguments);
+ Node* AllocateAliasedArguments(Node* effect, Node* control, Node* context,
+ Node* arguments_frame, Node* arguments_length,
+ Handle<SharedFunctionInfo>,
+ bool* has_aliased_arguments);
Node* AllocateElements(Node* effect, Node* control,
ElementsKind elements_kind, int capacity,
PretenureFlag pretenure);
@@ -95,9 +100,6 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
Reduction ReduceNewArrayToStubCall(Node* node, Handle<AllocationSite> site);
- // Infers the FeedbackVector to use for a given {node}.
- MaybeHandle<FeedbackVector> GetSpecializationFeedbackVector(Node* node);
-
Factory* factory() const;
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
@@ -110,7 +112,6 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
CompilationDependencies* const dependencies_;
JSGraph* const jsgraph_;
- MaybeHandle<FeedbackVector> const feedback_vector_;
Handle<Context> const native_context_;
Zone* const zone_;
};
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 8ed4f3fb9c..4d7b7972a9 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -14,7 +14,6 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
-#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -79,6 +78,7 @@ REPLACE_STUB_CALL(ToNumber)
REPLACE_STUB_CALL(ToName)
REPLACE_STUB_CALL(ToObject)
REPLACE_STUB_CALL(ToString)
+REPLACE_STUB_CALL(ForInEnumerate)
#undef REPLACE_STUB_CALL
void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
@@ -219,11 +219,12 @@ void JSGenericLowering::LowerJSStoreProperty(Node* node) {
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
- Callable callable = CodeFactory::KeyedStoreIC(isolate(), p.language_mode());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kKeyedStoreICTrampoline);
ReplaceWithStubCall(node, callable, flags);
} else {
Callable callable =
- CodeFactory::KeyedStoreICInOptimizedCode(isolate(), p.language_mode());
+ Builtins::CallableFor(isolate(), Builtins::kKeyedStoreIC);
Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
node->InsertInput(zone(), 4, vector);
ReplaceWithStubCall(node, callable, flags);
@@ -238,11 +239,11 @@ void JSGenericLowering::LowerJSStoreNamed(Node* node) {
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
- Callable callable = CodeFactory::StoreIC(isolate(), p.language_mode());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kStoreICTrampoline);
ReplaceWithStubCall(node, callable, flags);
} else {
- Callable callable =
- CodeFactory::StoreICInOptimizedCode(isolate(), p.language_mode());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kStoreIC);
Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
node->InsertInput(zone(), 4, vector);
ReplaceWithStubCall(node, callable, flags);
@@ -460,15 +461,16 @@ void JSGenericLowering::LowerJSCreateKeyValueArray(Node* node) {
void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
+ node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector()));
+ node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index()));
node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
- // Use the FastCloneShallowArray builtin only for shallow boilerplates without
- // properties up to the number of elements that the stubs can handle.
+ // Use the CreateShallowArrayLiteral builtin only for shallow boilerplates
+ // without properties up to the number of elements that the stubs can handle.
if ((p.flags() & AggregateLiteral::kIsShallow) != 0 &&
p.length() < ConstructorBuiltins::kMaximumClonedShallowArrayElements) {
- Callable callable = CodeFactory::FastCloneShallowArray(
- isolate(), DONT_TRACK_ALLOCATION_SITE);
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kCreateShallowArrayLiteral);
ReplaceWithStubCall(node, callable, flags);
} else {
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
@@ -478,8 +480,10 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
void JSGenericLowering::LowerJSCreateEmptyLiteralArray(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- int literal_index = OpParameter<int>(node->op());
- node->InsertInput(zone(), 1, jsgraph()->SmiConstant(literal_index));
+ FeedbackParameter const& p = FeedbackParameterOf(node->op());
+ node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector()));
+ node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index()));
+ node->RemoveInput(4); // control
Callable callable =
Builtins::CallableFor(isolate(), Builtins::kCreateEmptyArrayLiteral);
ReplaceWithStubCall(node, callable, flags);
@@ -488,17 +492,18 @@ void JSGenericLowering::LowerJSCreateEmptyLiteralArray(Node* node) {
void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
+ node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector()));
+ node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index()));
node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
- // Use the FastCloneShallowObject builtin only for shallow boilerplates
+ // Use the CreateShallowObjectLiteral builtin only for shallow boilerplates
// without elements up to the number of properties that the stubs can handle.
if ((p.flags() & AggregateLiteral::kIsShallow) != 0 &&
p.length() <=
ConstructorBuiltins::kMaximumClonedShallowObjectProperties) {
Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kFastCloneShallowObject);
+ Builtins::CallableFor(isolate(), Builtins::kCreateShallowObjectLiteral);
ReplaceWithStubCall(node, callable, flags);
} else {
ReplaceWithRuntimeCall(node, Runtime::kCreateObjectLiteral);
@@ -506,23 +511,18 @@ void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
}
void JSGenericLowering::LowerJSCreateEmptyLiteralObject(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kCreateEmptyObjectLiteral);
- ReplaceWithStubCall(node, callable, flags);
+ UNREACHABLE(); // Eliminated in typed lowering.
}
void JSGenericLowering::LowerJSCreateLiteralRegExp(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kFastCloneRegExp);
- Node* literal_index = jsgraph()->SmiConstant(p.index());
- Node* literal_flags = jsgraph()->SmiConstant(p.flags());
- Node* pattern = jsgraph()->HeapConstant(p.constant());
- node->InsertInput(graph()->zone(), 1, literal_index);
- node->InsertInput(graph()->zone(), 2, pattern);
- node->InsertInput(graph()->zone(), 3, literal_flags);
+ Builtins::CallableFor(isolate(), Builtins::kCreateRegExpLiteral);
+ node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector()));
+ node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index()));
+ node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
+ node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
ReplaceWithStubCall(node, callable, flags);
}
@@ -705,15 +705,11 @@ void JSGenericLowering::LowerJSConvertReceiver(Node* node) {
}
void JSGenericLowering::LowerJSForInNext(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = Builtins::CallableFor(isolate(), Builtins::kForInNext);
- ReplaceWithStubCall(node, callable, flags);
+ UNREACHABLE(); // Eliminated in typed lowering.
}
void JSGenericLowering::LowerJSForInPrepare(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = Builtins::CallableFor(isolate(), Builtins::kForInPrepare);
- ReplaceWithStubCall(node, callable, flags, node->op()->properties(), 3);
+ UNREACHABLE(); // Eliminated in typed lowering.
}
void JSGenericLowering::LowerJSLoadMessage(Node* node) {
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index 04e67621a9..1060b81b97 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -47,7 +47,7 @@ Node* JSGraph::CEntryStubConstant(int result_size, SaveFPRegsMode save_doubles,
} else if (result_size == 2) {
key = kCEntryStub2Constant;
} else {
- DCHECK(result_size == 3);
+ DCHECK_EQ(3, result_size);
key = kCEntryStub3Constant;
}
return CACHED(
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index 7ac94c13a6..b784c6ef97 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -103,8 +103,7 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
return NoChange();
}
- // Functions marked with %SetForceInlineFlag are immediately inlined.
- bool can_inline = false, force_inline = true, small_inline = true;
+ bool can_inline = false, small_inline = true;
candidate.total_size = 0;
Node* frame_state = NodeProperties::GetFrameStateInput(node);
FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
@@ -114,9 +113,6 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
candidate.functions[i].is_null()
? candidate.shared_info
: handle(candidate.functions[i]->shared());
- if (!shared->force_inline()) {
- force_inline = false;
- }
candidate.can_inline_function[i] = CanInlineFunction(shared);
// Do not allow direct recursion i.e. f() -> f(). We still allow indirect
// recursion like f() -> g() -> f(). The indirect recursion is helpful in
@@ -141,7 +137,6 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
small_inline = false;
}
}
- if (force_inline) return InlineCandidate(candidate, true);
if (!can_inline) return NoChange();
// Stop inlining once the maximum allowed level is reached.
@@ -193,8 +188,7 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
// Forcibly inline small functions here. In the case of polymorphic inlining
// small_inline is set only when all functions are small.
- if (small_inline &&
- cumulative_count_ <= FLAG_max_inlined_bytecode_size_absolute) {
+ if (small_inline) {
TRACE("Inlining small function(s) at call site #%d:%s\n", node->id(),
node->op()->mnemonic());
return InlineCandidate(candidate, true);
@@ -236,37 +230,357 @@ void JSInliningHeuristic::Finalize() {
}
}
-Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
- bool force_inline) {
+namespace {
+
+struct NodeAndIndex {
+ Node* node;
+ int index;
+};
+
+bool CollectStateValuesOwnedUses(Node* node, Node* state_values,
+ NodeAndIndex* uses_buffer, size_t* use_count,
+ size_t max_uses) {
+ // Only accumulate states that are not shared with other users.
+ if (state_values->UseCount() > 1) return true;
+ for (int i = 0; i < state_values->InputCount(); i++) {
+ Node* input = state_values->InputAt(i);
+ if (input->opcode() == IrOpcode::kStateValues) {
+ if (!CollectStateValuesOwnedUses(node, input, uses_buffer, use_count,
+ max_uses)) {
+ return false;
+ }
+ } else if (input == node) {
+ if (*use_count >= max_uses) return false;
+ uses_buffer[*use_count] = {state_values, i};
+ (*use_count)++;
+ }
+ }
+ return true;
+}
+
+} // namespace
+
+Node* JSInliningHeuristic::DuplicateStateValuesAndRename(Node* state_values,
+ Node* from, Node* to,
+ StateCloneMode mode) {
+ // Only rename in states that are not shared with other users. This needs to
+ // be in sync with the condition in {CollectStateValuesOwnedUses}.
+ if (state_values->UseCount() > 1) return state_values;
+ Node* copy = mode == kChangeInPlace ? state_values : nullptr;
+ for (int i = 0; i < state_values->InputCount(); i++) {
+ Node* input = state_values->InputAt(i);
+ Node* processed;
+ if (input->opcode() == IrOpcode::kStateValues) {
+ processed = DuplicateStateValuesAndRename(input, from, to, mode);
+ } else if (input == from) {
+ processed = to;
+ } else {
+ processed = input;
+ }
+ if (processed != input) {
+ if (!copy) {
+ copy = graph()->CloneNode(state_values);
+ }
+ copy->ReplaceInput(i, processed);
+ }
+ }
+ return copy ? copy : state_values;
+}
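The clone-on-first-write discipline above (shared nodes are never touched; an owned node is copied only once a change is actually needed) can be shown in isolation. A toy version over a plain vector, with no V8 types involved:

    #include <cstddef>
    #include <vector>

    // Returns the input unchanged when nothing matches {from}; otherwise
    // clones once and edits the clone, like the kCloneState mode above.
    std::vector<int> RenameCopyOnWrite(const std::vector<int>& values,
                                       int from, int to) {
      std::vector<int> copy;
      bool copied = false;
      for (std::size_t i = 0; i < values.size(); ++i) {
        if (values[i] == from) {
          if (!copied) {
            copy = values;  // lazy clone on the first change
            copied = true;
          }
          copy[i] = to;
        }
      }
      return copied ? copy : values;
    }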
+
+namespace {
+
+bool CollectFrameStateUniqueUses(Node* node, Node* frame_state,
+ NodeAndIndex* uses_buffer, size_t* use_count,
+ size_t max_uses) {
+ // Only accumulate states that are not shared with other users.
+ if (frame_state->UseCount() > 1) return true;
+ if (frame_state->InputAt(kFrameStateStackInput) == node) {
+ if (*use_count >= max_uses) return false;
+ uses_buffer[*use_count] = {frame_state, kFrameStateStackInput};
+ (*use_count)++;
+ }
+ if (!CollectStateValuesOwnedUses(node,
+ frame_state->InputAt(kFrameStateLocalsInput),
+ uses_buffer, use_count, max_uses)) {
+ return false;
+ }
+ return true;
+}
+
+} // namespace
+
+Node* JSInliningHeuristic::DuplicateFrameStateAndRename(Node* frame_state,
+ Node* from, Node* to,
+ StateCloneMode mode) {
+ // Only rename in states that are not shared with other users. This needs to
+ // be in sync with the condition in {CollectFrameStateUniqueUses}.
+ if (frame_state->UseCount() > 1) return frame_state;
+ Node* copy = mode == kChangeInPlace ? frame_state : nullptr;
+ if (frame_state->InputAt(kFrameStateStackInput) == from) {
+ if (!copy) {
+ copy = graph()->CloneNode(frame_state);
+ }
+ copy->ReplaceInput(kFrameStateStackInput, to);
+ }
+ Node* locals = frame_state->InputAt(kFrameStateLocalsInput);
+ Node* new_locals = DuplicateStateValuesAndRename(locals, from, to, mode);
+ if (new_locals != locals) {
+ if (!copy) {
+ copy = graph()->CloneNode(frame_state);
+ }
+ copy->ReplaceInput(kFrameStateLocalsInput, new_locals);
+ }
+ return copy ? copy : frame_state;
+}
+
+bool JSInliningHeuristic::TryReuseDispatch(Node* node, Node* callee,
+ Candidate const& candidate,
+ Node** if_successes, Node** calls,
+ Node** inputs, int input_count) {
+ // We will try to reuse the control flow branch created for computing
+ // the {callee} target of the call. We only reuse the branch if there
+ // is no side-effect between the call and the branch, and if the callee is
+ // only used as the target (and possibly also in the related frame states).
+
int const num_calls = candidate.num_functions;
- Node* const node = candidate.node;
- if (num_calls == 1) {
- Handle<SharedFunctionInfo> shared =
- candidate.functions[0].is_null()
- ? candidate.shared_info
- : handle(candidate.functions[0]->shared());
- Reduction const reduction = inliner_.ReduceJSCall(node);
- if (reduction.Changed()) {
- cumulative_count_ += shared->bytecode_array()->length();
+
+ DCHECK_EQ(IrOpcode::kPhi, callee->opcode());
+ DCHECK_EQ(num_calls, callee->op()->ValueInputCount());
+
+ // We are trying to match the following pattern:
+ //
+ // C1 C2
+ // . .
+ // | |
+ // Merge(merge) <-----------------+
+ // ^ ^ |
+ // V1 V2 | | E1 E2 |
+ // . . | +----+ . . |
+ // | | | | | | |
+ // Phi(callee) EffectPhi(effect_phi) |
+ // ^ ^ |
+ // | | |
+ // +----+ | |
+ // | | | |
+ // | StateValues | |
+ // | ^ | |
+ // +----+ | | |
+ // | | | | |
+ // | FrameState | |
+ // | ^ | |
+ // | | | +---+
+ // | | | | |
+ // +----+ Checkpoint(checkpoint) |
+ // | | ^ |
+ // | StateValues | +-------------+
+ // | | | |
+ // +-----+ | | |
+ // | | | | |
+ // | FrameState | |
+ // | ^ | |
+ // +-----------+ | | |
+ // Call(node)
+ // |
+ // |
+ // .
+ //
+ // The {callee} here is a phi that merges the possible call targets; {node}
+ // is the actual call that we will try to duplicate and connect to the
+ // control that comes into {merge}. There can be a {checkpoint} between
+ // the call and the callee phi.
+ //
+ // The idea is to get rid of the merge, effect phi and phi, then duplicate
+ // the call (with all the frame states and such), and connect the duplicated
+ // calls and states directly to the inputs of the ex-phi, ex-effect-phi and
+ // ex-merge. The tricky part is to make sure that there is no interference
+ // from the outside. In particular, there should not be any unaccounted uses
+ // of the phi, effect-phi and merge because we will remove them from
+ // the graph.
+ //
+ // V1 E1 C1 V2 E2 C2
+ // . . . . . .
+ // | | | | | |
+ // +----+ | | +----+ |
+ // | | | | | | |
+ // | StateValues | | | StateValues |
+ // | ^ | | | ^ |
+ // +----+ | | | +----+ | |
+ // | | | | | | | | |
+ // | FrameState | | | FrameState |
+ // | ^ | | | ^ |
+ // | | | | | | |
+ // | | | | | | |
+ // +----+ Checkpoint | +----+ Checkpoint |
+ // | | ^ | | | ^ |
+ // | StateValues | | | StateValues | |
+ // | | | | | | | |
+ // +-----+ | | | +-----+ | | |
+ // | | | | | | | | | |
+ // | FrameState | | | FrameState | |
+ // | ^ | | | ^ | |
+ // +-------------+| | | +-------------+| | |
+ // Call----+ Call----+
+ // | |
+ // +-------+ +------------+
+ // | |
+ // Merge
+ // EffectPhi
+ // Phi
+ // |
+ // ...
+
+ // If there is a control node between the callee computation
+ // and the call, bail out.
+ Node* merge = NodeProperties::GetControlInput(callee);
+ if (NodeProperties::GetControlInput(node) != merge) return false;
+
+ // If there is a non-checkpoint effect node between the callee computation
+ // and the call, bail out. We will drop any checkpoint between the call and
+ // the callee phi because the callee computation should have its own
+ // checkpoint that the call can fall back to.
+ Node* checkpoint = nullptr;
+ Node* effect = NodeProperties::GetEffectInput(node);
+ if (effect->opcode() == IrOpcode::kCheckpoint) {
+ checkpoint = effect;
+ if (NodeProperties::GetControlInput(checkpoint) != merge) return false;
+ effect = NodeProperties::GetEffectInput(effect);
+ }
+ if (effect->opcode() != IrOpcode::kEffectPhi) return false;
+ if (NodeProperties::GetControlInput(effect) != merge) return false;
+ Node* effect_phi = effect;
+
+ // The effect phi, the callee, the call and the checkpoint must be the only
+ // users of the merge.
+ for (Node* merge_use : merge->uses()) {
+ if (merge_use != effect_phi && merge_use != callee && merge_use != node &&
+ merge_use != checkpoint) {
+ return false;
}
- return reduction;
}
- // Expand the JSCall/JSConstruct node to a subgraph first if
- // we have multiple known target functions.
- DCHECK_LT(1, num_calls);
- Node* calls[kMaxCallPolymorphism + 1];
- Node* if_successes[kMaxCallPolymorphism];
- Node* callee = NodeProperties::GetValueInput(node, 0);
- Node* fallthrough_control = NodeProperties::GetControlInput(node);
+ // The effect phi must be only used by the checkpoint or the call.
+ for (Node* effect_phi_use : effect_phi->uses()) {
+ if (effect_phi_use != node && effect_phi_use != checkpoint) return false;
+ }
- // Setup the inputs for the cloned call nodes.
- int const input_count = node->InputCount();
- Node** inputs = graph()->zone()->NewArray<Node*>(input_count);
- for (int i = 0; i < input_count; ++i) {
- inputs[i] = node->InputAt(i);
+ // We must replace the callee phi with the appropriate constant in
+ // the entire subgraph reachable by inputs from the call (terminating
+ // at phis and merges). Since we do not want to walk (and later duplicate)
+ // the subgraph here, we limit the possible uses to this set:
+ //
+ // 1. In the call (as a target).
+ // 2. The checkpoint between the call and the callee computation merge.
+ // 3. The lazy deoptimization frame state.
+ //
+ // This corresponds to the most common pattern, where the function is
+ // called with only local variables or constants as arguments.
+ //
+ // To check the uses, we first collect all the occurrences of callee in 1, 2
+ // and 3, and then we check that all uses of callee are in the collected
+ // occurrences. If there is an unaccounted use, we do not try to rewire
+ // the control flow.
+ //
+ // Note: With CFG, this would be much easier and more robust - we would just
+ // duplicate all the nodes between the merge and the call, replacing all
+ // occurrences of the {callee} phi with the appropriate constant.
+
+ // First compute the set of uses that are only reachable from 2 and 3.
+ const size_t kMaxUses = 8;
+ NodeAndIndex replaceable_uses[kMaxUses];
+ size_t replaceable_uses_count = 0;
+
+ // Collect the uses to check case 2.
+ Node* checkpoint_state = nullptr;
+ if (checkpoint) {
+ checkpoint_state = checkpoint->InputAt(0);
+ if (!CollectFrameStateUniqueUses(callee, checkpoint_state, replaceable_uses,
+ &replaceable_uses_count, kMaxUses)) {
+ return false;
+ }
+ }
+
+ // Collect the uses to check case 3.
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ if (!CollectFrameStateUniqueUses(callee, frame_state, replaceable_uses,
+ &replaceable_uses_count, kMaxUses)) {
+ return false;
+ }
+
+ // Bail out if there is a use of {callee} that is not reachable from 1, 2
+ // and 3.
+ for (Edge edge : callee->use_edges()) {
+ // Case 1 (use by the call as a target).
+ if (edge.from() == node && edge.index() == 0) continue;
+ // Case 2 and 3 - used in checkpoint and/or lazy deopt frame states.
+ bool found = false;
+ for (size_t i = 0; i < replaceable_uses_count; i++) {
+ if (replaceable_uses[i].node == edge.from() &&
+ replaceable_uses[i].index == edge.index()) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) return false;
}
+ // Clone the call and the framestate, including the uniquely reachable
+ // state values, making sure that we replace the phi with the constant.
+ for (int i = 0; i < num_calls; ++i) {
+ // Clone the calls for each branch.
+ // We need to specialize the calls to the correct target, effect, and
+ // control. We also need to duplicate the checkpoint and the lazy
+ // frame state, and change all the uses of the callee to the constant
+ // callee.
+ Node* target = callee->InputAt(i);
+ Node* effect = effect_phi->InputAt(i);
+ Node* control = merge->InputAt(i);
+
+ if (checkpoint) {
+ // Duplicate the checkpoint.
+ Node* new_checkpoint_state = DuplicateFrameStateAndRename(
+ checkpoint_state, callee, target,
+ (i == num_calls - 1) ? kChangeInPlace : kCloneState);
+ effect = graph()->NewNode(checkpoint->op(), new_checkpoint_state, effect,
+ control);
+ }
+
+ // Duplicate the call.
+ Node* new_lazy_frame_state = DuplicateFrameStateAndRename(
+ frame_state, callee, target,
+ (i == num_calls - 1) ? kChangeInPlace : kCloneState);
+ inputs[0] = target;
+ inputs[input_count - 3] = new_lazy_frame_state;
+ inputs[input_count - 2] = effect;
+ inputs[input_count - 1] = control;
+ calls[i] = if_successes[i] =
+ graph()->NewNode(node->op(), input_count, inputs);
+ }
+
+ // Mark the control inputs dead, so that we can kill the merge.
+ node->ReplaceInput(input_count - 1, jsgraph()->Dead());
+ callee->ReplaceInput(num_calls, jsgraph()->Dead());
+ effect_phi->ReplaceInput(num_calls, jsgraph()->Dead());
+ if (checkpoint) {
+ checkpoint->ReplaceInput(2, jsgraph()->Dead());
+ }
+
+ merge->Kill();
+ return true;
+}
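The final bail-out scan above is a set-membership test: every actual use edge of {callee} must be one of the collected replaceable uses, or rewiring is unsafe. A toy version of that predicate, with stand-in types instead of V8's Node/Edge:

    #include <cstddef>

    struct UseRef {
      const void* user;
      int index;
    };

    // True only if every actual use appears among the collected ones.
    bool AllUsesAccounted(const UseRef* collected, std::size_t collected_count,
                          const UseRef* actual, std::size_t actual_count) {
      for (std::size_t i = 0; i < actual_count; ++i) {
        bool found = false;
        for (std::size_t j = 0; j < collected_count; ++j) {
          if (collected[j].user == actual[i].user &&
              collected[j].index == actual[i].index) {
            found = true;
            break;
          }
        }
        if (!found) return false;
      }
      return true;
    }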
+
+void JSInliningHeuristic::CreateOrReuseDispatch(Node* node, Node* callee,
+ Candidate const& candidate,
+ Node** if_successes,
+ Node** calls, Node** inputs,
+ int input_count) {
+ if (TryReuseDispatch(node, callee, candidate, if_successes, calls, inputs,
+ input_count)) {
+ return;
+ }
+
+ Node* fallthrough_control = NodeProperties::GetControlInput(node);
+ int const num_calls = candidate.num_functions;
+
// Create the appropriate control flow to dispatch to the cloned calls.
for (int i = 0; i < num_calls; ++i) {
// TODO(2206): Make comparison be based on underlying SharedFunctionInfo
@@ -283,13 +597,55 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
if_successes[i] = fallthrough_control;
}
+ // Clone the calls for each branch.
// The first input to the call is the actual target (which we specialize
// to the known {target}); the last input is the control dependency.
+ // We also specialize the new.target of JSConstruct {node}s if it refers
+ // to the same node as the {node}'s target input, so that we can later
+ // properly inline the JSCreate operations.
+ if (node->opcode() == IrOpcode::kJSConstruct && inputs[0] == inputs[1]) {
+ inputs[1] = target;
+ }
inputs[0] = target;
inputs[input_count - 1] = if_successes[i];
calls[i] = if_successes[i] =
graph()->NewNode(node->op(), input_count, inputs);
}
+}
+
+Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
+ bool small_function) {
+ int const num_calls = candidate.num_functions;
+ Node* const node = candidate.node;
+ if (num_calls == 1) {
+ Handle<SharedFunctionInfo> shared =
+ candidate.functions[0].is_null()
+ ? candidate.shared_info
+ : handle(candidate.functions[0]->shared());
+ Reduction const reduction = inliner_.ReduceJSCall(node);
+ if (reduction.Changed()) {
+ cumulative_count_ += shared->bytecode_array()->length();
+ }
+ return reduction;
+ }
+
+ // Expand the JSCall/JSConstruct node to a subgraph first if
+ // we have multiple known target functions.
+ DCHECK_LT(1, num_calls);
+ Node* calls[kMaxCallPolymorphism + 1];
+ Node* if_successes[kMaxCallPolymorphism];
+ Node* callee = NodeProperties::GetValueInput(node, 0);
+
+ // Setup the inputs for the cloned call nodes.
+ int const input_count = node->InputCount();
+ Node** inputs = graph()->zone()->NewArray<Node*>(input_count);
+ for (int i = 0; i < input_count; ++i) {
+ inputs[i] = node->InputAt(i);
+ }
+
+ // Create the appropriate control flow to dispatch to the cloned calls.
+ CreateOrReuseDispatch(node, callee, candidate, if_successes, calls, inputs,
+ input_count);
// Check if we have an exception projection for the call {node}.
Node* if_exception = nullptr;
@@ -329,7 +685,7 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
for (int i = 0; i < num_calls; ++i) {
Handle<JSFunction> function = candidate.functions[i];
Node* node = calls[i];
- if (force_inline ||
+ if (small_function ||
(candidate.can_inline_function[i] &&
cumulative_count_ < FLAG_max_inlined_bytecode_size_cumulative)) {
Reduction const reduction = inliner_.ReduceJSCall(node);
@@ -394,6 +750,8 @@ SimplifiedOperatorBuilder* JSInliningHeuristic::simplified() const {
return jsgraph()->simplified();
}
+#undef TRACE
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h
index b4947c505e..dffa5cfd6a 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.h
+++ b/deps/v8/src/compiler/js-inlining-heuristic.h
@@ -63,7 +63,18 @@ class JSInliningHeuristic final : public AdvancedReducer {
// Dumps candidates to console.
void PrintCandidates();
- Reduction InlineCandidate(Candidate const& candidate, bool force_inline);
+ Reduction InlineCandidate(Candidate const& candidate, bool small_function);
+ void CreateOrReuseDispatch(Node* node, Node* callee,
+ Candidate const& candidate, Node** if_successes,
+ Node** calls, Node** inputs, int input_count);
+ bool TryReuseDispatch(Node* node, Node* callee, Candidate const& candidate,
+ Node** if_successes, Node** calls, Node** inputs,
+ int input_count);
+ enum StateCloneMode { kCloneState, kChangeInPlace };
+ Node* DuplicateFrameStateAndRename(Node* frame_state, Node* from, Node* to,
+ StateCloneMode mode);
+ Node* DuplicateStateValuesAndRename(Node* state_values, Node* from, Node* to,
+ StateCloneMode mode);
CommonOperatorBuilder* common() const;
Graph* graph() const;
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index b74f94fa72..6943aab250 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -430,6 +430,10 @@ Reduction JSInliner::Reduce(Node* node) {
return ReduceJSCall(node);
}
+Handle<Context> JSInliner::native_context() const {
+ return handle(info_->context()->native_context());
+}
+
Reduction JSInliner::ReduceJSCall(Node* node) {
DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
Handle<SharedFunctionInfo> shared_info;
@@ -541,7 +545,8 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
}
BytecodeGraphBuilder graph_builder(
zone(), shared_info, feedback_vector, BailoutId::None(), jsgraph(),
- call.frequency(), source_positions_, inlining_id, flags, false);
+ call.frequency(), source_positions_, native_context(), inlining_id,
+ flags, false);
graph_builder.CreateGraph();
// Extract the inlinee start/end nodes.
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index 8718a176a3..d078413b47 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -47,6 +47,7 @@ class JSInliner final : public AdvancedReducer {
SimplifiedOperatorBuilder* simplified() const;
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
+ Handle<Context> native_context() const;
Zone* const local_zone_;
CompilationInfo* info_;
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index e7a0cc7dee..3ed50acaeb 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -98,8 +98,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
case Runtime::kInlineTypedArrayGetLength:
return ReduceArrayBufferViewField(node,
AccessBuilder::ForJSTypedArrayLength());
- case Runtime::kInlineTypedArrayMaxSizeInHeap:
- return ReduceTypedArrayMaxSizeInHeap(node);
case Runtime::kInlineTheHole:
return ReduceTheHole(node);
case Runtime::kInlineClassOf:
@@ -383,12 +381,6 @@ Reduction JSIntrinsicLowering::ReduceMaxSmi(Node* node) {
return Replace(value);
}
-Reduction JSIntrinsicLowering::ReduceTypedArrayMaxSizeInHeap(Node* node) {
- Node* value = jsgraph()->Constant(FLAG_typed_array_max_size_in_heap);
- ReplaceWithValue(node, value);
- return Replace(value);
-}
-
Reduction JSIntrinsicLowering::ReduceTheHole(Node* node) {
Node* value = jsgraph()->TheHoleConstant();
ReplaceWithValue(node, value);
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index 1a86ebf534..0226ae56f5 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -68,7 +68,6 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
Reduction ReduceArrayBufferViewField(Node* node, FieldAccess const& access);
Reduction ReduceArrayBufferViewWasNeutered(Node* node);
Reduction ReduceMaxSmi(Node* node);
- Reduction ReduceTypedArrayMaxSizeInHeap(Node* node);
// TODO(turbofan): collection.js support; drop once Maps and Sets are
// converted to proper CodeStubAssembler based builtins.
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index dbe3fc9608..06f059e24e 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -5,6 +5,7 @@
#include "src/compiler/js-native-context-specialization.h"
#include "src/accessors.h"
+#include "src/api.h"
#include "src/code-factory.h"
#include "src/compilation-dependencies.h"
#include "src/compiler/access-builder.h"
@@ -652,7 +653,7 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
Reduction JSNativeContextSpecialization::ReduceNamedAccess(
Node* node, Node* value, MapHandles const& receiver_maps, Handle<Name> name,
- AccessMode access_mode, LanguageMode language_mode, Node* index) {
+ AccessMode access_mode, Node* index) {
DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
node->opcode() == IrOpcode::kJSStoreNamed ||
node->opcode() == IrOpcode::kJSLoadProperty ||
@@ -731,7 +732,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// Generate the actual property access.
ValueEffectControl continuation = BuildPropertyAccess(
receiver, value, context, frame_state, effect, control, name,
- if_exceptions, access_info, access_mode, language_mode);
+ if_exceptions, access_info, access_mode);
value = continuation.value();
effect = continuation.effect();
control = continuation.control();
@@ -836,10 +837,9 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
}
// Generate the actual property access.
- ValueEffectControl continuation =
- BuildPropertyAccess(this_receiver, this_value, context, frame_state,
- this_effect, this_control, name, if_exceptions,
- access_info, access_mode, language_mode);
+ ValueEffectControl continuation = BuildPropertyAccess(
+ this_receiver, this_value, context, frame_state, this_effect,
+ this_control, name, if_exceptions, access_info, access_mode);
values.push_back(continuation.value());
effects.push_back(continuation.effect());
controls.push_back(continuation.control());
@@ -891,7 +891,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus(
Node* node, Node* value, FeedbackNexus const& nexus, Handle<Name> name,
- AccessMode access_mode, LanguageMode language_mode) {
+ AccessMode access_mode) {
DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
node->opcode() == IrOpcode::kJSStoreNamed ||
node->opcode() == IrOpcode::kJSStoreNamedOwn);
@@ -929,8 +929,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus(
}
// Try to lower the named access based on the {receiver_maps}.
- return ReduceNamedAccess(node, value, receiver_maps, name, access_mode,
- language_mode);
+ return ReduceNamedAccess(node, value, receiver_maps, name, access_mode);
}
Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
@@ -968,13 +967,13 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
}
}
- // Extract receiver maps from the LOAD_IC using the LoadICNexus.
+ // Extract receiver maps from the load IC using the LoadICNexus.
if (!p.feedback().IsValid()) return NoChange();
LoadICNexus nexus(p.feedback().vector(), p.feedback().slot());
// Try to lower the named access based on the {receiver_maps}.
return ReduceNamedAccessFromNexus(node, value, nexus, p.name(),
- AccessMode::kLoad, p.language_mode());
+ AccessMode::kLoad);
}
@@ -983,13 +982,13 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) {
NamedAccess const& p = NamedAccessOf(node->op());
Node* const value = NodeProperties::GetValueInput(node, 1);
- // Extract receiver maps from the STORE_IC using the StoreICNexus.
+ // Extract receiver maps from the store IC using the StoreICNexus.
if (!p.feedback().IsValid()) return NoChange();
StoreICNexus nexus(p.feedback().vector(), p.feedback().slot());
// Try to lower the named access based on the {receiver_maps}.
return ReduceNamedAccessFromNexus(node, value, nexus, p.name(),
- AccessMode::kStore, p.language_mode());
+ AccessMode::kStore);
}
Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) {
@@ -1003,13 +1002,12 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) {
// Try to lower the creation of a named property based on the {receiver_maps}.
return ReduceNamedAccessFromNexus(node, value, nexus, p.name(),
- AccessMode::kStoreInLiteral, STRICT);
+ AccessMode::kStoreInLiteral);
}
Reduction JSNativeContextSpecialization::ReduceElementAccess(
Node* node, Node* index, Node* value, MapHandles const& receiver_maps,
- AccessMode access_mode, LanguageMode language_mode,
- KeyedAccessStoreMode store_mode) {
+ AccessMode access_mode, KeyedAccessStoreMode store_mode) {
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
node->opcode() == IrOpcode::kJSStoreProperty);
Node* receiver = NodeProperties::GetValueInput(node, 0);
@@ -1250,47 +1248,89 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
template <typename KeyedICNexus>
Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
Node* node, Node* index, Node* value, KeyedICNexus const& nexus,
- AccessMode access_mode, LanguageMode language_mode,
- KeyedAccessStoreMode store_mode) {
+ AccessMode access_mode, KeyedAccessStoreMode store_mode) {
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
node->opcode() == IrOpcode::kJSStoreProperty);
Node* receiver = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- // Optimize access for constant {receiver}.
- HeapObjectMatcher mreceiver(receiver);
- if (mreceiver.HasValue() && mreceiver.Value()->IsString()) {
- Handle<String> string = Handle<String>::cast(mreceiver.Value());
-
- // Strings are immutable in JavaScript.
- if (access_mode == AccessMode::kStore) return NoChange();
-
- // Properly deal with constant {index}.
- NumberMatcher mindex(index);
- if (mindex.IsInteger() && mindex.IsInRange(0.0, string->length() - 1)) {
- // Constant-fold the {index} access to {string}.
- Node* value = jsgraph()->HeapConstant(
- factory()->LookupSingleCharacterStringFromCode(
- string->Get(static_cast<int>(mindex.Value()))));
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
+ // Optimize the case where we load from a constant {receiver}.
+ if (access_mode == AccessMode::kLoad) {
+ HeapObjectMatcher mreceiver(receiver);
+ if (mreceiver.HasValue() && !mreceiver.Value()->IsTheHole(isolate()) &&
+ !mreceiver.Value()->IsNullOrUndefined(isolate())) {
+ // Check whether we're accessing a known element on the {receiver}
+ // that is non-configurable, non-writable (i.e. the {receiver} was
+ // frozen using Object.freeze).
+ NumberMatcher mindex(index);
+ if (mindex.IsInteger() && mindex.IsInRange(0.0, kMaxUInt32 - 1.0)) {
+ LookupIterator it(isolate(), mreceiver.Value(),
+ static_cast<uint32_t>(mindex.Value()),
+ LookupIterator::OWN);
+ if (it.state() == LookupIterator::DATA) {
+ if (it.IsReadOnly() && !it.IsConfigurable()) {
+ // We can safely constant-fold the {index} access to {receiver},
+ // since the element is non-configurable, non-writable and thus
+ // cannot change anymore.
+ value = jsgraph()->Constant(it.GetDataValue());
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
- // We can only assume that the {index} is a valid array index if the IC
- // is in element access mode and not MEGAMORPHIC, otherwise there's no
- // guard for the bounds check below.
- if (nexus.ic_state() != MEGAMORPHIC && nexus.GetKeyType() == ELEMENT) {
- // Ensure that {index} is less than {receiver} length.
- Node* length = jsgraph()->Constant(string->length());
- index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
- length, effect, control);
+ // Check if the {receiver} is a known constant with a copy-on-write
+ // backing store, and whether {index} is within the appropriate
+ // bounds. In that case we can constant-fold the access and only
+ // check that the {elements} didn't change. This is sufficient as
+ // the backing store of a copy-on-write JSArray is defensively copied
+ // whenever the length or the elements (might) change.
+ //
+ // What's interesting here is that we don't need to map check the
+ // {receiver}, since JSArrays will always have their elements in
+ // the backing store.
+ if (mreceiver.Value()->IsJSArray()) {
+ Handle<JSArray> array = Handle<JSArray>::cast(mreceiver.Value());
+ if (array->elements()->IsCowArray()) {
+ Node* elements = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ receiver, effect, control);
+ Handle<FixedArray> array_elements(
+ FixedArray::cast(array->elements()), isolate());
+ Node* check =
+ graph()->NewNode(simplified()->ReferenceEqual(), elements,
+ jsgraph()->HeapConstant(array_elements));
+ effect = graph()->NewNode(simplified()->CheckIf(), check, effect,
+ control);
+ value = jsgraph()->Constant(it.GetDataValue());
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+ }
+ }
+ }
- // Return the character from the {receiver} as single character string.
- value = graph()->NewNode(simplified()->StringCharAt(), receiver, index,
- control);
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
+ // For constant Strings we can eagerly strength-reduce the keyed
+ // accesses using the known length, which doesn't change.
+ if (mreceiver.Value()->IsString()) {
+ Handle<String> string = Handle<String>::cast(mreceiver.Value());
+
+ // We can only assume that the {index} is a valid array index if the IC
+ // is in element access mode and not MEGAMORPHIC, otherwise there's no
+ // guard for the bounds check below.
+ if (nexus.ic_state() != MEGAMORPHIC && nexus.GetKeyType() == ELEMENT) {
+ // Ensure that {index} is less than {receiver} length.
+ Node* length = jsgraph()->Constant(string->length());
+ index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
+ length, effect, control);
+
+ // Return the character from the {receiver} as single character
+ // string.
+ value = graph()->NewNode(simplified()->StringCharAt(), receiver,
+ index, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+ }
}
}
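The constant folds above hinge on one narrow safety condition: an own data property may be folded only when it can never change, i.e. it is both read-only and non-configurable, which is exactly what Object.freeze produces. A minimal predicate sketch with a toy attribute struct, not V8's PropertyDetails:

    struct ToyPropertyAttributes {
      bool read_only;     // corresponds to it.IsReadOnly() above
      bool configurable;  // corresponds to it.IsConfigurable() above
    };

    bool CanConstantFoldOwnElement(const ToyPropertyAttributes& attrs) {
      return attrs.read_only && !attrs.configurable;
    }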
@@ -1332,8 +1372,7 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
index = jsgraph()->Constant(static_cast<double>(array_index));
} else {
name = factory()->InternalizeName(name);
- return ReduceNamedAccess(node, value, receiver_maps, name, access_mode,
- language_mode);
+ return ReduceNamedAccess(node, value, receiver_maps, name, access_mode);
}
}
}
@@ -1341,8 +1380,7 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
// Check if we have feedback for a named access.
if (Name* name = nexus.FindFirstName()) {
return ReduceNamedAccess(node, value, receiver_maps,
- handle(name, isolate()), access_mode,
- language_mode, index);
+ handle(name, isolate()), access_mode, index);
} else if (nexus.GetKeyType() != ELEMENT) {
// The KeyedLoad/StoreIC has seen non-element accesses, so we cannot assume
// that the {index} is a valid array index, thus we just let the IC continue
@@ -1358,7 +1396,7 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
// Try to lower the element access based on the {receiver_maps}.
return ReduceElementAccess(node, index, value, receiver_maps, access_mode,
- language_mode, store_mode);
+ store_mode);
}
Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize(
@@ -1377,39 +1415,137 @@ Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize(
return Changed(node);
}
-
Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadProperty, node->opcode());
PropertyAccess const& p = PropertyAccessOf(node->op());
- Node* const index = NodeProperties::GetValueInput(node, 1);
- Node* const value = jsgraph()->Dead();
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* name = NodeProperties::GetValueInput(node, 1);
+ Node* value = jsgraph()->Dead();
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // We can optimize a property load if it's being used inside a for..in,
+ // so for code like this:
+ //
+ // for (name in receiver) {
+ // value = receiver[name];
+ // ...
+ // }
+ //
+ // If the for..in is in fast-mode, we know that the {receiver} has {name}
+ // as own property, otherwise the enumeration wouldn't include it. The graph
+ // constructed by the BytecodeGraphBuilder in this case looks like this:
+
+ // receiver
+ // ^ ^
+ // | |
+ // | +-+
+ // | |
+ // | JSToObject
+ // | ^
+ // | |
+ // | |
+ // | JSForInNext
+ // | ^
+ // | |
+ // +----+ |
+ // | |
+ // | |
+ // JSLoadProperty
+
+ // If the for..in has only seen maps with enum cache consisting of keys
+ // and indices so far, we can turn the {JSLoadProperty} into a map check
+ // on the {receiver} and then just load the field value dynamically via
+ // the {LoadFieldByIndex} operator. The map check is only necessary when
+ // TurboFan cannot prove that there is no observable side effect between
+ // the {JSForInNext} and the {JSLoadProperty} node.
+ //
+ // Also note that it's safe to look through the {JSToObject}, since the
+ // [[Get]] operation does an implicit ToObject anyway, and these operations
+ // are not observable.
+ if (name->opcode() == IrOpcode::kJSForInNext) {
+ ForInMode const mode = ForInModeOf(name->op());
+ if (mode == ForInMode::kUseEnumCacheKeysAndIndices) {
+ Node* object = NodeProperties::GetValueInput(name, 0);
+ Node* enumerator = NodeProperties::GetValueInput(name, 2);
+ Node* index = NodeProperties::GetValueInput(name, 3);
+ if (object->opcode() == IrOpcode::kJSToObject) {
+ object = NodeProperties::GetValueInput(object, 0);
+ }
+ if (object == receiver) {
+ // No need to repeat the map check if we can prove that there's no
+        // observable side effect between {effect} and {name}.
+ if (!NodeProperties::NoObservableSideEffectBetween(effect, name)) {
+ // Check that the {receiver} map is still valid.
+ Node* receiver_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, effect, control);
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
+ receiver_map, enumerator);
+ effect =
+ graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ }
+
+        // Load the enum cache indices from the {enumerator} map.
+ Node* descriptor_array = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapDescriptors()),
+ enumerator, effect, control);
+ Node* enum_cache = effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForDescriptorArrayEnumCache()),
+ descriptor_array, effect, control);
+ Node* enum_indices = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForEnumCacheIndices()),
+ enum_cache, effect, control);
+
+ // Ensure that the {enum_indices} are valid.
+ Node* check = graph()->NewNode(
+ simplified()->BooleanNot(),
+ graph()->NewNode(simplified()->ReferenceEqual(), enum_indices,
+ jsgraph()->EmptyFixedArrayConstant()));
+ effect =
+ graph()->NewNode(simplified()->CheckIf(), check, effect, control);
- // Extract receiver maps from the KEYED_LOAD_IC using the KeyedLoadICNexus.
+ // Determine the index from the {enum_indices}.
+ index = effect = graph()->NewNode(
+ simplified()->LoadElement(
+ AccessBuilder::ForFixedArrayElement(PACKED_SMI_ELEMENTS)),
+ enum_indices, index, effect, control);
+
+ // Load the actual field value.
+ Node* value = effect = graph()->NewNode(
+ simplified()->LoadFieldByIndex(), receiver, index, effect, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+ }
+ }
+
+ // Extract receiver maps from the keyed load IC using the KeyedLoadICNexus.
if (!p.feedback().IsValid()) return NoChange();
KeyedLoadICNexus nexus(p.feedback().vector(), p.feedback().slot());
// Try to lower the keyed access based on the {nexus}.
- return ReduceKeyedAccess(node, index, value, nexus, AccessMode::kLoad,
- p.language_mode(), STANDARD_STORE);
+ return ReduceKeyedAccess(node, name, value, nexus, AccessMode::kLoad,
+ STANDARD_STORE);
}
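A hedged model of the fast path built above for value = receiver[name] inside a fast-mode for..in: check the receiver's map against the enumerator, fetch the field index from the enum cache indices, then load the field directly. All types below are simplified stand-ins, not V8's real object layout:

    #include <stdexcept>
    #include <vector>

    struct EnumCache { std::vector<int> indices; };  // one field index per key
    struct Map { EnumCache enum_cache; };            // via the descriptor array
    struct HeapObject {
      const Map* map;
      std::vector<int> fields;                       // in-object field values
    };

    int LoadForInValue(const HeapObject& receiver, const Map* enumerator,
                       int key_index) {
      // CheckIf(ReferenceEqual(map, enumerator)): deoptimize on mismatch.
      if (receiver.map != enumerator) throw std::runtime_error("deopt");
      int field_index = enumerator->enum_cache.indices[key_index];  // LoadElement
      return receiver.fields[field_index];  // LoadFieldByIndex
    }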
-
Reduction JSNativeContextSpecialization::ReduceJSStoreProperty(Node* node) {
DCHECK_EQ(IrOpcode::kJSStoreProperty, node->opcode());
PropertyAccess const& p = PropertyAccessOf(node->op());
Node* const index = NodeProperties::GetValueInput(node, 1);
Node* const value = NodeProperties::GetValueInput(node, 2);
- // Extract receiver maps from the KEYED_STORE_IC using the KeyedStoreICNexus.
+ // Extract receiver maps from the keyed store IC using the KeyedStoreICNexus.
if (!p.feedback().IsValid()) return NoChange();
KeyedStoreICNexus nexus(p.feedback().vector(), p.feedback().slot());
- // Extract the keyed access store mode from the KEYED_STORE_IC.
+ // Extract the keyed access store mode from the keyed store IC.
KeyedAccessStoreMode store_mode = nexus.GetKeyedAccessStoreMode();
// Try to lower the keyed access based on the {nexus}.
return ReduceKeyedAccess(node, index, value, nexus, AccessMode::kStore,
- p.language_mode(), store_mode);
+ store_mode);
}
Node* JSNativeContextSpecialization::InlinePropertyGetterCall(
@@ -1570,7 +1706,7 @@ JSNativeContextSpecialization::ValueEffectControl
JSNativeContextSpecialization::BuildPropertyLoad(
Node* receiver, Node* context, Node* frame_state, Node* effect,
Node* control, Handle<Name> name, ZoneVector<Node*>* if_exceptions,
- PropertyAccessInfo const& access_info, LanguageMode language_mode) {
+ PropertyAccessInfo const& access_info) {
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
PropertyAccessBuilder access_builder(jsgraph(), dependencies());
@@ -1589,6 +1725,11 @@ JSNativeContextSpecialization::BuildPropertyLoad(
} else if (access_info.IsAccessorConstant()) {
value = InlinePropertyGetterCall(receiver, context, frame_state, &effect,
&control, if_exceptions, access_info);
+ } else if (access_info.IsModuleExport()) {
+ Node* cell = jsgraph()->Constant(access_info.export_cell());
+ value = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForCellValue()),
+ cell, effect, control);
} else {
DCHECK(access_info.IsDataField() || access_info.IsDataConstantField());
value = access_builder.BuildLoadDataField(name, access_info, receiver,
@@ -1602,17 +1743,16 @@ JSNativeContextSpecialization::ValueEffectControl
JSNativeContextSpecialization::BuildPropertyAccess(
Node* receiver, Node* value, Node* context, Node* frame_state, Node* effect,
Node* control, Handle<Name> name, ZoneVector<Node*>* if_exceptions,
- PropertyAccessInfo const& access_info, AccessMode access_mode,
- LanguageMode language_mode) {
+ PropertyAccessInfo const& access_info, AccessMode access_mode) {
switch (access_mode) {
case AccessMode::kLoad:
return BuildPropertyLoad(receiver, context, frame_state, effect, control,
- name, if_exceptions, access_info, language_mode);
+ name, if_exceptions, access_info);
case AccessMode::kStore:
case AccessMode::kStoreInLiteral:
return BuildPropertyStore(receiver, value, context, frame_state, effect,
control, name, if_exceptions, access_info,
- access_mode, language_mode);
+ access_mode);
}
UNREACHABLE();
return ValueEffectControl();
@@ -1622,8 +1762,7 @@ JSNativeContextSpecialization::ValueEffectControl
JSNativeContextSpecialization::BuildPropertyStore(
Node* receiver, Node* value, Node* context, Node* frame_state, Node* effect,
Node* control, Handle<Name> name, ZoneVector<Node*>* if_exceptions,
- PropertyAccessInfo const& access_info, AccessMode access_mode,
- LanguageMode language_mode) {
+ PropertyAccessInfo const& access_info, AccessMode access_mode) {
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
PropertyAccessBuilder access_builder(jsgraph(), dependencies());
@@ -1889,7 +2028,7 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
// Generate the actual property access.
ValueEffectControl continuation = BuildPropertyAccess(
receiver, value, context, frame_state_lazy, effect, control, cached_name,
- nullptr, access_info, AccessMode::kStoreInLiteral, LanguageMode::SLOPPY);
+ nullptr, access_info, AccessMode::kStoreInLiteral);
value = continuation.value();
effect = continuation.effect();
control = continuation.control();
@@ -2021,9 +2160,12 @@ JSNativeContextSpecialization::BuildElementAccess(
UNREACHABLE();
break;
case AccessMode::kStore: {
- // Ensure that the {value} is actually a Number.
- value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
- effect, control);
+ // Ensure that the {value} is actually a Number or an Oddball,
+ // and truncate it to a Number appropriately.
+ value = effect =
+ graph()->NewNode(simplified()->SpeculativeToNumber(
+ NumberOperationHint::kNumberOrOddball),
+ value, effect, control);
// Introduce the appropriate truncation for {value}. Currently we
// only need to do this for ClamedUint8Array {receiver}s, as the
@@ -2100,13 +2242,8 @@ JSNativeContextSpecialization::BuildElementAccess(
// Check if we might need to grow the {elements} backing store.
if (IsGrowStoreMode(store_mode)) {
+ // For growing stores we validate the {index} below.
DCHECK_EQ(AccessMode::kStore, access_mode);
-
- // Check that the {index} is a valid array index; the actual checking
- // happens below right before the element store.
- index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
- jsgraph()->Constant(Smi::kMaxValue),
- effect, control);
} else {
// Check that the {index} is in the valid range for the {receiver}.
index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
@@ -2187,23 +2324,69 @@ JSNativeContextSpecialization::BuildElementAccess(
graph()->NewNode(simplified()->EnsureWritableFastElements(),
receiver, elements, effect, control);
} else if (IsGrowStoreMode(store_mode)) {
- // Grow {elements} backing store if necessary. Also updates the
- // "length" property for JSArray {receiver}s, hence there must
- // not be any other check after this operation, as the write
- // to the "length" property is observable.
- GrowFastElementsFlags flags = GrowFastElementsFlag::kNone;
+ // Determine the length of the {elements} backing store.
+ Node* elements_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+ elements, effect, control);
+
+ // Validate the {index} depending on holeyness:
+ //
+ // For HOLEY_*_ELEMENTS the {index} must not exceed the {elements}
+ // backing store capacity plus the maximum allowed gap, as otherwise
+          // the (potential) backing store growth would normalize the backing
+          // store, and thus the elements kind of the {receiver} would change
+          // to slow mode.
+ //
+ // For PACKED_*_ELEMENTS the {index} must be within the range
+ // [0,length+1[ to be valid. In case {index} equals {length},
+ // the {receiver} will be extended, but kept packed.
+ Node* limit =
+ IsHoleyElementsKind(elements_kind)
+ ? graph()->NewNode(simplified()->NumberAdd(), elements_length,
+ jsgraph()->Constant(JSObject::kMaxGap))
+ : graph()->NewNode(simplified()->NumberAdd(), length,
+ jsgraph()->OneConstant());
+ index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
+ limit, effect, control);
+
+ // Grow {elements} backing store if necessary.
+ GrowFastElementsMode mode =
+ IsDoubleElementsKind(elements_kind)
+ ? GrowFastElementsMode::kDoubleElements
+ : GrowFastElementsMode::kSmiOrObjectElements;
+ elements = effect = graph()->NewNode(
+ simplified()->MaybeGrowFastElements(mode), receiver, elements,
+ index, elements_length, effect, control);
+
+ // Also update the "length" property if {receiver} is a JSArray.
if (receiver_is_jsarray) {
- flags |= GrowFastElementsFlag::kArrayObject;
- }
- if (IsHoleyOrDictionaryElementsKind(elements_kind)) {
- flags |= GrowFastElementsFlag::kHoleyElements;
- }
- if (IsDoubleElementsKind(elements_kind)) {
- flags |= GrowFastElementsFlag::kDoubleElements;
+ Node* check =
+ graph()->NewNode(simplified()->NumberLessThan(), index, length);
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ {
+            // We don't need to do anything; the {index} is within
+ // the valid bounds for the JSArray {receiver}.
+ }
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ {
+ // Update the JSArray::length field. Since this is observable,
+ // there must be no other check after this.
+ Node* new_length = graph()->NewNode(
+ simplified()->NumberAdd(), index, jsgraph()->OneConstant());
+ efalse = graph()->NewNode(
+ simplified()->StoreField(
+ AccessBuilder::ForJSArrayLength(elements_kind)),
+ receiver, new_length, efalse, if_false);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
}
- elements = effect = graph()->NewNode(
- simplified()->MaybeGrowFastElements(flags), receiver, elements,
- index, length, effect, control);
}
// Perform the actual element access.
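The new validation distinguishes holey from packed kinds when computing the bounds-check limit for a growing store. A compact model (the kMaxGap value is an assumption for illustration only):

    #include <cstdint>

    constexpr uint32_t kMaxGap = 1024;  // assumed value of JSObject::kMaxGap

    // Exclusive upper bound that CheckBounds enforces on {index} for a
    // growing store.
    uint32_t GrowStoreIndexLimit(bool holey, uint32_t elements_length,
                                 uint32_t array_length) {
      // Holey kinds may leave gaps of up to kMaxGap slots; packed kinds may
      // only append at exactly array_length so the array stays packed.
      return holey ? elements_length + kMaxGap : array_length + 1;
    }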
@@ -2255,14 +2438,18 @@ Node* JSNativeContextSpecialization::BuildExtendPropertiesBackingStore(
jsgraph()->SmiConstant(PropertyArray::kNoHashSentinel));
hash = graph()->NewNode(common()->TypeGuard(Type::SignedSmall()), hash,
control);
+ hash =
+ graph()->NewNode(simplified()->NumberShiftLeft(), hash,
+ jsgraph()->Constant(PropertyArray::HashField::kShift));
} else {
hash = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForPropertyArrayLengthAndHash()),
properties, effect, control);
effect = graph()->NewNode(
common()->BeginRegion(RegionObservability::kNotObservable), effect);
- hash = graph()->NewNode(simplified()->NumberBitwiseAnd(), hash,
- jsgraph()->Constant(JSReceiver::kHashMask));
+ hash =
+ graph()->NewNode(simplified()->NumberBitwiseAnd(), hash,
+ jsgraph()->Constant(PropertyArray::HashField::kMask));
}
Node* new_length_and_hash = graph()->NewNode(
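Both branches above position the hash in the upper bits of the combined length-and-hash word, either by shifting a fresh hash into place or by masking it out of an existing word. A toy encoding under an assumed layout (the real shift and mask live in PropertyArray::HashField):

    #include <cstdint>

    // Assumed layout for illustration: low 10 bits length, upper bits hash.
    constexpr uint32_t kHashShift = 10;
    constexpr uint32_t kLengthMask = (1u << kHashShift) - 1;
    constexpr uint32_t kHashMask = ~kLengthMask;

    uint32_t EncodeLengthAndHash(uint32_t length, uint32_t hash) {
      return (length & kLengthMask) | ((hash << kHashShift) & kHashMask);
    }

    uint32_t DecodeHash(uint32_t length_and_hash) {
      return (length_and_hash & kHashMask) >> kHashShift;
    }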
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 9659020e33..f7e9439e29 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -75,22 +75,18 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Reduction ReduceElementAccess(Node* node, Node* index, Node* value,
MapHandles const& receiver_maps,
AccessMode access_mode,
- LanguageMode language_mode,
KeyedAccessStoreMode store_mode);
template <typename KeyedICNexus>
Reduction ReduceKeyedAccess(Node* node, Node* index, Node* value,
KeyedICNexus const& nexus, AccessMode access_mode,
- LanguageMode language_mode,
KeyedAccessStoreMode store_mode);
Reduction ReduceNamedAccessFromNexus(Node* node, Node* value,
FeedbackNexus const& nexus,
Handle<Name> name,
- AccessMode access_mode,
- LanguageMode language_mode);
+ AccessMode access_mode);
Reduction ReduceNamedAccess(Node* node, Node* value,
MapHandles const& receiver_maps,
Handle<Name> name, AccessMode access_mode,
- LanguageMode language_mode,
Node* index = nullptr);
Reduction ReduceGlobalAccess(Node* node, Node* receiver, Node* value,
Handle<Name> name, AccessMode access_mode,
@@ -117,23 +113,26 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
};
// Construct the appropriate subgraph for property access.
- ValueEffectControl BuildPropertyAccess(
- Node* receiver, Node* value, Node* context, Node* frame_state,
- Node* effect, Node* control, Handle<Name> name,
- ZoneVector<Node*>* if_exceptions, PropertyAccessInfo const& access_info,
- AccessMode access_mode, LanguageMode language_mode);
+ ValueEffectControl BuildPropertyAccess(Node* receiver, Node* value,
+ Node* context, Node* frame_state,
+ Node* effect, Node* control,
+ Handle<Name> name,
+ ZoneVector<Node*>* if_exceptions,
+ PropertyAccessInfo const& access_info,
+ AccessMode access_mode);
ValueEffectControl BuildPropertyLoad(Node* receiver, Node* context,
Node* frame_state, Node* effect,
Node* control, Handle<Name> name,
ZoneVector<Node*>* if_exceptions,
- PropertyAccessInfo const& access_info,
- LanguageMode language_mode);
-
- ValueEffectControl BuildPropertyStore(
- Node* receiver, Node* value, Node* context, Node* frame_state,
- Node* effect, Node* control, Handle<Name> name,
- ZoneVector<Node*>* if_exceptions, PropertyAccessInfo const& access_info,
- AccessMode access_mode, LanguageMode language_mode);
+ PropertyAccessInfo const& access_info);
+
+ ValueEffectControl BuildPropertyStore(Node* receiver, Node* value,
+ Node* context, Node* frame_state,
+ Node* effect, Node* control,
+ Handle<Name> name,
+ ZoneVector<Node*>* if_exceptions,
+ PropertyAccessInfo const& access_info,
+ AccessMode access_mode);
// Helpers for accessor inlining.
Node* InlinePropertyGetterCall(Node* receiver, Node* context,
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index 2a680cd676..7b1df6e5a9 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -290,7 +290,8 @@ std::ostream& operator<<(std::ostream& os, FeedbackParameter const& p) {
}
FeedbackParameter const& FeedbackParameterOf(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral);
+ DCHECK(op->opcode() == IrOpcode::kJSCreateEmptyLiteralArray ||
+ op->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral);
return OpParameter<FeedbackParameter>(op);
}
@@ -484,8 +485,8 @@ const CreateClosureParameters& CreateClosureParametersOf(const Operator* op) {
bool operator==(CreateLiteralParameters const& lhs,
CreateLiteralParameters const& rhs) {
return lhs.constant().location() == rhs.constant().location() &&
- lhs.length() == rhs.length() && lhs.flags() == rhs.flags() &&
- lhs.index() == rhs.index();
+ lhs.feedback() == rhs.feedback() && lhs.length() == rhs.length() &&
+ lhs.flags() == rhs.flags();
}
@@ -496,14 +497,13 @@ bool operator!=(CreateLiteralParameters const& lhs,
size_t hash_value(CreateLiteralParameters const& p) {
- return base::hash_combine(p.constant().location(), p.length(), p.flags(),
- p.index());
+ return base::hash_combine(p.constant().location(), p.feedback(), p.length(),
+ p.flags());
}
std::ostream& operator<<(std::ostream& os, CreateLiteralParameters const& p) {
- return os << Brief(*p.constant()) << ", " << p.length() << ", " << p.flags()
- << ", " << p.index();
+ return os << Brief(*p.constant()) << ", " << p.length() << ", " << p.flags();
}
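The three functions above change in lockstep: operator== and hash_value both pick up feedback() and drop index(), and operator<< stops printing the removed index. The invariant they preserve (fields compared for equality must also be hashed) shown standalone, with a boost-style combine standing in for base::hash_combine:

    #include <cstddef>
    #include <functional>

    struct Params {
      int length;
      int flags;
      bool operator==(const Params& o) const {
        return length == o.length && flags == o.flags;
      }
    };

    // Every field consulted by operator== must also feed hash_value, so that
    // equal parameter objects hash alike; omitting one breaks caches keyed
    // on the hash.
    size_t hash_value(const Params& p) {
      size_t h = std::hash<int>{}(p.length);
      h ^= std::hash<int>{}(p.flags) + 0x9e3779b9 + (h << 6) + (h >> 2);
      return h;
    }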
@@ -514,6 +514,26 @@ const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op) {
return OpParameter<CreateLiteralParameters>(op);
}
+size_t hash_value(ForInMode mode) { return static_cast<uint8_t>(mode); }
+
+std::ostream& operator<<(std::ostream& os, ForInMode mode) {
+ switch (mode) {
+ case ForInMode::kUseEnumCacheKeysAndIndices:
+ return os << "UseEnumCacheKeysAndIndices";
+ case ForInMode::kUseEnumCacheKeys:
+ return os << "UseEnumCacheKeys";
+ case ForInMode::kGeneric:
+ return os << "Generic";
+ }
+ UNREACHABLE();
+}
+
+ForInMode ForInModeOf(Operator const* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSForInNext ||
+ op->opcode() == IrOpcode::kJSForInPrepare);
+ return OpParameter<ForInMode>(op);
+}
+
BinaryOperationHint BinaryOperationHintOf(const Operator* op) {
DCHECK_EQ(IrOpcode::kJSAdd, op->opcode());
return OpParameter<BinaryOperationHint>(op);
@@ -555,8 +575,7 @@ CompareOperationHint CompareOperationHintOf(const Operator* op) {
V(HasInPrototypeChain, Operator::kNoProperties, 2, 1) \
V(InstanceOf, Operator::kNoProperties, 2, 1) \
V(OrdinaryHasInstance, Operator::kNoProperties, 2, 1) \
- V(ForInNext, Operator::kNoProperties, 4, 1) \
- V(ForInPrepare, Operator::kNoProperties, 1, 3) \
+ V(ForInEnumerate, Operator::kNoProperties, 1, 1) \
V(LoadMessage, Operator::kNoThrow | Operator::kNoWrite, 0, 1) \
V(StoreMessage, Operator::kNoRead | Operator::kNoThrow, 1, 0) \
V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1) \
@@ -855,6 +874,23 @@ const Operator* JSOperatorBuilder::LoadProperty(
access); // parameter
}
+const Operator* JSOperatorBuilder::ForInNext(ForInMode mode) {
+ return new (zone()) Operator1<ForInMode>( // --
+ IrOpcode::kJSForInNext, Operator::kNoProperties, // opcode
+ "JSForInNext", // name
+ 4, 1, 1, 1, 1, 2, // counts
+ mode); // parameter
+}
+
+const Operator* JSOperatorBuilder::ForInPrepare(ForInMode mode) {
+ return new (zone()) Operator1<ForInMode>( // --
+ IrOpcode::kJSForInPrepare, // opcode
+ Operator::kNoWrite | Operator::kNoThrow, // flags
+ "JSForInPrepare", // name
+ 1, 1, 1, 3, 1, 1, // counts
+ mode); // parameter
+}
+
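Reading aid for the "counts" in these definitions, assuming they follow Operator's usual (value_in, effect_in, control_in, value_out, effect_out, control_out) order: JSForInNext can throw, hence its two control outputs; JSForInPrepare yields a three-value triple later unpacked via projections.

    struct OperatorCounts {
      int value_in, effect_in, control_in;
      int value_out, effect_out, control_out;
    };

    constexpr OperatorCounts kForInNext{4, 1, 1, 1, 1, 2};     // 2nd control out: exception
    constexpr OperatorCounts kForInPrepare{1, 1, 1, 3, 1, 1};  // 3 value outs: the triple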
const Operator* JSOperatorBuilder::GeneratorStore(int register_count) {
return new (zone()) Operator1<int>( // --
IrOpcode::kJSGeneratorStore, Operator::kNoThrow, // opcode
@@ -1013,36 +1049,41 @@ const Operator* JSOperatorBuilder::CreateClosure(
}
const Operator* JSOperatorBuilder::CreateLiteralArray(
- Handle<ConstantElementsPair> constant_elements, int literal_flags,
- int literal_index, int number_of_elements) {
- CreateLiteralParameters parameters(constant_elements, number_of_elements,
- literal_flags, literal_index);
- return new (zone()) Operator1<CreateLiteralParameters>( // --
- IrOpcode::kJSCreateLiteralArray, Operator::kNoProperties, // opcode
- "JSCreateLiteralArray", // name
- 1, 1, 1, 1, 1, 2, // counts
- parameters); // parameter
-}
-
-const Operator* JSOperatorBuilder::CreateEmptyLiteralArray(int literal_index) {
- return new (zone()) Operator1<int>( // --
- IrOpcode::kJSCreateEmptyLiteralArray, // opcode
- Operator::kNoProperties, // properties
- "JSCreateEmptyLiteralArray", // name
- 1, 1, 1, 1, 1, 2, // counts
- literal_index); // parameter
+ Handle<ConstantElementsPair> constant_elements,
+ VectorSlotPair const& feedback, int literal_flags, int number_of_elements) {
+ CreateLiteralParameters parameters(constant_elements, feedback,
+ number_of_elements, literal_flags);
+ return new (zone()) Operator1<CreateLiteralParameters>( // --
+ IrOpcode::kJSCreateLiteralArray, // opcode
+ Operator::kNoProperties, // properties
+ "JSCreateLiteralArray", // name
+ 0, 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
+}
+
+const Operator* JSOperatorBuilder::CreateEmptyLiteralArray(
+ VectorSlotPair const& feedback) {
+ FeedbackParameter parameters(feedback);
+ return new (zone()) Operator1<FeedbackParameter>( // --
+ IrOpcode::kJSCreateEmptyLiteralArray, // opcode
+ Operator::kEliminatable, // properties
+ "JSCreateEmptyLiteralArray", // name
+ 0, 1, 1, 1, 1, 0, // counts
+ parameters); // parameter
}
const Operator* JSOperatorBuilder::CreateLiteralObject(
- Handle<BoilerplateDescription> constant_properties, int literal_flags,
- int literal_index, int number_of_properties) {
- CreateLiteralParameters parameters(constant_properties, number_of_properties,
- literal_flags, literal_index);
- return new (zone()) Operator1<CreateLiteralParameters>( // --
- IrOpcode::kJSCreateLiteralObject, Operator::kNoProperties, // opcode
- "JSCreateLiteralObject", // name
- 1, 1, 1, 1, 1, 2, // counts
- parameters); // parameter
+ Handle<BoilerplateDescription> constant_properties,
+ VectorSlotPair const& feedback, int literal_flags,
+ int number_of_properties) {
+ CreateLiteralParameters parameters(constant_properties, feedback,
+ number_of_properties, literal_flags);
+ return new (zone()) Operator1<CreateLiteralParameters>( // --
+ IrOpcode::kJSCreateLiteralObject, // opcode
+ Operator::kNoProperties, // properties
+ "JSCreateLiteralObject", // name
+ 0, 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
}
const Operator* JSOperatorBuilder::CreateEmptyLiteralObject() {
@@ -1054,14 +1095,16 @@ const Operator* JSOperatorBuilder::CreateEmptyLiteralObject() {
}
const Operator* JSOperatorBuilder::CreateLiteralRegExp(
- Handle<String> constant_pattern, int literal_flags, int literal_index) {
- CreateLiteralParameters parameters(constant_pattern, -1, literal_flags,
- literal_index);
- return new (zone()) Operator1<CreateLiteralParameters>( // --
- IrOpcode::kJSCreateLiteralRegExp, Operator::kNoProperties, // opcode
- "JSCreateLiteralRegExp", // name
- 1, 1, 1, 1, 1, 2, // counts
- parameters); // parameter
+ Handle<String> constant_pattern, VectorSlotPair const& feedback,
+ int literal_flags) {
+ CreateLiteralParameters parameters(constant_pattern, feedback, -1,
+ literal_flags);
+ return new (zone()) Operator1<CreateLiteralParameters>( // --
+ IrOpcode::kJSCreateLiteralRegExp, // opcode
+ Operator::kNoProperties, // properties
+ "JSCreateLiteralRegExp", // name
+ 0, 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
}
const Operator* JSOperatorBuilder::CreateFunctionContext(int slot_count,
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 5ea288f355..0d47005824 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -366,8 +366,8 @@ std::ostream& operator<<(std::ostream&, StoreNamedOwnParameters const&);
const StoreNamedOwnParameters& StoreNamedOwnParametersOf(const Operator* op);
// Defines the feedback, i.e., vector and index, for storing a data property in
-// an object literal. This is
-// used as a parameter by the JSStoreDataPropertyInLiteral operator.
+// an object literal. This is used as a parameter by the
+// JSCreateEmptyLiteralArray and JSStoreDataPropertyInLiteral operators.
class FeedbackParameter final {
public:
explicit FeedbackParameter(VectorSlotPair const& feedback)
@@ -561,20 +561,23 @@ const CreateClosureParameters& CreateClosureParametersOf(const Operator* op);
// JSCreateLiteralRegExp operators.
class CreateLiteralParameters final {
public:
- CreateLiteralParameters(Handle<HeapObject> constant, int length, int flags,
- int index)
- : constant_(constant), length_(length), flags_(flags), index_(index) {}
+ CreateLiteralParameters(Handle<HeapObject> constant,
+ VectorSlotPair const& feedback, int length, int flags)
+ : constant_(constant),
+ feedback_(feedback),
+ length_(length),
+ flags_(flags) {}
Handle<HeapObject> constant() const { return constant_; }
+ VectorSlotPair const& feedback() const { return feedback_; }
int length() const { return length_; }
int flags() const { return flags_; }
- int index() const { return index_; }
private:
Handle<HeapObject> const constant_;
+ VectorSlotPair const feedback_;
int const length_;
int const flags_;
- int const index_;
};
bool operator==(CreateLiteralParameters const&, CreateLiteralParameters const&);
@@ -586,6 +589,19 @@ std::ostream& operator<<(std::ostream&, CreateLiteralParameters const&);
const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op);
+// Descriptor used by the JSForInPrepare and JSForInNext opcodes.
+enum class ForInMode : uint8_t {
+ kUseEnumCacheKeysAndIndices,
+ kUseEnumCacheKeys,
+ kGeneric
+};
+
+size_t hash_value(ForInMode);
+
+std::ostream& operator<<(std::ostream&, ForInMode);
+
+ForInMode ForInModeOf(Operator const* op) WARN_UNUSED_RESULT;
+
BinaryOperationHint BinaryOperationHintOf(const Operator* op);
CompareOperationHint CompareOperationHintOf(const Operator* op);
@@ -634,16 +650,18 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* CreateIterResultObject();
const Operator* CreateKeyValueArray();
const Operator* CreateLiteralArray(Handle<ConstantElementsPair> constant,
- int literal_flags, int literal_index,
- int number_of_elements);
- const Operator* CreateEmptyLiteralArray(int literal_index);
+ VectorSlotPair const& feedback,
+ int literal_flags, int number_of_elements);
+ const Operator* CreateEmptyLiteralArray(VectorSlotPair const& feedback);
const Operator* CreateEmptyLiteralObject();
const Operator* CreateLiteralObject(Handle<BoilerplateDescription> constant,
- int literal_flags, int literal_index,
+ VectorSlotPair const& feedback,
+ int literal_flags,
int number_of_properties);
const Operator* CreateLiteralRegExp(Handle<String> constant_pattern,
- int literal_flags, int literal_index);
+ VectorSlotPair const& feedback,
+ int literal_flags);
const Operator* CallForwardVarargs(size_t arity, uint32_t start_index);
const Operator* Call(
@@ -708,8 +726,9 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* InstanceOf();
const Operator* OrdinaryHasInstance();
- const Operator* ForInNext();
- const Operator* ForInPrepare();
+ const Operator* ForInEnumerate();
+ const Operator* ForInNext(ForInMode);
+ const Operator* ForInPrepare(ForInMode);
const Operator* LoadMessage();
const Operator* StoreMessage();
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index 43f84506bf..98b336ce97 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -212,21 +212,37 @@ JSTypeHintLowering::JSTypeHintLowering(JSGraph* jsgraph,
Flags flags)
: jsgraph_(jsgraph), flags_(flags), feedback_vector_(feedback_vector) {}
-Reduction JSTypeHintLowering::ReduceBinaryOperation(const Operator* op,
- Node* left, Node* right,
- Node* effect, Node* control,
- FeedbackSlot slot) const {
+JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
+ const Operator* op, Node* left, Node* right, Node* effect, Node* control,
+ FeedbackSlot slot) const {
switch (op->opcode()) {
- case IrOpcode::kJSStrictEqual:
+ case IrOpcode::kJSStrictEqual: {
+ DCHECK(!slot.IsInvalid());
+ CompareICNexus nexus(feedback_vector(), slot);
+ if (Node* node = TryBuildSoftDeopt(
+ nexus, effect, control,
+ DeoptimizeReason::kInsufficientTypeFeedbackForCompareOperation)) {
+ return LoweringResult::Exit(node);
+ }
+ // TODO(turbofan): Should we generally support early lowering of
+ // JSStrictEqual operators here?
break;
+ }
case IrOpcode::kJSEqual:
case IrOpcode::kJSLessThan:
case IrOpcode::kJSGreaterThan:
case IrOpcode::kJSLessThanOrEqual:
case IrOpcode::kJSGreaterThanOrEqual: {
+ DCHECK(!slot.IsInvalid());
+ CompareICNexus nexus(feedback_vector(), slot);
+ if (Node* node = TryBuildSoftDeopt(
+ nexus, effect, control,
+ DeoptimizeReason::kInsufficientTypeFeedbackForCompareOperation)) {
+ return LoweringResult::Exit(node);
+ }
JSSpeculativeBinopBuilder b(this, op, left, right, effect, control, slot);
if (Node* node = b.TryBuildNumberCompare()) {
- return Reduction(node);
+ return LoweringResult::SideEffectFree(node, node, control);
}
break;
}
@@ -241,9 +257,16 @@ Reduction JSTypeHintLowering::ReduceBinaryOperation(const Operator* op,
case IrOpcode::kJSMultiply:
case IrOpcode::kJSDivide:
case IrOpcode::kJSModulus: {
+ DCHECK(!slot.IsInvalid());
+ BinaryOpICNexus nexus(feedback_vector(), slot);
+ if (Node* node = TryBuildSoftDeopt(
+ nexus, effect, control,
+ DeoptimizeReason::kInsufficientTypeFeedbackForBinaryOperation)) {
+ return LoweringResult::Exit(node);
+ }
JSSpeculativeBinopBuilder b(this, op, left, right, effect, control, slot);
if (Node* node = b.TryBuildNumberBinop()) {
- return Reduction(node);
+ return LoweringResult::SideEffectFree(node, node, control);
}
break;
}
@@ -251,10 +274,10 @@ Reduction JSTypeHintLowering::ReduceBinaryOperation(const Operator* op,
UNREACHABLE();
break;
}
- return Reduction();
+ return LoweringResult::NoChange();
}
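Every arm of the switch above now follows the same shape: consult the feedback nexus first and exit to a soft deoptimization when the IC has gathered no useful feedback, then attempt a speculative lowering, and otherwise report no change. The decision ladder, sketched with hypothetical types:

    enum class FeedbackKind { kNone, kNumber, kAny };
    enum class Outcome { kExit, kSideEffectFree, kNoChange };

    // Insufficient feedback => soft deopt (Exit); usable number feedback =>
    // speculative lowering (SideEffectFree); anything else => generic op.
    Outcome ReduceBinaryOp(FeedbackKind feedback) {
      if (feedback == FeedbackKind::kNone) return Outcome::kExit;
      if (feedback == FeedbackKind::kNumber) return Outcome::kSideEffectFree;
      return Outcome::kNoChange;
    }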
-Reduction JSTypeHintLowering::ReduceForInNextOperation(
+JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceForInNextOperation(
Node* receiver, Node* cache_array, Node* cache_type, Node* index,
Node* effect, Node* control, FeedbackSlot slot) const {
DCHECK(!slot.IsInvalid());
@@ -262,24 +285,27 @@ Reduction JSTypeHintLowering::ReduceForInNextOperation(
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForForIn)) {
- return Reduction(node);
+ return LoweringResult::Exit(node);
}
- if (!nexus.IsGeneric()) {
- effect =
- jsgraph()->graph()->NewNode(jsgraph()->simplified()->CheckMapValue(),
- receiver, cache_type, effect, control);
- Node* node = jsgraph()->graph()->NewNode(
- jsgraph()->simplified()->LoadElement(
- AccessBuilder::ForDescriptorArrayEnumCacheBridgeCacheElement()),
- cache_array, index, effect, control);
- return Reduction(node);
+ return LoweringResult::NoChange();
+}
+
+JSTypeHintLowering::LoweringResult
+JSTypeHintLowering::ReduceForInPrepareOperation(Node* enumerator, Node* effect,
+ Node* control,
+ FeedbackSlot slot) const {
+ DCHECK(!slot.IsInvalid());
+ ForInICNexus nexus(feedback_vector(), slot);
+ if (Node* node = TryBuildSoftDeopt(
+ nexus, effect, control,
+ DeoptimizeReason::kInsufficientTypeFeedbackForForIn)) {
+ return LoweringResult::Exit(node);
}
- return Reduction();
+ return LoweringResult::NoChange();
}
-Reduction JSTypeHintLowering::ReduceToNumberOperation(Node* input, Node* effect,
- Node* control,
- FeedbackSlot slot) const {
+JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceToNumberOperation(
+ Node* input, Node* effect, Node* control, FeedbackSlot slot) const {
DCHECK(!slot.IsInvalid());
BinaryOpICNexus nexus(feedback_vector(), slot);
NumberOperationHint hint;
@@ -288,16 +314,14 @@ Reduction JSTypeHintLowering::ReduceToNumberOperation(Node* input, Node* effect,
Node* node = jsgraph()->graph()->NewNode(
jsgraph()->simplified()->SpeculativeToNumber(hint), input, effect,
control);
- return Reduction(node);
+ return LoweringResult::SideEffectFree(node, node, control);
}
- return Reduction();
+ return LoweringResult::NoChange();
}
-Reduction JSTypeHintLowering::ReduceCallOperation(const Operator* op,
- Node* const* args,
- int arg_count, Node* effect,
- Node* control,
- FeedbackSlot slot) const {
+JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceCallOperation(
+ const Operator* op, Node* const* args, int arg_count, Node* effect,
+ Node* control, FeedbackSlot slot) const {
DCHECK(op->opcode() == IrOpcode::kJSCall ||
op->opcode() == IrOpcode::kJSCallWithSpread);
DCHECK(!slot.IsInvalid());
@@ -305,12 +329,12 @@ Reduction JSTypeHintLowering::ReduceCallOperation(const Operator* op,
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForCall)) {
- return Reduction(node);
+ return LoweringResult::Exit(node);
}
- return Reduction();
+ return LoweringResult::NoChange();
}
-Reduction JSTypeHintLowering::ReduceConstructOperation(
+JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceConstructOperation(
const Operator* op, Node* const* args, int arg_count, Node* effect,
Node* control, FeedbackSlot slot) const {
DCHECK(op->opcode() == IrOpcode::kJSConstruct ||
@@ -320,13 +344,13 @@ Reduction JSTypeHintLowering::ReduceConstructOperation(
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForConstruct)) {
- return Reduction(node);
+ return LoweringResult::Exit(node);
}
- return Reduction();
+ return LoweringResult::NoChange();
}
-Reduction JSTypeHintLowering::ReduceLoadNamedOperation(
- const Operator* op, Node* obj, Node* effect, Node* control,
+JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceLoadNamedOperation(
+ const Operator* op, Node* receiver, Node* effect, Node* control,
FeedbackSlot slot) const {
DCHECK_EQ(IrOpcode::kJSLoadNamed, op->opcode());
DCHECK(!slot.IsInvalid());
@@ -334,12 +358,12 @@ Reduction JSTypeHintLowering::ReduceLoadNamedOperation(
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess)) {
- return Reduction(node);
+ return LoweringResult::Exit(node);
}
- return Reduction();
+ return LoweringResult::NoChange();
}
-Reduction JSTypeHintLowering::ReduceLoadKeyedOperation(
+JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceLoadKeyedOperation(
const Operator* op, Node* obj, Node* key, Node* effect, Node* control,
FeedbackSlot slot) const {
DCHECK_EQ(IrOpcode::kJSLoadProperty, op->opcode());
@@ -348,14 +372,16 @@ Reduction JSTypeHintLowering::ReduceLoadKeyedOperation(
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess)) {
- return Reduction(node);
+ return LoweringResult::Exit(node);
}
- return Reduction();
+ return LoweringResult::NoChange();
}
-Reduction JSTypeHintLowering::ReduceStoreNamedOperation(
- const Operator* op, Node* obj, Node* val, Node* effect, Node* control,
- FeedbackSlot slot) const {
+JSTypeHintLowering::LoweringResult
+JSTypeHintLowering::ReduceStoreNamedOperation(const Operator* op, Node* obj,
+ Node* val, Node* effect,
+ Node* control,
+ FeedbackSlot slot) const {
DCHECK(op->opcode() == IrOpcode::kJSStoreNamed ||
op->opcode() == IrOpcode::kJSStoreNamedOwn);
DCHECK(!slot.IsInvalid());
@@ -363,23 +389,25 @@ Reduction JSTypeHintLowering::ReduceStoreNamedOperation(
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess)) {
- return Reduction(node);
+ return LoweringResult::Exit(node);
}
- return Reduction();
+ return LoweringResult::NoChange();
}
-Reduction JSTypeHintLowering::ReduceStoreKeyedOperation(
- const Operator* op, Node* obj, Node* key, Node* val, Node* effect,
- Node* control, FeedbackSlot slot) const {
+JSTypeHintLowering::LoweringResult
+JSTypeHintLowering::ReduceStoreKeyedOperation(const Operator* op, Node* obj,
+ Node* key, Node* val,
+ Node* effect, Node* control,
+ FeedbackSlot slot) const {
DCHECK_EQ(IrOpcode::kJSStoreProperty, op->opcode());
DCHECK(!slot.IsInvalid());
KeyedStoreICNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess)) {
- return Reduction(node);
+ return LoweringResult::Exit(node);
}
- return Reduction();
+ return LoweringResult::NoChange();
}
Node* JSTypeHintLowering::TryBuildSoftDeopt(FeedbackNexus& nexus, Node* effect,
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.h b/deps/v8/src/compiler/js-type-hint-lowering.h
index c9f17a4034..f6cc65c602 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.h
+++ b/deps/v8/src/compiler/js-type-hint-lowering.h
@@ -33,13 +33,8 @@ class Operator;
// the JavaScript-level operators and directly emit simplified-level operators
// even during initial graph building. This is the reason this lowering doesn't
// follow the interface of the reducer framework used after graph construction.
-//
-// Also note that all reductions returned by this lowering will not produce any
-// control-output, but might very well produce an effect-output. The one node
-// returned as a replacement must fully describe the effect (i.e. produce the
-// effect and carry {Operator::Property} for the entire lowering). Use-sites
-// rely on this invariant, if it ever changes we need to switch the interface
-// away from using the {Reduction} class.
+// The result of the lowering is encapsulated in the
+// {JSTypeHintLowering::LoweringResult} class.
class JSTypeHintLowering {
public:
// Flags that control the mode of operation.
@@ -49,45 +44,107 @@ class JSTypeHintLowering {
JSTypeHintLowering(JSGraph* jsgraph, Handle<FeedbackVector> feedback_vector,
Flags flags);
+ // {LoweringResult} describes the result of lowering. The following outcomes
+ // are possible:
+ //
+  // - operation was lowered to a side-effect-free operation; the resulting
+ // value, effect and control can be obtained by the {value}, {effect} and
+ // {control} methods.
+ //
+ // - operation was lowered to a graph exit (deoptimization). The caller
+ // should connect {effect} and {control} nodes to the end.
+ //
+ // - no lowering happened. The caller needs to create the generic version
+ // of the operation.
+ class LoweringResult {
+ public:
+ Node* value() const { return value_; }
+ Node* effect() const { return effect_; }
+ Node* control() const { return control_; }
+
+ bool Changed() const { return kind_ != LoweringResultKind::kNoChange; }
+ bool IsExit() const { return kind_ == LoweringResultKind::kExit; }
+ bool IsSideEffectFree() const {
+ return kind_ == LoweringResultKind::kSideEffectFree;
+ }
+
+ static LoweringResult SideEffectFree(Node* value, Node* effect,
+ Node* control) {
+ DCHECK_NOT_NULL(effect);
+ DCHECK_NOT_NULL(control);
+ return LoweringResult(LoweringResultKind::kSideEffectFree, value, effect,
+ control);
+ }
+
+ static LoweringResult NoChange() {
+ return LoweringResult(LoweringResultKind::kNoChange, nullptr, nullptr,
+ nullptr);
+ }
+
+ static LoweringResult Exit(Node* control) {
+ return LoweringResult(LoweringResultKind::kExit, nullptr, nullptr,
+ control);
+ }
+
+ private:
+ enum class LoweringResultKind { kNoChange, kSideEffectFree, kExit };
+
+ LoweringResult(LoweringResultKind kind, Node* value, Node* effect,
+ Node* control)
+ : kind_(kind), value_(value), effect_(effect), control_(control) {}
+
+ LoweringResultKind kind_;
+ Node* value_;
+ Node* effect_;
+ Node* control_;
+ };
+
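How a caller is expected to dispatch on the three outcomes, sketched with toy stand-ins (the real graph-builder plumbing differs):

    #include <cstdio>

    struct Node { const char* label; };

    struct Result {
      enum class Kind { kNoChange, kSideEffectFree, kExit } kind;
      Node* node;
      bool IsExit() const { return kind == Kind::kExit; }
      bool IsSideEffectFree() const { return kind == Kind::kSideEffectFree; }
    };

    void ApplyLowering(const Result& result) {
      if (result.IsExit()) {
        std::printf("wire %s into the graph end (deopt exit)\n", result.node->label);
      } else if (result.IsSideEffectFree()) {
        std::printf("adopt %s as value and effect\n", result.node->label);
      } else {
        std::printf("build the generic JS operator\n");
      }
    }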
// Potential reduction of binary (arithmetic, logical, shift and relational
// comparison) operations.
- Reduction ReduceBinaryOperation(const Operator* op, Node* left, Node* right,
- Node* effect, Node* control,
- FeedbackSlot slot) const;
-
- // Potential reduction to ForInNext operations
- Reduction ReduceForInNextOperation(Node* receiver, Node* cache_array,
- Node* cache_type, Node* index,
- Node* effect, Node* control,
- FeedbackSlot slot) const;
+ LoweringResult ReduceBinaryOperation(const Operator* op, Node* left,
+ Node* right, Node* effect, Node* control,
+ FeedbackSlot slot) const;
+
+ // Potential reduction to for..in operations
+ LoweringResult ReduceForInNextOperation(Node* receiver, Node* cache_array,
+ Node* cache_type, Node* index,
+ Node* effect, Node* control,
+ FeedbackSlot slot) const;
+ LoweringResult ReduceForInPrepareOperation(Node* enumerator, Node* effect,
+ Node* control,
+ FeedbackSlot slot) const;
// Potential reduction to ToNumber operations
- Reduction ReduceToNumberOperation(Node* value, Node* effect, Node* control,
- FeedbackSlot slot) const;
+ LoweringResult ReduceToNumberOperation(Node* value, Node* effect,
+ Node* control,
+ FeedbackSlot slot) const;
// Potential reduction of call operations.
- Reduction ReduceCallOperation(const Operator* op, Node* const* args,
- int arg_count, Node* effect, Node* control,
- FeedbackSlot slot) const;
-
- // Potential reduction of construct operations.
- Reduction ReduceConstructOperation(const Operator* op, Node* const* args,
+ LoweringResult ReduceCallOperation(const Operator* op, Node* const* args,
int arg_count, Node* effect, Node* control,
FeedbackSlot slot) const;
+ // Potential reduction of construct operations.
+ LoweringResult ReduceConstructOperation(const Operator* op, Node* const* args,
+ int arg_count, Node* effect,
+ Node* control,
+ FeedbackSlot slot) const;
// Potential reduction of property access operations.
- Reduction ReduceLoadNamedOperation(const Operator* op, Node* obj,
- Node* effect, Node* control,
- FeedbackSlot slot) const;
- Reduction ReduceLoadKeyedOperation(const Operator* op, Node* obj, Node* key,
- Node* effect, Node* control,
- FeedbackSlot slot) const;
- Reduction ReduceStoreNamedOperation(const Operator* op, Node* obj, Node* val,
- Node* effect, Node* control,
- FeedbackSlot slot) const;
- Reduction ReduceStoreKeyedOperation(const Operator* op, Node* obj, Node* key,
- Node* val, Node* effect, Node* control,
- FeedbackSlot slot) const;
+ LoweringResult ReduceLoadNamedOperation(const Operator* op, Node* obj,
+ Node* effect, Node* control,
+ FeedbackSlot slot) const;
+ LoweringResult ReduceLoadKeyedOperation(const Operator* op, Node* obj,
+ Node* key, Node* effect,
+ Node* control,
+ FeedbackSlot slot) const;
+ LoweringResult ReduceStoreNamedOperation(const Operator* op, Node* obj,
+ Node* val, Node* effect,
+ Node* control,
+ FeedbackSlot slot) const;
+ LoweringResult ReduceStoreKeyedOperation(const Operator* op, Node* obj,
+ Node* key, Node* val, Node* effect,
+ Node* control,
+ FeedbackSlot slot) const;
private:
friend class JSSpeculativeBinopBuilder;
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index ab5dcc3c86..6f50ba15a3 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -439,6 +439,36 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
}
if (r.OneInputIs(Type::String())) {
+ // We know that (at least) one input is already a String,
+ // so try to strength-reduce the non-String input.
+ if (r.LeftInputIs(Type::String())) {
+ Reduction const reduction = ReduceJSToStringInput(r.right());
+ if (reduction.Changed()) {
+ NodeProperties::ReplaceValueInput(node, reduction.replacement(), 1);
+ }
+ } else if (r.RightInputIs(Type::String())) {
+ Reduction const reduction = ReduceJSToStringInput(r.left());
+ if (reduction.Changed()) {
+ NodeProperties::ReplaceValueInput(node, reduction.replacement(), 0);
+ }
+ }
+ // We might be able to constant-fold the String concatenation now.
+ if (r.BothInputsAre(Type::String())) {
+ HeapObjectBinopMatcher m(node);
+ if (m.IsFoldable()) {
+ Handle<String> left = Handle<String>::cast(m.left().Value());
+ Handle<String> right = Handle<String>::cast(m.right().Value());
+ if (left->length() + right->length() > String::kMaxLength) {
+ // No point in trying to optimize this, as it will just throw.
+ return NoChange();
+ }
+ Node* value = jsgraph()->HeapConstant(
+ factory()->NewConsString(left, right).ToHandleChecked());
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ }
+ // We might know for sure that we're creating a ConsString here.
if (r.ShouldCreateConsString()) {
return ReduceCreateConsString(node);
}
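The constant-folding path added above concatenates two constant strings at compile time, unless the result would exceed the maximum string length, in which case the node is left alone because the operation is guaranteed to throw at runtime. A sketch (the limit below is a placeholder, not the actual String::kMaxLength):

    #include <cstdint>
    #include <optional>
    #include <string>

    constexpr uint64_t kMaxStringLength = (1u << 28) - 16;  // placeholder value

    std::optional<std::string> TryFoldConcat(const std::string& l,
                                             const std::string& r) {
      if (static_cast<uint64_t>(l.size()) + r.size() > kMaxStringLength)
        return std::nullopt;  // would throw at runtime; don't fold
      return l + r;           // the folded constant value
    }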
@@ -638,6 +668,7 @@ Reduction JSTypedLowering::ReduceCreateConsString(Node* node) {
// Morph the {node} into a {FinishRegion}.
ReplaceWithValue(node, node, node, control);
+ NodeProperties::SetType(value, NodeProperties::GetType(node));
node->ReplaceInput(0, value);
node->ReplaceInput(1, effect);
node->TrimInputCount(2);
@@ -1030,6 +1061,16 @@ Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
if (input_type->Is(Type::Null())) {
return Replace(jsgraph()->HeapConstant(factory()->null_string()));
}
+ if (input_type->Is(Type::NaN())) {
+ return Replace(jsgraph()->HeapConstant(factory()->NaN_string()));
+ }
+ if (input_type->Is(Type::OrderedNumber()) &&
+ input_type->Min() == input_type->Max()) {
+ // Note that we can use Type::OrderedNumber(), since
+ // both 0 and -0 map to the String "0" in JavaScript.
+ return Replace(jsgraph()->HeapConstant(
+ factory()->NumberToString(factory()->NewNumber(input_type->Min()))));
+ }
// TODO(turbofan): js-typed-lowering of ToString(x:number)
return NoChange();
}
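The new checks fold JSToString of constant numbers: NaN folds directly, and any OrderedNumber type with Min == Max folds to its string form; since -0 == 0 as doubles, both collapse to "0". A hedged sketch, with std::to_string standing in for the engine's NumberToString (their formatting differs):

    #include <cmath>
    #include <optional>
    #include <string>

    std::optional<std::string> TryFoldToString(double min, double max) {
      if (std::isnan(min) && std::isnan(max)) return std::string("NaN");
      if (std::isnan(min) || std::isnan(max)) return std::nullopt;
      if (min != max) return std::nullopt;      // not a singleton type
      if (min == 0.0) return std::string("0");  // covers both 0 and -0
      return std::to_string(min);               // stand-in for NumberToString
    }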
@@ -1894,9 +1935,9 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
return NoChange();
}
-
Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
DCHECK_EQ(IrOpcode::kJSForInNext, node->opcode());
+ ForInMode const mode = ForInModeOf(node->op());
Node* receiver = NodeProperties::GetValueInput(node, 0);
Node* cache_array = NodeProperties::GetValueInput(node, 1);
Node* cache_type = NodeProperties::GetValueInput(node, 2);
@@ -1906,72 +1947,229 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- // Load the next {key} from the {cache_array}.
- Node* key = effect = graph()->NewNode(
- simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()),
- cache_array, index, effect, control);
-
// Load the map of the {receiver}.
Node* receiver_map = effect =
graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
receiver, effect, control);
- // Check if the expected map still matches that of the {receiver}.
- Node* check0 = graph()->NewNode(simplified()->ReferenceEqual(), receiver_map,
- cache_type);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+ switch (mode) {
+ case ForInMode::kUseEnumCacheKeys:
+ case ForInMode::kUseEnumCacheKeysAndIndices: {
+ // Ensure that the expected map still matches that of the {receiver}.
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
+ receiver_map, cache_type);
+ effect =
+ graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+
+ ReplaceWithValue(node, node, effect, control);
+
+ // Morph the {node} into a LoadElement.
+ node->ReplaceInput(0, cache_array);
+ node->ReplaceInput(1, index);
+ node->ReplaceInput(2, effect);
+ node->ReplaceInput(3, control);
+ node->TrimInputCount(4);
+ NodeProperties::ChangeOp(
+ node,
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()));
+ NodeProperties::SetType(node, Type::InternalizedString());
+ break;
+ }
+ case ForInMode::kGeneric: {
+ // Load the next {key} from the {cache_array}.
+ Node* key = effect = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()),
+ cache_array, index, effect, control);
+
+ // Check if the expected map still matches that of the {receiver}.
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
+ receiver_map, cache_type);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue;
+ Node* vtrue;
+ {
+ // Don't need filtering since expected map still matches that of the
+ // {receiver}.
+ etrue = effect;
+ vtrue = key;
+ }
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0;
- Node* vtrue0;
- {
- // Don't need filtering since expected map still matches that of the
- // {receiver}.
- etrue0 = effect;
- vtrue0 = key;
- }
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse;
+ Node* vfalse;
+ {
+ // Filter the {key} to check if it's still a valid property of the
+ // {receiver} (does the ToName conversion implicitly).
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kForInFilter);
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState);
+ vfalse = efalse = if_false = graph()->NewNode(
+ common()->Call(desc), jsgraph()->HeapConstant(callable.code()), key,
+ receiver, context, frame_state, effect, if_false);
+
+ // Update potential {IfException} uses of {node} to point to the above
+ // ForInFilter stub call node instead.
+ Node* if_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &if_exception)) {
+ if_false = graph()->NewNode(common()->IfSuccess(), vfalse);
+ NodeProperties::ReplaceControlInput(if_exception, vfalse);
+ NodeProperties::ReplaceEffectInput(if_exception, efalse);
+ Revisit(if_exception);
+ }
+ }
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0;
- Node* vfalse0;
- {
- // Filter the {key} to check if it's still a valid property of the
- // {receiver} (does the ToName conversion implicitly).
- Callable const callable =
- Builtins::CallableFor(isolate(), Builtins::kForInFilter);
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNeedsFrameState);
- vfalse0 = efalse0 = if_false0 = graph()->NewNode(
- common()->Call(desc), jsgraph()->HeapConstant(callable.code()), key,
- receiver, context, frame_state, effect, if_false0);
-
- // Update potential {IfException} uses of {node} to point to the above
- // ForInFilter stub call node instead.
- Node* if_exception = nullptr;
- if (NodeProperties::IsExceptionalCall(node, &if_exception)) {
- if_false0 = graph()->NewNode(common()->IfSuccess(), vfalse0);
- NodeProperties::ReplaceControlInput(if_exception, vfalse0);
- NodeProperties::ReplaceEffectInput(if_exception, efalse0);
- Revisit(if_exception);
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ ReplaceWithValue(node, node, effect, control);
+
+ // Morph the {node} into a Phi.
+ node->ReplaceInput(0, vtrue);
+ node->ReplaceInput(1, vfalse);
+ node->ReplaceInput(2, control);
+ node->TrimInputCount(3);
+ NodeProperties::ChangeOp(
+ node, common()->Phi(MachineRepresentation::kTagged, 2));
}
}
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
- ReplaceWithValue(node, node, effect, control);
-
- // Morph the {node} into a Phi.
- node->ReplaceInput(0, vtrue0);
- node->ReplaceInput(1, vfalse0);
- node->ReplaceInput(2, control);
- node->TrimInputCount(3);
- NodeProperties::ChangeOp(node,
- common()->Phi(MachineRepresentation::kTagged, 2));
return Changed(node);
}
+Reduction JSTypedLowering::ReduceJSForInPrepare(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSForInPrepare, node->opcode());
+ ForInMode const mode = ForInModeOf(node->op());
+ Node* enumerator = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* cache_type = enumerator;
+ Node* cache_array = nullptr;
+ Node* cache_length = nullptr;
+
+ switch (mode) {
+ case ForInMode::kUseEnumCacheKeys:
+ case ForInMode::kUseEnumCacheKeysAndIndices: {
+ // Check that the {enumerator} is a Map.
+ effect = graph()->NewNode(
+ simplified()->CheckMaps(CheckMapsFlag::kNone,
+ ZoneHandleSet<Map>(factory()->meta_map())),
+ enumerator, effect, control);
+
+ // Load the enum cache from the {enumerator} map.
+ Node* descriptor_array = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapDescriptors()),
+ enumerator, effect, control);
+ Node* enum_cache = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForDescriptorArrayEnumCache()),
+ descriptor_array, effect, control);
+ cache_array = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForEnumCacheKeys()),
+ enum_cache, effect, control);
+
+ // Load the enum length of the {enumerator} map.
+ Node* bit_field3 = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapBitField3()), enumerator,
+ effect, control);
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+ cache_length =
+ graph()->NewNode(simplified()->NumberBitwiseAnd(), bit_field3,
+ jsgraph()->Constant(Map::EnumLengthBits::kMask));
+ break;
+ }
+ case ForInMode::kGeneric: {
+ // Check if the {enumerator} is a Map or a FixedArray.
+ Node* check = graph()->NewNode(
+ simplified()->CompareMaps(ZoneHandleSet<Map>(factory()->meta_map())),
+ enumerator, effect, control);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* cache_array_true;
+ Node* cache_length_true;
+ {
+ // Load the enum cache from the {enumerator} map.
+ Node* descriptor_array = etrue = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapDescriptors()),
+ enumerator, etrue, if_true);
+ Node* enum_cache = etrue =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForDescriptorArrayEnumCache()),
+ descriptor_array, etrue, if_true);
+ cache_array_true = etrue = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForEnumCacheKeys()),
+ enum_cache, etrue, if_true);
+
+ // Load the enum length of the {enumerator} map.
+ Node* bit_field3 = etrue = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapBitField3()),
+ enumerator, etrue, if_true);
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+ cache_length_true =
+ graph()->NewNode(simplified()->NumberBitwiseAnd(), bit_field3,
+ jsgraph()->Constant(Map::EnumLengthBits::kMask));
+ }
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* cache_array_false;
+ Node* cache_length_false;
+ {
+ // The {enumerator} is the FixedArray with the keys to iterate.
+ cache_array_false = enumerator;
+ cache_length_false = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+ cache_array_false, efalse, if_false);
+ }
+
+ // Rewrite the uses of the {node}.
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ cache_array =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ cache_array_true, cache_array_false, control);
+ cache_length =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ cache_length_true, cache_length_false, control);
+ break;
+ }
+ }
+
+ // Update the uses of {node}.
+ for (Edge edge : node->use_edges()) {
+ Node* const user = edge.from();
+ if (NodeProperties::IsEffectEdge(edge)) {
+ edge.UpdateTo(effect);
+ Revisit(user);
+ } else if (NodeProperties::IsControlEdge(edge)) {
+ edge.UpdateTo(control);
+ Revisit(user);
+ } else {
+ DCHECK(NodeProperties::IsValueEdge(edge));
+ switch (ProjectionIndexOf(user->op())) {
+ case 0:
+ Replace(user, cache_type);
+ break;
+ case 1:
+ Replace(user, cache_array);
+ break;
+ case 2:
+ Replace(user, cache_length);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+ node->Kill();
+ return Replace(effect);
+}
+
Reduction JSTypedLowering::ReduceJSLoadMessage(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadMessage, node->opcode());
ExternalReference const ref =
@@ -2140,6 +2338,8 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSCallForwardVarargs(node);
case IrOpcode::kJSCall:
return ReduceJSCall(node);
+ case IrOpcode::kJSForInPrepare:
+ return ReduceJSForInPrepare(node);
case IrOpcode::kJSForInNext:
return ReduceJSForInNext(node);
case IrOpcode::kJSLoadMessage:
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index 4873b8aaf0..80f818e0a4 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -65,6 +65,7 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Reduction ReduceJSCallForwardVarargs(Node* node);
Reduction ReduceJSCall(Node* node);
Reduction ReduceJSForInNext(Node* node);
+ Reduction ReduceJSForInPrepare(Node* node);
Reduction ReduceJSLoadMessage(Node* node);
Reduction ReduceJSStoreMessage(Node* node);
Reduction ReduceJSGeneratorStore(Node* node);
diff --git a/deps/v8/src/compiler/jump-threading.cc b/deps/v8/src/compiler/jump-threading.cc
index a480204e47..c2a84cc9b5 100644
--- a/deps/v8/src/compiler/jump-threading.cc
+++ b/deps/v8/src/compiler/jump-threading.cc
@@ -4,7 +4,6 @@
#include "src/compiler/jump-threading.h"
#include "src/compiler/code-generator-impl.h"
-#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -120,7 +119,7 @@ bool JumpThreading::ComputeForwarding(Zone* local_zone,
#ifdef DEBUG
for (RpoNumber num : result) {
- CHECK(num.IsValid());
+ DCHECK(num.IsValid());
}
#endif
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 1d3b15a817..679771f56e 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -12,7 +12,6 @@
#include "src/compiler/node.h"
#include "src/compiler/osr.h"
#include "src/compiler/pipeline.h"
-#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index a6c3b618e6..0f85fc994f 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -47,12 +47,12 @@ class LinkageLocation {
static LinkageLocation ForRegister(int32_t reg,
MachineType type = MachineType::None()) {
- DCHECK(reg >= 0);
+ DCHECK_LE(0, reg);
return LinkageLocation(REGISTER, reg, type);
}
static LinkageLocation ForCallerFrameSlot(int32_t slot, MachineType type) {
- DCHECK(slot < 0);
+ DCHECK_GT(0, slot);
return LinkageLocation(STACK_SLOT, slot, type);
}
@@ -101,25 +101,6 @@ class LinkageLocation {
return caller_location;
}
- private:
- friend class CallDescriptor;
- friend class OperandGenerator;
-
- enum LocationType { REGISTER, STACK_SLOT };
-
- class TypeField : public BitField<LocationType, 0, 1> {};
- class LocationField : public BitField<int32_t, TypeField::kNext, 31> {};
-
- static const int32_t ANY_REGISTER = -1;
- static const int32_t MAX_STACK_SLOT = 32767;
-
- LinkageLocation(LocationType type, int32_t location,
- MachineType machine_type) {
- bit_field_ = TypeField::encode(type) |
- ((location << LocationField::kShift) & LocationField::kMask);
- machine_type_ = machine_type;
- }
-
MachineType GetType() const { return machine_type_; }
int GetSize() const {
@@ -156,6 +137,22 @@ class LinkageLocation {
return GetLocation();
}
+ private:
+ enum LocationType { REGISTER, STACK_SLOT };
+
+ class TypeField : public BitField<LocationType, 0, 1> {};
+ class LocationField : public BitField<int32_t, TypeField::kNext, 31> {};
+
+ static constexpr int32_t ANY_REGISTER = -1;
+ static constexpr int32_t MAX_STACK_SLOT = 32767;
+
+ LinkageLocation(LocationType type, int32_t location,
+ MachineType machine_type) {
+ bit_field_ = TypeField::encode(type) |
+ ((location << LocationField::kShift) & LocationField::kMask);
+ machine_type_ = machine_type;
+ }
+
int32_t bit_field_;
MachineType machine_type_;
};
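As a side note, TypeField and LocationField pack a one-bit tag and a 31-bit
payload into the single int32_t bit_field_ via V8's BitField template. A
self-contained sketch of the encode/decode mechanics, using a simplified
re-implementation rather than the real V8 template:

    #include <cstdint>

    // Simplified stand-in for V8's BitField: a field of `size` bits
    // starting at bit `shift` within a 32-bit word.
    template <typename T, int shift, int size>
    struct BitFieldSketch {
      static constexpr uint32_t kMask = ((uint32_t{1} << size) - 1) << shift;
      static constexpr int kNext = shift + size;
      static constexpr uint32_t encode(T value) {
        return (static_cast<uint32_t>(value) << shift) & kMask;
      }
      static constexpr T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> shift);
      }
    };

    enum LocationType { REGISTER, STACK_SLOT };
    using TypeField = BitFieldSketch<LocationType, 0, 1>;
    using LocationField = BitFieldSketch<int32_t, TypeField::kNext, 31>;

    static_assert(TypeField::decode(TypeField::encode(STACK_SLOT)) ==
                      STACK_SLOT,
                  "the tag round-trips through the packed word");

Chaining fields via kNext, as LocationField does here, is what lets the
constructor above OR the encoded pieces into one word without overlap.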
@@ -309,8 +306,13 @@ class V8_EXPORT_PRIVATE CallDescriptor final
return allocatable_registers_ != 0;
}
+ void set_save_fp_mode(SaveFPRegsMode mode) { save_fp_mode_ = mode; }
+
+ SaveFPRegsMode get_save_fp_mode() const { return save_fp_mode_; }
+
private:
friend class Linkage;
+ SaveFPRegsMode save_fp_mode_ = kSaveFPRegs;
const Kind kind_;
const MachineType target_type_;
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index 7b7f50dbf5..d3b9879919 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -330,13 +330,29 @@ bool MayAlias(MaybeHandle<Name> x, MaybeHandle<Name> y) {
} // namespace
+class LoadElimination::AliasStateInfo {
+ public:
+ AliasStateInfo(const AbstractState* state, Node* object, Handle<Map> map)
+ : state_(state), object_(object), map_(map) {}
+ AliasStateInfo(const AbstractState* state, Node* object)
+ : state_(state), object_(object) {}
+
+ bool MayAlias(Node* other) const;
+
+ private:
+ const AbstractState* state_;
+ Node* object_;
+ MaybeHandle<Map> map_;
+};
+
LoadElimination::AbstractField const* LoadElimination::AbstractField::Kill(
- Node* object, MaybeHandle<Name> name, Zone* zone) const {
+ const AliasStateInfo& alias_info, MaybeHandle<Name> name,
+ Zone* zone) const {
for (auto pair : this->info_for_node_) {
- if (MayAlias(object, pair.first)) {
+ if (alias_info.MayAlias(pair.first)) {
AbstractField* that = new (zone) AbstractField(zone);
for (auto pair : this->info_for_node_) {
- if (!MayAlias(object, pair.first) ||
+ if (!alias_info.MayAlias(pair.first) ||
!MayAlias(name, pair.second.name)) {
that->info_for_node_.insert(pair);
}
@@ -367,12 +383,12 @@ bool LoadElimination::AbstractMaps::Lookup(
}
LoadElimination::AbstractMaps const* LoadElimination::AbstractMaps::Kill(
- Node* object, Zone* zone) const {
+ const AliasStateInfo& alias_info, Zone* zone) const {
for (auto pair : this->info_for_node_) {
- if (MayAlias(object, pair.first)) {
+ if (alias_info.MayAlias(pair.first)) {
AbstractMaps* that = new (zone) AbstractMaps(zone);
for (auto pair : this->info_for_node_) {
- if (!MayAlias(object, pair.first)) that->info_for_node_.insert(pair);
+ if (!alias_info.MayAlias(pair.first)) that->info_for_node_.insert(pair);
}
return that;
}
@@ -512,9 +528,9 @@ LoadElimination::AbstractState const* LoadElimination::AbstractState::AddMaps(
}
LoadElimination::AbstractState const* LoadElimination::AbstractState::KillMaps(
- Node* object, Zone* zone) const {
+ const AliasStateInfo& alias_info, Zone* zone) const {
if (this->maps_) {
- AbstractMaps const* that_maps = this->maps_->Kill(object, zone);
+ AbstractMaps const* that_maps = this->maps_->Kill(alias_info, zone);
if (this->maps_ != that_maps) {
AbstractState* that = new (zone) AbstractState(*this);
that->maps_ = that_maps;
@@ -524,6 +540,12 @@ LoadElimination::AbstractState const* LoadElimination::AbstractState::KillMaps(
return this;
}
+LoadElimination::AbstractState const* LoadElimination::AbstractState::KillMaps(
+ Node* object, Zone* zone) const {
+ AliasStateInfo alias_info(this, object);
+ return KillMaps(alias_info, zone);
+}
+
Node* LoadElimination::AbstractState::LookupElement(
Node* object, Node* index, MachineRepresentation representation) const {
if (this->elements_) {
@@ -578,8 +600,15 @@ LoadElimination::AbstractState const* LoadElimination::AbstractState::AddField(
LoadElimination::AbstractState const* LoadElimination::AbstractState::KillField(
Node* object, size_t index, MaybeHandle<Name> name, Zone* zone) const {
+ AliasStateInfo alias_info(this, object);
+ return KillField(alias_info, index, name, zone);
+}
+
+LoadElimination::AbstractState const* LoadElimination::AbstractState::KillField(
+ const AliasStateInfo& alias_info, size_t index, MaybeHandle<Name> name,
+ Zone* zone) const {
if (AbstractField const* this_field = this->fields_[index]) {
- this_field = this_field->Kill(object, name, zone);
+ this_field = this_field->Kill(alias_info, name, zone);
if (this->fields_[index] != this_field) {
AbstractState* that = new (zone) AbstractState(*this);
that->fields_[index] = this_field;
@@ -592,16 +621,18 @@ LoadElimination::AbstractState const* LoadElimination::AbstractState::KillField(
LoadElimination::AbstractState const*
LoadElimination::AbstractState::KillFields(Node* object, MaybeHandle<Name> name,
Zone* zone) const {
+ AliasStateInfo alias_info(this, object);
for (size_t i = 0;; ++i) {
if (i == arraysize(fields_)) return this;
if (AbstractField const* this_field = this->fields_[i]) {
- AbstractField const* that_field = this_field->Kill(object, name, zone);
+ AbstractField const* that_field =
+ this_field->Kill(alias_info, name, zone);
if (that_field != this_field) {
AbstractState* that = new (zone) AbstractState(*this);
that->fields_[i] = that_field;
while (++i < arraysize(fields_)) {
if (this->fields_[i] != nullptr) {
- that->fields_[i] = this->fields_[i]->Kill(object, name, zone);
+ that->fields_[i] = this->fields_[i]->Kill(alias_info, name, zone);
}
}
return that;
@@ -618,6 +649,22 @@ Node* LoadElimination::AbstractState::LookupField(Node* object,
return nullptr;
}
+bool LoadElimination::AliasStateInfo::MayAlias(Node* other) const {
+ if (QueryAlias(object_, other) == kNoAlias) {
+ return false;
+ }
+ Handle<Map> map;
+ if (map_.ToHandle(&map)) {
+ ZoneHandleSet<Map> other_maps;
+ if (state_->LookupMaps(other, &other_maps) && other_maps.size() == 1) {
+ if (map.address() != other_maps.at(0).address()) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
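In effect, the predicate lets a kill skip objects whose single known map
provably differs from the transition's source map. A hypothetical usage, with
{state}, {transitioned_obj}, {obj1}, and the maps standing in for values that
would be available inside the reducer:

    // Scenario: the state records that {obj1} has exactly {map_a}, and we
    // are killing state for a transition whose source map is {map_b}.
    AliasStateInfo info(state, transitioned_obj, map_b);

    // Either QueryAlias already proves the nodes distinct, or the single
    // known map {map_a} differs from {map_b}; either way, the entries
    // tracked for {obj1} survive the kill.
    CHECK(!info.MayAlias(obj1));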
void LoadElimination::AbstractState::Print() const {
if (checks_) {
PrintF(" checks:\n");
@@ -741,12 +788,12 @@ Reduction LoadElimination::ReduceEnsureWritableFastElements(Node* node) {
}
Reduction LoadElimination::ReduceMaybeGrowFastElements(Node* node) {
- GrowFastElementsFlags flags = GrowFastElementsFlagsOf(node->op());
+ GrowFastElementsMode mode = GrowFastElementsModeOf(node->op());
Node* const object = NodeProperties::GetValueInput(node, 0);
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
- if (flags & GrowFastElementsFlag::kDoubleElements) {
+ if (mode == GrowFastElementsMode::kDoubleElements) {
// We know that the resulting elements have the fixed double array map.
state = state->AddMaps(
node, ZoneHandleSet<Map>(factory()->fixed_double_array_map()), zone());
@@ -755,11 +802,6 @@ Reduction LoadElimination::ReduceMaybeGrowFastElements(Node* node) {
state = state->AddMaps(
node, ZoneHandleSet<Map>(factory()->fixed_array_map()), zone());
}
- if (flags & GrowFastElementsFlag::kArrayObject) {
- // Kill the previous Array::length on {object}.
- state = state->KillField(object, FieldIndexOf(JSArray::kLengthOffset),
- factory()->length_string(), zone());
- }
// Kill the previous elements on {object}.
state = state->KillField(object, FieldIndexOf(JSObject::kElementsOffset),
MaybeHandle<Name>(), zone());
@@ -777,6 +819,17 @@ Reduction LoadElimination::ReduceTransitionElementsKind(Node* node) {
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
+ switch (transition.mode()) {
+ case ElementsTransition::kFastTransition:
+ break;
+ case ElementsTransition::kSlowTransition:
+ // Kill the elements as well.
+ AliasStateInfo alias_info(state, object, source_map);
+ state =
+ state->KillField(alias_info, FieldIndexOf(JSObject::kElementsOffset),
+ MaybeHandle<Name>(), zone());
+ break;
+ }
ZoneHandleSet<Map> object_maps;
if (state->LookupMaps(object, &object_maps)) {
if (ZoneHandleSet<Map>(target_map).contains(object_maps)) {
@@ -787,20 +840,13 @@ Reduction LoadElimination::ReduceTransitionElementsKind(Node* node) {
if (object_maps.contains(ZoneHandleSet<Map>(source_map))) {
object_maps.remove(source_map, zone());
object_maps.insert(target_map, zone());
- state = state->KillMaps(object, zone());
+ AliasStateInfo alias_info(state, object, source_map);
+ state = state->KillMaps(alias_info, zone());
state = state->AddMaps(object, object_maps, zone());
}
} else {
- state = state->KillMaps(object, zone());
- }
- switch (transition.mode()) {
- case ElementsTransition::kFastTransition:
- break;
- case ElementsTransition::kSlowTransition:
- // Kill the elements as well.
- state = state->KillField(object, FieldIndexOf(JSObject::kElementsOffset),
- MaybeHandle<Name>(), zone());
- break;
+ AliasStateInfo alias_info(state, object, source_map);
+ state = state->KillMaps(alias_info, zone());
}
return UpdateState(node, state);
}
@@ -1109,6 +1155,11 @@ Reduction LoadElimination::UpdateState(Node* node, AbstractState const* state) {
LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
Node* node, AbstractState const* state) const {
Node* const control = NodeProperties::GetControlInput(node);
+ struct TransitionElementsKindInfo {
+ ElementsTransition transition;
+ Node* object;
+ };
+ ZoneVector<TransitionElementsKindInfo> element_transitions_(zone());
ZoneQueue<Node*> queue(zone());
ZoneSet<Node*> visited(zone());
visited.insert(node);
@@ -1130,17 +1181,10 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
break;
}
case IrOpcode::kMaybeGrowFastElements: {
- GrowFastElementsFlags flags =
- GrowFastElementsFlagsOf(current->op());
Node* const object = NodeProperties::GetValueInput(current, 0);
state = state->KillField(object,
FieldIndexOf(JSObject::kElementsOffset),
MaybeHandle<Name>(), zone());
- if (flags & GrowFastElementsFlag::kArrayObject) {
- state =
- state->KillField(object, FieldIndexOf(JSArray::kLengthOffset),
- factory()->length_string(), zone());
- }
break;
}
case IrOpcode::kTransitionElementsKind: {
@@ -1150,17 +1194,7 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
if (!state->LookupMaps(object, &object_maps) ||
!ZoneHandleSet<Map>(transition.target())
.contains(object_maps)) {
- state = state->KillMaps(object, zone());
- switch (transition.mode()) {
- case ElementsTransition::kFastTransition:
- break;
- case ElementsTransition::kSlowTransition:
- // Kill the elements as well.
- state = state->KillField(
- object, FieldIndexOf(JSObject::kElementsOffset),
- MaybeHandle<Name>(), zone());
- break;
- }
+ element_transitions_.push_back({transition, object});
}
break;
}
@@ -1210,6 +1244,48 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
}
}
}
+
+ // Finally, we apply the element transitions. For each transition, we will try
+ // to only invalidate information about nodes that can have the transition's
+ // source map. The trouble is that an object can be transitioned by some other
+ // transition to the source map. In that case, the other transition will
+ // invalidate the information, so we are mostly fine.
+ //
+ // The only bad case is
+ //
+ // mapA ---fast---> mapB ---slow---> mapC
+ //
+ // If we process the slow transition first on an object that has mapA, we will
+ // ignore the transition because the object does not have its source map
+ // (mapB). When we later process the fast transition, we invalidate the
+ // object's map, but we keep the information about the object's elements. This
+ // is wrong because the elements will be overwritten by the slow transition.
+ //
+ // Note that the slow-slow case is fine because either slow transition
+ // will invalidate the elements field, so the processing order does not
+ // matter.
+
+ // To handle the bad case properly, we first kill the maps using all
+ // transitions. We kill the fields later, once all the transitions are
+ // already reflected in the map information.
+
+ for (const TransitionElementsKindInfo& t : element_transitions_) {
+ AliasStateInfo alias_info(state, t.object, t.transition.source());
+ state = state->KillMaps(alias_info, zone());
+ }
+ for (const TransitionElementsKindInfo& t : element_transitions_) {
+ switch (t.transition.mode()) {
+ case ElementsTransition::kFastTransition:
+ break;
+ case ElementsTransition::kSlowTransition: {
+ AliasStateInfo alias_info(state, t.object, t.transition.source());
+ state = state->KillField(alias_info,
+ FieldIndexOf(JSObject::kElementsOffset),
+ MaybeHandle<Name>(), zone());
+ break;
+ }
+ }
+ }
return state;
}
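The ordering argument above can be checked with a toy model: track only a
single known map and an elements-valid bit, record the two transitions from
the comment, and apply them in the "bad" order. This is a hypothetical
simulation, not the real data structures; it only demonstrates why the maps
must be killed for every transition before any elements field is killed.

    #include <cassert>
    #include <optional>
    #include <string>

    struct ToyState {
      std::optional<std::string> map = "mapA";  // single known map, if any
      bool elements_valid = true;               // tracked elements field
    };

    struct Transition {
      std::string source;
      bool slow;
    };

    int main() {
      ToyState state;
      // Transitions recorded while walking the loop body, slow one first:
      Transition order[] = {{"mapB", true},    // mapB --slow--> mapC
                            {"mapA", false}};  // mapA --fast--> mapB

      // Phase 1: kill map knowledge for every transition whose source map
      // may match. The fast transition resets the map to "unknown".
      for (const Transition& t : order) {
        if (!state.map || *state.map == t.source) state.map.reset();
      }
      // Phase 2: with the map already unknown, the slow transition can no
      // longer be skipped, so the elements field is correctly killed.
      for (const Transition& t : order) {
        if (t.slow && (!state.map || *state.map == t.source)) {
          state.elements_valid = false;
        }
      }
      assert(!state.elements_valid);  // a single pass would have kept it
      return 0;
    }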
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
index fb8c4c72a2..5080d7980a 100644
--- a/deps/v8/src/compiler/load-elimination.h
+++ b/deps/v8/src/compiler/load-elimination.h
@@ -125,6 +125,11 @@ class V8_EXPORT_PRIVATE LoadElimination final
size_t next_index_ = 0;
};
+ // Information we use to resolve object aliasing. Currently, we consider
+ // two objects not aliased if they have different maps or if alias
+ // analysis shows that the nodes cannot alias.
+ class AliasStateInfo;
+
// Abstract state to approximate the current state of a certain field along
// the effect paths through the graph.
class AbstractField final : public ZoneObject {
@@ -143,8 +148,8 @@ class V8_EXPORT_PRIVATE LoadElimination final
return that;
}
Node* Lookup(Node* object) const;
- AbstractField const* Kill(Node* object, MaybeHandle<Name> name,
- Zone* zone) const;
+ AbstractField const* Kill(const AliasStateInfo& alias_info,
+ MaybeHandle<Name> name, Zone* zone) const;
bool Equals(AbstractField const* that) const {
return this == that || this->info_for_node_ == that->info_for_node_;
}
@@ -196,7 +201,8 @@ class V8_EXPORT_PRIVATE LoadElimination final
AbstractMaps const* Extend(Node* object, ZoneHandleSet<Map> maps,
Zone* zone) const;
bool Lookup(Node* object, ZoneHandleSet<Map>* object_maps) const;
- AbstractMaps const* Kill(Node* object, Zone* zone) const;
+ AbstractMaps const* Kill(const AliasStateInfo& alias_info,
+ Zone* zone) const;
bool Equals(AbstractMaps const* that) const {
return this == that || this->info_for_node_ == that->info_for_node_;
}
@@ -222,10 +228,15 @@ class V8_EXPORT_PRIVATE LoadElimination final
AbstractState const* AddMaps(Node* object, ZoneHandleSet<Map> maps,
Zone* zone) const;
AbstractState const* KillMaps(Node* object, Zone* zone) const;
+ AbstractState const* KillMaps(const AliasStateInfo& alias_info,
+ Zone* zone) const;
bool LookupMaps(Node* object, ZoneHandleSet<Map>* object_maps) const;
AbstractState const* AddField(Node* object, size_t index, Node* value,
MaybeHandle<Name> name, Zone* zone) const;
+ AbstractState const* KillField(const AliasStateInfo& alias_info,
+ size_t index, MaybeHandle<Name> name,
+ Zone* zone) const;
AbstractState const* KillField(Node* object, size_t index,
MaybeHandle<Name> name, Zone* zone) const;
AbstractState const* KillFields(Node* object, MaybeHandle<Name> name,
diff --git a/deps/v8/src/compiler/loop-variable-optimizer.cc b/deps/v8/src/compiler/loop-variable-optimizer.cc
index 07e026eb73..d50237ad6e 100644
--- a/deps/v8/src/compiler/loop-variable-optimizer.cc
+++ b/deps/v8/src/compiler/loop-variable-optimizer.cc
@@ -129,7 +129,7 @@ class LoopVariableOptimizer::VariableLimits : public ZoneObject {
// Then we go through both lists in lock-step until we find
// the common tail.
while (head_ != other_limit) {
- DCHECK(limit_count_ > 0);
+ DCHECK_LT(0, limit_count_);
limit_count_--;
other_limit = other_limit->next();
head_ = head_->next();
@@ -310,7 +310,8 @@ InductionVariable* LoopVariableOptimizer::TryGetInductionVariable(Node* phi) {
arith->opcode() == IrOpcode::kSpeculativeSafeIntegerAdd) {
arithmeticType = InductionVariable::ArithmeticType::kAddition;
} else if (arith->opcode() == IrOpcode::kJSSubtract ||
- arith->opcode() == IrOpcode::kSpeculativeNumberSubtract) {
+ arith->opcode() == IrOpcode::kSpeculativeNumberSubtract ||
+ arith->opcode() == IrOpcode::kSpeculativeSafeIntegerSubtract) {
arithmeticType = InductionVariable::ArithmeticType::kSubtraction;
} else {
return nullptr;
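The lock-step walk above is the standard common-tail search over two
immutable singly-linked lists: first equalize the lengths, then advance both
pointers together until they meet. A generic sketch (NodeT is a hypothetical
node type with a next() accessor, unrelated to the compiler's Node):

    #include <cstddef>

    template <typename NodeT>
    const NodeT* CommonTail(const NodeT* a, size_t len_a,
                            const NodeT* b, size_t len_b) {
      while (len_a > len_b) { a = a->next(); --len_a; }  // equalize lengths
      while (len_b > len_a) { b = b->next(); --len_b; }
      while (a != b) {  // lock-step; ends at nullptr if no shared tail
        a = a->next();
        b = b->next();
      }
      return a;
    }

In the merge above, limit_count_ plays the role of the length, which is why
it must still be positive while the heads differ (hence the DCHECK).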
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index 13b017fdef..8393a749bb 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -545,10 +545,10 @@ class MachineRepresentationChecker {
CheckValueInputForInt32Op(node, input_index);
break;
default:
- CheckValueInputRepresentationIs(
- node, 2, inferrer_->GetRepresentation(node));
+ CheckValueInputRepresentationIs(node, input_index,
+ type.representation());
+ break;
}
- break;
}
break;
}
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 383f2799fe..8590c942d3 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -12,7 +12,7 @@
#include "src/compiler/graph.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-matchers.h"
-#include "src/objects-inl.h"
+#include "src/conversions-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 6ff3087bd6..2603b1d18e 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -487,7 +487,7 @@ struct MachineOperatorGlobalCache {
ProtectedLoad##Type##Operator() \
: Operator1<LoadRepresentation>( \
IrOpcode::kProtectedLoad, \
- Operator::kNoDeopt | Operator::kNoThrow, "ProtectedLoad", 3, 1, \
+ Operator::kNoDeopt | Operator::kNoThrow, "ProtectedLoad", 2, 1, \
1, 1, 1, 0, MachineType::Type()) {} \
}; \
Load##Type##Operator kLoad##Type; \
@@ -562,7 +562,7 @@ struct MachineOperatorGlobalCache {
: Operator1<StoreRepresentation>( \
IrOpcode::kProtectedStore, \
Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
- "Store", 4, 1, 1, 0, 1, 0, \
+ "Store", 3, 1, 1, 0, 1, 0, \
StoreRepresentation(MachineRepresentation::Type, \
kNoWriteBarrier)) {} \
}; \
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index aecbfa6bac..e6264bc2b4 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -2,12 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/code-generator.h"
+#include "src/assembler-inl.h"
+#include "src/callable.h"
#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/code-generator.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
+#include "src/heap/heap-inl.h"
#include "src/mips/macro-assembler-mips.h"
namespace v8 {
@@ -55,7 +58,7 @@ class MipsOperandConverter final : public InstructionOperandConverter {
Register InputOrZeroRegister(size_t index) {
if (instr_->InputAt(index)->IsImmediate()) {
- DCHECK((InputInt32(index) == 0));
+ DCHECK_EQ(0, InputInt32(index));
return zero_reg;
}
return InputRegister(index);
@@ -231,6 +234,28 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
must_save_lr_(!gen->frame_access_state()->has_frame()),
zone_(gen->zone()) {}
+ void SaveRegisters(RegList registers) {
+ DCHECK_LT(0, NumRegs(registers));
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
+ }
+ __ MultiPush(regs | ra.bit());
+ }
+
+ void RestoreRegisters(RegList registers) {
+ DCHECK_LT(0, NumRegs(registers));
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
+ }
+ __ MultiPop(regs | ra.bit());
+ }
+
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
@@ -238,6 +263,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ CheckPageFlag(value_, scratch0_,
MemoryChunk::kPointersToHereAreInterestingMask, eq,
exit());
+ __ Addu(scratch1_, object_, index_);
RememberedSetAction const remembered_set_action =
mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
: OMIT_REMEMBERED_SET;
@@ -247,10 +273,14 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
// We need to save and restore ra if the frame was elided.
__ Push(ra);
}
- __ Addu(scratch1_, object_, index_);
+#ifdef V8_CSA_WRITE_BARRIER
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode);
+#else
__ CallStubDelayed(
new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode));
+#endif
if (must_save_lr_) {
__ Pop(ra);
}
@@ -367,7 +397,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
} // namespace
-
#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr) \
do { \
auto result = i.Output##width##Register(); \
@@ -375,7 +404,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
if (instr->InputAt(0)->IsRegister()) { \
auto offset = i.InputRegister(0); \
__ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
- __ addu(kScratchReg, i.InputRegister(2), offset); \
+ __ Addu(kScratchReg, i.InputRegister(2), offset); \
__ asm_instr(result, MemOperand(kScratchReg, 0)); \
} else { \
auto offset = i.InputOperand(0).immediate(); \
@@ -385,7 +414,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
__ bind(ool->exit()); \
} while (0)
-
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
do { \
auto result = i.OutputRegister(); \
@@ -393,7 +421,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
if (instr->InputAt(0)->IsRegister()) { \
auto offset = i.InputRegister(0); \
__ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
- __ addu(kScratchReg, i.InputRegister(2), offset); \
+ __ Addu(kScratchReg, i.InputRegister(2), offset); \
__ asm_instr(result, MemOperand(kScratchReg, 0)); \
} else { \
auto offset = i.InputOperand(0).immediate(); \
@@ -409,16 +437,16 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
if (instr->InputAt(0)->IsRegister()) { \
auto offset = i.InputRegister(0); \
auto value = i.InputOrZero##width##Register(2); \
- if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) { \
+ if (value == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { \
__ Move(kDoubleRegZero, 0.0); \
} \
__ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
- __ addu(kScratchReg, i.InputRegister(3), offset); \
+ __ Addu(kScratchReg, i.InputRegister(3), offset); \
__ asm_instr(value, MemOperand(kScratchReg, 0)); \
} else { \
auto offset = i.InputOperand(0).immediate(); \
auto value = i.InputOrZero##width##Register(2); \
- if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) { \
+ if (value == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { \
__ Move(kDoubleRegZero, 0.0); \
} \
__ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
@@ -434,7 +462,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
auto offset = i.InputRegister(0); \
auto value = i.InputOrZeroRegister(2); \
__ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
- __ addu(kScratchReg, i.InputRegister(3), offset); \
+ __ Addu(kScratchReg, i.InputRegister(3), offset); \
__ asm_instr(value, MemOperand(kScratchReg, 0)); \
} else { \
auto offset = i.InputOperand(0).immediate(); \
@@ -513,7 +541,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
#define ASSEMBLE_ATOMIC_BINOP(bin_instr) \
do { \
Label binop; \
- __ addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
__ sync(); \
__ bind(&binop); \
__ Ll(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
@@ -527,7 +555,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
#define ASSEMBLE_ATOMIC_BINOP_EXT(sign_extend, size, bin_instr) \
do { \
Label binop; \
- __ addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
__ andi(i.TempRegister(3), i.TempRegister(0), 0x3); \
__ Subu(i.TempRegister(0), i.TempRegister(0), Operand(i.TempRegister(3))); \
__ sll(i.TempRegister(3), i.TempRegister(3), 3); \
@@ -545,6 +573,80 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
__ sync(); \
} while (0)
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER() \
+ do { \
+ Label exchange; \
+ __ sync(); \
+ __ bind(&exchange); \
+ __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Ll(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
+ __ mov(i.TempRegister(1), i.InputRegister(2)); \
+ __ Sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&exchange, eq, i.TempRegister(1), Operand(zero_reg)); \
+ __ sync(); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(sign_extend, size) \
+ do { \
+ Label exchange; \
+ __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \
+ __ Subu(i.TempRegister(0), i.TempRegister(0), Operand(i.TempRegister(1))); \
+ __ sll(i.TempRegister(1), i.TempRegister(1), 3); \
+ __ sync(); \
+ __ bind(&exchange); \
+ __ Ll(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
+ size, sign_extend); \
+ __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1), \
+ size); \
+ __ Sc(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&exchange, eq, i.TempRegister(2), Operand(zero_reg)); \
+ __ sync(); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER() \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ sync(); \
+ __ bind(&compareExchange); \
+ __ Ll(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&exit, ne, i.InputRegister(2), \
+ Operand(i.OutputRegister(0))); \
+ __ mov(i.TempRegister(2), i.InputRegister(3)); \
+ __ Sc(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&compareExchange, eq, i.TempRegister(2), \
+ Operand(zero_reg)); \
+ __ bind(&exit); \
+ __ sync(); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(sign_extend, size) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \
+ __ Subu(i.TempRegister(0), i.TempRegister(0), Operand(i.TempRegister(1))); \
+ __ sll(i.TempRegister(1), i.TempRegister(1), 3); \
+ __ sync(); \
+ __ bind(&compareExchange); \
+ __ Ll(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
+ size, sign_extend); \
+ __ BranchShort(&exit, ne, i.InputRegister(2), \
+ Operand(i.OutputRegister(0))); \
+ __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \
+ size); \
+ __ Sc(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&compareExchange, eq, i.TempRegister(2), \
+ Operand(zero_reg)); \
+ __ bind(&exit); \
+ __ sync(); \
+ } while (0)
+
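The _EXT variants implement subword atomics on top of word-sized LL/SC: align
the address down to a word boundary, derive the bit offset of the byte or
halfword lane from the low address bits, and retry the
load-linked/store-conditional until it succeeds. A portable sketch of the
same idea using a 32-bit CAS loop; this is a hypothetical helper, not V8
code, and it assumes little-endian lane numbering plus the (here unverified)
right to access the containing aligned word as a std::atomic<uint32_t>.

    #include <atomic>
    #include <cstdint>

    // Emulate an 8-bit atomic exchange with a CAS loop over the enclosing
    // aligned 32-bit word.
    uint8_t ByteAtomicExchange(uint8_t* addr, uint8_t value) {
      uintptr_t a = reinterpret_cast<uintptr_t>(addr);
      auto* word = reinterpret_cast<std::atomic<uint32_t>*>(a & ~uintptr_t{3});
      unsigned shift = static_cast<unsigned>(a & 3) * 8;  // lane bit offset
      uint32_t old_word = word->load(std::memory_order_relaxed);
      uint32_t new_word;
      do {
        // The "InsertBits" step: splice the new byte into its lane.
        new_word = (old_word & ~(uint32_t{0xff} << shift)) |
                   (uint32_t{value} << shift);
      } while (!word->compare_exchange_weak(old_word, new_word,
                                            std::memory_order_seq_cst));
      // The "ExtractBits" step: recover the previous byte from the lane.
      return static_cast<uint8_t>(old_word >> shift);
    }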
#define ASSEMBLE_IEEE754_BINOP(name) \
do { \
FrameScope scope(tasm(), StackFrame::MANUAL); \
@@ -638,6 +740,34 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
first_unused_stack_slot);
}
+// Check if the code object is marked for deoptimization. If it is, jump to
+// the CompileLazyDeoptimizedCode builtin. In order to do this, we need to:
+// 1. load the address of the current instruction;
+// 2. read from memory the word that contains the marked-for-deoptimization
+//    bit; it lives in the first set of flags ({kKindSpecificFlags1Offset});
+// 3. test kMarkedForDeoptimizationBit in those flags; and
+// 4. if the bit is set, jump to the builtin.
+void CodeGenerator::BailoutIfDeoptimized() {
+ Label current;
+ // This push on ra and the pop below together ensure that we restore the
+ // register ra, which is needed while computing frames for deoptimization.
+ __ push(ra);
+ // The bal instruction puts the address of the current instruction into
+ // the return address (ra) register, which we can use later on.
+ __ bal(&current);
+ __ nop();
+ int pc = __ pc_offset();
+ __ bind(&current);
+ int offset = Code::kKindSpecificFlags1Offset - (Code::kHeaderSize + pc);
+ __ lw(a2, MemOperand(ra, offset));
+ __ pop(ra);
+ __ And(a2, a2, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ Handle<Code> code = isolate()->builtins()->builtin_handle(
+ Builtins::kCompileLazyDeoptimizedCode);
+ __ Jump(code, RelocInfo::CODE_TARGET, ne, a2, Operand(zero_reg));
+}
+
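The offset computation is easiest to read as plain address arithmetic; a
short derivation, assuming (as the code arranges) that ra ends up holding the
address of the bound label `current`:

    // ra                == instruction_start + pc          (bal + bind)
    // instruction_start == code_start + Code::kHeaderSize
    // flags_word        == code_start + Code::kKindSpecificFlags1Offset
    //
    // => flags_word == ra + (kKindSpecificFlags1Offset - (kHeaderSize + pc)),
    //    which is exactly the (negative) offset passed to the lw above.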
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -700,13 +830,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchSaveCallerRegisters: {
+ fp_mode_ =
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
+ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// kReturnRegister0 should have been saved before entering the stub.
- __ PushCallerSaved(kSaveFPRegs, kReturnRegister0);
+ int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
+ DCHECK_EQ(0, bytes % kPointerSize);
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ DCHECK(!caller_registers_saved_);
+ caller_registers_saved_ = true;
break;
}
case kArchRestoreCallerRegisters: {
+ DCHECK(fp_mode_ ==
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
+ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// Don't overwrite the returned value.
- __ PopCallerSaved(kSaveFPRegs, kReturnRegister0);
+ int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(-(bytes / kPointerSize));
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ DCHECK(caller_registers_saved_);
+ caller_registers_saved_ = false;
break;
}
case kArchPrepareTailCall:
@@ -722,7 +867,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ CallCFunction(func, num_parameters);
}
frame_access_state()->SetFrameAccessToDefault();
+ // Ideally, we should decrement SP delta to match the change of stack
+ // pointer in CallCFunction. However, on certain architectures (e.g.
+ // ARM) a stricter alignment requirement may force the old SP to be
+ // saved on the stack, and in those cases the SP delta cannot be
+ // computed statically.
frame_access_state()->ClearSPDelta();
+ if (caller_registers_saved_) {
+ // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
+ // Here, we assume the sequence to be:
+ // kArchSaveCallerRegisters;
+ // kArchCallCFunction;
+ // kArchRestoreCallerRegisters;
+ int bytes =
+ __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ }
break;
}
case kArchJmp:
@@ -735,7 +895,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssembleArchTableSwitch(instr);
break;
case kArchDebugAbort:
- DCHECK(i.InputRegister(0).is(a0));
+ DCHECK(i.InputRegister(0) == a0);
if (!frame_access_state()->has_frame()) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
@@ -961,7 +1121,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(1)->IsRegister()) {
__ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
} else {
- DCHECK(i.InputOperand(1).immediate() == 0);
+ DCHECK_EQ(0, i.InputOperand(1).immediate());
__ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
}
break;
@@ -978,7 +1138,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// We don't have an instruction to count the number of trailing zeroes.
// Start by flipping the bits end-for-end so we can count the number of
// leading zeroes instead.
- __ rotr(dst, src, 16);
+ __ Ror(dst, src, 16);
__ wsbh(dst, dst);
__ bitswap(dst, dst);
__ Clz(dst, dst);
@@ -1201,8 +1361,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMipsMulPair: {
__ Mulu(i.OutputRegister(1), i.OutputRegister(0), i.InputRegister(0),
i.InputRegister(2));
- __ mul(kScratchReg, i.InputRegister(0), i.InputRegister(3));
- __ mul(kScratchReg2, i.InputRegister(1), i.InputRegister(2));
+ __ Mul(kScratchReg, i.InputRegister(0), i.InputRegister(3));
+ __ Mul(kScratchReg2, i.InputRegister(1), i.InputRegister(2));
__ Addu(i.OutputRegister(1), i.OutputRegister(1), kScratchReg);
__ Addu(i.OutputRegister(1), i.OutputRegister(1), kScratchReg2);
} break;
@@ -1379,26 +1539,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMipsFloorWD: {
FPURegister scratch = kScratchDoubleReg;
- __ floor_w_d(scratch, i.InputDoubleRegister(0));
+ __ Floor_w_d(scratch, i.InputDoubleRegister(0));
__ mfc1(i.OutputRegister(), scratch);
break;
}
case kMipsCeilWD: {
FPURegister scratch = kScratchDoubleReg;
- __ ceil_w_d(scratch, i.InputDoubleRegister(0));
+ __ Ceil_w_d(scratch, i.InputDoubleRegister(0));
__ mfc1(i.OutputRegister(), scratch);
break;
}
case kMipsRoundWD: {
FPURegister scratch = kScratchDoubleReg;
- __ round_w_d(scratch, i.InputDoubleRegister(0));
+ __ Round_w_d(scratch, i.InputDoubleRegister(0));
__ mfc1(i.OutputRegister(), scratch);
break;
}
case kMipsTruncWD: {
FPURegister scratch = kScratchDoubleReg;
// Other arches use round to zero here, so we follow.
- __ trunc_w_d(scratch, i.InputDoubleRegister(0));
+ __ Trunc_w_d(scratch, i.InputDoubleRegister(0));
__ mfc1(i.OutputRegister(), scratch);
break;
}
@@ -1426,8 +1586,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mfc1(i.OutputRegister(), scratch);
// Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
// because INT32_MIN allows easier out-of-bounds detection.
- __ addiu(kScratchReg, i.OutputRegister(), 1);
- __ slt(kScratchReg2, kScratchReg, i.OutputRegister());
+ __ Addu(kScratchReg, i.OutputRegister(), 1);
+ __ Slt(kScratchReg2, kScratchReg, i.OutputRegister());
__ Movn(i.OutputRegister(), kScratchReg, kScratchReg2);
break;
}
@@ -1443,7 +1603,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Trunc_uw_s(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
// Avoid UINT32_MAX as an overflow indicator and use 0 instead,
// because 0 allows easier out-of-bounds detection.
- __ addiu(kScratchReg, i.OutputRegister(), 1);
+ __ Addu(kScratchReg, i.OutputRegister(), 1);
__ Movz(i.OutputRegister(), zero_reg, kScratchReg);
break;
}
@@ -1521,7 +1681,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
size_t index = 0;
MemOperand operand = i.MemoryOperand(&index);
FPURegister ft = i.InputOrZeroSingleRegister(index);
- if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
+ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
__ swc1(ft, operand);
@@ -1531,7 +1691,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
size_t index = 0;
MemOperand operand = i.MemoryOperand(&index);
FPURegister ft = i.InputOrZeroSingleRegister(index);
- if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
+ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
__ Uswc1(ft, operand, kScratchReg);
@@ -1545,7 +1705,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMipsSdc1: {
FPURegister ft = i.InputOrZeroDoubleRegister(2);
- if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
+ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
__ Sdc1(ft, i.MemoryOperand());
@@ -1553,7 +1713,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMipsUsdc1: {
FPURegister ft = i.InputOrZeroDoubleRegister(2);
- if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
+ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
__ Usdc1(ft, i.MemoryOperand(), kScratchReg);
@@ -1657,16 +1817,34 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_ATOMIC_STORE_INTEGER(sw);
break;
case kAtomicExchangeInt8:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 8);
+ break;
case kAtomicExchangeUint8:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 8);
+ break;
case kAtomicExchangeInt16:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 16);
+ break;
case kAtomicExchangeUint16:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 16);
+ break;
case kAtomicExchangeWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER();
+ break;
case kAtomicCompareExchangeInt8:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 8);
+ break;
case kAtomicCompareExchangeUint8:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 8);
+ break;
case kAtomicCompareExchangeInt16:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 16);
+ break;
case kAtomicCompareExchangeUint16:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 16);
+ break;
case kAtomicCompareExchangeWord32:
- UNREACHABLE();
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER();
break;
#define ATOMIC_BINOP_CASE(op, inst) \
case kAtomic##op##Int8: \
@@ -1711,7 +1889,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
- if (!src.is(dst)) {
+ if (src != dst) {
__ move_v(dst, src);
}
__ insert_w(dst, i.InputInt8(1), i.InputRegister(2));
@@ -1745,7 +1923,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
- if (!src.is(dst)) {
+ if (src != dst) {
__ move_v(dst, src);
}
__ FmoveLow(kScratchReg, i.InputSingleRegister(2));
@@ -1825,7 +2003,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMipsS128Select: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- DCHECK(i.OutputSimd128Register().is(i.InputSimd128Register(0)));
+ DCHECK(i.OutputSimd128Register() == i.InputSimd128Register(0));
__ bsel_v(i.OutputSimd128Register(), i.InputSimd128Register(2),
i.InputSimd128Register(1));
break;
@@ -1960,7 +2138,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
- if (!src.is(dst)) {
+ if (src != dst) {
__ move_v(dst, src);
}
__ insert_h(dst, i.InputInt8(1), i.InputRegister(2));
@@ -2109,7 +2287,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
- if (!src.is(dst)) {
+ if (src != dst) {
__ move_v(dst, src);
}
__ insert_b(dst, i.InputInt8(1), i.InputRegister(2));
@@ -2392,22 +2570,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
int32_t shuffle = i.InputInt32(2);
- if (src0.is(src1)) {
+ if (src0 == src1) {
// Unary S32x4 shuffles are handled with shf.w instruction
uint32_t i8 = 0;
for (int i = 0; i < 4; i++) {
int lane = shuffle & 0xff;
- DCHECK(lane < 4);
+ DCHECK_GT(4, lane);
i8 |= lane << (2 * i);
shuffle >>= 8;
}
__ shf_w(dst, src0, i8);
} else {
// For binary shuffles use vshf.w instruction
- if (dst.is(src0)) {
+ if (dst == src0) {
__ move_v(kSimd128ScratchReg, src0);
src0 = kSimd128ScratchReg;
- } else if (dst.is(src1)) {
+ } else if (dst == src1) {
__ move_v(kSimd128ScratchReg, src1);
src1 = kSimd128ScratchReg;
}
@@ -2558,7 +2736,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMipsS8x16Concat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
- DCHECK(dst.is(i.InputSimd128Register(0)));
+ DCHECK(dst == i.InputSimd128Register(0));
__ sldi_b(dst, i.InputSimd128Register(1), i.InputInt4(2));
break;
}
@@ -2568,10 +2746,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
- if (dst.is(src0)) {
+ if (dst == src0) {
__ move_v(kSimd128ScratchReg, src0);
src0 = kSimd128ScratchReg;
- } else if (dst.is(src1)) {
+ } else if (dst == src1) {
__ move_v(kSimd128ScratchReg, src1);
src1 = kSimd128ScratchReg;
}
@@ -2853,7 +3031,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
}
FPURegister left = i.InputOrZeroSingleRegister(0);
FPURegister right = i.InputOrZeroSingleRegister(1);
- if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
+ if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
!__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
@@ -2864,7 +3042,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
}
FPURegister left = i.InputOrZeroDoubleRegister(0);
FPURegister right = i.InputOrZeroDoubleRegister(1);
- if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
+ if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
!__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
@@ -2887,6 +3065,10 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
branch->fallthru);
}
+void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
+ BranchInfo* branch) {
+ AssembleArchBranch(instr, branch);
+}
void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
@@ -2930,6 +3112,9 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
tasm()->isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
+ CallDescriptor* descriptor = gen_->linkage()->GetIncomingDescriptor();
+ int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ __ Drop(pop_count);
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
@@ -3106,7 +3291,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
instr->arch_opcode() == kMipsCmpS) {
FPURegister left = i.InputOrZeroDoubleRegister(0);
FPURegister right = i.InputOrZeroDoubleRegister(1);
- if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
+ if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
!__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
@@ -3117,7 +3302,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
if (instr->arch_opcode() == kMipsCmpD) {
__ c(cc, D, left, right);
} else {
- DCHECK(instr->arch_opcode() == kMipsCmpS);
+ DCHECK_EQ(kMipsCmpS, instr->arch_opcode());
__ c(cc, S, left, right);
}
if (predicate) {
@@ -3129,7 +3314,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
if (instr->arch_opcode() == kMipsCmpD) {
__ cmp(cc, L, kDoubleCompareReg, left, right);
} else {
- DCHECK(instr->arch_opcode() == kMipsCmpS);
+ DCHECK_EQ(kMipsCmpS, instr->arch_opcode());
__ cmp(cc, W, kDoubleCompareReg, left, right);
}
__ mfc1(result, kDoubleCompareReg);
@@ -3181,7 +3366,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
if (saves_fpu != 0) {
int count = base::bits::CountPopulation32(saves_fpu);
- DCHECK(kNumCalleeSavedFPU == count);
+ DCHECK_EQ(kNumCalleeSavedFPU, count);
frame->AllocateSavedCalleeRegisterSlots(count *
(kDoubleSize / kPointerSize));
}
@@ -3189,7 +3374,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
int count = base::bits::CountPopulation32(saves);
- DCHECK(kNumCalleeSaved == count + 1);
+ DCHECK_EQ(kNumCalleeSaved, count + 1);
frame->AllocateSavedCalleeRegisterSlots(count);
}
}
@@ -3240,7 +3425,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (saves != 0) {
// Save callee-saved registers.
__ MultiPush(saves);
- DCHECK(kNumCalleeSaved == base::bits::CountPopulation32(saves) + 1);
+ DCHECK_EQ(kNumCalleeSaved, base::bits::CountPopulation32(saves) + 1);
}
}
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index 7013b85ab2..5bb112f77e 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -47,7 +47,7 @@ class MipsOperandGenerator final : public OperandGenerator {
}
int64_t GetIntegerConstantValue(Node* node) {
- DCHECK(node->opcode() == IrOpcode::kInt32Constant);
+ DCHECK_EQ(IrOpcode::kInt32Constant, node->opcode());
return OpParameter<int32_t>(node);
}
@@ -1917,10 +1917,82 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
}
}
-void InstructionSelector::VisitAtomicExchange(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitAtomicExchange(Node* node) {
+ MipsOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ MachineType type = AtomicOpRepresentationOf(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = kAtomicExchangeInt8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kAtomicExchangeUint8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kAtomicExchangeInt16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicExchangeUint16;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = kAtomicExchangeWord32;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temp[3];
+ temp[0] = g.TempRegister();
+ temp[1] = g.TempRegister();
+ temp[2] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 1, outputs, input_count, inputs, 3, temp);
+}
void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
- UNIMPLEMENTED();
+ MipsOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* old_value = node->InputAt(2);
+ Node* new_value = node->InputAt(3);
+ ArchOpcode opcode = kArchNop;
+ MachineType type = AtomicOpRepresentationOf(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = kAtomicCompareExchangeInt8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kAtomicCompareExchangeUint8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kAtomicCompareExchangeInt16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicCompareExchangeUint16;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = kAtomicCompareExchangeWord32;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(old_value);
+ inputs[input_count++] = g.UseUniqueRegister(new_value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temp[3];
+ temp[0] = g.TempRegister();
+ temp[1] = g.TempRegister();
+ temp[2] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 1, outputs, input_count, inputs, 3, temp);
}
void InstructionSelector::VisitAtomicBinaryOperation(
@@ -2297,6 +2369,14 @@ InstructionSelector::AlignmentRequirements() {
}
}
+#undef SIMD_BINOP_LIST
+#undef SIMD_SHIFT_OP_LIST
+#undef SIMD_UNOP_LIST
+#undef SIMD_TYPE_LIST
+#undef SIMD_FORMAT_LIST
+#undef TRACE_UNIMPL
+#undef TRACE
+
} // namespace compiler
} // namespace internal
} // namespace v8
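Both visitors repeat the same MachineType-to-opcode ladder; a hypothetical
helper that factors the exchange half of it out (every name below appears in
the code above, but the refactoring itself is only a sketch):

    ArchOpcode AtomicExchangeOpcodeFor(MachineType type) {
      if (type == MachineType::Int8()) return kAtomicExchangeInt8;
      if (type == MachineType::Uint8()) return kAtomicExchangeUint8;
      if (type == MachineType::Int16()) return kAtomicExchangeInt16;
      if (type == MachineType::Uint16()) return kAtomicExchangeUint16;
      if (type == MachineType::Int32() || type == MachineType::Uint32()) {
        return kAtomicExchangeWord32;
      }
      UNREACHABLE();
    }

Note also that every input, the output, and the temps use UseUniqueRegister
or TempRegister: the LL/SC loops write their temporaries while the inputs are
still live, so no two operands may be assigned the same register.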
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index 4ff55f25a5..6542f0d099 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -2,12 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/assembler-inl.h"
+#include "src/callable.h"
#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
+#include "src/heap/heap-inl.h"
#include "src/mips64/macro-assembler-mips64.h"
namespace v8 {
@@ -54,7 +57,7 @@ class MipsOperandConverter final : public InstructionOperandConverter {
Register InputOrZeroRegister(size_t index) {
if (instr_->InputAt(index)->IsImmediate()) {
- DCHECK((InputInt32(index) == 0));
+ DCHECK_EQ(0, InputInt32(index));
return zero_reg;
}
return InputRegister(index);
@@ -231,6 +234,28 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
must_save_lr_(!gen->frame_access_state()->has_frame()),
zone_(gen->zone()) {}
+ void SaveRegisters(RegList registers) {
+ DCHECK_LT(0, NumRegs(registers));
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
+ }
+ __ MultiPush(regs | ra.bit());
+ }
+
+ void RestoreRegisters(RegList registers) {
+ DCHECK_LT(0, NumRegs(registers));
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
+ }
+ __ MultiPop(regs | ra.bit());
+ }
+
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
@@ -238,6 +263,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ CheckPageFlag(value_, scratch0_,
MemoryChunk::kPointersToHereAreInterestingMask, eq,
exit());
+ __ Daddu(scratch1_, object_, index_);
RememberedSetAction const remembered_set_action =
mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
: OMIT_REMEMBERED_SET;
@@ -247,10 +273,14 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
// We need to save and restore ra if the frame was elided.
__ Push(ra);
}
- __ Daddu(scratch1_, object_, index_);
+#ifdef V8_CSA_WRITE_BARRIER
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode);
+#else
__ CallStubDelayed(
new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode));
+#endif
if (must_save_lr_) {
__ Pop(ra);
}
@@ -445,7 +475,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
if (instr->InputAt(0)->IsRegister()) { \
auto offset = i.InputRegister(0); \
auto value = i.InputOrZero##width##Register(2); \
- if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) { \
+ if (value == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { \
__ Move(kDoubleRegZero, 0.0); \
} \
ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), &done); \
@@ -455,7 +485,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
} else { \
int offset = static_cast<int>(i.InputOperand(0).immediate()); \
auto value = i.InputOrZero##width##Register(2); \
- if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) { \
+ if (value == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { \
__ Move(kDoubleRegZero, 0.0); \
} \
ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1), &done); \
@@ -582,6 +612,82 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
__ sync(); \
} while (0)
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER() \
+ do { \
+ Label exchange; \
+ __ sync(); \
+ __ bind(&exchange); \
+ __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Ll(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
+ __ mov(i.TempRegister(1), i.InputRegister(2)); \
+ __ Sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&exchange, eq, i.TempRegister(1), Operand(zero_reg)); \
+ __ sync(); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(sign_extend, size) \
+ do { \
+ Label exchange; \
+ __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \
+ __ Dsubu(i.TempRegister(0), i.TempRegister(0), \
+ Operand(i.TempRegister(1))); \
+ __ sll(i.TempRegister(1), i.TempRegister(1), 3); \
+ __ sync(); \
+ __ bind(&exchange); \
+ __ Ll(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
+ size, sign_extend); \
+ __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1), \
+ size); \
+ __ Sc(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&exchange, eq, i.TempRegister(2), Operand(zero_reg)); \
+ __ sync(); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER() \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ sync(); \
+ __ bind(&compareExchange); \
+ __ Ll(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&exit, ne, i.InputRegister(2), \
+ Operand(i.OutputRegister(0))); \
+ __ mov(i.TempRegister(2), i.InputRegister(3)); \
+ __ Sc(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&compareExchange, eq, i.TempRegister(2), \
+ Operand(zero_reg)); \
+ __ bind(&exit); \
+ __ sync(); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(sign_extend, size) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \
+ __ Dsubu(i.TempRegister(0), i.TempRegister(0), \
+ Operand(i.TempRegister(1))); \
+ __ sll(i.TempRegister(1), i.TempRegister(1), 3); \
+ __ sync(); \
+ __ bind(&compareExchange); \
+ __ Ll(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
+ size, sign_extend); \
+ __ BranchShort(&exit, ne, i.InputRegister(2), \
+ Operand(i.OutputRegister(0))); \
+ __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \
+ size); \
+ __ Sc(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&compareExchange, eq, i.TempRegister(2), \
+ Operand(zero_reg)); \
+ __ bind(&exit); \
+ __ sync(); \
+ } while (0)
+
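The _EXT macros above build byte- and halfword-wide atomics out of word-wide LL/SC: the address is aligned down to a 4-byte boundary, the discarded low bits become a bit shift, and ExtractBits/InsertBits splice the narrow value out of and into the containing word while the Ll/Sc pair retries on contention. A minimal C++ sketch of that address arithmetic (hypothetical helper; assumes the little-endian lane selection used here):

    // Mirrors the andi/Dsubu/sll sequence at the top of the _EXT macros.
    #include <cstdint>

    struct SubwordLocation {
      uintptr_t aligned_address;  // word-aligned address handed to Ll/Sc
      unsigned bit_shift;         // bit position of the narrow value in the word
    };

    inline SubwordLocation LocateSubword(uintptr_t address) {
      SubwordLocation loc;
      loc.aligned_address = address & ~uintptr_t{3};           // Dsubu(addr, addr & 0x3)
      loc.bit_shift = static_cast<unsigned>(address & 3) * 8;  // sll(temp, temp, 3)
      return loc;
    }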
#define ASSEMBLE_IEEE754_BINOP(name) \
do { \
FrameScope scope(tasm(), StackFrame::MANUAL); \
@@ -677,6 +783,34 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
first_unused_stack_slot);
}
+// Check if the code object is marked for deoptimization. If it is, then it
+// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we
+// need to:
+// 1. load the address of the current instruction;
+// 2. read from memory the word that contains the marked-for-deoptimization
+//    bit, which can be found in the first set of flags
+//    ({kKindSpecificFlags1Offset});
+// 3. test kMarkedForDeoptimizationBit in those flags; and
+// 4. if the bit is set, jump to the builtin.
+void CodeGenerator::BailoutIfDeoptimized() {
+ Label current;
+ // This push of ra and the matching pop below ensure that ra is restored;
+ // it is needed while computing frames for deoptimization.
+ __ push(ra);
+ // The bal instruction stores in ra the address of the instruction that
+ // follows its delay slot, i.e. the label {current}, which we use below.
+ __ bal(&current);
+ __ nop();
+ int pc = __ pc_offset();
+ __ bind(&current);
+ int offset = Code::kKindSpecificFlags1Offset - (Code::kHeaderSize + pc);
+ __ Lw(a2, MemOperand(ra, offset));
+ __ pop(ra);
+ __ And(a2, a2, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ Handle<Code> code = isolate()->builtins()->builtin_handle(
+ Builtins::kCompileLazyDeoptimizedCode);
+ __ Jump(code, RelocInfo::CODE_TARGET, ne, a2, Operand(zero_reg));
+}
+
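The offset computation in BailoutIfDeoptimized is worth unpacking. After the bal/nop pair, ra holds the address of the label {current}, whose offset from the start of the instruction stream is {pc}; the instruction stream itself begins kHeaderSize bytes into the Code object. The displacement used in the Lw therefore follows directly (a derivation, using the names from the code above):

    // flags_address = code_start + Code::kKindSpecificFlags1Offset
    // ra            = code_start + Code::kHeaderSize + pc
    // flags_address - ra = kKindSpecificFlags1Offset - (kHeaderSize + pc)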
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -741,13 +875,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchSaveCallerRegisters: {
+ fp_mode_ =
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
+ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// kReturnRegister0 should have been saved before entering the stub.
- __ PushCallerSaved(kSaveFPRegs, kReturnRegister0);
+ int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
+ DCHECK_EQ(0, bytes % kPointerSize);
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ DCHECK(!caller_registers_saved_);
+ caller_registers_saved_ = true;
break;
}
case kArchRestoreCallerRegisters: {
+ DCHECK(fp_mode_ ==
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
+ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// Don't overwrite the returned value.
- __ PopCallerSaved(kSaveFPRegs, kReturnRegister0);
+ int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(-(bytes / kPointerSize));
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ DCHECK(caller_registers_saved_);
+ caller_registers_saved_ = false;
break;
}
case kArchPrepareTailCall:
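The new bookkeeping in these two cases keeps frame_access_state() in sync with the real stack pointer: PushCallerSaved returns the number of bytes it pushed, which is recorded as a positive SP delta in stack slots, and PopCallerSaved undoes it symmetrically. A sketch of the invariant, reusing the names from the diff (not V8 code):

    // Around a C call, the delta must return to its starting value.
    int bytes = tasm->PushCallerSaved(fp_mode, kReturnRegister0);
    frame_access_state->IncreaseSPDelta(bytes / kPointerSize);    // delta > 0
    // ... kArchCallCFunction executes here ...
    bytes = tasm->PopCallerSaved(fp_mode, kReturnRegister0);
    frame_access_state->IncreaseSPDelta(-(bytes / kPointerSize)); // delta == 0 again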
@@ -763,7 +912,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ CallCFunction(func, num_parameters);
}
frame_access_state()->SetFrameAccessToDefault();
+ // Ideally, we should decrement the SP delta to match the change of the
+ // stack pointer in CallCFunction. However, on certain architectures
+ // (e.g. ARM) a stricter alignment requirement may cause the old SP to
+ // be saved on the stack, in which case the SP delta cannot be computed
+ // statically.
frame_access_state()->ClearSPDelta();
+ if (caller_registers_saved_) {
+ // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
+ // Here, we assume the sequence to be:
+ // kArchSaveCallerRegisters;
+ // kArchCallCFunction;
+ // kArchRestoreCallerRegisters;
+ int bytes =
+ __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ }
break;
}
case kArchJmp:
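kArchCallCFunction sits between those two cases and cannot track the stack pointer exactly, so it clears the delta and, when caller-saved registers are still parked on the stack, re-adds their statically known size. The assumed opcode sequence and its effect on the delta, restated from the comments above:

    // kArchSaveCallerRegisters     delta += pushed_bytes / kPointerSize
    // kArchCallCFunction           delta unknowable -> ClearSPDelta(), then
    //                              delta += RequiredStackSizeForCallerSaved(...)
    // kArchRestoreCallerRegisters  delta -= popped_bytes / kPointerSize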
@@ -776,7 +940,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssembleArchTableSwitch(instr);
break;
case kArchDebugAbort:
- DCHECK(i.InputRegister(0).is(a0));
+ DCHECK(i.InputRegister(0) == a0);
if (!frame_access_state()->has_frame()) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
@@ -1066,7 +1230,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(1)->IsRegister()) {
__ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
} else {
- DCHECK(i.InputOperand(1).immediate() == 0);
+ DCHECK_EQ(0, i.InputOperand(1).immediate());
__ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
}
break;
@@ -1076,7 +1240,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sll(i.InputRegister(1), i.InputRegister(1), 0x0);
__ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
} else {
- DCHECK(i.InputOperand(1).immediate() == 0);
+ DCHECK_EQ(0, i.InputOperand(1).immediate());
__ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
__ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
}
@@ -1801,7 +1965,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
size_t index = 0;
MemOperand operand = i.MemoryOperand(&index);
FPURegister ft = i.InputOrZeroSingleRegister(index);
- if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
+ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
__ Swc1(ft, operand);
@@ -1811,7 +1975,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
size_t index = 0;
MemOperand operand = i.MemoryOperand(&index);
FPURegister ft = i.InputOrZeroSingleRegister(index);
- if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
+ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
__ Uswc1(ft, operand, kScratchReg);
@@ -1825,7 +1989,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMips64Sdc1: {
FPURegister ft = i.InputOrZeroDoubleRegister(2);
- if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
+ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
__ Sdc1(ft, i.MemoryOperand());
@@ -1833,7 +1997,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMips64Usdc1: {
FPURegister ft = i.InputOrZeroDoubleRegister(2);
- if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
+ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
__ Usdc1(ft, i.MemoryOperand(), kScratchReg);
@@ -1938,16 +2102,35 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
break;
case kAtomicExchangeInt8:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 8);
+ break;
case kAtomicExchangeUint8:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 8);
+ break;
case kAtomicExchangeInt16:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 16);
+ break;
case kAtomicExchangeUint16:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 16);
+ break;
case kAtomicExchangeWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER();
+ break;
case kAtomicCompareExchangeInt8:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 8);
+ break;
case kAtomicCompareExchangeUint8:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 8);
+ break;
case kAtomicCompareExchangeInt16:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 16);
+ break;
case kAtomicCompareExchangeUint16:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 16);
+ break;
case kAtomicCompareExchangeWord32:
- UNREACHABLE();
+ __ sll(i.InputRegister(2), i.InputRegister(2), 0);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER();
break;
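The lone sll-by-zero in the Word32 case is not a no-op: on MIPS64 a 32-bit shift sign-extends its result into the full 64-bit register, which canonicalizes the expected value so that the 64-bit BranchShort comparison against the sign-extended Ll result behaves like a true 32-bit comparison. In C++ terms:

    // Effect of sll(reg, reg, 0) on the comparison operand (MIPS64 semantics):
    inline int64_t CanonicalizeWord32(int32_t expected) {
      return static_cast<int64_t>(expected);  // sign extension
    }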
#define ATOMIC_BINOP_CASE(op, inst) \
case kAtomic##op##Int8: \
@@ -1996,7 +2179,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
- if (!src.is(dst)) {
+ if (src != dst) {
__ move_v(dst, src);
}
__ insert_w(dst, i.InputInt8(1), i.InputRegister(2));
@@ -2030,7 +2213,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
- if (!src.is(dst)) {
+ if (src != dst) {
__ move_v(dst, src);
}
__ FmoveLow(kScratchReg, i.InputSingleRegister(2));
@@ -2110,7 +2293,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMips64S128Select: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- DCHECK(i.OutputSimd128Register().is(i.InputSimd128Register(0)));
+ DCHECK(i.OutputSimd128Register() == i.InputSimd128Register(0));
__ bsel_v(i.OutputSimd128Register(), i.InputSimd128Register(2),
i.InputSimd128Register(1));
break;
@@ -2245,7 +2428,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
- if (!src.is(dst)) {
+ if (src != dst) {
__ move_v(dst, src);
}
__ insert_h(dst, i.InputInt8(1), i.InputRegister(2));
@@ -2394,7 +2577,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
- if (!src.is(dst)) {
+ if (src != dst) {
__ move_v(dst, src);
}
__ insert_b(dst, i.InputInt8(1), i.InputRegister(2));
@@ -2676,22 +2859,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
int32_t shuffle = i.InputInt32(2);
- if (src0.is(src1)) {
+ if (src0 == src1) {
// Unary S32x4 shuffles are handled with shf.w instruction
uint32_t i8 = 0;
for (int i = 0; i < 4; i++) {
int lane = shuffle & 0xff;
- DCHECK(lane < 4);
+ DCHECK_GT(4, lane);
i8 |= lane << (2 * i);
shuffle >>= 8;
}
__ shf_w(dst, src0, i8);
} else {
// For binary shuffles use vshf.w instruction
- if (dst.is(src0)) {
+ if (dst == src0) {
__ move_v(kSimd128ScratchReg, src0);
src0 = kSimd128ScratchReg;
- } else if (dst.is(src1)) {
+ } else if (dst == src1) {
__ move_v(kSimd128ScratchReg, src1);
src1 = kSimd128ScratchReg;
}
@@ -2842,7 +3025,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMips64S8x16Concat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
- DCHECK(dst.is(i.InputSimd128Register(0)));
+ DCHECK(dst == i.InputSimd128Register(0));
__ sldi_b(dst, i.InputSimd128Register(1), i.InputInt4(2));
break;
}
@@ -2852,10 +3035,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
- if (dst.is(src0)) {
+ if (dst == src0) {
__ move_v(kSimd128ScratchReg, src0);
src0 = kSimd128ScratchReg;
- } else if (dst.is(src1)) {
+ } else if (dst == src1) {
__ move_v(kSimd128ScratchReg, src1);
src1 = kSimd128ScratchReg;
}
@@ -3143,7 +3326,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
}
FPURegister left = i.InputOrZeroSingleRegister(0);
FPURegister right = i.InputOrZeroSingleRegister(1);
- if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
+ if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
!__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
@@ -3154,7 +3337,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
}
FPURegister left = i.InputOrZeroDoubleRegister(0);
FPURegister right = i.InputOrZeroDoubleRegister(1);
- if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
+ if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
!__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
@@ -3178,6 +3361,10 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
branch->fallthru);
}
+void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
+ BranchInfo* branch) {
+ AssembleArchBranch(instr, branch);
+}
void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
@@ -3219,6 +3406,10 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
tasm()->isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
+ CallDescriptor* descriptor = gen_->linkage()->GetIncomingDescriptor();
+ int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ pop_count += (pop_count & 1); // align
+ __ Drop(pop_count);
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
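In the trap return path, the added Drop pops the incoming stack parameters before Ret, and pop_count is rounded up to an even slot count; the likely intent, stated here as an assumption in line with the MIPS n64 ABI, is to keep sp 16-byte aligned when stack slots are 8 bytes wide. Equivalently:

    // pop_count += (pop_count & 1) rounds an odd slot count up to even.
    inline int AlignedPopCount(int pop_count) {
      return pop_count + (pop_count & 1);
    }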
@@ -3404,7 +3595,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
instr->arch_opcode() == kMips64CmpS) {
FPURegister left = i.InputOrZeroDoubleRegister(0);
FPURegister right = i.InputOrZeroDoubleRegister(1);
- if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
+ if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
!__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
@@ -3415,7 +3606,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
if (instr->arch_opcode() == kMips64CmpD) {
__ c(cc, D, left, right);
} else {
- DCHECK(instr->arch_opcode() == kMips64CmpS);
+ DCHECK_EQ(kMips64CmpS, instr->arch_opcode());
__ c(cc, S, left, right);
}
if (predicate) {
@@ -3428,7 +3619,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
__ cmp(cc, L, kDoubleCompareReg, left, right);
__ dmfc1(result, kDoubleCompareReg);
} else {
- DCHECK(instr->arch_opcode() == kMips64CmpS);
+ DCHECK_EQ(kMips64CmpS, instr->arch_opcode());
__ cmp(cc, W, kDoubleCompareReg, left, right);
__ mfc1(result, kDoubleCompareReg);
}
@@ -3476,7 +3667,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
if (saves_fpu != 0) {
int count = base::bits::CountPopulation32(saves_fpu);
- DCHECK(kNumCalleeSavedFPU == count);
+ DCHECK_EQ(kNumCalleeSavedFPU, count);
frame->AllocateSavedCalleeRegisterSlots(count *
(kDoubleSize / kPointerSize));
}
@@ -3484,7 +3675,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
int count = base::bits::CountPopulation32(saves);
- DCHECK(kNumCalleeSaved == count + 1);
+ DCHECK_EQ(kNumCalleeSaved, count + 1);
frame->AllocateSavedCalleeRegisterSlots(count);
}
}
@@ -3530,14 +3721,14 @@ void CodeGenerator::AssembleConstructFrame() {
if (saves_fpu != 0) {
// Save callee-saved FPU registers.
__ MultiPushFPU(saves_fpu);
- DCHECK(kNumCalleeSavedFPU == base::bits::CountPopulation32(saves_fpu));
+ DCHECK_EQ(kNumCalleeSavedFPU, base::bits::CountPopulation32(saves_fpu));
}
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
// Save callee-saved registers.
__ MultiPush(saves);
- DCHECK(kNumCalleeSaved == base::bits::CountPopulation32(saves) + 1);
+ DCHECK_EQ(kNumCalleeSaved, base::bits::CountPopulation32(saves) + 1);
}
}
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index 0df4aa57bf..a8dc8a19db 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -51,7 +51,7 @@ class Mips64OperandGenerator final : public OperandGenerator {
if (node->opcode() == IrOpcode::kInt32Constant) {
return OpParameter<int32_t>(node);
}
- DCHECK(node->opcode() == IrOpcode::kInt64Constant);
+ DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
return OpParameter<int64_t>(node);
}
@@ -219,7 +219,7 @@ struct ExtendingLoadMatcher {
selector_->CanCover(m.node(), m.left().node())) {
MachineRepresentation rep =
LoadRepresentationOf(m.left().node()->op()).representation();
- DCHECK(ElementSizeLog2Of(rep) == 3);
+ DCHECK_EQ(3, ElementSizeLog2Of(rep));
if (rep != MachineRepresentation::kTaggedSigned &&
rep != MachineRepresentation::kTaggedPointer &&
rep != MachineRepresentation::kTagged &&
@@ -2610,10 +2610,82 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
}
}
-void InstructionSelector::VisitAtomicExchange(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitAtomicExchange(Node* node) {
+ Mips64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ MachineType type = AtomicOpRepresentationOf(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = kAtomicExchangeInt8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kAtomicExchangeUint8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kAtomicExchangeInt16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicExchangeUint16;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = kAtomicExchangeWord32;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temp[3];
+ temp[0] = g.TempRegister();
+ temp[1] = g.TempRegister();
+ temp[2] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 1, outputs, input_count, inputs, 3, temp);
+}
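Every operand here is allocated with UseUniqueRegister because the LL/SC retry loop re-reads its inputs on each iteration, so none of them may alias the output or the temporaries. Semantically, the selected opcode has to behave like a sequentially consistent exchange; a portable C++ sketch of that contract (illustration only, not the V8 lowering):

    #include <atomic>

    template <typename T>
    T AtomicExchangeSemantics(std::atomic<T>* location, T new_value) {
      // seq_cst mirrors the sync barriers emitted around the Ll/Sc loop.
      return location->exchange(new_value, std::memory_order_seq_cst);
    }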
void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
- UNIMPLEMENTED();
+ Mips64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* old_value = node->InputAt(2);
+ Node* new_value = node->InputAt(3);
+ ArchOpcode opcode = kArchNop;
+ MachineType type = AtomicOpRepresentationOf(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = kAtomicCompareExchangeInt8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kAtomicCompareExchangeUint8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kAtomicCompareExchangeInt16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicCompareExchangeUint16;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = kAtomicCompareExchangeWord32;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(old_value);
+ inputs[input_count++] = g.UseUniqueRegister(new_value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temp[3];
+ temp[0] = g.TempRegister();
+ temp[1] = g.TempRegister();
+ temp[2] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 1, outputs, input_count, inputs, 3, temp);
}
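The compare-exchange selection follows the same shape with one extra input; the output register always receives the value observed at the memory location, whether or not the swap happened. A portable sketch of that contract (again an illustration, not the actual lowering):

    #include <atomic>

    template <typename T>
    T AtomicCompareExchangeSemantics(std::atomic<T>* location, T expected,
                                     T new_value) {
      location->compare_exchange_strong(expected, new_value,
                                        std::memory_order_seq_cst);
      return expected;  // updated to the observed value when the swap fails
    }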
void InstructionSelector::VisitAtomicBinaryOperation(
@@ -2684,6 +2756,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8) \
V(I8x16)
+// TODO(mostynb@opera.com): this is never used, remove it?
#define SIMD_FORMAT_LIST(V) \
V(32x4) \
V(16x8) \
@@ -2986,12 +3059,20 @@ InstructionSelector::AlignmentRequirements() {
return MachineOperatorBuilder::AlignmentRequirements::
FullUnalignedAccessSupport();
} else {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK_EQ(kMips64r2, kArchVariant);
return MachineOperatorBuilder::AlignmentRequirements::
NoUnalignedAccessSupport();
}
}
+#undef SIMD_BINOP_LIST
+#undef SIMD_SHIFT_OP_LIST
+#undef SIMD_UNOP_LIST
+#undef SIMD_FORMAT_LIST
+#undef SIMD_TYPE_LIST
+#undef TRACE_UNIMPL
+#undef TRACE
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/move-optimizer.cc b/deps/v8/src/compiler/move-optimizer.cc
index f0cf43811b..82f4b63276 100644
--- a/deps/v8/src/compiler/move-optimizer.cc
+++ b/deps/v8/src/compiler/move-optimizer.cc
@@ -376,7 +376,7 @@ const Instruction* MoveOptimizer::LastInstruction(
void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
- DCHECK(block->PredecessorCount() > 1);
+ DCHECK_LT(1, block->PredecessorCount());
// Ensure that the last instruction in each incoming block doesn't contain
// things that would prevent moving gap moves across it.
for (RpoNumber& pred_index : block->predecessors()) {
diff --git a/deps/v8/src/compiler/new-escape-analysis-reducer.cc b/deps/v8/src/compiler/new-escape-analysis-reducer.cc
deleted file mode 100644
index 6ac2e03a5c..0000000000
--- a/deps/v8/src/compiler/new-escape-analysis-reducer.cc
+++ /dev/null
@@ -1,411 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/new-escape-analysis-reducer.h"
-
-#include "src/compiler/all-nodes.h"
-#include "src/compiler/simplified-operator.h"
-#include "src/compiler/type-cache.h"
-#include "src/frame-constants.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-#ifdef DEBUG
-#define TRACE(...) \
- do { \
- if (FLAG_trace_turbo_escape) PrintF(__VA_ARGS__); \
- } while (false)
-#else
-#define TRACE(...)
-#endif // DEBUG
-
-NewEscapeAnalysisReducer::NewEscapeAnalysisReducer(
- Editor* editor, JSGraph* jsgraph, EscapeAnalysisResult analysis_result,
- Zone* zone)
- : AdvancedReducer(editor),
- jsgraph_(jsgraph),
- analysis_result_(analysis_result),
- object_id_cache_(zone),
- node_cache_(jsgraph->graph(), zone),
- arguments_elements_(zone),
- zone_(zone) {}
-
-Node* NewEscapeAnalysisReducer::MaybeGuard(Node* original, Node* replacement) {
- // We might need to guard the replacement if the type of the {replacement}
-// node is not in a sub-type relation to the type of the {original} node.
- Type* const replacement_type = NodeProperties::GetType(replacement);
- Type* const original_type = NodeProperties::GetType(original);
- if (!replacement_type->Is(original_type)) {
- Node* const control = NodeProperties::GetControlInput(original);
- replacement = jsgraph()->graph()->NewNode(
- jsgraph()->common()->TypeGuard(original_type), replacement, control);
- NodeProperties::SetType(replacement, original_type);
- }
- return replacement;
-}
-
-namespace {
-
-Node* SkipTypeGuards(Node* node) {
- while (node->opcode() == IrOpcode::kTypeGuard) {
- node = NodeProperties::GetValueInput(node, 0);
- }
- return node;
-}
-
-} // namespace
-
-Node* NewEscapeAnalysisReducer::ObjectIdNode(const VirtualObject* vobject) {
- VirtualObject::Id id = vobject->id();
- if (id >= object_id_cache_.size()) object_id_cache_.resize(id + 1);
- if (!object_id_cache_[id]) {
- Node* node = jsgraph()->graph()->NewNode(jsgraph()->common()->ObjectId(id));
- NodeProperties::SetType(node, Type::Object());
- object_id_cache_[id] = node;
- }
- return object_id_cache_[id];
-}
-
-Reduction NewEscapeAnalysisReducer::Reduce(Node* node) {
- if (Node* replacement = analysis_result().GetReplacementOf(node)) {
- DCHECK(node->opcode() != IrOpcode::kAllocate &&
- node->opcode() != IrOpcode::kFinishRegion);
- DCHECK_NE(replacement, node);
- if (replacement != jsgraph()->Dead()) {
- replacement = MaybeGuard(node, replacement);
- }
- RelaxEffectsAndControls(node);
- return Replace(replacement);
- }
-
- switch (node->opcode()) {
- case IrOpcode::kAllocate: {
- const VirtualObject* vobject = analysis_result().GetVirtualObject(node);
- if (vobject && !vobject->HasEscaped()) {
- RelaxEffectsAndControls(node);
- }
- return NoChange();
- }
- case IrOpcode::kFinishRegion: {
- Node* effect = NodeProperties::GetEffectInput(node, 0);
- if (effect->opcode() == IrOpcode::kBeginRegion) {
- RelaxEffectsAndControls(effect);
- RelaxEffectsAndControls(node);
- }
- return NoChange();
- }
- case IrOpcode::kNewUnmappedArgumentsElements:
- arguments_elements_.insert(node);
- return NoChange();
- default: {
- // TODO(sigurds): Change this to GetFrameStateInputCount once
- // it is working. For now we use EffectInputCount > 0 to determine
- // whether a node might have a frame state input.
- if (node->op()->EffectInputCount() > 0) {
- ReduceFrameStateInputs(node);
- }
- return NoChange();
- }
- }
-}
-
-// While doing DFS on the FrameState tree, we have to recognize duplicate
-// occurrences of virtual objects.
-class Deduplicator {
- public:
- explicit Deduplicator(Zone* zone) : is_duplicate_(zone) {}
- bool SeenBefore(const VirtualObject* vobject) {
- VirtualObject::Id id = vobject->id();
- if (id >= is_duplicate_.size()) {
- is_duplicate_.resize(id + 1);
- }
- bool is_duplicate = is_duplicate_[id];
- is_duplicate_[id] = true;
- return is_duplicate;
- }
-
- private:
- ZoneVector<bool> is_duplicate_;
-};
-
-void NewEscapeAnalysisReducer::ReduceFrameStateInputs(Node* node) {
- DCHECK_GE(node->op()->EffectInputCount(), 1);
- for (int i = 0; i < node->InputCount(); ++i) {
- Node* input = node->InputAt(i);
- if (input->opcode() == IrOpcode::kFrameState) {
- Deduplicator deduplicator(zone());
- if (Node* ret = ReduceDeoptState(input, node, &deduplicator)) {
- node->ReplaceInput(i, ret);
- }
- }
- }
-}
-
-Node* NewEscapeAnalysisReducer::ReduceDeoptState(Node* node, Node* effect,
- Deduplicator* deduplicator) {
- if (node->opcode() == IrOpcode::kFrameState) {
- NodeHashCache::Constructor new_node(&node_cache_, node);
- // This input order is important to match the DFS traversal used in the
- // instruction selector. Otherwise, the instruction selector might find a
- // duplicate node before the original one.
- for (int input_id : {kFrameStateOuterStateInput, kFrameStateFunctionInput,
- kFrameStateParametersInput, kFrameStateContextInput,
- kFrameStateLocalsInput, kFrameStateStackInput}) {
- Node* input = node->InputAt(input_id);
- new_node.ReplaceInput(ReduceDeoptState(input, effect, deduplicator),
- input_id);
- }
- return new_node.Get();
- } else if (node->opcode() == IrOpcode::kStateValues) {
- NodeHashCache::Constructor new_node(&node_cache_, node);
- for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
- Node* input = NodeProperties::GetValueInput(node, i);
- new_node.ReplaceValueInput(ReduceDeoptState(input, effect, deduplicator),
- i);
- }
- return new_node.Get();
- } else if (const VirtualObject* vobject =
- analysis_result().GetVirtualObject(SkipTypeGuards(node))) {
- if (vobject->HasEscaped()) return node;
- if (deduplicator->SeenBefore(vobject)) {
- return ObjectIdNode(vobject);
- } else {
- std::vector<Node*> inputs;
- for (int offset = 0; offset < vobject->size(); offset += kPointerSize) {
- Node* field =
- analysis_result().GetVirtualObjectField(vobject, offset, effect);
- CHECK_NOT_NULL(field);
- if (field != jsgraph()->Dead()) {
- inputs.push_back(ReduceDeoptState(field, effect, deduplicator));
- }
- }
- int num_inputs = static_cast<int>(inputs.size());
- NodeHashCache::Constructor new_node(
- &node_cache_,
- jsgraph()->common()->ObjectState(vobject->id(), num_inputs),
- num_inputs, &inputs.front(), NodeProperties::GetType(node));
- return new_node.Get();
- }
- } else {
- return node;
- }
-}
-
-void NewEscapeAnalysisReducer::VerifyReplacement() const {
- AllNodes all(zone(), jsgraph()->graph());
- for (Node* node : all.reachable) {
- if (node->opcode() == IrOpcode::kAllocate) {
- if (const VirtualObject* vobject =
- analysis_result().GetVirtualObject(node)) {
- if (!vobject->HasEscaped()) {
- V8_Fatal(__FILE__, __LINE__,
- "Escape analysis failed to remove node %s#%d\n",
- node->op()->mnemonic(), node->id());
- }
- }
- }
- }
-}
-
-void NewEscapeAnalysisReducer::Finalize() {
- for (Node* node : arguments_elements_) {
- DCHECK(node->opcode() == IrOpcode::kNewUnmappedArgumentsElements);
-
- Node* arguments_frame = NodeProperties::GetValueInput(node, 0);
- if (arguments_frame->opcode() != IrOpcode::kArgumentsFrame) continue;
- Node* arguments_length = NodeProperties::GetValueInput(node, 1);
- if (arguments_length->opcode() != IrOpcode::kArgumentsLength) continue;
-
- Node* arguments_length_state = nullptr;
- for (Edge edge : arguments_length->use_edges()) {
- Node* use = edge.from();
- switch (use->opcode()) {
- case IrOpcode::kObjectState:
- case IrOpcode::kTypedObjectState:
- case IrOpcode::kStateValues:
- case IrOpcode::kTypedStateValues:
- if (!arguments_length_state) {
- arguments_length_state = jsgraph()->graph()->NewNode(
- jsgraph()->common()->ArgumentsLengthState(
- IsRestLengthOf(arguments_length->op())));
- NodeProperties::SetType(arguments_length_state,
- Type::OtherInternal());
- }
- edge.UpdateTo(arguments_length_state);
- break;
- default:
- break;
- }
- }
-
- bool escaping_use = false;
- ZoneVector<Node*> loads(zone());
- for (Edge edge : node->use_edges()) {
- Node* use = edge.from();
- if (!NodeProperties::IsValueEdge(edge)) continue;
- if (use->use_edges().empty()) {
- // A node without uses is dead, so we don't have to care about it.
- continue;
- }
- switch (use->opcode()) {
- case IrOpcode::kStateValues:
- case IrOpcode::kTypedStateValues:
- case IrOpcode::kObjectState:
- case IrOpcode::kTypedObjectState:
- break;
- case IrOpcode::kLoadElement:
- loads.push_back(use);
- break;
- case IrOpcode::kLoadField:
- if (FieldAccessOf(use->op()).offset == FixedArray::kLengthOffset) {
- loads.push_back(use);
- } else {
- escaping_use = true;
- }
- break;
- default:
- // If the arguments elements node is used by an unhandled node,
- // then we cannot remove this allocation.
- escaping_use = true;
- break;
- }
- if (escaping_use) break;
- }
- if (!escaping_use) {
- Node* arguments_elements_state = jsgraph()->graph()->NewNode(
- jsgraph()->common()->ArgumentsElementsState(
- IsRestLengthOf(arguments_length->op())));
- NodeProperties::SetType(arguments_elements_state, Type::OtherInternal());
- ReplaceWithValue(node, arguments_elements_state);
-
- ElementAccess stack_access;
- stack_access.base_is_tagged = BaseTaggedness::kUntaggedBase;
- // Reduce base address by {kPointerSize} such that (length - index)
- // resolves to the right position.
- stack_access.header_size =
- CommonFrameConstants::kFixedFrameSizeAboveFp - kPointerSize;
- stack_access.type = Type::NonInternal();
- stack_access.machine_type = MachineType::AnyTagged();
- stack_access.write_barrier_kind = WriteBarrierKind::kNoWriteBarrier;
- const Operator* load_stack_op =
- jsgraph()->simplified()->LoadElement(stack_access);
-
- for (Node* load : loads) {
- switch (load->opcode()) {
- case IrOpcode::kLoadElement: {
- Node* index = NodeProperties::GetValueInput(load, 1);
- // {offset} is a reversed index starting from 1. The base address is
- // adapted to allow offsets starting from 1.
- Node* offset = jsgraph()->graph()->NewNode(
- jsgraph()->simplified()->NumberSubtract(), arguments_length,
- index);
- NodeProperties::SetType(offset,
- TypeCache::Get().kArgumentsLengthType);
- NodeProperties::ReplaceValueInput(load, arguments_frame, 0);
- NodeProperties::ReplaceValueInput(load, offset, 1);
- NodeProperties::ChangeOp(load, load_stack_op);
- break;
- }
- case IrOpcode::kLoadField: {
- DCHECK_EQ(FieldAccessOf(load->op()).offset,
- FixedArray::kLengthOffset);
- Node* length = NodeProperties::GetValueInput(node, 1);
- ReplaceWithValue(load, length);
- break;
- }
- default:
- UNREACHABLE();
- }
- }
- }
- }
-}
-
-Node* NodeHashCache::Query(Node* node) {
- auto it = cache_.find(node);
- if (it != cache_.end()) {
- return *it;
- } else {
- return nullptr;
- }
-}
-
-NodeHashCache::Constructor::Constructor(NodeHashCache* cache,
- const Operator* op, int input_count,
- Node** inputs, Type* type)
- : node_cache_(cache), from_(nullptr) {
- if (node_cache_->temp_nodes_.size() > 0) {
- tmp_ = node_cache_->temp_nodes_.back();
- node_cache_->temp_nodes_.pop_back();
- int tmp_input_count = tmp_->InputCount();
- if (input_count <= tmp_input_count) {
- tmp_->TrimInputCount(input_count);
- }
- for (int i = 0; i < input_count; ++i) {
- if (i < tmp_input_count) {
- tmp_->ReplaceInput(i, inputs[i]);
- } else {
- tmp_->AppendInput(node_cache_->graph_->zone(), inputs[i]);
- }
- }
- NodeProperties::ChangeOp(tmp_, op);
- } else {
- tmp_ = node_cache_->graph_->NewNode(op, input_count, inputs);
- }
- NodeProperties::SetType(tmp_, type);
-}
-
-Node* NodeHashCache::Constructor::Get() {
- DCHECK(tmp_ || from_);
- Node* node;
- if (!tmp_) {
- node = node_cache_->Query(from_);
- if (!node) node = from_;
- } else {
- node = node_cache_->Query(tmp_);
- if (node) {
- node_cache_->temp_nodes_.push_back(tmp_);
- } else {
- node = tmp_;
- node_cache_->Insert(node);
- }
- }
- tmp_ = from_ = nullptr;
- return node;
-}
-
-Node* NodeHashCache::Constructor::MutableNode() {
- DCHECK(tmp_ || from_);
- if (!tmp_) {
- if (node_cache_->temp_nodes_.empty()) {
- tmp_ = node_cache_->graph_->CloneNode(from_);
- } else {
- tmp_ = node_cache_->temp_nodes_.back();
- node_cache_->temp_nodes_.pop_back();
- int from_input_count = from_->InputCount();
- int tmp_input_count = tmp_->InputCount();
- if (from_input_count <= tmp_input_count) {
- tmp_->TrimInputCount(from_input_count);
- }
- for (int i = 0; i < from_input_count; ++i) {
- if (i < tmp_input_count) {
- tmp_->ReplaceInput(i, from_->InputAt(i));
- } else {
- tmp_->AppendInput(node_cache_->graph_->zone(), from_->InputAt(i));
- }
- }
- NodeProperties::SetType(tmp_, NodeProperties::GetType(from_));
- NodeProperties::ChangeOp(tmp_, from_->op());
- }
- }
- return tmp_;
-}
-
-#undef TRACE
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/new-escape-analysis-reducer.h b/deps/v8/src/compiler/new-escape-analysis-reducer.h
deleted file mode 100644
index e5be1a06d9..0000000000
--- a/deps/v8/src/compiler/new-escape-analysis-reducer.h
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_NEW_ESCAPE_ANALYSIS_REDUCER_H_
-#define V8_COMPILER_NEW_ESCAPE_ANALYSIS_REDUCER_H_
-
-#include "src/base/compiler-specific.h"
-#include "src/compiler/graph-reducer.h"
-#include "src/compiler/new-escape-analysis.h"
-#include "src/globals.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class Deduplicator;
-class JSGraph;
-
-// Perform hash-consing when creating or mutating nodes. Used to avoid duplicate
-// nodes when creating ObjectState, StateValues and FrameState nodes.
-class NodeHashCache {
- public:
- NodeHashCache(Graph* graph, Zone* zone)
- : graph_(graph), cache_(zone), temp_nodes_(zone) {}
-
- // Handle to a conceptually new mutable node. Tries to re-use existing nodes
- // and to recycle memory if possible.
- class Constructor {
- public:
- // Construct a new node as a clone of [from].
- Constructor(NodeHashCache* cache, Node* from)
- : node_cache_(cache), from_(from), tmp_(nullptr) {}
- // Construct a new node from scratch.
- Constructor(NodeHashCache* cache, const Operator* op, int input_count,
- Node** inputs, Type* type);
-
- // Modify the new node.
- void ReplaceValueInput(Node* input, int i) {
- if (!tmp_ && input == NodeProperties::GetValueInput(from_, i)) return;
- Node* node = MutableNode();
- NodeProperties::ReplaceValueInput(node, input, i);
- }
- void ReplaceInput(Node* input, int i) {
- if (!tmp_ && input == from_->InputAt(i)) return;
- Node* node = MutableNode();
- node->ReplaceInput(i, input);
- }
-
- // Obtain the mutated node or a cached copy. Invalidates the [Constructor].
- Node* Get();
-
- private:
- Node* MutableNode();
-
- NodeHashCache* node_cache_;
- // Original node, copied on write.
- Node* from_;
- // Temporary node used for mutations, can be recycled if cache is hit.
- Node* tmp_;
- };
-
- private:
- Node* Query(Node* node);
- void Insert(Node* node) { cache_.insert(node); }
-
- Graph* graph_;
- struct NodeEquals {
- bool operator()(Node* a, Node* b) const {
- return NodeProperties::Equals(a, b);
- }
- };
- struct NodeHashCode {
- size_t operator()(Node* n) const { return NodeProperties::HashCode(n); }
- };
- ZoneUnorderedSet<Node*, NodeHashCode, NodeEquals> cache_;
- // Unused nodes whose memory can be recycled.
- ZoneVector<Node*> temp_nodes_;
-};
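Although the file is being deleted, the hash-consing contract documented above is simple to state: two Constructor invocations with the same operator, inputs, and type yield the same Node*, and the loser's temporary node is recycled. A hypothetical usage sketch against this API (a, b, op, type, graph, and zone assumed to exist):

    NodeHashCache cache(graph, zone);
    Node* inputs[] = {a, b};
    NodeHashCache::Constructor first(&cache, op, 2, inputs, type);
    Node* n1 = first.Get();   // miss: node is created and inserted
    NodeHashCache::Constructor second(&cache, op, 2, inputs, type);
    Node* n2 = second.Get();  // hit: n2 == n1, the temp node is recycled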
-
-// Modify the graph according to the information computed in the previous phase.
-class V8_EXPORT_PRIVATE NewEscapeAnalysisReducer final
- : public NON_EXPORTED_BASE(AdvancedReducer) {
- public:
- NewEscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph,
- EscapeAnalysisResult analysis_result, Zone* zone);
-
- Reduction Reduce(Node* node) override;
- const char* reducer_name() const override {
- return "NewEscapeAnalysisReducer";
- }
- void Finalize() override;
-
- // Verifies that all virtual allocation nodes have been dealt with. Run it
- // after this reducer has been applied.
- void VerifyReplacement() const;
-
- private:
- void ReduceFrameStateInputs(Node* node);
- Node* ReduceDeoptState(Node* node, Node* effect, Deduplicator* deduplicator);
- Node* ObjectIdNode(const VirtualObject* vobject);
- Node* MaybeGuard(Node* original, Node* replacement);
-
- JSGraph* jsgraph() const { return jsgraph_; }
- EscapeAnalysisResult analysis_result() const { return analysis_result_; }
- Zone* zone() const { return zone_; }
-
- JSGraph* const jsgraph_;
- EscapeAnalysisResult analysis_result_;
- ZoneVector<Node*> object_id_cache_;
- NodeHashCache node_cache_;
- ZoneSet<Node*> arguments_elements_;
- Zone* const zone_;
-
- DISALLOW_COPY_AND_ASSIGN(NewEscapeAnalysisReducer);
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_NEW_ESCAPE_ANALYSIS_REDUCER_H_
diff --git a/deps/v8/src/compiler/new-escape-analysis.cc b/deps/v8/src/compiler/new-escape-analysis.cc
deleted file mode 100644
index 591541b24d..0000000000
--- a/deps/v8/src/compiler/new-escape-analysis.cc
+++ /dev/null
@@ -1,739 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/new-escape-analysis.h"
-
-#include "src/bootstrapper.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/node-matchers.h"
-#include "src/compiler/operator-properties.h"
-#include "src/compiler/simplified-operator.h"
-#include "src/objects-inl.h"
-
-#ifdef DEBUG
-#define TRACE(...) \
- do { \
- if (FLAG_trace_turbo_escape) PrintF(__VA_ARGS__); \
- } while (false)
-#else
-#define TRACE(...)
-#endif
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-template <class T>
-class Sidetable {
- public:
- explicit Sidetable(Zone* zone) : map_(zone) {}
- T& operator[](const Node* node) {
- NodeId id = node->id();
- if (id >= map_.size()) {
- map_.resize(id + 1);
- }
- return map_[id];
- }
-
- private:
- ZoneVector<T> map_;
-};
-
-template <class T>
-class SparseSidetable {
- public:
- explicit SparseSidetable(Zone* zone, T def_value = T())
- : def_value_(std::move(def_value)), map_(zone) {}
- void Set(const Node* node, T value) {
- auto iter = map_.find(node->id());
- if (iter != map_.end()) {
- iter->second = std::move(value);
- } else if (value != def_value_) {
- map_.insert(iter, std::make_pair(node->id(), std::move(value)));
- }
- }
- const T& Get(const Node* node) const {
- auto iter = map_.find(node->id());
- return iter != map_.end() ? iter->second : def_value_;
- }
-
- private:
- T def_value_;
- ZoneUnorderedMap<NodeId, T> map_;
-};
-
-// Keeps track of the changes to the current node during reduction.
-// Encapsulates the current state of the IR graph and the reducer state like
-// side-tables. All access to the IR and the reducer state should happen through
-// a ReduceScope to ensure that changes and dependencies are tracked and all
-// necessary node revisitations happen.
-class ReduceScope {
- public:
- typedef EffectGraphReducer::Reduction Reduction;
- explicit ReduceScope(Node* node, Reduction* reduction)
- : current_node_(node), reduction_(reduction) {}
-
- protected:
- Node* current_node() const { return current_node_; }
- Reduction* reduction() { return reduction_; }
-
- private:
- Node* current_node_;
- Reduction* reduction_;
-};
-
-// A VariableTracker object keeps track of the values of variables at all points
-// of the effect chain and introduces new phi nodes when necessary.
-// Initially and by default, variables are mapped to nullptr, which means that
-// the variable allocation point does not dominate the current point on the
-// effect chain. We map variables that represent uninitialized memory to the
-// Dead node to ensure it is not read.
-// Unmapped values are impossible by construction: a PersistentMap cannot
-// distinguish between an absent element and one mapped to the default value.
-class VariableTracker {
- private:
- // The state of all variables at one point in the effect chain.
- class State {
- typedef PersistentMap<Variable, Node*> Map;
-
- public:
- explicit State(Zone* zone) : map_(zone) {}
- Node* Get(Variable var) const {
- CHECK(var != Variable::Invalid());
- return map_.Get(var);
- }
- void Set(Variable var, Node* node) {
- CHECK(var != Variable::Invalid());
- return map_.Set(var, node);
- }
- Map::iterator begin() const { return map_.begin(); }
- Map::iterator end() const { return map_.end(); }
- bool operator!=(const State& other) const { return map_ != other.map_; }
-
- private:
- Map map_;
- };
-
- public:
- VariableTracker(JSGraph* graph, EffectGraphReducer* reducer, Zone* zone);
- Variable NewVariable() { return Variable(next_variable_++); }
- Node* Get(Variable var, Node* effect) { return table_.Get(effect).Get(var); }
- Zone* zone() { return zone_; }
-
- class Scope : public ReduceScope {
- public:
- Scope(VariableTracker* tracker, Node* node, Reduction* reduction);
- ~Scope();
- Node* Get(Variable var) { return current_state_.Get(var); }
- void Set(Variable var, Node* node) { current_state_.Set(var, node); }
-
- private:
- VariableTracker* states_;
- State current_state_;
- };
-
- private:
- State MergeInputs(Node* effect_phi);
- Zone* zone_;
- JSGraph* graph_;
- SparseSidetable<State> table_;
- ZoneVector<Node*> buffer_;
- EffectGraphReducer* reducer_;
- int next_variable_ = 0;
-
- DISALLOW_COPY_AND_ASSIGN(VariableTracker);
-};
-
-// Encapsulates the current state of the escape analysis reducer to preserve
-// invariants regarding changes and re-visitation.
-class EscapeAnalysisTracker : public ZoneObject {
- public:
- EscapeAnalysisTracker(JSGraph* jsgraph, EffectGraphReducer* reducer,
- Zone* zone)
- : virtual_objects_(zone),
- replacements_(zone),
- variable_states_(jsgraph, reducer, zone),
- jsgraph_(jsgraph),
- zone_(zone) {}
-
- class Scope : public VariableTracker::Scope {
- public:
- Scope(EffectGraphReducer* reducer, EscapeAnalysisTracker* tracker,
- Node* node, Reduction* reduction)
- : VariableTracker::Scope(&tracker->variable_states_, node, reduction),
- tracker_(tracker),
- reducer_(reducer) {}
- const VirtualObject* GetVirtualObject(Node* node) {
- VirtualObject* vobject = tracker_->virtual_objects_.Get(node);
- if (vobject) vobject->AddDependency(current_node());
- return vobject;
- }
- // Create or retrieve a virtual object for the current node.
- const VirtualObject* InitVirtualObject(int size) {
- DCHECK(current_node()->opcode() == IrOpcode::kAllocate);
- VirtualObject* vobject = tracker_->virtual_objects_.Get(current_node());
- if (vobject) {
- CHECK(vobject->size() == size);
- } else {
- vobject = tracker_->NewVirtualObject(size);
- }
- if (vobject) vobject->AddDependency(current_node());
- vobject_ = vobject;
- return vobject;
- }
-
- void SetVirtualObject(Node* object) {
- vobject_ = tracker_->virtual_objects_.Get(object);
- }
-
- void SetEscaped(Node* node) {
- if (VirtualObject* object = tracker_->virtual_objects_.Get(node)) {
- if (object->HasEscaped()) return;
- TRACE("Setting %s#%d to escaped because of use by %s#%d\n",
- node->op()->mnemonic(), node->id(),
- current_node()->op()->mnemonic(), current_node()->id());
- object->SetEscaped();
- object->RevisitDependants(reducer_);
- }
- }
- // The inputs of the current node have to be accessed through the scope to
- // ensure that they respect the node replacements.
- Node* ValueInput(int i) {
- return tracker_->ResolveReplacement(
- NodeProperties::GetValueInput(current_node(), i));
- }
- Node* ContextInput() {
- return tracker_->ResolveReplacement(
- NodeProperties::GetContextInput(current_node()));
- }
-
- void SetReplacement(Node* replacement) {
- replacement_ = replacement;
- vobject_ =
- replacement ? tracker_->virtual_objects_.Get(replacement) : nullptr;
- TRACE("Set %s#%d as replacement.\n", replacement->op()->mnemonic(),
- replacement->id());
- }
-
- void MarkForDeletion() { SetReplacement(tracker_->jsgraph_->Dead()); }
-
- ~Scope() {
- if (replacement_ != tracker_->replacements_[current_node()] ||
- vobject_ != tracker_->virtual_objects_.Get(current_node())) {
- reduction()->set_value_changed();
- }
- tracker_->replacements_[current_node()] = replacement_;
- tracker_->virtual_objects_.Set(current_node(), vobject_);
- }
-
- private:
- EscapeAnalysisTracker* tracker_;
- EffectGraphReducer* reducer_;
- VirtualObject* vobject_ = nullptr;
- Node* replacement_ = nullptr;
- };
-
- Node* GetReplacementOf(Node* node) { return replacements_[node]; }
- Node* ResolveReplacement(Node* node) {
- if (Node* replacement = GetReplacementOf(node)) {
- // Replacements cannot have replacements. This is important to ensure
- // re-visitation: If a replacement is replaced, then all nodes accessing
- // the replacement have to be updated.
- DCHECK_NULL(GetReplacementOf(replacement));
- return replacement;
- }
- return node;
- }
-
- private:
- friend class EscapeAnalysisResult;
- static const size_t kMaxTrackedObjects = 100;
-
- VirtualObject* NewVirtualObject(int size) {
- if (next_object_id_ >= kMaxTrackedObjects) return nullptr;
- return new (zone_)
- VirtualObject(&variable_states_, next_object_id_++, size);
- }
-
- SparseSidetable<VirtualObject*> virtual_objects_;
- Sidetable<Node*> replacements_;
- VariableTracker variable_states_;
- VirtualObject::Id next_object_id_ = 0;
- JSGraph* const jsgraph_;
- Zone* const zone_;
-
- DISALLOW_COPY_AND_ASSIGN(EscapeAnalysisTracker);
-};
-
-EffectGraphReducer::EffectGraphReducer(
- Graph* graph, std::function<void(Node*, Reduction*)> reduce, Zone* zone)
- : graph_(graph),
- state_(graph, kNumStates),
- revisit_(zone),
- stack_(zone),
- reduce_(reduce) {}
-
-void EffectGraphReducer::ReduceFrom(Node* node) {
- // Perform DFS and eagerly trigger revisitation as soon as possible.
- // A stack element {node, i} indicates that input i of node should be visited
- // next.
- DCHECK(stack_.empty());
- stack_.push({node, 0});
- while (!stack_.empty()) {
- Node* current = stack_.top().node;
- int& input_index = stack_.top().input_index;
- if (input_index < current->InputCount()) {
- Node* input = current->InputAt(input_index);
- input_index++;
- switch (state_.Get(input)) {
- case State::kVisited:
- // The input is already reduced.
- break;
- case State::kOnStack:
- // The input is on the DFS stack right now, so it will be revisited
- // later anyway.
- break;
- case State::kUnvisited:
- case State::kRevisit: {
- state_.Set(input, State::kOnStack);
- stack_.push({input, 0});
- break;
- }
- }
- } else {
- stack_.pop();
- Reduction reduction;
- reduce_(current, &reduction);
- for (Edge edge : current->use_edges()) {
- // Mark uses for revisitation.
- Node* use = edge.from();
- if (NodeProperties::IsEffectEdge(edge)) {
- if (reduction.effect_changed()) Revisit(use);
- } else {
- if (reduction.value_changed()) Revisit(use);
- }
- }
- state_.Set(current, State::kVisited);
- // Process the revisitation buffer immediately. This improves performance
- // of escape analysis. Using a stack for {revisit_} reverses the order in
- // which the revisitation happens. This also seems to improve performance.
- while (!revisit_.empty()) {
- Node* revisit = revisit_.top();
- if (state_.Get(revisit) == State::kRevisit) {
- state_.Set(revisit, State::kOnStack);
- stack_.push({revisit, 0});
- }
- revisit_.pop();
- }
- }
- }
-}
-
-void EffectGraphReducer::Revisit(Node* node) {
- if (state_.Get(node) == State::kVisited) {
- TRACE(" Queueing for revisit: %s#%d\n", node->op()->mnemonic(),
- node->id());
- state_.Set(node, State::kRevisit);
- revisit_.push(node);
- }
-}
-
-VariableTracker::VariableTracker(JSGraph* graph, EffectGraphReducer* reducer,
- Zone* zone)
- : zone_(zone),
- graph_(graph),
- table_(zone, State(zone)),
- buffer_(zone),
- reducer_(reducer) {}
-
-VariableTracker::Scope::Scope(VariableTracker* states, Node* node,
- Reduction* reduction)
- : ReduceScope(node, reduction),
- states_(states),
- current_state_(states->zone_) {
- switch (node->opcode()) {
- case IrOpcode::kEffectPhi:
- current_state_ = states_->MergeInputs(node);
- break;
- default:
- int effect_inputs = node->op()->EffectInputCount();
- if (effect_inputs == 1) {
- current_state_ =
- states_->table_.Get(NodeProperties::GetEffectInput(node, 0));
- } else {
- DCHECK_EQ(0, effect_inputs);
- }
- }
-}
-
-VariableTracker::Scope::~Scope() {
- if (!reduction()->effect_changed() &&
- states_->table_.Get(current_node()) != current_state_) {
- reduction()->set_effect_changed();
- }
- states_->table_.Set(current_node(), current_state_);
-}
-
-VariableTracker::State VariableTracker::MergeInputs(Node* effect_phi) {
- // A variable that is mapped to [nullptr] was not assigned a value on every
- // execution path to the current effect phi. Relying on the invariant that
- // every variable is initialized (at least with a sentinel like the Dead
- // node), this means that the variable initialization does not dominate the
- // current point. So for loop effect phis, we can keep nullptr for a variable
- // as long as the first input of the loop has nullptr for this variable. For
- // non-loop effect phis, we can even keep it nullptr as long as any input has
- // nullptr.
- DCHECK(effect_phi->opcode() == IrOpcode::kEffectPhi);
- int arity = effect_phi->op()->EffectInputCount();
- Node* control = NodeProperties::GetControlInput(effect_phi, 0);
- TRACE("control: %s#%d\n", control->op()->mnemonic(), control->id());
- bool is_loop = control->opcode() == IrOpcode::kLoop;
- buffer_.reserve(arity + 1);
-
- State first_input = table_.Get(NodeProperties::GetEffectInput(effect_phi, 0));
- State result = first_input;
- for (std::pair<Variable, Node*> var_value : first_input) {
- if (Node* value = var_value.second) {
- Variable var = var_value.first;
- TRACE("var %i:\n", var.id_);
- buffer_.clear();
- buffer_.push_back(value);
- bool identical_inputs = true;
- int num_defined_inputs = 1;
- TRACE(" input 0: %s#%d\n", value->op()->mnemonic(), value->id());
- for (int i = 1; i < arity; ++i) {
- Node* next_value =
- table_.Get(NodeProperties::GetEffectInput(effect_phi, i)).Get(var);
- if (next_value != value) identical_inputs = false;
- if (next_value != nullptr) {
- num_defined_inputs++;
- TRACE(" input %i: %s#%d\n", i, next_value->op()->mnemonic(),
- next_value->id());
- } else {
- TRACE(" input %i: nullptr\n", i);
- }
- buffer_.push_back(next_value);
- }
-
- Node* old_value = table_.Get(effect_phi).Get(var);
- if (old_value) {
- TRACE(" old: %s#%d\n", old_value->op()->mnemonic(), old_value->id());
- } else {
- TRACE(" old: nullptr\n");
- }
- // Reuse a previously created phi node if possible.
- if (old_value && old_value->opcode() == IrOpcode::kPhi &&
- NodeProperties::GetControlInput(old_value, 0) == control) {
- // Since a phi node can never dominate its control node,
- // [old_value] cannot originate from the inputs. Thus [old_value]
- // must have been created by a previous reduction of this [effect_phi].
- for (int i = 0; i < arity; ++i) {
- NodeProperties::ReplaceValueInput(
- old_value, buffer_[i] ? buffer_[i] : graph_->Dead(), i);
- // This change cannot affect the rest of the reducer, so there is no
- // need to trigger additional revisitations.
- }
- result.Set(var, old_value);
- } else {
- if (num_defined_inputs == 1 && is_loop) {
- // For loop effect phis, the variable initialization dominates iff it
- // dominates the first input.
- DCHECK_EQ(2, arity);
- DCHECK_EQ(value, buffer_[0]);
- result.Set(var, value);
- } else if (num_defined_inputs < arity) {
- // If the variable is undefined on some input of this non-loop effect
- // phi, then its initialization does not dominate this point.
- result.Set(var, nullptr);
- } else {
- DCHECK_EQ(num_defined_inputs, arity);
- // We only create a phi if the values are different.
- if (identical_inputs) {
- result.Set(var, value);
- } else {
- TRACE("Creating new phi\n");
- buffer_.push_back(control);
- Node* phi = graph_->graph()->NewNode(
- graph_->common()->Phi(MachineRepresentation::kTagged, arity),
- arity + 1, &buffer_.front());
- // TODO(tebbi): Computing precise types here is tricky, because of
- // the necessary revisitations. If we really need this, we should
- // probably do it afterwards.
- NodeProperties::SetType(phi, Type::Any());
- reducer_->AddRoot(phi);
- result.Set(var, phi);
- }
- }
- }
-#ifdef DEBUG
- if (Node* result_node = result.Get(var)) {
- TRACE(" result: %s#%d\n", result_node->op()->mnemonic(),
- result_node->id());
- } else {
- TRACE(" result: nullptr\n");
- }
-#endif
- }
- }
- return result;
-}
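The merge logic above condenses to four cases; worked examples (illustrative, derived from the branches of MergeInputs):

    // non-loop merge, inputs {v, nullptr}      -> nullptr (init doesn't dominate)
    // loop phi, inputs {v, nullptr back edge}  -> v (first input dominates)
    // any merge, identical inputs {v, v}       -> v (no phi needed)
    // all defined but different, {v1, v2}      -> fresh Phi(v1, v2), typed Any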
-
-namespace {
-
-int OffsetOfFieldAccess(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kLoadField ||
- op->opcode() == IrOpcode::kStoreField);
- FieldAccess access = FieldAccessOf(op);
- return access.offset;
-}
-
-Maybe<int> OffsetOfElementsAccess(const Operator* op, Node* index_node) {
- DCHECK(op->opcode() == IrOpcode::kLoadElement ||
- op->opcode() == IrOpcode::kStoreElement);
- Type* index_type = NodeProperties::GetType(index_node);
- if (!index_type->Is(Type::Number())) return Nothing<int>();
- double max = index_type->Max();
- double min = index_type->Min();
- int index = static_cast<int>(min);
- if (!(index == min && index == max)) return Nothing<int>();
- ElementAccess access = ElementAccessOf(op);
- DCHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
- kPointerSizeLog2);
- return Just(access.header_size + (index << ElementSizeLog2Of(
- access.machine_type.representation())));
-}
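A worked instance of the computation above, with assumed numbers: pointer-sized elements (ElementSizeLog2Of == 3), header_size == 16, and an index type collapsed to the constant 2 give

    // Just(access.header_size + (index << ElementSizeLog2Of(rep)))
    // == Just(16 + (2 << 3)) == Just(32)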
-
-void ReduceNode(const Operator* op, EscapeAnalysisTracker::Scope* current,
- JSGraph* jsgraph) {
- switch (op->opcode()) {
- case IrOpcode::kAllocate: {
- NumberMatcher size(current->ValueInput(0));
- if (!size.HasValue()) break;
- int size_int = static_cast<int>(size.Value());
- if (size_int != size.Value()) break;
- if (const VirtualObject* vobject = current->InitVirtualObject(size_int)) {
- // Initialize with dead nodes as a sentinel for uninitialized memory.
- for (Variable field : *vobject) {
- current->Set(field, jsgraph->Dead());
- }
- }
- break;
- }
- case IrOpcode::kFinishRegion:
- current->SetVirtualObject(current->ValueInput(0));
- break;
- case IrOpcode::kStoreField: {
- Node* object = current->ValueInput(0);
- Node* value = current->ValueInput(1);
- const VirtualObject* vobject = current->GetVirtualObject(object);
- Variable var;
- if (vobject && !vobject->HasEscaped() &&
- vobject->FieldAt(OffsetOfFieldAccess(op)).To(&var)) {
- current->Set(var, value);
- current->MarkForDeletion();
- } else {
- current->SetEscaped(object);
- current->SetEscaped(value);
- }
- break;
- }
- case IrOpcode::kStoreElement: {
- Node* object = current->ValueInput(0);
- Node* index = current->ValueInput(1);
- Node* value = current->ValueInput(2);
- const VirtualObject* vobject = current->GetVirtualObject(object);
- int offset;
- Variable var;
- if (vobject && !vobject->HasEscaped() &&
- OffsetOfElementsAccess(op, index).To(&offset) &&
- vobject->FieldAt(offset).To(&var)) {
- current->Set(var, value);
- current->MarkForDeletion();
- } else {
- current->SetEscaped(value);
- current->SetEscaped(object);
- }
- break;
- }
- case IrOpcode::kLoadField: {
- Node* object = current->ValueInput(0);
- const VirtualObject* vobject = current->GetVirtualObject(object);
- Variable var;
- if (vobject && !vobject->HasEscaped() &&
- vobject->FieldAt(OffsetOfFieldAccess(op)).To(&var)) {
- current->SetReplacement(current->Get(var));
- } else {
- // TODO(tebbi): At the moment, we mark objects as escaping if there
- // is a load from an invalid location to avoid dead nodes. This is a
- // workaround that should be removed once we can handle dead nodes
- // everywhere.
- current->SetEscaped(object);
- }
- break;
- }
- case IrOpcode::kLoadElement: {
- Node* object = current->ValueInput(0);
- Node* index = current->ValueInput(1);
- const VirtualObject* vobject = current->GetVirtualObject(object);
- int offset;
- Variable var;
- if (vobject && !vobject->HasEscaped() &&
- OffsetOfElementsAccess(op, index).To(&offset) &&
- vobject->FieldAt(offset).To(&var)) {
- current->SetReplacement(current->Get(var));
- } else {
- current->SetEscaped(object);
- }
- break;
- }
- case IrOpcode::kTypeGuard: {
- // The type-guard is re-introduced in the final reducer if the types
- // don't match.
- current->SetReplacement(current->ValueInput(0));
- break;
- }
- case IrOpcode::kReferenceEqual: {
- Node* left = current->ValueInput(0);
- Node* right = current->ValueInput(1);
- const VirtualObject* left_object = current->GetVirtualObject(left);
- const VirtualObject* right_object = current->GetVirtualObject(right);
- Node* replacement = nullptr;
- if (left_object && !left_object->HasEscaped()) {
- if (right_object && !right_object->HasEscaped() &&
- left_object->id() == right_object->id()) {
- replacement = jsgraph->TrueConstant();
- } else {
- replacement = jsgraph->FalseConstant();
- }
- } else if (right_object && !right_object->HasEscaped()) {
- replacement = jsgraph->FalseConstant();
- }
- if (replacement) {
- // TODO(tebbi) This is a workaround for uninhabited types. If we
- // replaced a value of uninhabited type with a constant, we would
- // widen the type of the node. This could produce inconsistent
- // types (which might confuse representation selection). We get
- // around this by refusing to constant-fold and escape-analyze
- // if the type is not inhabited.
- if (NodeProperties::GetType(left)->IsInhabited() &&
- NodeProperties::GetType(right)->IsInhabited()) {
- current->SetReplacement(replacement);
- } else {
- current->SetEscaped(left);
- current->SetEscaped(right);
- }
- }
- break;
- }
- case IrOpcode::kCheckMaps: {
- CheckMapsParameters params = CheckMapsParametersOf(op);
- Node* checked = current->ValueInput(0);
- const VirtualObject* vobject = current->GetVirtualObject(checked);
- Variable map_field;
- if (vobject && !vobject->HasEscaped() &&
- vobject->FieldAt(HeapObject::kMapOffset).To(&map_field)) {
- Node* map = current->Get(map_field);
- if (map) {
- Type* const map_type = NodeProperties::GetType(map);
- if (map_type->IsHeapConstant() &&
- params.maps().contains(ZoneHandleSet<Map>(bit_cast<Handle<Map>>(
- map_type->AsHeapConstant()->Value())))) {
- current->MarkForDeletion();
- break;
- }
- }
- }
- current->SetEscaped(checked);
- break;
- }
- case IrOpcode::kCheckHeapObject: {
- Node* checked = current->ValueInput(0);
- switch (checked->opcode()) {
- case IrOpcode::kAllocate:
- case IrOpcode::kFinishRegion:
- case IrOpcode::kHeapConstant:
- current->SetReplacement(checked);
- break;
- default:
- current->SetEscaped(checked);
- break;
- }
- break;
- }
- case IrOpcode::kMapGuard: {
- Node* object = current->ValueInput(0);
- const VirtualObject* vobject = current->GetVirtualObject(object);
- if (vobject && !vobject->HasEscaped()) {
- current->MarkForDeletion();
- }
- break;
- }
- case IrOpcode::kStateValues:
- case IrOpcode::kFrameState:
- // These uses are always safe.
- break;
- default: {
- // For unknown nodes, treat all value inputs as escaping.
- int value_input_count = op->ValueInputCount();
- for (int i = 0; i < value_input_count; ++i) {
- Node* input = current->ValueInput(i);
- current->SetEscaped(input);
- }
- if (OperatorProperties::HasContextInput(op)) {
- current->SetEscaped(current->ContextInput());
- }
- break;
- }
- }
-}
-
-} // namespace
-
-void NewEscapeAnalysis::Reduce(Node* node, Reduction* reduction) {
- const Operator* op = node->op();
- TRACE("Reducing %s#%d\n", op->mnemonic(), node->id());
-
- EscapeAnalysisTracker::Scope current(this, tracker_, node, reduction);
- ReduceNode(op, &current, jsgraph());
-}
-
-NewEscapeAnalysis::NewEscapeAnalysis(JSGraph* jsgraph, Zone* zone)
- : EffectGraphReducer(
- jsgraph->graph(),
- [this](Node* node, Reduction* reduction) { Reduce(node, reduction); },
- zone),
- tracker_(new (zone) EscapeAnalysisTracker(jsgraph, this, zone)),
- jsgraph_(jsgraph) {}
-
-Node* EscapeAnalysisResult::GetReplacementOf(Node* node) {
- return tracker_->GetReplacementOf(node);
-}
-
-Node* EscapeAnalysisResult::GetVirtualObjectField(const VirtualObject* vobject,
- int field, Node* effect) {
- return tracker_->variable_states_.Get(vobject->FieldAt(field).FromJust(),
- effect);
-}
-
-const VirtualObject* EscapeAnalysisResult::GetVirtualObject(Node* node) {
- return tracker_->virtual_objects_.Get(node);
-}
-
-VirtualObject::VirtualObject(VariableTracker* var_states, VirtualObject::Id id,
- int size)
- : Dependable(var_states->zone()), id_(id), fields_(var_states->zone()) {
- DCHECK(size % kPointerSize == 0);
- TRACE("Creating VirtualObject id:%d size:%d\n", id, size);
- int num_fields = size / kPointerSize;
- fields_.reserve(num_fields);
- for (int i = 0; i < num_fields; ++i) {
- fields_.push_back(var_states->NewVariable());
- }
-}
-
-#undef TRACE
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
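The removed OffsetOfElementsAccess above folds an element access only when the index type collapses to a single integer; the byte offset is then the header size plus the index shifted left by the element size's log2. A minimal standalone sketch of that arithmetic, with illustrative constants (kHeaderSize and kElementSizeLog2 here are hypothetical; the real values come from ElementAccess):

    #include <cstdio>

    int main() {
      const int kHeaderSize = 8;       // hypothetical access.header_size
      const int kElementSizeLog2 = 3;  // pointer-sized (8-byte) elements
      int index = 2;                   // only provably-constant indices qualify
      int offset = kHeaderSize + (index << kElementSizeLog2);
      printf("byte offset of element %d = %d\n", index, offset);  // 8 + 2*8 = 24
    }
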
diff --git a/deps/v8/src/compiler/new-escape-analysis.h b/deps/v8/src/compiler/new-escape-analysis.h
deleted file mode 100644
index 316a20793d..0000000000
--- a/deps/v8/src/compiler/new-escape-analysis.h
+++ /dev/null
@@ -1,181 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_NEW_ESCAPE_ANALYSIS_H_
-#define V8_COMPILER_NEW_ESCAPE_ANALYSIS_H_
-
-#include "src/base/functional.h"
-#include "src/compiler/graph-reducer.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/persistent-map.h"
-#include "src/globals.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class CommonOperatorBuilder;
-class VariableTracker;
-class EscapeAnalysisTracker;
-
-// {EffectGraphReducer} reduces up to a fixed point. It distinguishes changes to
-// the effect output of a node from changes to the value output to reduce the
-// number of revisitations.
-class EffectGraphReducer {
- public:
- class Reduction {
- public:
- bool value_changed() const { return value_changed_; }
- void set_value_changed() { value_changed_ = true; }
- bool effect_changed() const { return effect_changed_; }
- void set_effect_changed() { effect_changed_ = true; }
-
- private:
- bool value_changed_ = false;
- bool effect_changed_ = false;
- };
-
- EffectGraphReducer(Graph* graph,
- std::function<void(Node*, Reduction*)> reduce, Zone* zone);
-
- void ReduceGraph() { ReduceFrom(graph_->end()); }
-
- // Mark node for revisitation.
- void Revisit(Node* node);
-
- // Add a new root node to start reduction from. This is useful if the reducer
- // adds nodes that are not yet reachable, but should already be considered
- // part of the graph.
- void AddRoot(Node* node) {
- DCHECK(state_.Get(node) == State::kUnvisited);
- state_.Set(node, State::kRevisit);
- revisit_.push(node);
- }
-
- bool Complete() { return stack_.empty() && revisit_.empty(); }
-
- private:
- struct NodeState {
- Node* node;
- int input_index;
- };
- void ReduceFrom(Node* node);
- enum class State : uint8_t { kUnvisited = 0, kRevisit, kOnStack, kVisited };
- const uint8_t kNumStates = static_cast<uint8_t>(State::kVisited) + 1;
- Graph* graph_;
- NodeMarker<State> state_;
- ZoneStack<Node*> revisit_;
- ZoneStack<NodeState> stack_;
- std::function<void(Node*, Reduction*)> reduce_;
-};
-
-// A variable is an abstract storage location, which is lowered to SSA values
-// and phi nodes by {VariableTracker}.
-class Variable {
- public:
- Variable() : id_(kInvalid) {}
- bool operator==(Variable other) const { return id_ == other.id_; }
- bool operator!=(Variable other) const { return id_ != other.id_; }
- bool operator<(Variable other) const { return id_ < other.id_; }
- static Variable Invalid() { return Variable(kInvalid); }
- friend V8_INLINE size_t hash_value(Variable v) {
- return base::hash_value(v.id_);
- }
- friend std::ostream& operator<<(std::ostream& os, Variable var) {
- return os << var.id_;
- }
-
- private:
- typedef int Id;
- explicit Variable(Id id) : id_(id) {}
- Id id_;
- static const Id kInvalid = -1;
-
- friend class VariableTracker;
-};
-
-// An object that can track the nodes in the graph whose current reduction
-// depends on the value of the object.
-class Dependable : public ZoneObject {
- public:
- explicit Dependable(Zone* zone) : dependants_(zone) {}
- void AddDependency(Node* node) { dependants_.push_back(node); }
- void RevisitDependants(EffectGraphReducer* reducer) {
- for (Node* node : dependants_) {
- reducer->Revisit(node);
- }
- dependants_.clear();
- }
-
- private:
- ZoneVector<Node*> dependants_;
-};
-
-// A virtual object represents an allocation site and tracks the Variables
-// associated with its fields as well as its global escape status.
-class VirtualObject : public Dependable {
- public:
- typedef uint32_t Id;
- typedef ZoneVector<Variable>::const_iterator const_iterator;
- VirtualObject(VariableTracker* var_states, Id id, int size);
- Maybe<Variable> FieldAt(int offset) const {
- DCHECK(offset % kPointerSize == 0);
- CHECK(!HasEscaped());
- if (offset >= size()) {
- // This can only happen in unreachable code.
- return Nothing<Variable>();
- }
- return Just(fields_.at(offset / kPointerSize));
- }
- Id id() const { return id_; }
- int size() const { return static_cast<int>(kPointerSize * fields_.size()); }
- // Escaped might mean that the object escaped to untracked memory or that it
- // is used in an operation that requires materialization.
- void SetEscaped() { escaped_ = true; }
- bool HasEscaped() const { return escaped_; }
- const_iterator begin() const { return fields_.begin(); }
- const_iterator end() const { return fields_.end(); }
-
- private:
- bool escaped_ = false;
- Id id_;
- ZoneVector<Variable> fields_;
-};
-
-class EscapeAnalysisResult {
- public:
- explicit EscapeAnalysisResult(EscapeAnalysisTracker* tracker)
- : tracker_(tracker) {}
-
- const VirtualObject* GetVirtualObject(Node* node);
- Node* GetVirtualObjectField(const VirtualObject* vobject, int field,
- Node* effect);
- Node* GetReplacementOf(Node* node);
-
- private:
- EscapeAnalysisTracker* tracker_;
-};
-
-class V8_EXPORT_PRIVATE NewEscapeAnalysis final
- : public NON_EXPORTED_BASE(EffectGraphReducer) {
- public:
- NewEscapeAnalysis(JSGraph* jsgraph, Zone* zone);
-
- EscapeAnalysisResult analysis_result() {
- DCHECK(Complete());
- return EscapeAnalysisResult(tracker_);
- }
-
- private:
- void Reduce(Node* node, Reduction* reduction);
- JSGraph* jsgraph() { return jsgraph_; }
- EscapeAnalysisTracker* tracker_;
- JSGraph* jsgraph_;
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_NEW_ESCAPE_ANALYSIS_H_
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index 0eafc4a2a7..d84c27e86d 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -12,7 +12,6 @@
#include "src/compiler/simplified-operator.h"
#include "src/compiler/verifier.h"
#include "src/handles-inl.h"
-#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -457,6 +456,20 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
}
// static
+bool NodeProperties::NoObservableSideEffectBetween(Node* effect,
+ Node* dominator) {
+ while (effect != dominator) {
+ if (effect->op()->EffectInputCount() == 1 &&
+ effect->op()->properties() & Operator::kNoWrite) {
+ effect = NodeProperties::GetEffectInput(effect);
+ } else {
+ return false;
+ }
+ }
+ return true;
+}
+
+// static
Node* NodeProperties::GetOuterContext(Node* node, size_t* depth) {
Node* context = NodeProperties::GetContextInput(node);
while (*depth > 0 &&
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index a01a229c64..6bc1fe7078 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -150,6 +150,11 @@ class V8_EXPORT_PRIVATE NodeProperties final {
static InferReceiverMapsResult InferReceiverMaps(
Node* receiver, Node* effect, ZoneHandleSet<Map>* maps_return);
+  // Walks up the {effect} chain to check that there's no observable
+  // side-effect between the {effect} and its {dominator}. Aborts the walk if
+  // there's a join in the effect chain.
+ static bool NoObservableSideEffectBetween(Node* effect, Node* dominator);
+
// ---------------------------------------------------------------------------
// Context.
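The comment above describes a linear walk up the effect chain. A minimal standalone sketch of that logic, assuming a hypothetical EffectNode type in place of V8's Node/Operator classes:

    #include <cstdio>

    // Hypothetical node type; the real code walks V8's Node/Operator graph.
    struct EffectNode {
      EffectNode* effect_input;  // single effect predecessor, or nullptr
      int effect_input_count;    // 1 on a plain chain, >1 at a join (EffectPhi)
      bool no_write;             // operator has the Operator::kNoWrite property
    };

    // True iff every node strictly between effect and dominator is a
    // single-effect-input, non-writing node; gives up at joins or writes.
    bool NoObservableSideEffectBetween(EffectNode* effect,
                                       EffectNode* dominator) {
      while (effect != dominator) {
        if (effect->effect_input_count == 1 && effect->no_write) {
          effect = effect->effect_input;
        } else {
          return false;
        }
      }
      return true;
    }

    int main() {
      EffectNode dom{nullptr, 1, true};
      EffectNode load{&dom, 1, true};     // reads are kNoWrite
      EffectNode store{&load, 1, false};  // a store is observable
      printf("%d\n", NoObservableSideEffectBetween(&load, &dom));   // 1
      printf("%d\n", NoObservableSideEffectBetween(&store, &dom));  // 0
    }
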
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
index 16dc2dbab2..ededcc4806 100644
--- a/deps/v8/src/compiler/node.cc
+++ b/deps/v8/src/compiler/node.cc
@@ -343,7 +343,7 @@ Node::Node(NodeId id, const Operator* op, int inline_count, int inline_capacity)
InlineCapacityField::encode(inline_capacity)),
first_use_(nullptr) {
// Inputs must either be out of line or within the inline capacity.
- DCHECK(inline_capacity <= kMaxInlineCapacity);
+ DCHECK_GE(kMaxInlineCapacity, inline_capacity);
DCHECK(inline_count == kOutlineMarker || inline_count <= inline_capacity);
}
@@ -383,29 +383,29 @@ void Node::Verify() {
if (count > 200 && count % 100) return;
for (int i = 0; i < count; i++) {
- CHECK_EQ(i, this->GetUsePtr(i)->input_index());
- CHECK_EQ(this->GetInputPtr(i), this->GetUsePtr(i)->input_ptr());
- CHECK_EQ(count, this->InputCount());
+ DCHECK_EQ(i, this->GetUsePtr(i)->input_index());
+ DCHECK_EQ(this->GetInputPtr(i), this->GetUsePtr(i)->input_ptr());
+ DCHECK_EQ(count, this->InputCount());
}
{ // Direct input iteration.
int index = 0;
for (Node* input : this->inputs()) {
- CHECK_EQ(this->InputAt(index), input);
+ DCHECK_EQ(this->InputAt(index), input);
index++;
}
- CHECK_EQ(count, index);
- CHECK_EQ(this->InputCount(), index);
+ DCHECK_EQ(count, index);
+ DCHECK_EQ(this->InputCount(), index);
}
{ // Input edge iteration.
int index = 0;
for (Edge edge : this->input_edges()) {
- CHECK_EQ(edge.from(), this);
- CHECK_EQ(index, edge.index());
- CHECK_EQ(this->InputAt(index), edge.to());
+ DCHECK_EQ(edge.from(), this);
+ DCHECK_EQ(index, edge.index());
+ DCHECK_EQ(this->InputAt(index), edge.to());
index++;
}
- CHECK_EQ(count, index);
- CHECK_EQ(this->InputCount(), index);
+ DCHECK_EQ(count, index);
+ DCHECK_EQ(this->InputCount(), index);
}
}
#endif
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index f532a37716..8a4685114b 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -52,7 +52,7 @@ class V8_EXPORT_PRIVATE Node final {
const Operator* op() const { return op_; }
IrOpcode::Value opcode() const {
- DCHECK(op_->opcode() <= IrOpcode::kLast);
+ DCHECK_GE(IrOpcode::kLast, op_->opcode());
return static_cast<IrOpcode::Value>(op_->opcode());
}
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index faac7efe20..97f91b8cac 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -175,6 +175,7 @@
V(JSCallWithSpread) \
V(JSCallRuntime) \
V(JSConvertReceiver) \
+ V(JSForInEnumerate) \
V(JSForInNext) \
V(JSForInPrepare) \
V(JSLoadMessage) \
@@ -314,62 +315,69 @@
#define SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(V) V(SpeculativeToNumber)
-#define SIMPLIFIED_OTHER_OP_LIST(V) \
- V(PlainPrimitiveToNumber) \
- V(PlainPrimitiveToWord32) \
- V(PlainPrimitiveToFloat64) \
- V(BooleanNot) \
- V(StringCharAt) \
- V(StringCharCodeAt) \
- V(SeqStringCharCodeAt) \
- V(StringFromCharCode) \
- V(StringFromCodePoint) \
- V(StringIndexOf) \
- V(StringToLowerCaseIntl) \
- V(StringToUpperCaseIntl) \
- V(CheckBounds) \
- V(CheckIf) \
- V(CheckMaps) \
- V(CheckMapValue) \
- V(CheckNumber) \
- V(CheckInternalizedString) \
- V(CheckReceiver) \
- V(CheckString) \
- V(CheckSeqString) \
- V(CheckSymbol) \
- V(CheckSmi) \
- V(CheckHeapObject) \
- V(CheckFloat64Hole) \
- V(CheckNotTaggedHole) \
- V(CompareMaps) \
- V(ConvertTaggedHoleToUndefined) \
- V(Allocate) \
- V(LoadField) \
- V(LoadElement) \
- V(LoadTypedElement) \
- V(StoreField) \
- V(StoreElement) \
- V(StoreTypedElement) \
- V(TransitionAndStoreElement) \
- V(ObjectIsCallable) \
- V(ObjectIsDetectableCallable) \
- V(ObjectIsNaN) \
- V(ObjectIsNonCallable) \
- V(ObjectIsNumber) \
- V(ObjectIsReceiver) \
- V(ObjectIsSmi) \
- V(ObjectIsString) \
- V(ObjectIsSymbol) \
- V(ObjectIsUndetectable) \
- V(ArgumentsFrame) \
- V(ArgumentsLength) \
- V(NewUnmappedArgumentsElements) \
- V(ArrayBufferWasNeutered) \
- V(EnsureWritableFastElements) \
- V(MaybeGrowFastElements) \
- V(TransitionElementsKind) \
- V(LookupHashStorageIndex) \
- V(LoadHashMapValue)
+#define SIMPLIFIED_OTHER_OP_LIST(V) \
+ V(PlainPrimitiveToNumber) \
+ V(PlainPrimitiveToWord32) \
+ V(PlainPrimitiveToFloat64) \
+ V(BooleanNot) \
+ V(StringCharAt) \
+ V(StringCharCodeAt) \
+ V(SeqStringCharCodeAt) \
+ V(StringFromCharCode) \
+ V(StringFromCodePoint) \
+ V(StringIndexOf) \
+ V(StringToLowerCaseIntl) \
+ V(StringToUpperCaseIntl) \
+ V(CheckBounds) \
+ V(CheckIf) \
+ V(CheckMaps) \
+ V(CheckNumber) \
+ V(CheckInternalizedString) \
+ V(CheckReceiver) \
+ V(CheckString) \
+ V(CheckSeqString) \
+ V(CheckSymbol) \
+ V(CheckSmi) \
+ V(CheckHeapObject) \
+ V(CheckFloat64Hole) \
+ V(CheckNotTaggedHole) \
+ V(CompareMaps) \
+ V(ConvertTaggedHoleToUndefined) \
+ V(Allocate) \
+ V(LoadFieldByIndex) \
+ V(LoadField) \
+ V(LoadElement) \
+ V(LoadTypedElement) \
+ V(StoreField) \
+ V(StoreElement) \
+ V(StoreTypedElement) \
+ V(StoreSignedSmallElement) \
+ V(TransitionAndStoreElement) \
+ V(ObjectIsArrayBufferView) \
+ V(ObjectIsCallable) \
+ V(ObjectIsConstructor) \
+ V(ObjectIsDetectableCallable) \
+ V(ObjectIsMinusZero) \
+ V(ObjectIsNaN) \
+ V(ObjectIsNonCallable) \
+ V(ObjectIsNumber) \
+ V(ObjectIsReceiver) \
+ V(ObjectIsSmi) \
+ V(ObjectIsString) \
+ V(ObjectIsSymbol) \
+ V(ObjectIsUndetectable) \
+ V(ArgumentsFrame) \
+ V(ArgumentsLength) \
+ V(NewDoubleElements) \
+ V(NewSmiOrObjectElements) \
+ V(NewArgumentsElements) \
+ V(ArrayBufferWasNeutered) \
+ V(EnsureWritableFastElements) \
+ V(MaybeGrowFastElements) \
+ V(TransitionElementsKind) \
+ V(FindOrderedHashMapEntry) \
+ V(FindOrderedHashMapEntryForInt32Key) \
+ V(RuntimeAbort)
#define SIMPLIFIED_OP_LIST(V) \
SIMPLIFIED_CHANGE_OP_LIST(V) \
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
index 3a25855789..0cc98a0ef1 100644
--- a/deps/v8/src/compiler/operation-typer.cc
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -133,7 +133,7 @@ namespace {
// There must be at least one non-NaN element.
// Any -0 is converted to 0.
double array_min(double a[], size_t n) {
- DCHECK(n != 0);
+ DCHECK_NE(0, n);
double x = +V8_INFINITY;
for (size_t i = 0; i < n; ++i) {
if (!std::isnan(a[i])) {
@@ -148,7 +148,7 @@ double array_min(double a[], size_t n) {
// There must be at least one non-NaN element.
// Any -0 is converted to 0.
double array_max(double a[], size_t n) {
- DCHECK(n != 0);
+ DCHECK_NE(0, n);
double x = -V8_INFINITY;
for (size_t i = 0; i < n; ++i) {
if (!std::isnan(a[i])) {
@@ -599,6 +599,26 @@ Type* OperationTyper::NumberSubtract(Type* lhs, Type* rhs) {
return type;
}
+Type* OperationTyper::SpeculativeSafeIntegerAdd(Type* lhs, Type* rhs) {
+ Type* result = SpeculativeNumberAdd(lhs, rhs);
+  // If we have Smi or Int32 feedback, representation selection will either
+  // truncate or check the inputs (i.e., deopt if they are not int32). In
+  // either case the result will be in the safe integer range, so we can
+  // bake in the type here. This needs to be in sync with
+  // SimplifiedLowering::VisitSpeculativeAdditiveOp.
+ return Type::Intersect(result, cache_.kSafeInteger, zone());
+}
+
+Type* OperationTyper::SpeculativeSafeIntegerSubtract(Type* lhs, Type* rhs) {
+ Type* result = SpeculativeNumberSubtract(lhs, rhs);
+  // If we have Smi or Int32 feedback, representation selection will either
+  // truncate or check the inputs (i.e., deopt if they are not int32). In
+  // either case the result will be in the safe integer range, so we can
+  // bake in the type here. This needs to be in sync with
+  // SimplifiedLowering::VisitSpeculativeAdditiveOp.
+  return Type::Intersect(result, cache_.kSafeInteger, zone());
+}
+
Type* OperationTyper::NumberMultiply(Type* lhs, Type* rhs) {
DCHECK(lhs->Is(Type::Number()));
DCHECK(rhs->Is(Type::Number()));
@@ -987,18 +1007,6 @@ SPECULATIVE_NUMBER_BINOP(NumberShiftRight)
SPECULATIVE_NUMBER_BINOP(NumberShiftRightLogical)
#undef SPECULATIVE_NUMBER_BINOP
-Type* OperationTyper::SpeculativeSafeIntegerAdd(Type* lhs, Type* rhs) {
- lhs = SpeculativeToNumber(lhs);
- rhs = SpeculativeToNumber(rhs);
- return NumberAdd(lhs, rhs);
-}
-
-Type* OperationTyper::SpeculativeSafeIntegerSubtract(Type* lhs, Type* rhs) {
- lhs = SpeculativeToNumber(lhs);
- rhs = SpeculativeToNumber(rhs);
- return NumberSubtract(lhs, rhs);
-}
-
Type* OperationTyper::SpeculativeToNumber(Type* type) {
return ToNumber(Type::Intersect(type, Type::NumberOrOddball(), zone()));
}
@@ -1034,7 +1042,7 @@ Type* OperationTyper::FalsifyUndefined(ComparisonOutcome outcome) {
: singleton_false();
}
// Type should be non empty, so we know it should be true.
- DCHECK((outcome & kComparisonTrue) != 0);
+ DCHECK_NE(0, outcome & kComparisonTrue);
return singleton_true();
}
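The new SpeculativeSafeIntegerAdd/Subtract typings above intersect the speculative result with the safe-integer type. A standalone sketch of what that intersection does to a range type, with a hypothetical Range standing in for TurboFan's range types:

    #include <algorithm>
    #include <cstdio>

    // Plain numeric ranges standing in for TurboFan types (hypothetical).
    struct Range { double min, max; };

    constexpr double kMaxSafeInteger = 9007199254740991.0;  // 2^53 - 1

    // Mirrors the effect of Type::Intersect(result, cache_.kSafeInteger, ...)
    // on a range: clamp both bounds into the safe-integer interval.
    Range IntersectWithSafeInteger(Range r) {
      return {std::max(r.min, -kMaxSafeInteger),
              std::min(r.max, kMaxSafeInteger)};
    }

    int main() {
      Range add_result = {0.0, 1e300};  // e.g. an unbounded speculative add
      Range clamped = IntersectWithSafeInteger(add_result);
      printf("[%g, %g]\n", clamped.min, clamped.max);  // [0, 9.0072e+15]
    }
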
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index 6ff7c2582f..5f45a79bcc 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -107,8 +107,8 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSCallWithSpread:
// Misc operations
+ case IrOpcode::kJSForInEnumerate:
case IrOpcode::kJSForInNext:
- case IrOpcode::kJSForInPrepare:
case IrOpcode::kJSStackCheck:
case IrOpcode::kJSDebugger:
case IrOpcode::kJSGetSuperConstructor:
diff --git a/deps/v8/src/compiler/osr.cc b/deps/v8/src/compiler/osr.cc
index 980142c591..a5b840cb57 100644
--- a/deps/v8/src/compiler/osr.cc
+++ b/deps/v8/src/compiler/osr.cc
@@ -18,7 +18,8 @@ OsrHelper::OsrHelper(CompilationInfo* info)
: parameter_count_(
info->shared_info()->bytecode_array()->parameter_count()),
stack_slot_count_(
- info->shared_info()->bytecode_array()->register_count() +
+ InterpreterFrameConstants::RegisterStackSlotCount(
+ info->shared_info()->bytecode_array()->register_count()) +
InterpreterFrameConstants::kExtraSlotCount) {}
void OsrHelper::SetupFrame(Frame* frame) {
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 4b91e9fc4a..9ad3763403 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -8,6 +8,7 @@
#include <memory>
#include <sstream>
+#include "src/assembler-inl.h"
#include "src/base/adapters.h"
#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
@@ -49,8 +50,6 @@
#include "src/compiler/machine-operator-reducer.h"
#include "src/compiler/memory-optimizer.h"
#include "src/compiler/move-optimizer.h"
-#include "src/compiler/new-escape-analysis-reducer.h"
-#include "src/compiler/new-escape-analysis.h"
#include "src/compiler/osr.h"
#include "src/compiler/pipeline-statistics.h"
#include "src/compiler/redundancy-elimination.h"
@@ -74,7 +73,6 @@
#include "src/register-configuration.h"
#include "src/trap-handler/trap-handler.h"
#include "src/utils.h"
-#include "src/wasm/wasm-module.h"
namespace v8 {
namespace internal {
@@ -295,7 +293,7 @@ class PipelineData {
}
void InitializeInstructionSequence(const CallDescriptor* descriptor) {
- DCHECK(sequence_ == nullptr);
+ DCHECK_NULL(sequence_);
InstructionBlocks* instruction_blocks =
InstructionSequence::InstructionBlocksFor(instruction_zone(),
schedule());
@@ -310,7 +308,7 @@ class PipelineData {
}
void InitializeFrameData(CallDescriptor* descriptor) {
- DCHECK(frame_ == nullptr);
+ DCHECK_NULL(frame_);
int fixed_frame_size = 0;
if (descriptor != nullptr) {
fixed_frame_size = descriptor->CalculateFixedFrameSize();
@@ -320,7 +318,7 @@ class PipelineData {
void InitializeRegisterAllocationData(const RegisterConfiguration* config,
CallDescriptor* descriptor) {
- DCHECK(register_allocation_data_ == nullptr);
+ DCHECK_NULL(register_allocation_data_);
register_allocation_data_ = new (register_allocation_zone())
RegisterAllocationData(config, register_allocation_zone(), frame(),
sequence(), debug_name());
@@ -551,7 +549,8 @@ class PipelineRunScope {
ZoneStats::Scope zone_scope_;
};
-PipelineStatistics* CreatePipelineStatistics(CompilationInfo* info,
+PipelineStatistics* CreatePipelineStatistics(Handle<Script> script,
+ CompilationInfo* info,
ZoneStats* zone_stats) {
PipelineStatistics* pipeline_statistics = nullptr;
@@ -567,7 +566,6 @@ PipelineStatistics* CreatePipelineStatistics(CompilationInfo* info,
json_of << "{\"function\":\"" << function_name.get()
<< "\", \"sourcePosition\":" << pos << ", \"source\":\"";
Isolate* isolate = info->isolate();
- Handle<Script> script = info->script();
if (!script.is_null() && !script->source()->IsUndefined(isolate)) {
DisallowHeapAllocation no_allocation;
int start = info->shared_info()->start_position();
@@ -597,9 +595,9 @@ class PipelineCompilationJob final : public CompilationJob {
parse_info_(parse_info),
zone_stats_(function->GetIsolate()->allocator()),
compilation_info_(parse_info_.get()->zone(), function->GetIsolate(),
- parse_info_->script(), shared_info, function),
- pipeline_statistics_(
- CreatePipelineStatistics(compilation_info(), &zone_stats_)),
+ shared_info, function),
+ pipeline_statistics_(CreatePipelineStatistics(
+ parse_info_->script(), compilation_info(), &zone_stats_)),
data_(&zone_stats_, compilation_info(), pipeline_statistics_.get()),
pipeline_(&data_),
linkage_(nullptr) {}
@@ -743,16 +741,17 @@ class PipelineWasmCompilationJob final : public CompilationJob {
CompilationInfo* info, JSGraph* jsgraph, CallDescriptor* descriptor,
SourcePositionTable* source_positions,
ZoneVector<trap_handler::ProtectedInstructionData>* protected_insts,
- wasm::ModuleOrigin wasm_origin)
+ bool asmjs_origin)
: CompilationJob(info->isolate(), nullptr, info, "TurboFan",
State::kReadyToExecute),
zone_stats_(info->isolate()->allocator()),
- pipeline_statistics_(CreatePipelineStatistics(info, &zone_stats_)),
+ pipeline_statistics_(CreatePipelineStatistics(Handle<Script>::null(),
+ info, &zone_stats_)),
data_(&zone_stats_, info, jsgraph, pipeline_statistics_.get(),
source_positions, protected_insts),
pipeline_(&data_),
linkage_(descriptor),
- wasm_origin_(wasm_origin) {}
+ asmjs_origin_(asmjs_origin) {}
protected:
Status PrepareJobImpl() final;
@@ -773,7 +772,7 @@ class PipelineWasmCompilationJob final : public CompilationJob {
PipelineData data_;
PipelineImpl pipeline_;
Linkage linkage_;
- wasm::ModuleOrigin wasm_origin_;
+ bool asmjs_origin_;
};
PipelineWasmCompilationJob::Status
@@ -791,15 +790,14 @@ PipelineWasmCompilationJob::ExecuteJobImpl() {
}
pipeline_.RunPrintAndVerify("Machine", true);
- if (FLAG_wasm_opt || wasm_origin_ == wasm::ModuleOrigin::kAsmJsOrigin) {
+ if (FLAG_wasm_opt || asmjs_origin_) {
PipelineData* data = &data_;
PipelineRunScope scope(data, "Wasm optimization");
JSGraphReducer graph_reducer(data->jsgraph(), scope.zone());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common());
ValueNumberingReducer value_numbering(scope.zone(), data->graph()->zone());
- MachineOperatorReducer machine_reducer(
- data->jsgraph(), wasm_origin_ == wasm::ModuleOrigin::kAsmJsOrigin);
+ MachineOperatorReducer machine_reducer(data->jsgraph(), asmjs_origin_);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
AddReducer(data, &graph_reducer, &dead_code_elimination);
@@ -894,7 +892,8 @@ struct GraphBuilderPhase {
temp_zone, data->info()->shared_info(),
handle(data->info()->closure()->feedback_vector()),
data->info()->osr_offset(), data->jsgraph(), CallFrequency(1.0f),
- data->source_positions(), SourcePosition::kNotInlined, flags);
+ data->source_positions(), data->native_context(),
+ SourcePosition::kNotInlined, flags);
graph_builder.CreateGraph();
}
};
@@ -1028,11 +1027,9 @@ struct TypedLoweringPhase {
JSBuiltinReducer builtin_reducer(
&graph_reducer, data->jsgraph(),
data->info()->dependencies(), data->native_context());
- Handle<FeedbackVector> feedback_vector(
- data->info()->closure()->feedback_vector());
JSCreateLowering create_lowering(
&graph_reducer, data->info()->dependencies(), data->jsgraph(),
- feedback_vector, data->native_context(), temp_zone);
+ data->native_context(), temp_zone);
JSTypedLowering typed_lowering(&graph_reducer, data->jsgraph(), temp_zone);
TypedOptimization typed_optimization(
&graph_reducer, data->info()->dependencies(), data->jsgraph());
@@ -1057,32 +1054,16 @@ struct EscapeAnalysisPhase {
static const char* phase_name() { return "escape analysis"; }
void Run(PipelineData* data, Zone* temp_zone) {
- if (FLAG_turbo_new_escape) {
- NewEscapeAnalysis escape_analysis(data->jsgraph(), temp_zone);
- escape_analysis.ReduceGraph();
- JSGraphReducer reducer(data->jsgraph(), temp_zone);
- NewEscapeAnalysisReducer escape_reducer(&reducer, data->jsgraph(),
- escape_analysis.analysis_result(),
- temp_zone);
- AddReducer(data, &reducer, &escape_reducer);
- reducer.ReduceGraph();
- // TODO(tebbi): Turn this into a debug mode check once we have confidence.
- escape_reducer.VerifyReplacement();
- } else {
- EscapeAnalysis escape_analysis(data->graph(), data->jsgraph()->common(),
- temp_zone);
- if (!escape_analysis.Run()) return;
- JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
- EscapeAnalysisReducer escape_reducer(&graph_reducer, data->jsgraph(),
- &escape_analysis, temp_zone);
- AddReducer(data, &graph_reducer, &escape_reducer);
- graph_reducer.ReduceGraph();
- if (escape_reducer.compilation_failed()) {
- data->set_compilation_failed();
- return;
- }
- escape_reducer.VerifyReplacement();
- }
+ EscapeAnalysis escape_analysis(data->jsgraph(), temp_zone);
+ escape_analysis.ReduceGraph();
+ JSGraphReducer reducer(data->jsgraph(), temp_zone);
+ EscapeAnalysisReducer escape_reducer(&reducer, data->jsgraph(),
+ escape_analysis.analysis_result(),
+ temp_zone);
+ AddReducer(data, &reducer, &escape_reducer);
+ reducer.ReduceGraph();
+ // TODO(tebbi): Turn this into a debug mode check once we have confidence.
+ escape_reducer.VerifyReplacement();
}
};
@@ -1127,7 +1108,6 @@ struct ConcurrentOptimizationPrepPhase {
// Make sure we cache these code stubs.
data->jsgraph()->CEntryStubConstant(1);
data->jsgraph()->CEntryStubConstant(2);
- data->jsgraph()->CEntryStubConstant(3);
// TODO(turbofan): Remove this line once the Array constructor code
// is a proper builtin and no longer a CodeStub.
@@ -1573,7 +1553,20 @@ struct PrintGraphPhase {
<< AsJSON(*graph, data->source_positions()) << "},\n";
}
- if (FLAG_trace_turbo_graph) { // Simple textual RPO.
+ if (FLAG_trace_turbo_scheduled) { // Scheduled textual output.
+ AccountingAllocator allocator;
+ Schedule* schedule = data->schedule();
+ if (schedule == nullptr) {
+ schedule = Scheduler::ComputeSchedule(temp_zone, data->graph(),
+ Scheduler::kNoFlags);
+ }
+
+ AllowHandleDereference allow_deref;
+ CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ os << "-- Graph after " << phase << " -- " << std::endl;
+ os << AsScheduledGraph(schedule);
+ } else if (FLAG_trace_turbo_graph) { // Simple textual RPO.
AllowHandleDereference allow_deref;
CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
OFStream os(tracing_scope.file());
@@ -1759,10 +1752,10 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
CallDescriptor* call_descriptor,
Graph* graph, Schedule* schedule,
- Code::Flags flags,
+ Code::Kind kind,
const char* debug_name,
JumpOptimizationInfo* jump_opt) {
- CompilationInfo info(CStrVector(debug_name), isolate, graph->zone(), flags);
+ CompilationInfo info(CStrVector(debug_name), isolate, graph->zone(), kind);
if (isolate->serializer_enabled()) info.MarkAsSerializing();
// Construct a pipeline for scheduling and code generation.
@@ -1801,7 +1794,7 @@ Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info) {
ZoneStats zone_stats(info->isolate()->allocator());
std::unique_ptr<PipelineStatistics> pipeline_statistics(
- CreatePipelineStatistics(info, &zone_stats));
+ CreatePipelineStatistics(Handle<Script>::null(), info, &zone_stats));
PipelineData data(&zone_stats, info, pipeline_statistics.get());
PipelineImpl pipeline(&data);
@@ -1871,17 +1864,17 @@ CompilationJob* Pipeline::NewWasmCompilationJob(
CompilationInfo* info, JSGraph* jsgraph, CallDescriptor* descriptor,
SourcePositionTable* source_positions,
ZoneVector<trap_handler::ProtectedInstructionData>* protected_instructions,
- wasm::ModuleOrigin wasm_origin) {
+ wasm::ModuleOrigin asmjs_origin) {
return new PipelineWasmCompilationJob(info, jsgraph, descriptor,
source_positions,
- protected_instructions, wasm_origin);
+ protected_instructions, asmjs_origin);
}
bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
InstructionSequence* sequence,
bool run_verifier) {
CompilationInfo info(ArrayVector("testing"), sequence->isolate(),
- sequence->zone(), Code::ComputeFlags(Code::STUB));
+ sequence->zone(), Code::STUB);
ZoneStats zone_stats(sequence->isolate()->allocator());
PipelineData data(&zone_stats, &info, sequence);
PipelineImpl pipeline(&data);
@@ -1975,7 +1968,7 @@ bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage,
// Allocate registers.
if (call_descriptor->HasRestrictedAllocatableRegisters()) {
auto registers = call_descriptor->AllocatableRegisters();
- DCHECK(NumRegs(registers) > 0);
+ DCHECK_LT(0, NumRegs(registers));
std::unique_ptr<const RegisterConfiguration> config;
config.reset(RegisterConfiguration::RestrictGeneralRegisters(registers));
AllocateRegisters(config.get(), call_descriptor, run_verifier);
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index d6fd8865c5..7b7a8b5336 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -55,7 +55,7 @@ class Pipeline : public AllStatic {
static Handle<Code> GenerateCodeForCodeStub(Isolate* isolate,
CallDescriptor* call_descriptor,
Graph* graph, Schedule* schedule,
- Code::Flags flags,
+ Code::Kind kind,
const char* debug_name,
JumpOptimizationInfo* jump_opt);
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index a1fa7d94b6..4e96e19ae5 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -4,6 +4,8 @@
#include "src/compiler/code-generator.h"
+#include "src/assembler-inl.h"
+#include "src/callable.h"
#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
@@ -188,6 +190,29 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
must_save_lr_(!gen->frame_access_state()->has_frame()),
zone_(gen->zone()) {}
+ void SaveRegisters(RegList registers) {
+ DCHECK_LT(0, NumRegs(registers));
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
+ }
+
+ __ MultiPush(regs);
+ }
+
+ void RestoreRegisters(RegList registers) {
+ DCHECK_LT(0, NumRegs(registers));
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
+ }
+ __ MultiPop(regs);
+ }
+
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
@@ -195,6 +220,12 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ CheckPageFlag(value_, scratch0_,
MemoryChunk::kPointersToHereAreInterestingMask, eq,
exit());
+ if (offset_ == no_reg) {
+ __ addi(scratch1_, object_, Operand(offset_immediate_));
+ } else {
+ DCHECK_EQ(0, offset_immediate_);
+ __ add(scratch1_, object_, offset_);
+ }
RememberedSetAction const remembered_set_action =
mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
: OMIT_REMEMBERED_SET;
@@ -202,15 +233,13 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
- __ mflr(scratch1_);
- __ Push(scratch1_);
- }
- if (offset_.is(no_reg)) {
- __ addi(scratch1_, object_, Operand(offset_immediate_));
- } else {
- DCHECK_EQ(0, offset_immediate_);
- __ add(scratch1_, object_, offset_);
+ __ mflr(scratch0_);
+ __ Push(scratch0_);
}
+#ifdef V8_CSA_WRITE_BARRIER
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode);
+#else
if (must_save_lr_ && FLAG_enable_embedded_constant_pool) {
ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
__ CallStubDelayed(
@@ -221,17 +250,18 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode));
}
+#endif
if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
- __ Pop(scratch1_);
- __ mtlr(scratch1_);
+ __ Pop(scratch0_);
+ __ mtlr(scratch0_);
}
}
private:
Register const object_;
Register const offset_;
- int32_t const offset_immediate_; // Valid if offset_.is(no_reg).
+ int32_t const offset_immediate_; // Valid if offset_ == no_reg.
Register const value_;
Register const scratch0_;
Register const scratch1_;
@@ -470,90 +500,89 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
-#define ASSEMBLE_FLOAT_MAX() \
- do { \
- DoubleRegister left_reg = i.InputDoubleRegister(0); \
- DoubleRegister right_reg = i.InputDoubleRegister(1); \
- DoubleRegister result_reg = i.OutputDoubleRegister(); \
- Label check_nan_left, check_zero, return_left, return_right, done; \
- __ fcmpu(left_reg, right_reg); \
- __ bunordered(&check_nan_left); \
- __ beq(&check_zero); \
- __ bge(&return_left); \
- __ b(&return_right); \
- \
- __ bind(&check_zero); \
- __ fcmpu(left_reg, kDoubleRegZero); \
- /* left == right != 0. */ \
- __ bne(&return_left); \
- /* At this point, both left and right are either 0 or -0. */ \
- __ fadd(result_reg, left_reg, right_reg); \
- __ b(&done); \
- \
- __ bind(&check_nan_left); \
- __ fcmpu(left_reg, left_reg); \
- /* left == NaN. */ \
- __ bunordered(&return_left); \
- __ bind(&return_right); \
- if (!right_reg.is(result_reg)) { \
- __ fmr(result_reg, right_reg); \
- } \
- __ b(&done); \
- \
- __ bind(&return_left); \
- if (!left_reg.is(result_reg)) { \
- __ fmr(result_reg, left_reg); \
- } \
- __ bind(&done); \
- } while (0) \
-
-
-#define ASSEMBLE_FLOAT_MIN() \
- do { \
- DoubleRegister left_reg = i.InputDoubleRegister(0); \
- DoubleRegister right_reg = i.InputDoubleRegister(1); \
- DoubleRegister result_reg = i.OutputDoubleRegister(); \
- Label check_nan_left, check_zero, return_left, return_right, done; \
- __ fcmpu(left_reg, right_reg); \
- __ bunordered(&check_nan_left); \
- __ beq(&check_zero); \
- __ ble(&return_left); \
- __ b(&return_right); \
- \
- __ bind(&check_zero); \
- __ fcmpu(left_reg, kDoubleRegZero); \
- /* left == right != 0. */ \
- __ bne(&return_left); \
- /* At this point, both left and right are either 0 or -0. */ \
- /* Min: The algorithm is: -((-L) + (-R)), which in case of L and R being */\
- /* different registers is most efficiently expressed as -((-L) - R). */ \
- __ fneg(left_reg, left_reg); \
- if (left_reg.is(right_reg)) { \
- __ fadd(result_reg, left_reg, right_reg); \
- } else { \
- __ fsub(result_reg, left_reg, right_reg); \
- } \
- __ fneg(result_reg, result_reg); \
- __ b(&done); \
- \
- __ bind(&check_nan_left); \
- __ fcmpu(left_reg, left_reg); \
- /* left == NaN. */ \
- __ bunordered(&return_left); \
- \
- __ bind(&return_right); \
- if (!right_reg.is(result_reg)) { \
- __ fmr(result_reg, right_reg); \
- } \
- __ b(&done); \
- \
- __ bind(&return_left); \
- if (!left_reg.is(result_reg)) { \
- __ fmr(result_reg, left_reg); \
- } \
- __ bind(&done); \
+#define ASSEMBLE_FLOAT_MAX() \
+ do { \
+ DoubleRegister left_reg = i.InputDoubleRegister(0); \
+ DoubleRegister right_reg = i.InputDoubleRegister(1); \
+ DoubleRegister result_reg = i.OutputDoubleRegister(); \
+ Label check_nan_left, check_zero, return_left, return_right, done; \
+ __ fcmpu(left_reg, right_reg); \
+ __ bunordered(&check_nan_left); \
+ __ beq(&check_zero); \
+ __ bge(&return_left); \
+ __ b(&return_right); \
+ \
+ __ bind(&check_zero); \
+ __ fcmpu(left_reg, kDoubleRegZero); \
+ /* left == right != 0. */ \
+ __ bne(&return_left); \
+ /* At this point, both left and right are either 0 or -0. */ \
+ __ fadd(result_reg, left_reg, right_reg); \
+ __ b(&done); \
+ \
+ __ bind(&check_nan_left); \
+ __ fcmpu(left_reg, left_reg); \
+ /* left == NaN. */ \
+ __ bunordered(&return_left); \
+ __ bind(&return_right); \
+ if (right_reg != result_reg) { \
+ __ fmr(result_reg, right_reg); \
+ } \
+ __ b(&done); \
+ \
+ __ bind(&return_left); \
+ if (left_reg != result_reg) { \
+ __ fmr(result_reg, left_reg); \
+ } \
+ __ bind(&done); \
} while (0)
+#define ASSEMBLE_FLOAT_MIN() \
+ do { \
+ DoubleRegister left_reg = i.InputDoubleRegister(0); \
+ DoubleRegister right_reg = i.InputDoubleRegister(1); \
+ DoubleRegister result_reg = i.OutputDoubleRegister(); \
+ Label check_nan_left, check_zero, return_left, return_right, done; \
+ __ fcmpu(left_reg, right_reg); \
+ __ bunordered(&check_nan_left); \
+ __ beq(&check_zero); \
+ __ ble(&return_left); \
+ __ b(&return_right); \
+ \
+ __ bind(&check_zero); \
+ __ fcmpu(left_reg, kDoubleRegZero); \
+ /* left == right != 0. */ \
+ __ bne(&return_left); \
+ /* At this point, both left and right are either 0 or -0. */ \
+ /* Min: The algorithm is: -((-L) + (-R)), which in case of L and R */ \
+ /* being different registers is most efficiently expressed */ \
+ /* as -((-L) - R). */ \
+ __ fneg(left_reg, left_reg); \
+ if (left_reg == right_reg) { \
+ __ fadd(result_reg, left_reg, right_reg); \
+ } else { \
+ __ fsub(result_reg, left_reg, right_reg); \
+ } \
+ __ fneg(result_reg, result_reg); \
+ __ b(&done); \
+ \
+ __ bind(&check_nan_left); \
+ __ fcmpu(left_reg, left_reg); \
+ /* left == NaN. */ \
+ __ bunordered(&return_left); \
+ \
+ __ bind(&return_right); \
+ if (right_reg != result_reg) { \
+ __ fmr(result_reg, right_reg); \
+ } \
+ __ b(&done); \
+ \
+ __ bind(&return_left); \
+ if (left_reg != result_reg) { \
+ __ fmr(result_reg, left_reg); \
+ } \
+ __ bind(&done); \
+ } while (0)
#define ASSEMBLE_LOAD_FLOAT(asm_instr, asm_instrx) \
do { \
@@ -863,7 +892,7 @@ void FlushPendingPushRegisters(TurboAssembler* tasm,
break;
}
frame_access_state->IncreaseSPDelta(pending_pushes->size());
- pending_pushes->resize(0);
+ pending_pushes->clear();
}
void AdjustStackPointerForTailCall(
@@ -932,6 +961,27 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
first_unused_stack_slot);
}
+// Check if the code object is marked for deoptimization. If it is, jump to
+// the CompileLazyDeoptimizedCode builtin. In order to do this we need to:
+//    1. load the address of the current instruction;
+//    2. read from memory the word that contains the mark-for-deoptimization
+//       bit, which can be found in the first set of flags
+//       ({kKindSpecificFlags1Offset});
+//    3. test kMarkedForDeoptimizationBit in those flags; and
+//    4. if the bit is set, jump to the builtin.
+void CodeGenerator::BailoutIfDeoptimized() {
+ Label current;
+ __ mov_label_addr(r11, &current);
+ int pc_offset = __ pc_offset();
+ __ bind(&current);
+ int offset =
+ Code::kKindSpecificFlags1Offset - (Code::kHeaderSize + pc_offset);
+ __ LoadWordArith(r11, MemOperand(r11, offset));
+ __ TestBit(r11, Code::kMarkedForDeoptimizationBit);
+ Handle<Code> code = isolate()->builtins()->builtin_handle(
+ Builtins::kCompileLazyDeoptimizedCode);
+ __ Jump(code, RelocInfo::CODE_TARGET, ne, cr0);
+}
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
@@ -1011,13 +1061,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchSaveCallerRegisters: {
+ fp_mode_ =
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
+ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// kReturnRegister0 should have been saved before entering the stub.
- __ PushCallerSaved(kSaveFPRegs, kReturnRegister0);
+ int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
+ DCHECK_EQ(0, bytes % kPointerSize);
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ DCHECK(!caller_registers_saved_);
+ caller_registers_saved_ = true;
break;
}
case kArchRestoreCallerRegisters: {
+ DCHECK(fp_mode_ ==
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
+ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// Don't overwrite the returned value.
- __ PopCallerSaved(kSaveFPRegs, kReturnRegister0);
+ int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(-(bytes / kPointerSize));
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ DCHECK(caller_registers_saved_);
+ caller_registers_saved_ = false;
break;
}
case kArchPrepareTailCall:
@@ -1038,7 +1103,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ CallCFunction(func, num_parameters);
}
frame_access_state()->SetFrameAccessToDefault();
+      // Ideally, we should decrement the SP delta to match the change of the
+      // stack pointer in CallCFunction. However, certain architectures (e.g.
+      // ARM) may impose stricter alignment requirements, causing the old SP
+      // to be saved on the stack. In those cases, we cannot calculate the SP
+      // delta statically.
frame_access_state()->ClearSPDelta();
+ if (caller_registers_saved_) {
+        // Re-sync the SP delta introduced in kArchSaveCallerRegisters. Here
+        // we assume the following sequence:
+ // kArchSaveCallerRegisters;
+ // kArchCallCFunction;
+ // kArchRestoreCallerRegisters;
+ int bytes =
+ __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ }
break;
}
case kArchJmp:
@@ -1054,7 +1134,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kArchDebugAbort:
- DCHECK(i.InputRegister(0).is(r4));
+ DCHECK(i.InputRegister(0) == r4);
if (!frame_access_state()->has_frame()) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
@@ -1398,10 +1478,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif
case kPPC_Mul32WithHigh32:
- if (i.OutputRegister(0).is(i.InputRegister(0)) ||
- i.OutputRegister(0).is(i.InputRegister(1)) ||
- i.OutputRegister(1).is(i.InputRegister(0)) ||
- i.OutputRegister(1).is(i.InputRegister(1))) {
+ if (i.OutputRegister(0) == i.InputRegister(0) ||
+ i.OutputRegister(0) == i.InputRegister(1) ||
+ i.OutputRegister(1) == i.InputRegister(0) ||
+ i.OutputRegister(1) == i.InputRegister(1)) {
__ mullw(kScratchReg,
i.InputRegister(0), i.InputRegister(1)); // low
__ mulhw(i.OutputRegister(1),
@@ -1657,7 +1737,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ StoreDoubleU(i.InputDoubleRegister(0),
MemOperand(sp, -num_slots * kPointerSize), r0);
} else {
- DCHECK(op->representation() == MachineRepresentation::kFloat32);
+ DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
__ StoreSingleU(i.InputDoubleRegister(0),
MemOperand(sp, -num_slots * kPointerSize), r0);
}
@@ -1675,7 +1755,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ StoreDouble(i.InputDoubleRegister(0),
MemOperand(sp, slot * kPointerSize), r0);
} else {
- DCHECK(op->representation() == MachineRepresentation::kFloat32);
+ DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
__ StoreSingle(i.InputDoubleRegister(0),
MemOperand(sp, slot * kPointerSize), r0);
}
@@ -2030,6 +2110,10 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
+void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
+ BranchInfo* branch) {
+ AssembleArchBranch(instr, branch);
+}
void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
@@ -2073,6 +2157,9 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
+ CallDescriptor* descriptor = gen_->linkage()->GetIncomingDescriptor();
+ int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ __ Drop(pop_count);
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
@@ -2207,8 +2294,8 @@ void CodeGenerator::FinishFrame(Frame* frame) {
// Save callee-saved Double registers.
if (double_saves != 0) {
frame->AlignSavedCalleeRegisterSlots();
- DCHECK(kNumCalleeSavedDoubles ==
- base::bits::CountPopulation32(double_saves));
+ DCHECK_EQ(kNumCalleeSavedDoubles,
+ base::bits::CountPopulation32(double_saves));
frame->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
(kDoubleSize / kPointerSize));
}
@@ -2276,8 +2363,8 @@ void CodeGenerator::AssembleConstructFrame() {
// Save callee-saved Double registers.
if (double_saves != 0) {
__ MultiPushDoubles(double_saves);
- DCHECK(kNumCalleeSavedDoubles ==
- base::bits::CountPopulation32(double_saves));
+ DCHECK_EQ(kNumCalleeSavedDoubles,
+ base::bits::CountPopulation32(double_saves));
}
// Save callee-saved registers.
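The zero case of ASSEMBLE_FLOAT_MIN above relies on min(L, R) == -((-L) + (-R)) when both inputs are signed zeros, because IEEE addition of two zeros yields -0 only when both operands are -0. A small host-side check of all four pairings:

    #include <cmath>
    #include <cstdio>

    int main() {
      // All four signed-zero pairings for min via -((-L) + (-R)).
      double cases[4][2] = {{0.0, 0.0}, {0.0, -0.0}, {-0.0, 0.0}, {-0.0, -0.0}};
      for (auto& c : cases) {
        double m = -((-c[0]) + (-c[1]));  // fneg, fadd, fneg
        printf("min(%s0, %s0) -> %s0\n", std::signbit(c[0]) ? "-" : "+",
               std::signbit(c[1]) ? "-" : "+", std::signbit(m) ? "-" : "+");
      }
    }

Only the (+0, +0) pairing produces +0; every other pairing produces -0, which is exactly the IEEE minimum.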
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index 12f3eddcda..8f24d9205b 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -89,8 +89,9 @@ bool PropertyAccessBuilder::TryBuildNumberCheck(MapHandles const& maps,
return false;
}
-Node* PropertyAccessBuilder::BuildCheckHeapObject(Node* receiver, Node** effect,
- Node* control) {
+namespace {
+
+bool NeedsCheckHeapObject(Node* receiver) {
switch (receiver->opcode()) {
case IrOpcode::kHeapConstant:
case IrOpcode::kJSCreate:
@@ -99,22 +100,44 @@ Node* PropertyAccessBuilder::BuildCheckHeapObject(Node* receiver, Node** effect,
case IrOpcode::kJSCreateClosure:
case IrOpcode::kJSCreateIterResultObject:
case IrOpcode::kJSCreateLiteralArray:
+ case IrOpcode::kJSCreateEmptyLiteralArray:
case IrOpcode::kJSCreateLiteralObject:
+ case IrOpcode::kJSCreateEmptyLiteralObject:
case IrOpcode::kJSCreateLiteralRegExp:
+ case IrOpcode::kJSCreateGeneratorObject:
case IrOpcode::kJSConvertReceiver:
+ case IrOpcode::kJSConstructForwardVarargs:
+ case IrOpcode::kJSConstruct:
+ case IrOpcode::kJSConstructWithArrayLike:
+ case IrOpcode::kJSConstructWithSpread:
case IrOpcode::kJSToName:
case IrOpcode::kJSToString:
case IrOpcode::kJSToObject:
- case IrOpcode::kJSTypeOf: {
- return receiver;
- }
- default: {
- return *effect = graph()->NewNode(simplified()->CheckHeapObject(),
- receiver, *effect, control);
+ case IrOpcode::kJSTypeOf:
+ case IrOpcode::kJSGetSuperConstructor:
+ return false;
+ case IrOpcode::kPhi: {
+ Node* control = NodeProperties::GetControlInput(receiver);
+ if (control->opcode() != IrOpcode::kMerge) return true;
+ for (int i = 0; i < receiver->InputCount() - 1; ++i) {
+ if (NeedsCheckHeapObject(receiver->InputAt(i))) return true;
+ }
+ return false;
}
+ default:
+ return true;
}
- UNREACHABLE();
- return nullptr;
+}
+
+} // namespace
+
+Node* PropertyAccessBuilder::BuildCheckHeapObject(Node* receiver, Node** effect,
+ Node* control) {
+ if (NeedsCheckHeapObject(receiver)) {
+ receiver = *effect = graph()->NewNode(simplified()->CheckHeapObject(),
+ receiver, *effect, control);
+ }
+ return receiver;
}
void PropertyAccessBuilder::BuildCheckMaps(
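The new Phi case in NeedsCheckHeapObject above recurses over the phi's value inputs only; the last input is the control input, which is why the loop stops at InputCount() - 1. A standalone sketch with a hypothetical FakeNode type (the real code also requires the phi's control input to be a Merge):

    #include <vector>

    struct FakeNode {
      bool is_phi;
      bool definitely_heap_object;   // HeapConstant, JSCreate, JSToObject, ...
      std::vector<FakeNode*> inputs; // for a phi: value inputs, then control
    };

    // A phi needs no CheckHeapObject iff none of its value inputs does.
    bool NeedsCheck(const FakeNode* n) {
      if (n->is_phi) {
        for (size_t i = 0; i + 1 < n->inputs.size(); ++i) {  // skip control
          if (NeedsCheck(n->inputs[i])) return true;
        }
        return false;
      }
      return !n->definitely_heap_object;
    }

    int main() {
      FakeNode a{false, true, {}}, b{false, true, {}}, merge{false, false, {}};
      FakeNode phi{true, false, {&a, &b, &merge}};
      return NeedsCheck(&phi);  // 0: both value inputs are known heap objects
    }
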
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index 8a8be06fa5..b685fc5d66 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -7,7 +7,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/scheduler.h"
-#include "src/objects-inl.h"
+#include "src/factory-inl.h"
namespace v8 {
namespace internal {
@@ -35,6 +35,14 @@ RawMachineAssembler::RawMachineAssembler(
graph->SetEnd(graph->NewNode(common_.End(0)));
}
+Node* RawMachineAssembler::NullConstant() {
+ return HeapConstant(isolate()->factory()->null_value());
+}
+
+Node* RawMachineAssembler::UndefinedConstant() {
+ return HeapConstant(isolate()->factory()->undefined_value());
+}
+
Node* RawMachineAssembler::RelocatableIntPtrConstant(intptr_t value,
RelocInfo::Mode rmode) {
return kPointerSize == 8
@@ -231,14 +239,16 @@ Node* RawMachineAssembler::CallCFunction1(MachineType return_type,
}
Node* RawMachineAssembler::CallCFunction1WithCallerSavedRegisters(
- MachineType return_type, MachineType arg0_type, Node* function,
- Node* arg0) {
+ MachineType return_type, MachineType arg0_type, Node* function, Node* arg0,
+ SaveFPRegsMode mode) {
MachineSignature::Builder builder(zone(), 1, 1);
builder.AddReturn(return_type);
builder.AddParam(arg0_type);
- const CallDescriptor* descriptor =
+ CallDescriptor* descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
+ descriptor->set_save_fp_mode(mode);
+
return AddNode(common()->CallWithCallerSavedRegisters(descriptor), function,
arg0);
}
@@ -275,19 +285,57 @@ Node* RawMachineAssembler::CallCFunction3(MachineType return_type,
Node* RawMachineAssembler::CallCFunction3WithCallerSavedRegisters(
MachineType return_type, MachineType arg0_type, MachineType arg1_type,
- MachineType arg2_type, Node* function, Node* arg0, Node* arg1, Node* arg2) {
+ MachineType arg2_type, Node* function, Node* arg0, Node* arg1, Node* arg2,
+ SaveFPRegsMode mode) {
MachineSignature::Builder builder(zone(), 1, 3);
builder.AddReturn(return_type);
builder.AddParam(arg0_type);
builder.AddParam(arg1_type);
builder.AddParam(arg2_type);
- const CallDescriptor* descriptor =
+ CallDescriptor* descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
+ descriptor->set_save_fp_mode(mode);
+
return AddNode(common()->CallWithCallerSavedRegisters(descriptor), function,
arg0, arg1, arg2);
}
+Node* RawMachineAssembler::CallCFunction4(
+ MachineType return_type, MachineType arg0_type, MachineType arg1_type,
+ MachineType arg2_type, MachineType arg3_type, Node* function, Node* arg0,
+ Node* arg1, Node* arg2, Node* arg3) {
+ MachineSignature::Builder builder(zone(), 1, 4);
+ builder.AddReturn(return_type);
+ builder.AddParam(arg0_type);
+ builder.AddParam(arg1_type);
+ builder.AddParam(arg2_type);
+ builder.AddParam(arg3_type);
+ const CallDescriptor* descriptor =
+ Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
+
+ return AddNode(common()->Call(descriptor), function, arg0, arg1, arg2, arg3);
+}
+
+Node* RawMachineAssembler::CallCFunction5(
+ MachineType return_type, MachineType arg0_type, MachineType arg1_type,
+ MachineType arg2_type, MachineType arg3_type, MachineType arg4_type,
+ Node* function, Node* arg0, Node* arg1, Node* arg2, Node* arg3,
+ Node* arg4) {
+ MachineSignature::Builder builder(zone(), 1, 5);
+ builder.AddReturn(return_type);
+ builder.AddParam(arg0_type);
+ builder.AddParam(arg1_type);
+ builder.AddParam(arg2_type);
+ builder.AddParam(arg3_type);
+ builder.AddParam(arg4_type);
+ const CallDescriptor* descriptor =
+ Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
+
+ return AddNode(common()->Call(descriptor), function, arg0, arg1, arg2, arg3,
+ arg4);
+}
+
Node* RawMachineAssembler::CallCFunction6(
MachineType return_type, MachineType arg0_type, MachineType arg1_type,
MachineType arg2_type, MachineType arg3_type, MachineType arg4_type,
@@ -367,7 +415,7 @@ BasicBlock* RawMachineAssembler::EnsureBlock(RawMachineLabel* label) {
}
void RawMachineAssembler::Bind(RawMachineLabel* label) {
- DCHECK(current_block_ == nullptr);
+ DCHECK_NULL(current_block_);
DCHECK(!label->bound_);
label->bound_ = true;
current_block_ = EnsureBlock(label);
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index adbf659bc4..3ee91a1ff9 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -63,13 +63,8 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
// place them into the current basic block. They don't perform control flow,
// hence will not switch the current basic block.
- Node* NullConstant() {
- return HeapConstant(isolate()->factory()->null_value());
- }
-
- Node* UndefinedConstant() {
- return HeapConstant(isolate()->factory()->undefined_value());
- }
+ Node* NullConstant();
+ Node* UndefinedConstant();
// Constants.
Node* PointerConstant(void* value) {
@@ -766,9 +761,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* function, Node* arg0);
// Call to a C function with one argument, while saving/restoring caller
// registers.
- Node* CallCFunction1WithCallerSavedRegisters(MachineType return_type,
- MachineType arg0_type,
- Node* function, Node* arg0);
+ Node* CallCFunction1WithCallerSavedRegisters(
+ MachineType return_type, MachineType arg0_type, Node* function,
+ Node* arg0, SaveFPRegsMode mode = kSaveFPRegs);
// Call to a C function with two arguments.
Node* CallCFunction2(MachineType return_type, MachineType arg0_type,
MachineType arg1_type, Node* function, Node* arg0,
@@ -779,12 +774,21 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* function, Node* arg0, Node* arg1, Node* arg2);
// Call to a C function with three arguments, while saving/restoring caller
// registers.
- Node* CallCFunction3WithCallerSavedRegisters(MachineType return_type,
- MachineType arg0_type,
- MachineType arg1_type,
- MachineType arg2_type,
- Node* function, Node* arg0,
- Node* arg1, Node* arg2);
+ Node* CallCFunction3WithCallerSavedRegisters(
+ MachineType return_type, MachineType arg0_type, MachineType arg1_type,
+ MachineType arg2_type, Node* function, Node* arg0, Node* arg1, Node* arg2,
+ SaveFPRegsMode mode = kSaveFPRegs);
+ // Call to a C function with four arguments.
+ Node* CallCFunction4(MachineType return_type, MachineType arg0_type,
+ MachineType arg1_type, MachineType arg2_type,
+ MachineType arg3_type, Node* function, Node* arg0,
+ Node* arg1, Node* arg2, Node* arg3);
+ // Call to a C function with five arguments.
+ Node* CallCFunction5(MachineType return_type, MachineType arg0_type,
+ MachineType arg1_type, MachineType arg2_type,
+ MachineType arg3_type, MachineType arg4_type,
+ Node* function, Node* arg0, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4);
// Call to a C function with six arguments.
Node* CallCFunction6(MachineType return_type, MachineType arg0_type,
MachineType arg1_type, MachineType arg2_type,
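Note the defaulted SaveFPRegsMode mode = kSaveFPRegs on both WithCallerSavedRegisters declarations: existing call sites keep the old, conservative behaviour of spilling FP registers, while new call sites can opt out. A sketch, assuming a RawMachineAssembler m and nodes func/arg (illustrative names):

    // Unchanged call site: FP registers are still saved, as before this patch.
    m.CallCFunction1WithCallerSavedRegisters(MachineType::Int32(),
                                             MachineType::Pointer(), func, arg);
    // New-style call site: skip FP spills when no FP registers are live.
    m.CallCFunction1WithCallerSavedRegisters(MachineType::Int32(),
                                             MachineType::Pointer(), func, arg,
                                             kDontSaveFPRegs);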
diff --git a/deps/v8/src/compiler/redundancy-elimination.cc b/deps/v8/src/compiler/redundancy-elimination.cc
index 983bd31dcc..3a40e8d5bf 100644
--- a/deps/v8/src/compiler/redundancy-elimination.cc
+++ b/deps/v8/src/compiler/redundancy-elimination.cc
@@ -42,6 +42,8 @@ Reduction RedundancyElimination::Reduce(Node* node) {
return ReduceCheckNode(node);
case IrOpcode::kSpeculativeNumberAdd:
case IrOpcode::kSpeculativeNumberSubtract:
+ case IrOpcode::kSpeculativeSafeIntegerAdd:
+ case IrOpcode::kSpeculativeSafeIntegerSubtract:
// For increments and decrements by a constant, try to learn from the last
// bounds check.
return TryReuseBoundsCheckForFirstInput(node);
@@ -192,7 +194,9 @@ Reduction RedundancyElimination::ReduceCheckNode(Node* node) {
Reduction RedundancyElimination::TryReuseBoundsCheckForFirstInput(Node* node) {
DCHECK(node->opcode() == IrOpcode::kSpeculativeNumberAdd ||
- node->opcode() == IrOpcode::kSpeculativeNumberSubtract);
+ node->opcode() == IrOpcode::kSpeculativeNumberSubtract ||
+ node->opcode() == IrOpcode::kSpeculativeSafeIntegerAdd ||
+ node->opcode() == IrOpcode::kSpeculativeSafeIntegerSubtract);
DCHECK_EQ(1, node->op()->EffectInputCount());
DCHECK_EQ(1, node->op()->EffectOutputCount());
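Adding the SpeculativeSafeIntegerAdd/Subtract opcodes to this dispatch means the index-plus-constant patterns produced under safe-integer feedback can now inherit a dominating bounds check, just as their SpeculativeNumber counterparts already did. A rough sketch of what the reducer looks for (paraphrased, assuming the surrounding CheckBounds bookkeeping):

    // If {node} computes index +/- constant and its first input was already
    // bounds-checked, the earlier check can be reused instead of emitting a
    // second one (subject to the constant fitting in the checked range).
    Node* const index = node->InputAt(0);
    if (index->opcode() == IrOpcode::kCheckBounds) {
      // ... reuse the existing check for this increment/decrement.
    }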
diff --git a/deps/v8/src/compiler/register-allocator-verifier.cc b/deps/v8/src/compiler/register-allocator-verifier.cc
index a57ed4f1a6..452ff705bd 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.cc
+++ b/deps/v8/src/compiler/register-allocator-verifier.cc
@@ -23,7 +23,7 @@ void VerifyEmptyGaps(const Instruction* instr) {
i <= Instruction::LAST_GAP_POSITION; i++) {
Instruction::GapPosition inner_pos =
static_cast<Instruction::GapPosition>(i);
- CHECK(instr->GetParallelMove(inner_pos) == nullptr);
+ CHECK_NULL(instr->GetParallelMove(inner_pos));
}
}
@@ -77,7 +77,7 @@ RegisterAllocatorVerifier::RegisterAllocatorVerifier(
for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
BuildConstraint(instr->OutputAt(i), &op_constraints[count]);
if (op_constraints[count].type_ == kSameAsFirst) {
- CHECK(instr->InputCount() > 0);
+ CHECK_LT(0, instr->InputCount());
op_constraints[count].type_ = op_constraints[0].type_;
op_constraints[count].value_ = op_constraints[0].value_;
}
@@ -333,7 +333,7 @@ BlockAssessments* RegisterAllocatorVerifier::CreateForBlock(
// TODO(mtrofin): the following check should hold, however, in certain
// unit tests it is invalidated by the last block. Investigate and
// normalize the CFG.
- // CHECK(current_block_id.ToInt() == 0);
+ // CHECK_EQ(0, current_block_id.ToInt());
// The phi size test below is because we can, technically, have phi
// instructions with one argument. Some tests expose that, too.
} else if (block->PredecessorCount() == 1 && block->phis().size() == 0) {
diff --git a/deps/v8/src/compiler/register-allocator.cc b/deps/v8/src/compiler/register-allocator.cc
index 5bdb749996..7f65695ee2 100644
--- a/deps/v8/src/compiler/register-allocator.cc
+++ b/deps/v8/src/compiler/register-allocator.cc
@@ -635,7 +635,7 @@ UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
}
current = next;
}
- DCHECK(nullptr != after);
+ DCHECK_NOT_NULL(after);
// Partition original use intervals to the two live ranges.
UseInterval* before = current;
@@ -1003,7 +1003,7 @@ void TopLevelLiveRange::UpdateSpillRangePostMerge(TopLevelLiveRange* merged) {
if (HasNoSpillType() && merged->HasSpillRange()) {
set_spill_type(merged->spill_type());
- DCHECK(GetSpillRange()->live_ranges().size() > 0);
+ DCHECK_LT(0, GetSpillRange()->live_ranges().size());
merged->spill_range_ = nullptr;
merged->bits_ =
SpillTypeField::update(merged->bits_, SpillType::kNoSpillType);
@@ -1089,7 +1089,7 @@ void TopLevelLiveRange::Verify() const {
void TopLevelLiveRange::ShortenTo(LifetimePosition start) {
TRACE("Shorten live range %d to [%d\n", vreg(), start.value());
- DCHECK(first_interval_ != nullptr);
+ DCHECK_NOT_NULL(first_interval_);
DCHECK(first_interval_->start() <= start);
DCHECK(start < first_interval_->end());
first_interval_->set_start(start);
@@ -1670,7 +1670,7 @@ void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
for (const RpoNumber& succ : block->successors()) {
const InstructionBlock* successor = code()->InstructionBlockAt(succ);
- DCHECK(successor->PredecessorCount() == 1);
+ DCHECK_EQ(1, successor->PredecessorCount());
int gap_index = successor->first_instruction_index();
// Create an unconstrained operand for the same virtual register
// and insert a gap move from the fixed output to the operand.
@@ -1682,7 +1682,7 @@ void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
if (!assigned) {
for (const RpoNumber& succ : block->successors()) {
const InstructionBlock* successor = code()->InstructionBlockAt(succ);
- DCHECK(successor->PredecessorCount() == 1);
+ DCHECK_EQ(1, successor->PredecessorCount());
int gap_index = successor->first_instruction_index();
range->RecordSpillLocation(allocation_zone(), gap_index, output);
range->SetSpillStartIndex(gap_index);
@@ -1769,7 +1769,7 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
if (!output->IsUnallocated()) continue;
UnallocatedOperand* second_output = UnallocatedOperand::cast(output);
if (!second_output->HasSameAsInputPolicy()) continue;
- DCHECK(i == 0); // Only valid for first output.
+ DCHECK_EQ(0, i); // Only valid for first output.
UnallocatedOperand* cur_input =
UnallocatedOperand::cast(second->InputAt(0));
int output_vreg = second_output->virtual_register();
@@ -2042,7 +2042,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
LifetimePosition curr_position =
LifetimePosition::InstructionFromInstructionIndex(index);
Instruction* instr = code()->InstructionAt(index);
- DCHECK(instr != nullptr);
+ DCHECK_NOT_NULL(instr);
DCHECK(curr_position.IsInstructionPosition());
// Process output, inputs, and temps of this instruction.
for (size_t i = 0; i < instr->OutputCount(); i++) {
@@ -2058,8 +2058,8 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
}
if (block->IsHandler() && index == block_start && output->IsAllocated() &&
output->IsRegister() &&
- AllocatedOperand::cast(output)->GetRegister().is(
- v8::internal::kReturnRegister0)) {
+ AllocatedOperand::cast(output)->GetRegister() ==
+ v8::internal::kReturnRegister0) {
// The register defined here is blocked from gap start - it is the
// exception value.
// TODO(mtrofin): should we explore an explicit opcode for
@@ -2739,8 +2739,8 @@ LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data,
inactive_live_ranges().reserve(8);
// TryAllocateFreeReg and AllocateBlockedReg assume this
// when allocating local arrays.
- DCHECK(RegisterConfiguration::kMaxFPRegisters >=
- this->data()->config()->num_general_registers());
+ DCHECK_GE(RegisterConfiguration::kMaxFPRegisters,
+ this->data()->config()->num_general_registers());
}
@@ -3413,7 +3413,7 @@ bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
// Try to merge the spilled operands and count the number of merged spilled
// operands.
- DCHECK(first_op != nullptr);
+ DCHECK_NOT_NULL(first_op);
SpillRange* first_op_spill = first_op->TopLevel()->GetSpillRange();
size_t num_merged = 1;
for (size_t i = 1; i < phi->operands().size(); i++) {
@@ -3838,7 +3838,7 @@ int LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block,
gap_index = block->first_instruction_index();
position = Instruction::START;
} else {
- DCHECK(pred->SuccessorCount() == 1);
+ DCHECK_EQ(1, pred->SuccessorCount());
DCHECK(!code()
->InstructionAt(pred->last_instruction_index())
->HasReferenceMap());
diff --git a/deps/v8/src/compiler/register-allocator.h b/deps/v8/src/compiler/register-allocator.h
index 308bdfe3a3..63e94fbdc8 100644
--- a/deps/v8/src/compiler/register-allocator.h
+++ b/deps/v8/src/compiler/register-allocator.h
@@ -106,7 +106,7 @@ class LifetimePosition final {
// Returns the lifetime position for the beginning of the previous START.
LifetimePosition PrevStart() const {
DCHECK(IsValid());
- DCHECK(value_ >= kHalfStep);
+ DCHECK_LE(kHalfStep, value_);
return LifetimePosition(Start().value_ - kHalfStep);
}
@@ -531,17 +531,17 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
}
SpillType spill_type() const { return SpillTypeField::decode(bits_); }
InstructionOperand* GetSpillOperand() const {
- DCHECK(spill_type() == SpillType::kSpillOperand);
+ DCHECK_EQ(SpillType::kSpillOperand, spill_type());
return spill_operand_;
}
SpillRange* GetAllocatedSpillRange() const {
- DCHECK(spill_type() != SpillType::kSpillOperand);
+ DCHECK_NE(SpillType::kSpillOperand, spill_type());
return spill_range_;
}
SpillRange* GetSpillRange() const {
- DCHECK(spill_type() == SpillType::kSpillRange);
+ DCHECK_EQ(SpillType::kSpillRange, spill_type());
return spill_range_;
}
bool HasNoSpillType() const {
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index b21bf4cf61..321b9c6687 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -10,7 +10,7 @@
#include "src/code-factory.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
-#include "src/objects-inl.h"
+#include "src/factory-inl.h"
namespace v8 {
namespace internal {
@@ -172,18 +172,18 @@ Node* RepresentationChanger::GetRepresentationFor(
return GetTaggedPointerRepresentationFor(node, output_rep, output_type,
use_node, use_info);
case MachineRepresentation::kTagged:
- DCHECK(use_info.type_check() == TypeCheckKind::kNone);
+ DCHECK_EQ(TypeCheckKind::kNone, use_info.type_check());
return GetTaggedRepresentationFor(node, output_rep, output_type,
use_info.truncation());
case MachineRepresentation::kFloat32:
- DCHECK(use_info.type_check() == TypeCheckKind::kNone);
+ DCHECK_EQ(TypeCheckKind::kNone, use_info.type_check());
return GetFloat32RepresentationFor(node, output_rep, output_type,
use_info.truncation());
case MachineRepresentation::kFloat64:
return GetFloat64RepresentationFor(node, output_rep, output_type,
use_node, use_info);
case MachineRepresentation::kBit:
- DCHECK(use_info.type_check() == TypeCheckKind::kNone);
+ DCHECK_EQ(TypeCheckKind::kNone, use_info.type_check());
return GetBitRepresentationFor(node, output_rep, output_type);
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
@@ -191,7 +191,7 @@ Node* RepresentationChanger::GetRepresentationFor(
return GetWord32RepresentationFor(node, output_rep, output_type, use_node,
use_info);
case MachineRepresentation::kWord64:
- DCHECK(use_info.type_check() == TypeCheckKind::kNone);
+ DCHECK_EQ(TypeCheckKind::kNone, use_info.type_check());
return GetWord64RepresentationFor(node, output_rep, output_type);
case MachineRepresentation::kSimd128:
case MachineRepresentation::kNone:
@@ -722,7 +722,7 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
}
} else if (output_rep == MachineRepresentation::kWord8 ||
output_rep == MachineRepresentation::kWord16) {
- DCHECK(use_info.representation() == MachineRepresentation::kWord32);
+ DCHECK_EQ(MachineRepresentation::kWord32, use_info.representation());
DCHECK(use_info.type_check() == TypeCheckKind::kSignedSmall ||
use_info.type_check() == TypeCheckKind::kSigned32);
return node;
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index a6be44cc52..52a3e75c8a 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -201,7 +201,7 @@ class UseInfo {
Truncation::Any(identify_zeros), TypeCheckKind::kSigned32);
}
static UseInfo CheckedNumberAsFloat64() {
- return UseInfo(MachineRepresentation::kFloat64, Truncation::Float64(),
+ return UseInfo(MachineRepresentation::kFloat64, Truncation::Any(),
TypeCheckKind::kNumber);
}
static UseInfo CheckedNumberAsWord32() {
diff --git a/deps/v8/src/compiler/s390/code-generator-s390.cc b/deps/v8/src/compiler/s390/code-generator-s390.cc
index 1ad427cca4..8aeab0ac0d 100644
--- a/deps/v8/src/compiler/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/s390/code-generator-s390.cc
@@ -4,6 +4,8 @@
#include "src/compiler/code-generator.h"
+#include "src/assembler-inl.h"
+#include "src/callable.h"
#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
@@ -223,6 +225,28 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
must_save_lr_(!gen->frame_access_state()->has_frame()),
zone_(gen->zone()) {}
+ void SaveRegisters(RegList registers) {
+ DCHECK_LT(0, NumRegs(registers));
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
+ }
+ __ MultiPush(regs | r14.bit());
+ }
+
+ void RestoreRegisters(RegList registers) {
+ DCHECK_LT(0, NumRegs(registers));
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
+ }
+ __ MultiPop(regs | r14.bit());
+ }
+
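SaveRegisters/RestoreRegisters receive the caller-saved set as a RegList bit mask, translate each set bit back into a Register, and block-push/pop the result together with r14 (the link register, which the out-of-line call clobbers). A standalone sketch of the bit-walk, with a hypothetical mask:

    #include <cstdint>
    #include <cstdio>
    int main() {
      uint32_t registers = 0b1010;  // hypothetical RegList: register codes 1, 3
      for (int i = 0; i < 16; ++i) {
        if ((registers >> i) & 1u) std::printf("include r%d\n", i);
      }
    }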
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
@@ -230,6 +254,12 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ CheckPageFlag(value_, scratch0_,
MemoryChunk::kPointersToHereAreInterestingMask, eq,
exit());
+ if (offset_ == no_reg) {
+ __ AddP(scratch1_, object_, Operand(offset_immediate_));
+ } else {
+ DCHECK_EQ(0, offset_immediate_);
+ __ AddP(scratch1_, object_, offset_);
+ }
RememberedSetAction const remembered_set_action =
mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
: OMIT_REMEMBERED_SET;
@@ -239,15 +269,14 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
// We need to save and restore r14 if the frame was elided.
__ Push(r14);
}
- if (offset_.is(no_reg)) {
- __ AddP(scratch1_, object_, Operand(offset_immediate_));
- } else {
- DCHECK_EQ(0, offset_immediate_);
- __ AddP(scratch1_, object_, offset_);
- }
+#ifdef V8_CSA_WRITE_BARRIER
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode);
+#else
__ CallStubDelayed(
new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode));
+#endif
if (must_save_lr_) {
// We need to save and restore r14 if the frame was elided.
__ Pop(r14);
@@ -257,7 +286,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
private:
Register const object_;
Register const offset_;
- int32_t const offset_immediate_; // Valid if offset_.is(no_reg).
+ int32_t const offset_immediate_; // Valid if offset_ == no_reg.
Register const value_;
Register const scratch0_;
Register const scratch1_;
@@ -363,24 +392,24 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
return mem; \
})(ret)
-#define RRInstr(instr) \
- [&]() { \
- DCHECK(i.OutputRegister().is(i.InputRegister(0))); \
- __ instr(i.OutputRegister(), i.InputRegister(1)); \
- return 2; \
+#define RRInstr(instr) \
+ [&]() { \
+ DCHECK(i.OutputRegister() == i.InputRegister(0)); \
+ __ instr(i.OutputRegister(), i.InputRegister(1)); \
+ return 2; \
}
#define RIInstr(instr) \
[&]() { \
- DCHECK(i.OutputRegister().is(i.InputRegister(0))); \
+ DCHECK(i.OutputRegister() == i.InputRegister(0)); \
__ instr(i.OutputRegister(), i.InputImmediate(1)); \
return 2; \
}
-#define RMInstr(instr, GETMEM) \
- [&]() { \
- DCHECK(i.OutputRegister().is(i.InputRegister(0))); \
- int ret = 2; \
- __ instr(i.OutputRegister(), GETMEM(ret, 1)); \
- return ret; \
+#define RMInstr(instr, GETMEM) \
+ [&]() { \
+ DCHECK(i.OutputRegister() == i.InputRegister(0)); \
+ int ret = 2; \
+ __ instr(i.OutputRegister(), GETMEM(ret, 1)); \
+ return ret; \
}
#define RM32Instr(instr) RMInstr(instr, GET_MEMOPERAND32)
#define RM64Instr(instr) RMInstr(instr, GET_MEMOPERAND)
@@ -404,28 +433,28 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
#define RRM32Instr(instr) RRMInstr(instr, GET_MEMOPERAND32)
#define RRM64Instr(instr) RRMInstr(instr, GET_MEMOPERAND)
-#define DDInstr(instr) \
- [&]() { \
- DCHECK(i.OutputDoubleRegister().is(i.InputDoubleRegister(0))); \
- __ instr(i.OutputDoubleRegister(), i.InputDoubleRegister(1)); \
- return 2; \
+#define DDInstr(instr) \
+ [&]() { \
+ DCHECK(i.OutputDoubleRegister() == i.InputDoubleRegister(0)); \
+ __ instr(i.OutputDoubleRegister(), i.InputDoubleRegister(1)); \
+ return 2; \
}
-#define DMInstr(instr) \
- [&]() { \
- DCHECK(i.OutputDoubleRegister().is(i.InputDoubleRegister(0))); \
- int ret = 2; \
- __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 1)); \
- return ret; \
+#define DMInstr(instr) \
+ [&]() { \
+ DCHECK(i.OutputDoubleRegister() == i.InputDoubleRegister(0)); \
+ int ret = 2; \
+ __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 1)); \
+ return ret; \
}
-#define DMTInstr(instr) \
- [&]() { \
- DCHECK(i.OutputDoubleRegister().is(i.InputDoubleRegister(0))); \
- int ret = 2; \
- __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 1), \
- kScratchDoubleReg); \
- return ret; \
+#define DMTInstr(instr) \
+ [&]() { \
+ DCHECK(i.OutputDoubleRegister() == i.InputDoubleRegister(0)); \
+ int ret = 2; \
+ __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 1), \
+ kScratchDoubleReg); \
+ return ret; \
}
#define R_MInstr(instr) \
@@ -689,13 +718,13 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
__ bunordered(&return_left, Label::kNear); \
\
__ bind(&return_right); \
- if (!right_reg.is(result_reg)) { \
+ if (right_reg != result_reg) { \
__ ldr(result_reg, right_reg); \
} \
__ b(&done, Label::kNear); \
\
__ bind(&return_left); \
- if (!left_reg.is(result_reg)) { \
+ if (left_reg != result_reg) { \
__ ldr(result_reg, left_reg); \
} \
__ bind(&done); \
@@ -723,7 +752,7 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
/* For min we want logical-or of sign bit: -(-L + -R) */ \
__ lcdbr(left_reg, left_reg); \
__ ldr(result_reg, left_reg); \
- if (left_reg.is(right_reg)) { \
+ if (left_reg == right_reg) { \
__ adbr(result_reg, right_reg); \
} else { \
__ sdbr(result_reg, right_reg); \
@@ -737,13 +766,13 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
__ bunordered(&return_left, Label::kNear); \
\
__ bind(&return_right); \
- if (!right_reg.is(result_reg)) { \
+ if (right_reg != result_reg) { \
__ ldr(result_reg, right_reg); \
} \
__ b(&done, Label::kNear); \
\
__ bind(&return_left); \
- if (!left_reg.is(result_reg)) { \
+ if (left_reg != result_reg) { \
__ ldr(result_reg, left_reg); \
} \
__ bind(&done); \
@@ -779,13 +808,13 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
__ bunordered(&return_left, Label::kNear); \
\
__ bind(&return_right); \
- if (!right_reg.is(result_reg)) { \
+ if (right_reg != result_reg) { \
__ ldr(result_reg, right_reg); \
} \
__ b(&done, Label::kNear); \
\
__ bind(&return_left); \
- if (!left_reg.is(result_reg)) { \
+ if (left_reg != result_reg) { \
__ ldr(result_reg, left_reg); \
} \
__ bind(&done); \
@@ -813,7 +842,7 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
/* For min we want logical-or of sign bit: -(-L + -R) */ \
__ lcebr(left_reg, left_reg); \
__ ldr(result_reg, left_reg); \
- if (left_reg.is(right_reg)) { \
+ if (left_reg == right_reg) { \
__ aebr(result_reg, right_reg); \
} else { \
__ sebr(result_reg, right_reg); \
@@ -827,13 +856,13 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
__ bunordered(&return_left, Label::kNear); \
\
__ bind(&return_right); \
- if (!right_reg.is(result_reg)) { \
+ if (right_reg != result_reg) { \
__ ldr(result_reg, right_reg); \
} \
__ b(&done, Label::kNear); \
\
__ bind(&return_left); \
- if (!left_reg.is(result_reg)) { \
+ if (left_reg != result_reg) { \
__ ldr(result_reg, left_reg); \
} \
__ bind(&done); \
@@ -1071,7 +1100,7 @@ void FlushPendingPushRegisters(TurboAssembler* tasm,
break;
}
frame_access_state->IncreaseSPDelta(pending_pushes->size());
- pending_pushes->resize(0);
+ pending_pushes->clear();
}
void AdjustStackPointerForTailCall(
@@ -1140,6 +1169,28 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
first_unused_stack_slot);
}
+// Check if the code object is marked for deoptimization. If it is, then it
+// jumps to the CompileLazyDeoptimizedCode builtin. To do this we need to:
+// 1. load the address of the current instruction;
+// 2. read from memory the word that contains the deoptimization bit, which
+//    lives in the first set of flags ({kKindSpecificFlags1Offset});
+// 3. test kMarkedForDeoptimizationBit in those flags; and
+// 4. if the bit is set, jump to the builtin.
+void CodeGenerator::BailoutIfDeoptimized() {
+ Label current;
+ __ larl(r1, &current);
+ int pc_offset = __ pc_offset();
+ __ bind(&current);
+ int offset =
+ Code::kKindSpecificFlags1Offset - (Code::kHeaderSize + pc_offset);
+ __ LoadW(ip, MemOperand(r1, offset));
+ __ TestBit(ip, Code::kMarkedForDeoptimizationBit);
+ Handle<Code> code = isolate()->builtins()->builtin_handle(
+ Builtins::kCompileLazyDeoptimizedCode);
+ __ Jump(code, RelocInfo::CODE_TARGET, ne);
+}
+
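The offset fed to MemOperand deserves a worked derivation (a sketch: the layout constants are V8's, and heap-object tag adjustments cancel in the subtraction):

    r1                = instruction_start + pc_offset          (larl + bind)
    instruction_start = code_object_start + Code::kHeaderSize
    flags_address     = code_object_start + Code::kKindSpecificFlags1Offset
    therefore
    flags_address - r1 = kKindSpecificFlags1Offset - (kHeaderSize + pc_offset)

which is exactly the offset passed to MemOperand(r1, offset).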
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -1216,13 +1267,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchSaveCallerRegisters: {
+ fp_mode_ =
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
+ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// kReturnRegister0 should have been saved before entering the stub.
- __ PushCallerSaved(kSaveFPRegs, kReturnRegister0);
+ int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
+ DCHECK_EQ(0, bytes % kPointerSize);
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ DCHECK(!caller_registers_saved_);
+ caller_registers_saved_ = true;
break;
}
case kArchRestoreCallerRegisters: {
+ DCHECK(fp_mode_ ==
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
+ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// Don't overwrite the returned value.
- __ PopCallerSaved(kSaveFPRegs, kReturnRegister0);
+ int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(-(bytes / kPointerSize));
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ DCHECK(caller_registers_saved_);
+ caller_registers_saved_ = false;
break;
}
case kArchPrepareTailCall:
@@ -1238,7 +1304,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ CallCFunction(func, num_parameters);
}
frame_access_state()->SetFrameAccessToDefault();
+ // Ideally, we should decrement SP delta to match the change of stack
+ // pointer in CallCFunction. However, for certain architectures (e.g.
+          // ARM), there may be a stricter alignment requirement, causing the
+          // old SP to be saved on the stack. In those cases, we cannot calculate the SP
+ // delta statically.
frame_access_state()->ClearSPDelta();
+ if (caller_registers_saved_) {
+ // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
+ // Here, we assume the sequence to be:
+ // kArchSaveCallerRegisters;
+ // kArchCallCFunction;
+ // kArchRestoreCallerRegisters;
+ int bytes =
+ __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ }
break;
}
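A worked trace of the sp_delta bookkeeping across the expected sequence, assuming kPointerSize == 8 and ten caller-saved slots (illustrative numbers only):

    kArchSaveCallerRegisters   : bytes = 80, sp_delta 0 -> 10
    kArchCallCFunction         : ClearSPDelta() resets to 0; the re-sync adds
                                 RequiredStackSizeForCallerSaved / 8 = 10 back
    kArchRestoreCallerRegisters: bytes = 80, sp_delta 10 -> 0

The re-sync exists because ClearSPDelta() wipes the delta the save step introduced while the saved registers are still physically on the stack until the restore runs.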
case kArchJmp:
@@ -1251,7 +1332,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssembleArchTableSwitch(instr);
break;
case kArchDebugAbort:
- DCHECK(i.InputRegister(0).is(r3));
+ DCHECK(i.InputRegister(0) == r3);
if (!frame_access_state()->has_frame()) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
@@ -1901,7 +1982,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (op->representation() == MachineRepresentation::kFloat64) {
__ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp));
} else {
- DCHECK(op->representation() == MachineRepresentation::kFloat32);
+ DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
__ StoreFloat32(i.InputDoubleRegister(0), MemOperand(sp));
}
} else {
@@ -1918,7 +1999,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ StoreDouble(i.InputDoubleRegister(0),
MemOperand(sp, slot * kPointerSize));
} else {
- DCHECK(op->representation() == MachineRepresentation::kFloat32);
+ DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
__ StoreFloat32(i.InputDoubleRegister(0),
MemOperand(sp, slot * kPointerSize));
}
@@ -2441,6 +2522,11 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
+void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
+ BranchInfo* branch) {
+ AssembleArchBranch(instr, branch);
+}
+
void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
}
@@ -2483,6 +2569,9 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
+ CallDescriptor* descriptor = gen_->linkage()->GetIncomingDescriptor();
+ int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ __ Drop(pop_count);
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
@@ -2587,8 +2676,8 @@ void CodeGenerator::FinishFrame(Frame* frame) {
// Save callee-saved Double registers.
if (double_saves != 0) {
frame->AlignSavedCalleeRegisterSlots();
- DCHECK(kNumCalleeSavedDoubles ==
- base::bits::CountPopulation32(double_saves));
+ DCHECK_EQ(kNumCalleeSavedDoubles,
+ base::bits::CountPopulation32(double_saves));
frame->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
(kDoubleSize / kPointerSize));
}
@@ -2645,8 +2734,8 @@ void CodeGenerator::AssembleConstructFrame() {
// Save callee-saved Double registers.
if (double_saves != 0) {
__ MultiPushDoubles(double_saves);
- DCHECK(kNumCalleeSavedDoubles ==
- base::bits::CountPopulation32(double_saves));
+ DCHECK_EQ(kNumCalleeSavedDoubles,
+ base::bits::CountPopulation32(double_saves));
}
// Save callee-saved registers.
diff --git a/deps/v8/src/compiler/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
index 3ed0a34c72..d8c7c64d83 100644
--- a/deps/v8/src/compiler/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
@@ -221,7 +221,7 @@ class S390OperandGenerator final : public OperandGenerator {
DCHECK(m.matches());
if ((m.displacement() == nullptr ||
CanBeImmediate(m.displacement(), immediate_mode))) {
- DCHECK(m.scale() == 0);
+ DCHECK_EQ(0, m.scale());
return GenerateMemoryOperandInputs(m.index(), m.base(), m.displacement(),
m.displacement_mode(), inputs,
input_count);
diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc
index aa15fb0be0..2ec3f11be7 100644
--- a/deps/v8/src/compiler/schedule.cc
+++ b/deps/v8/src/compiler/schedule.cc
@@ -35,8 +35,8 @@ BasicBlock::BasicBlock(Zone* zone, Id id)
bool BasicBlock::LoopContains(BasicBlock* block) const {
// RPO numbers must be initialized.
- DCHECK(rpo_number_ >= 0);
- DCHECK(block->rpo_number_ >= 0);
+ DCHECK_LE(0, rpo_number_);
+ DCHECK_LE(0, block->rpo_number_);
if (loop_end_ == nullptr) return false; // This is not a loop.
return block->rpo_number_ >= rpo_number_ &&
block->rpo_number_ < loop_end_->rpo_number_;
@@ -198,7 +198,7 @@ void Schedule::PlanNode(BasicBlock* block, Node* node) {
os << "Planning #" << node->id() << ":" << node->op()->mnemonic()
<< " for future add to B" << block->id() << "\n";
}
- DCHECK(this->block(node) == nullptr);
+ DCHECK_NULL(this->block(node));
SetBlockForNode(block, node);
}
diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc
index 8ad8d02779..91e4e02dd3 100644
--- a/deps/v8/src/compiler/scheduler.cc
+++ b/deps/v8/src/compiler/scheduler.cc
@@ -76,24 +76,28 @@ Scheduler::SchedulerData* Scheduler::GetData(Node* node) {
return &node_data_[node->id()];
}
-
-Scheduler::Placement Scheduler::GetPlacement(Node* node) {
+Scheduler::Placement Scheduler::InitializePlacement(Node* node) {
SchedulerData* data = GetData(node);
- if (data->placement_ == kUnknown) { // Compute placement, once, on demand.
- switch (node->opcode()) {
- case IrOpcode::kParameter:
- case IrOpcode::kOsrValue:
- // Parameters and OSR values are always fixed to the start block.
- data->placement_ = kFixed;
- break;
- case IrOpcode::kPhi:
- case IrOpcode::kEffectPhi: {
- // Phis and effect phis are fixed if their control inputs are, whereas
- // otherwise they are coupled to a floating control node.
- Placement p = GetPlacement(NodeProperties::GetControlInput(node));
- data->placement_ = (p == kFixed ? kFixed : kCoupled);
- break;
- }
+ if (data->placement_ == kFixed) {
+  if (data->placement_ == kFixed) {
+    // Nothing to do for control nodes that have already been fixed in
+ // the schedule.
+ return data->placement_;
+ }
+ DCHECK_EQ(kUnknown, data->placement_);
+ switch (node->opcode()) {
+ case IrOpcode::kParameter:
+ case IrOpcode::kOsrValue:
+ // Parameters and OSR values are always fixed to the start block.
+ data->placement_ = kFixed;
+ break;
+ case IrOpcode::kPhi:
+ case IrOpcode::kEffectPhi: {
+ // Phis and effect phis are fixed if their control inputs are, whereas
+ // otherwise they are coupled to a floating control node.
+ Placement p = GetPlacement(NodeProperties::GetControlInput(node));
+ data->placement_ = (p == kFixed ? kFixed : kCoupled);
+ break;
+ }
#define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
#undef DEFINE_CONTROL_CASE
@@ -101,34 +105,46 @@ Scheduler::Placement Scheduler::GetPlacement(Node* node) {
// Control nodes that were not control-reachable from end may float.
data->placement_ = kSchedulable;
break;
- }
- default:
- data->placement_ = kSchedulable;
- break;
}
+ default:
+ data->placement_ = kSchedulable;
+ break;
}
return data->placement_;
}
+Scheduler::Placement Scheduler::GetPlacement(Node* node) {
+ return GetData(node)->placement_;
+}
+
+bool Scheduler::IsLive(Node* node) { return GetPlacement(node) != kUnknown; }
void Scheduler::UpdatePlacement(Node* node, Placement placement) {
SchedulerData* data = GetData(node);
- if (data->placement_ != kUnknown) { // Trap on mutation, not initialization.
- switch (node->opcode()) {
- case IrOpcode::kParameter:
- // Parameters are fixed once and for all.
- UNREACHABLE();
- break;
- case IrOpcode::kPhi:
- case IrOpcode::kEffectPhi: {
- // Phis and effect phis are coupled to their respective blocks.
- DCHECK_EQ(Scheduler::kCoupled, data->placement_);
- DCHECK_EQ(Scheduler::kFixed, placement);
- Node* control = NodeProperties::GetControlInput(node);
- BasicBlock* block = schedule_->block(control);
- schedule_->AddNode(block, node);
- break;
- }
+ if (data->placement_ == kUnknown) {
+ // We only update control nodes from {kUnknown} to {kFixed}. Ideally, we
+ // should check that {node} is a control node (including exceptional calls),
+ // but that is expensive.
+ DCHECK_EQ(Scheduler::kFixed, placement);
+ data->placement_ = placement;
+ return;
+ }
+
+ switch (node->opcode()) {
+ case IrOpcode::kParameter:
+ // Parameters are fixed once and for all.
+ UNREACHABLE();
+ break;
+ case IrOpcode::kPhi:
+ case IrOpcode::kEffectPhi: {
+ // Phis and effect phis are coupled to their respective blocks.
+ DCHECK_EQ(Scheduler::kCoupled, data->placement_);
+ DCHECK_EQ(Scheduler::kFixed, placement);
+ Node* control = NodeProperties::GetControlInput(node);
+ BasicBlock* block = schedule_->block(control);
+ schedule_->AddNode(block, node);
+ break;
+ }
#define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
#undef DEFINE_CONTROL_CASE
@@ -139,20 +155,19 @@ void Scheduler::UpdatePlacement(Node* node, Placement placement) {
DCHECK_EQ(node, NodeProperties::GetControlInput(use));
UpdatePlacement(use, placement);
}
- }
- break;
}
- default:
- DCHECK_EQ(Scheduler::kSchedulable, data->placement_);
- DCHECK_EQ(Scheduler::kScheduled, placement);
- break;
- }
- // Reduce the use count of the node's inputs to potentially make them
- // schedulable. If all the uses of a node have been scheduled, then the node
- // itself can be scheduled.
- for (Edge const edge : node->input_edges()) {
- DecrementUnscheduledUseCount(edge.to(), edge.index(), edge.from());
+ break;
}
+ default:
+ DCHECK_EQ(Scheduler::kSchedulable, data->placement_);
+ DCHECK_EQ(Scheduler::kScheduled, placement);
+ break;
+ }
+ // Reduce the use count of the node's inputs to potentially make them
+ // schedulable. If all the uses of a node have been scheduled, then the node
+ // itself can be scheduled.
+ for (Edge const edge : node->input_edges()) {
+ DecrementUnscheduledUseCount(edge.to(), edge.index(), edge.from());
}
data->placement_ = placement;
}
@@ -201,7 +216,7 @@ void Scheduler::DecrementUnscheduledUseCount(Node* node, int index,
return DecrementUnscheduledUseCount(control, index, from);
}
- DCHECK(GetData(node)->unscheduled_count_ > 0);
+ DCHECK_LT(0, GetData(node)->unscheduled_count_);
--(GetData(node)->unscheduled_count_);
if (FLAG_trace_turbo_scheduler) {
TRACE(" Use count of #%d:%s (used by #%d:%s)-- = %d\n", node->id(),
@@ -636,7 +651,7 @@ class SpecialRPONumberer : public ZoneObject {
// Computes the special reverse-post-order for the main control flow graph,
// that is for the graph spanned between the schedule's start and end blocks.
void ComputeSpecialRPO() {
- DCHECK(schedule_->end()->SuccessorCount() == 0);
+ DCHECK_EQ(0, schedule_->end()->SuccessorCount());
DCHECK(!order_); // Main order does not exist yet.
ComputeAndInsertSpecialRPO(schedule_->start(), schedule_->end());
}
@@ -781,7 +796,7 @@ class SpecialRPONumberer : public ZoneObject {
}
} else {
// Push the successor onto the stack.
- DCHECK(succ->rpo_number() == kBlockUnvisited1);
+ DCHECK_EQ(kBlockUnvisited1, succ->rpo_number());
stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited1);
}
} else {
@@ -847,7 +862,7 @@ class SpecialRPONumberer : public ZoneObject {
// Process the next successor.
if (succ->rpo_number() == kBlockOnStack) continue;
if (succ->rpo_number() == kBlockVisited2) continue;
- DCHECK(succ->rpo_number() == kBlockUnvisited2);
+ DCHECK_EQ(kBlockUnvisited2, succ->rpo_number());
if (loop != nullptr && !loop->members->Contains(succ->id().ToInt())) {
// The successor is not in the current loop or any nested loop.
// Add it to the outgoing edges of this loop and visit it later.
@@ -1028,8 +1043,8 @@ class SpecialRPONumberer : public ZoneObject {
void VerifySpecialRPO() {
BasicBlockVector* order = schedule_->rpo_order();
- DCHECK(order->size() > 0);
- DCHECK((*order)[0]->id().ToInt() == 0); // entry should be first.
+ DCHECK_LT(0, order->size());
+ DCHECK_EQ(0, (*order)[0]->id().ToInt()); // entry should be first.
for (size_t i = 0; i < loops_.size(); i++) {
LoopInfo* loop = &loops_[i];
@@ -1037,12 +1052,12 @@ class SpecialRPONumberer : public ZoneObject {
BasicBlock* end = header->loop_end();
DCHECK_NOT_NULL(header);
- DCHECK(header->rpo_number() >= 0);
- DCHECK(header->rpo_number() < static_cast<int>(order->size()));
+ DCHECK_LE(0, header->rpo_number());
+ DCHECK_LT(header->rpo_number(), order->size());
DCHECK_NOT_NULL(end);
- DCHECK(end->rpo_number() <= static_cast<int>(order->size()));
- DCHECK(end->rpo_number() > header->rpo_number());
- DCHECK(header->loop_header() != header);
+ DCHECK_LE(end->rpo_number(), order->size());
+ DCHECK_GT(end->rpo_number(), header->rpo_number());
+ DCHECK_NE(header->loop_header(), header);
// Verify the start ... end list relationship.
int links = 0;
@@ -1060,8 +1075,8 @@ class SpecialRPONumberer : public ZoneObject {
block = block->rpo_next();
DCHECK_LT(links, static_cast<int>(2 * order->size())); // cycle?
}
- DCHECK(links > 0);
- DCHECK(links == end->rpo_number() - header->rpo_number());
+ DCHECK_LT(0, links);
+ DCHECK_EQ(links, end->rpo_number() - header->rpo_number());
DCHECK(end_found);
// Check loop depth of the header.
@@ -1075,7 +1090,7 @@ class SpecialRPONumberer : public ZoneObject {
int count = 0;
for (int j = 0; j < static_cast<int>(order->size()); j++) {
BasicBlock* block = order->at(j);
- DCHECK(block->rpo_number() == j);
+ DCHECK_EQ(block->rpo_number(), j);
if (j < header->rpo_number() || j >= end->rpo_number()) {
DCHECK(!header->LoopContains(block));
} else {
@@ -1084,7 +1099,7 @@ class SpecialRPONumberer : public ZoneObject {
count++;
}
}
- DCHECK(links == count);
+ DCHECK_EQ(links, count);
}
}
#endif // DEBUG
@@ -1165,7 +1180,7 @@ class PrepareUsesVisitor {
: scheduler_(scheduler), schedule_(scheduler->schedule_) {}
void Pre(Node* node) {
- if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
+ if (scheduler_->InitializePlacement(node) == Scheduler::kFixed) {
// Fixed nodes are always roots for schedule late.
scheduler_->schedule_root_nodes_.push_back(node);
if (!schedule_->IsScheduled(node)) {
@@ -1269,7 +1284,9 @@ class ScheduleEarlyNodeVisitor {
// Propagate schedule early position.
DCHECK_NOT_NULL(data->minimum_block_);
for (auto use : node->uses()) {
- PropagateMinimumPositionToNode(data->minimum_block_, use);
+ if (scheduler_->IsLive(use)) {
+ PropagateMinimumPositionToNode(data->minimum_block_, use);
+ }
}
}
@@ -1455,6 +1472,7 @@ class ScheduleLateNodeVisitor {
// Check if the {node} has uses in {block}.
for (Edge edge : node->use_edges()) {
+ if (!scheduler_->IsLive(edge.from())) continue;
BasicBlock* use_block = GetBlockForUse(edge);
if (use_block == nullptr || marked_[use_block->id().ToSize()]) continue;
if (use_block == block) {
@@ -1497,6 +1515,7 @@ class ScheduleLateNodeVisitor {
// the {node} itself.
ZoneMap<BasicBlock*, Node*> dominators(scheduler_->zone_);
for (Edge edge : node->use_edges()) {
+ if (!scheduler_->IsLive(edge.from())) continue;
BasicBlock* use_block = GetBlockForUse(edge);
if (use_block == nullptr) continue;
while (marked_[use_block->dominator()->id().ToSize()]) {
@@ -1545,6 +1564,7 @@ class ScheduleLateNodeVisitor {
BasicBlock* GetCommonDominatorOfUses(Node* node) {
BasicBlock* block = nullptr;
for (Edge edge : node->use_edges()) {
+ if (!scheduler_->IsLive(edge.from())) continue;
BasicBlock* use_block = GetBlockForUse(edge);
block = block == nullptr
? use_block
@@ -1735,7 +1755,9 @@ void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
NodeVector propagation_roots(control_flow_builder_->control_);
for (Node* node : control_flow_builder_->control_) {
for (Node* use : node->uses()) {
- if (NodeProperties::IsPhi(use)) propagation_roots.push_back(use);
+ if (NodeProperties::IsPhi(use) && IsLive(use)) {
+ propagation_roots.push_back(use);
+ }
}
}
if (FLAG_trace_turbo_scheduler) {
diff --git a/deps/v8/src/compiler/scheduler.h b/deps/v8/src/compiler/scheduler.h
index 4d297e1756..40f9d56c42 100644
--- a/deps/v8/src/compiler/scheduler.h
+++ b/deps/v8/src/compiler/scheduler.h
@@ -49,8 +49,13 @@ class V8_EXPORT_PRIVATE Scheduler {
// \ /
// +----> kSchedulable ----+--------> kScheduled
//
- // 1) GetPlacement(): kUnknown -> kCoupled|kSchedulable|kFixed
+ // 1) InitializePlacement(): kUnknown -> kCoupled|kSchedulable|kFixed
// 2) UpdatePlacement(): kCoupled|kSchedulable -> kFixed|kScheduled
+ //
+ // We maintain the invariant that all nodes that are not reachable
+ // from the end have kUnknown placement. After the PrepareUses phase runs,
+  // the converse also holds: all nodes with kUnknown placement are not
+ // reachable from the end.
enum Placement { kUnknown, kSchedulable, kFixed, kCoupled, kScheduled };
// Per-node data tracked during scheduling.
@@ -81,7 +86,9 @@ class V8_EXPORT_PRIVATE Scheduler {
inline SchedulerData* GetData(Node* node);
Placement GetPlacement(Node* node);
+ Placement InitializePlacement(Node* node);
void UpdatePlacement(Node* node, Placement placement);
+ bool IsLive(Node* node);
inline bool IsCoupledControlEdge(Node* node, int index);
void IncrementUnscheduledUseCount(Node* node, int index, Node* from);
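A condensed restatement of the placement lifecycle after this split (the transition diagram above remains authoritative):

    InitializePlacement(node) : kUnknown -> kFixed|kCoupled|kSchedulable,
                                run once per reachable node by PrepareUses
    UpdatePlacement(node, p)  : kCoupled|kSchedulable -> kFixed|kScheduled
    GetPlacement(node)        : now a pure read with no lazy initialization
    IsLive(node)              : placement != kUnknown

Dead nodes therefore stay at kUnknown forever, and the IsLive() guards added to the propagation loops in scheduler.cc simply skip them.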
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index 198dc43ed9..582fbd6424 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -3,15 +3,13 @@
// found in the LICENSE file.
#include "src/compiler/simd-scalar-lowering.h"
+
#include "src/compiler/diamond.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
-
#include "src/compiler/node.h"
#include "src/compiler/wasm-compiler.h"
-#include "src/objects-inl.h"
-#include "src/wasm/wasm-module.h"
namespace v8 {
namespace internal {
@@ -327,7 +325,7 @@ void SimdScalarLowering::LowerLoadOp(MachineRepresentation rep, Node* node,
rep_nodes[0]->ReplaceInput(1, indices[0]);
NodeProperties::ChangeOp(rep_nodes[0], load_op);
if (node->InputCount() > 2) {
- DCHECK(node->InputCount() > 3);
+ DCHECK_LT(3, node->InputCount());
Node* effect_input = node->InputAt(2);
Node* control_input = node->InputAt(3);
for (int i = num_lanes - 1; i > 0; --i) {
@@ -356,7 +354,7 @@ void SimdScalarLowering::LowerStoreOp(MachineRepresentation rep, Node* node,
int num_lanes = NumLanes(rep_type);
Node** indices = zone()->NewArray<Node*>(num_lanes);
GetIndexNodes(index, indices, rep_type);
- DCHECK(node->InputCount() > 2);
+ DCHECK_LT(2, node->InputCount());
Node* value = node->InputAt(2);
DCHECK(HasReplacement(1, value));
Node** rep_nodes = zone()->NewArray<Node*>(num_lanes);
@@ -366,7 +364,7 @@ void SimdScalarLowering::LowerStoreOp(MachineRepresentation rep, Node* node,
rep_nodes[0]->ReplaceInput(1, indices[0]);
NodeProperties::ChangeOp(node, store_op);
if (node->InputCount() > 3) {
- DCHECK(node->InputCount() > 4);
+ DCHECK_LT(4, node->InputCount());
Node* effect_input = node->InputAt(3);
Node* control_input = node->InputAt(4);
for (int i = num_lanes - 1; i > 0; --i) {
@@ -390,7 +388,7 @@ void SimdScalarLowering::LowerStoreOp(MachineRepresentation rep, Node* node,
void SimdScalarLowering::LowerBinaryOp(Node* node, SimdType input_rep_type,
const Operator* op) {
- DCHECK(node->InputCount() == 2);
+ DCHECK_EQ(2, node->InputCount());
Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
int num_lanes = NumLanes(input_rep_type);
@@ -404,7 +402,7 @@ void SimdScalarLowering::LowerBinaryOp(Node* node, SimdType input_rep_type,
void SimdScalarLowering::LowerCompareOp(Node* node, SimdType input_rep_type,
const Operator* op,
bool invert_inputs) {
- DCHECK(node->InputCount() == 2);
+ DCHECK_EQ(2, node->InputCount());
Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
int num_lanes = NumLanes(input_rep_type);
@@ -439,7 +437,7 @@ Node* SimdScalarLowering::FixUpperBits(Node* input, int32_t shift) {
void SimdScalarLowering::LowerBinaryOpForSmallInt(Node* node,
SimdType input_rep_type,
const Operator* op) {
- DCHECK(node->InputCount() == 2);
+ DCHECK_EQ(2, node->InputCount());
DCHECK(input_rep_type == SimdType::kInt16x8 ||
input_rep_type == SimdType::kInt8x16);
Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
@@ -464,7 +462,7 @@ void SimdScalarLowering::LowerSaturateBinaryOp(Node* node,
SimdType input_rep_type,
const Operator* op,
bool is_signed) {
- DCHECK(node->InputCount() == 2);
+ DCHECK_EQ(2, node->InputCount());
DCHECK(input_rep_type == SimdType::kInt16x8 ||
input_rep_type == SimdType::kInt8x16);
Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
@@ -520,7 +518,7 @@ void SimdScalarLowering::LowerSaturateBinaryOp(Node* node,
void SimdScalarLowering::LowerUnaryOp(Node* node, SimdType input_rep_type,
const Operator* op) {
- DCHECK(node->InputCount() == 1);
+ DCHECK_EQ(1, node->InputCount());
Node** rep = GetReplacementsWithType(node->InputAt(0), input_rep_type);
int num_lanes = NumLanes(input_rep_type);
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
@@ -532,7 +530,7 @@ void SimdScalarLowering::LowerUnaryOp(Node* node, SimdType input_rep_type,
void SimdScalarLowering::LowerIntMinMax(Node* node, const Operator* op,
bool is_max, SimdType type) {
- DCHECK(node->InputCount() == 2);
+ DCHECK_EQ(2, node->InputCount());
Node** rep_left = GetReplacementsWithType(node->InputAt(0), type);
Node** rep_right = GetReplacementsWithType(node->InputAt(1), type);
int num_lanes = NumLanes(type);
@@ -590,7 +588,7 @@ Node* SimdScalarLowering::BuildF64Trunc(Node* input) {
}
void SimdScalarLowering::LowerConvertFromFloat(Node* node, bool is_signed) {
- DCHECK(node->InputCount() == 1);
+ DCHECK_EQ(1, node->InputCount());
Node** rep = GetReplacementsWithType(node->InputAt(0), SimdType::kFloat32x4);
Node* rep_node[kNumLanes32];
Node* double_zero = graph()->NewNode(common()->Float64Constant(0.0));
@@ -672,7 +670,7 @@ void SimdScalarLowering::LowerShiftOp(Node* node, SimdType type) {
void SimdScalarLowering::LowerNotEqual(Node* node, SimdType input_rep_type,
const Operator* op) {
- DCHECK(node->InputCount() == 2);
+ DCHECK_EQ(2, node->InputCount());
Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
int num_lanes = NumLanes(input_rep_type);
@@ -706,7 +704,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kParameter: {
- DCHECK(node->InputCount() == 1);
+ DCHECK_EQ(1, node->InputCount());
// Only exchange the node if the parameter count actually changed. We do
// not even have to do the default lowering because the start node,
// the only input of a parameter node, only changes if the parameter count
@@ -895,7 +893,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
case IrOpcode::kI32x4Neg:
case IrOpcode::kI16x8Neg:
case IrOpcode::kI8x16Neg: {
- DCHECK(node->InputCount() == 1);
+ DCHECK_EQ(1, node->InputCount());
Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
int num_lanes = NumLanes(rep_type);
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
@@ -912,7 +910,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kS128Not: {
- DCHECK(node->InputCount() == 1);
+ DCHECK_EQ(1, node->InputCount());
Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
Node* rep_node[kNumLanes32];
Node* mask = graph()->NewNode(common()->Int32Constant(0xffffffff));
@@ -1068,7 +1066,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kS128Select: {
- DCHECK(node->InputCount() == 3);
+ DCHECK_EQ(3, node->InputCount());
DCHECK(ReplacementType(node->InputAt(0)) == SimdType::kInt32x4 ||
ReplacementType(node->InputAt(0)) == SimdType::kInt16x8 ||
ReplacementType(node->InputAt(0)) == SimdType::kInt8x16);
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index ec763442a5..28634b8c9f 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -192,7 +192,7 @@ class InputUseInfos {
}
// Check that the new use information is a super-type of the old
// one.
- CHECK(IsUseLessGeneral(input_use_infos_[index], use_info));
+ DCHECK(IsUseLessGeneral(input_use_infos_[index], use_info));
input_use_infos_[index] = use_info;
}
@@ -1262,78 +1262,77 @@ class RepresentationSelector {
void VisitSpeculativeIntegerAdditiveOp(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
+ Type* left_upper = GetUpperBound(node->InputAt(0));
+ Type* right_upper = GetUpperBound(node->InputAt(1));
// Only eliminate the node if the ToNumber conversion cannot
// cause any observable side-effect and if we know for sure that it
// is a number addition (we must exclude strings).
- if (BothInputsAre(node, Type::NumberOrOddball())) {
+ if (left_upper->Is(Type::NumberOrOddball()) &&
+ right_upper->Is(Type::NumberOrOddball())) {
if (truncation.IsUnused()) return VisitUnused(node);
}
- if (BothInputsAre(node, type_cache_.kAdditiveSafeIntegerOrMinusZero) &&
- (GetUpperBound(node)->Is(Type::Signed32()) ||
- GetUpperBound(node)->Is(Type::Unsigned32()) ||
- truncation.IsUsedAsWord32())) {
- // => Int32Add/Sub
- VisitWord32TruncatingBinop(node);
- if (lower()) ChangeToPureOp(node, Int32Op(node));
- return;
+ if (left_upper->Is(type_cache_.kAdditiveSafeIntegerOrMinusZero) &&
+ right_upper->Is(type_cache_.kAdditiveSafeIntegerOrMinusZero)) {
+ // If we know how to interpret the result or if the users only care
+      // about the low 32-bits, we can truncate to Word32 and do a wrapping
+      // addition.
+ if (GetUpperBound(node)->Is(Type::Signed32()) ||
+ GetUpperBound(node)->Is(Type::Unsigned32()) ||
+ truncation.IsUsedAsWord32()) {
+ // => Int32Add/Sub
+ VisitWord32TruncatingBinop(node);
+ if (lower()) ChangeToPureOp(node, Int32Op(node));
+ return;
+ }
}
// Try to use type feedback.
NumberOperationHint hint = NumberOperationHintOf(node->op());
- if (hint == NumberOperationHint::kSignedSmall ||
- hint == NumberOperationHint::kSigned32) {
- Type* left_feedback_type = TypeOf(node->InputAt(0));
- Type* right_feedback_type = TypeOf(node->InputAt(1));
- // Handle the case when no int32 checks on inputs are necessary (but
- // an overflow check is needed on the output).
- // TODO(jarin) We should not look at the upper bound because the typer
- // could have already baked in some feedback into the upper bound.
- if (BothInputsAre(node, Type::Signed32()) ||
- (BothInputsAre(node, Type::Signed32OrMinusZero()) &&
- GetUpperBound(node)->Is(type_cache_.kSafeInteger))) {
- VisitBinop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32, Type::Signed32());
- } else {
- // If the output's truncation is identify-zeros, we can pass it
- // along. Moreover, if the operation is addition and we know the
- // right-hand side is not minus zero, we do not have to distinguish
- // between 0 and -0.
- IdentifyZeros left_identify_zeros = truncation.identify_zeros();
- if (node->opcode() == IrOpcode::kSpeculativeSafeIntegerAdd &&
- !right_feedback_type->Maybe(Type::MinusZero())) {
- left_identify_zeros = kIdentifyZeros;
- }
- UseInfo left_use =
- CheckedUseInfoAsWord32FromHint(hint, left_identify_zeros);
- // For CheckedInt32Add and CheckedInt32Sub, we don't need to do
- // a minus zero check for the right hand side, since we already
- // know that the left hand side is a proper Signed32 value,
- // potentially guarded by a check.
- UseInfo right_use =
- CheckedUseInfoAsWord32FromHint(hint, kIdentifyZeros);
- VisitBinop(node, left_use, right_use, MachineRepresentation::kWord32,
- Type::Signed32());
- }
- if (lower()) {
- if (truncation.IsUsedAsWord32() ||
- !CanOverflowSigned32(node->op(), left_feedback_type,
- right_feedback_type, graph_zone())) {
- ChangeToPureOp(node, Int32Op(node));
-
- } else {
- ChangeToInt32OverflowOp(node);
- }
- }
- return;
+ DCHECK(hint == NumberOperationHint::kSignedSmall ||
+ hint == NumberOperationHint::kSigned32);
+
+ Type* left_feedback_type = TypeOf(node->InputAt(0));
+ Type* right_feedback_type = TypeOf(node->InputAt(1));
+ // Handle the case when no int32 checks on inputs are necessary (but
+ // an overflow check is needed on the output). Note that we do not
+ // have to do any check if at most one side can be minus zero.
+ if (left_upper->Is(Type::Signed32OrMinusZero()) &&
+ right_upper->Is(Type::Signed32OrMinusZero()) &&
+ (left_upper->Is(Type::Signed32()) ||
+ right_upper->Is(Type::Signed32()))) {
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32, Type::Signed32());
+ } else {
+ // If the output's truncation is identify-zeros, we can pass it
+ // along. Moreover, if the operation is addition and we know the
+ // right-hand side is not minus zero, we do not have to distinguish
+ // between 0 and -0.
+ IdentifyZeros left_identify_zeros = truncation.identify_zeros();
+ if (node->opcode() == IrOpcode::kSpeculativeSafeIntegerAdd &&
+ !right_feedback_type->Maybe(Type::MinusZero())) {
+ left_identify_zeros = kIdentifyZeros;
+ }
+ UseInfo left_use =
+ CheckedUseInfoAsWord32FromHint(hint, left_identify_zeros);
+ // For CheckedInt32Add and CheckedInt32Sub, we don't need to do
+ // a minus zero check for the right hand side, since we already
+ // know that the left hand side is a proper Signed32 value,
+ // potentially guarded by a check.
+ UseInfo right_use = CheckedUseInfoAsWord32FromHint(hint, kIdentifyZeros);
+ VisitBinop(node, left_use, right_use, MachineRepresentation::kWord32,
+ Type::Signed32());
}
-
- // default case => Float64Add/Sub
- VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(),
- MachineRepresentation::kFloat64, Type::Number());
if (lower()) {
- ChangeToPureOp(node, Float64Op(node));
+ if (truncation.IsUsedAsWord32() ||
+ !CanOverflowSigned32(node->op(), left_feedback_type,
+ right_feedback_type, graph_zone())) {
+ ChangeToPureOp(node, Int32Op(node));
+
+ } else {
+ ChangeToInt32OverflowOp(node);
+ }
}
return;
}
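The restructured VisitSpeculativeIntegerAdditiveOp boils down to the following decision ladder (a paraphrase of the code above, not additional logic):

    1. Both inputs NumberOrOddball and the result unused  -> drop the node.
    2. Both inputs additive-safe integers and the result provably fits in 32
       bits (or only the low 32 bits are observed)        -> pure Int32Add/Sub.
    3. Otherwise rely on the SignedSmall/Signed32 feedback: truncate both
       inputs to Word32 (checked where minus zero matters) and emit either a
       pure Int32 op or its overflow-checked variant, depending on what
       CanOverflowSigned32 reports.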
@@ -2492,6 +2491,12 @@ class RepresentationSelector {
SetOutput(node, MachineRepresentation::kTaggedPointer);
return;
}
+ case IrOpcode::kLoadFieldByIndex: {
+ if (truncation.IsUnused()) return VisitUnused(node);
+ VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
+ MachineRepresentation::kTagged);
+ return;
+ }
case IrOpcode::kLoadField: {
if (truncation.IsUnused()) return VisitUnused(node);
FieldAccess access = FieldAccessOf(node->op());
@@ -2572,9 +2577,19 @@ class RepresentationSelector {
return;
}
case IrOpcode::kTransitionAndStoreElement: {
+ Type* value_type = TypeOf(node->InputAt(2));
+
ProcessInput(node, 0, UseInfo::AnyTagged()); // array
ProcessInput(node, 1, UseInfo::TruncatingWord32()); // index
ProcessInput(node, 2, UseInfo::AnyTagged()); // value
+
+ if (value_type->Is(Type::SignedSmall())) {
+ if (lower()) {
+ NodeProperties::ChangeOp(node,
+ simplified()->StoreSignedSmallElement());
+ }
+ }
+
ProcessRemainingInputs(node, 3);
SetOutput(node, MachineRepresentation::kNone);
return;
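The new TransitionAndStoreElement handling peeks at the typer's result for the stored value: when it is provably a SignedSmall, the lowering phase swaps in StoreSignedSmallElement, on the reasoning that a Smi value can never trigger the double/object transition paths of the generic store. Schematically (illustrative, not extra code):

    TransitionAndStoreElement(array, index, value : SignedSmall)
      =>  StoreSignedSmallElement(array, index, value)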
@@ -2659,14 +2674,57 @@ class RepresentationSelector {
if (lower()) DeferReplacement(node, node->InputAt(0));
return;
}
+ case IrOpcode::kObjectIsArrayBufferView: {
+ // TODO(turbofan): Introduce a Type::ArrayBufferView?
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
+ return;
+ }
case IrOpcode::kObjectIsCallable: {
VisitObjectIs(node, Type::Callable(), lowering);
return;
}
+ case IrOpcode::kObjectIsConstructor: {
+ // TODO(turbofan): Introduce a Type::Constructor?
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
+ return;
+ }
case IrOpcode::kObjectIsDetectableCallable: {
VisitObjectIs(node, Type::DetectableCallable(), lowering);
return;
}
+ case IrOpcode::kObjectIsMinusZero: {
+ Type* const input_type = GetUpperBound(node->InputAt(0));
+ if (input_type->Is(Type::MinusZero())) {
+ VisitUnop(node, UseInfo::None(), MachineRepresentation::kBit);
+ if (lower()) {
+ DeferReplacement(node, lowering->jsgraph()->Int32Constant(1));
+ }
+ } else if (!input_type->Maybe(Type::MinusZero())) {
+ VisitUnop(node, UseInfo::Any(), MachineRepresentation::kBit);
+ if (lower()) {
+ DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
+ }
+ } else if (input_type->Is(Type::Number())) {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kBit);
+ if (lower()) {
+ // ObjectIsMinusZero(x:kRepFloat64)
+ // => Float64Equal(Float64Div(1.0,x),-Infinity)
+ Node* const input = node->InputAt(0);
+ node->ReplaceInput(
+ 0, jsgraph_->graph()->NewNode(
+ lowering->machine()->Float64Div(),
+ lowering->jsgraph()->Float64Constant(1.0), input));
+ node->AppendInput(jsgraph_->zone(),
+ jsgraph_->Float64Constant(
+ -std::numeric_limits<double>::infinity()));
+ NodeProperties::ChangeOp(node, lowering->machine()->Float64Equal());
+ }
+ } else {
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
+ }
+ return;
+ }
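
The replacement graph above encodes the classic reciprocal trick: among all
doubles, only -0.0 divides 1.0 into -Infinity. A standalone check of that
identity on IEEE-754 hardware, independent of the compiler graph:

    #include <cassert>
    #include <limits>

    int main() {
      const double neg_inf = -std::numeric_limits<double>::infinity();
      assert(1.0 / -0.0 == neg_inf);  // only -0.0 maps to -Infinity
      assert(1.0 / 0.0 != neg_inf);   // +0.0 maps to +Infinity
      assert(1.0 / -1.0 != neg_inf);  // finite negatives stay finite
      return 0;
    }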
case IrOpcode::kObjectIsNaN: {
Type* const input_type = GetUpperBound(node->InputAt(0));
if (input_type->Is(Type::NaN())) {
@@ -2734,7 +2792,13 @@ class RepresentationSelector {
MachineRepresentation::kTaggedSigned);
return;
}
- case IrOpcode::kNewUnmappedArgumentsElements: {
+ case IrOpcode::kNewDoubleElements:
+ case IrOpcode::kNewSmiOrObjectElements: {
+ VisitUnop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kTaggedPointer);
+ return;
+ }
+ case IrOpcode::kNewArgumentsElements: {
VisitBinop(node, UseInfo::PointerInt(), UseInfo::TaggedSigned(),
MachineRepresentation::kTaggedPointer);
return;
@@ -2804,7 +2868,6 @@ class RepresentationSelector {
// Eliminate MapGuard nodes here.
return VisitUnused(node);
case IrOpcode::kCheckMaps:
- case IrOpcode::kCheckMapValue:
case IrOpcode::kTransitionElementsKind: {
VisitInputs(node);
return SetOutput(node, MachineRepresentation::kNone);
@@ -2869,15 +2932,11 @@ class RepresentationSelector {
// Assume the output is tagged.
return SetOutput(node, MachineRepresentation::kTagged);
- case IrOpcode::kLookupHashStorageIndex:
- VisitInputs(node);
- return SetOutput(node, MachineRepresentation::kTaggedSigned);
-
- case IrOpcode::kLoadHashMapValue:
- ProcessInput(node, 0, UseInfo::AnyTagged()); // table
- ProcessInput(node, 1, UseInfo::TruncatingWord32()); // index
- ProcessRemainingInputs(node, 2);
- return SetOutput(node, MachineRepresentation::kTagged);
+ case IrOpcode::kFindOrderedHashMapEntry: {
+ VisitBinop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedSigned);
+ return;
+ }
// Operators with all inputs tagged and no or tagged output have uniform
// handling.
@@ -2900,6 +2959,7 @@ class RepresentationSelector {
case IrOpcode::kOsrValue:
case IrOpcode::kArgumentsElementsState:
case IrOpcode::kArgumentsLengthState:
+ case IrOpcode::kRuntimeAbort:
// All JavaScript operators except JSToNumber have uniform handling.
#define OPCODE_CASE(name) case IrOpcode::k##name:
JS_SIMPLE_BINOP_LIST(OPCODE_CASE)
@@ -3125,7 +3185,7 @@ void SimplifiedLowering::DoJSToNumberTruncatesToFloat64(
edge.from()->ReplaceUses(control);
edge.from()->Kill();
} else {
- DCHECK(edge.from()->opcode() != IrOpcode::kIfException);
+ DCHECK_NE(IrOpcode::kIfException, edge.from()->opcode());
edge.UpdateTo(control);
}
} else if (NodeProperties::IsEffectEdge(edge)) {
@@ -3208,7 +3268,7 @@ void SimplifiedLowering::DoJSToNumberTruncatesToWord32(
edge.from()->ReplaceUses(control);
edge.from()->Kill();
} else {
- DCHECK(edge.from()->opcode() != IrOpcode::kIfException);
+ DCHECK_NE(IrOpcode::kIfException, edge.from()->opcode());
edge.UpdateTo(control);
}
} else if (NodeProperties::IsEffectEdge(edge)) {
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index c204a40a18..0d2333e126 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -240,29 +240,19 @@ CheckTaggedInputMode CheckTaggedInputModeOf(const Operator* op) {
return OpParameter<CheckTaggedInputMode>(op);
}
-std::ostream& operator<<(std::ostream& os, GrowFastElementsFlags flags) {
- bool empty = true;
- if (flags & GrowFastElementsFlag::kArrayObject) {
- os << "ArrayObject";
- empty = false;
- }
- if (flags & GrowFastElementsFlag::kDoubleElements) {
- if (!empty) os << "|";
- os << "DoubleElements";
- empty = false;
- }
- if (flags & GrowFastElementsFlag::kHoleyElements) {
- if (!empty) os << "|";
- os << "HoleyElements";
- empty = false;
+std::ostream& operator<<(std::ostream& os, GrowFastElementsMode mode) {
+ switch (mode) {
+ case GrowFastElementsMode::kDoubleElements:
+ return os << "DoubleElements";
+ case GrowFastElementsMode::kSmiOrObjectElements:
+ return os << "SmiOrObjectElements";
}
- if (empty) os << "None";
- return os;
+ UNREACHABLE();
}
-GrowFastElementsFlags GrowFastElementsFlagsOf(const Operator* op) {
+GrowFastElementsMode GrowFastElementsModeOf(const Operator* op) {
DCHECK_EQ(IrOpcode::kMaybeGrowFastElements, op->opcode());
- return OpParameter<GrowFastElementsFlags>(op);
+ return OpParameter<GrowFastElementsMode>(op);
}
bool operator==(ElementsTransition const& lhs, ElementsTransition const& rhs) {
@@ -338,12 +328,12 @@ std::ostream& operator<<(std::ostream& os,
} // namespace
Handle<Map> DoubleMapParameterOf(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kTransitionAndStoreElement);
+ DCHECK_EQ(IrOpcode::kTransitionAndStoreElement, op->opcode());
return OpParameter<TransitionAndStoreElementParameters>(op).double_map();
}
Handle<Map> FastMapParameterOf(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kTransitionAndStoreElement);
+ DCHECK_EQ(IrOpcode::kTransitionAndStoreElement, op->opcode());
return OpParameter<TransitionAndStoreElementParameters>(op).fast_map();
}
@@ -407,6 +397,10 @@ bool operator!=(AllocateParameters const& lhs, AllocateParameters const& rhs) {
}
PretenureFlag PretenureFlagOf(const Operator* op) {
+ if (op->opcode() == IrOpcode::kNewDoubleElements ||
+ op->opcode() == IrOpcode::kNewSmiOrObjectElements) {
+ return OpParameter<PretenureFlag>(op);
+ }
DCHECK_EQ(IrOpcode::kAllocate, op->opcode());
return OpParameter<AllocateParameters>(op).pretenure();
}
@@ -417,10 +411,15 @@ Type* AllocateTypeOf(const Operator* op) {
}
UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kStringFromCodePoint);
+ DCHECK_EQ(IrOpcode::kStringFromCodePoint, op->opcode());
return OpParameter<UnicodeEncoding>(op);
}
+BailoutReason BailoutReasonOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kRuntimeAbort, op->opcode());
+ return OpParameter<BailoutReason>(op);
+}
+
#define PURE_OP_LIST(V) \
V(BooleanNot, Operator::kNoProperties, 1, 0) \
V(NumberEqual, Operator::kCommutative, 2, 0) \
@@ -500,8 +499,11 @@ UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
V(TruncateTaggedPointerToBit, Operator::kNoProperties, 1, 0) \
V(TruncateTaggedToWord32, Operator::kNoProperties, 1, 0) \
V(TruncateTaggedToFloat64, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsArrayBufferView, Operator::kNoProperties, 1, 0) \
V(ObjectIsCallable, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsConstructor, Operator::kNoProperties, 1, 0) \
V(ObjectIsDetectableCallable, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsMinusZero, Operator::kNoProperties, 1, 0) \
V(ObjectIsNaN, Operator::kNoProperties, 1, 0) \
V(ObjectIsNonCallable, Operator::kNoProperties, 1, 0) \
V(ObjectIsNumber, Operator::kNoProperties, 1, 0) \
@@ -588,19 +590,21 @@ struct SimplifiedOperatorGlobalCache final {
};
ArrayBufferWasNeuteredOperator kArrayBufferWasNeutered;
- struct LookupHashStorageIndexOperator final : public Operator {
- LookupHashStorageIndexOperator()
- : Operator(IrOpcode::kLookupHashStorageIndex, Operator::kEliminatable,
- "LookupHashStorageIndex", 2, 1, 1, 1, 1, 0) {}
+ struct FindOrderedHashMapEntryOperator final : public Operator {
+ FindOrderedHashMapEntryOperator()
+ : Operator(IrOpcode::kFindOrderedHashMapEntry, Operator::kEliminatable,
+ "FindOrderedHashMapEntry", 2, 1, 1, 1, 1, 0) {}
};
- LookupHashStorageIndexOperator kLookupHashStorageIndex;
+ FindOrderedHashMapEntryOperator kFindOrderedHashMapEntry;
- struct LoadHashMapValueOperator final : public Operator {
- LoadHashMapValueOperator()
- : Operator(IrOpcode::kLoadHashMapValue, Operator::kEliminatable,
- "LoadHashMapValue", 2, 1, 1, 1, 1, 0) {}
+ struct FindOrderedHashMapEntryForInt32KeyOperator final : public Operator {
+ FindOrderedHashMapEntryForInt32KeyOperator()
+ : Operator(IrOpcode::kFindOrderedHashMapEntryForInt32Key,
+ Operator::kEliminatable,
+ "FindOrderedHashMapEntryForInt32Key", 2, 1, 1, 1, 1, 0) {}
};
- LoadHashMapValueOperator kLoadHashMapValue;
+ FindOrderedHashMapEntryForInt32KeyOperator
+ kFindOrderedHashMapEntryForInt32Key;
struct ArgumentsFrameOperator final : public Operator {
ArgumentsFrameOperator()
@@ -609,14 +613,6 @@ struct SimplifiedOperatorGlobalCache final {
};
ArgumentsFrameOperator kArgumentsFrame;
- struct NewUnmappedArgumentsElementsOperator final : public Operator {
- NewUnmappedArgumentsElementsOperator()
- : Operator(IrOpcode::kNewUnmappedArgumentsElements,
- Operator::kEliminatable, "NewUnmappedArgumentsElements", 2,
- 1, 0, 1, 1, 0) {}
- };
- NewUnmappedArgumentsElementsOperator kNewUnmappedArgumentsElements;
-
template <CheckForMinusZeroMode kMode>
struct ChangeFloat64ToTaggedOperator final
: public Operator1<CheckForMinusZeroMode> {
@@ -700,16 +696,6 @@ struct SimplifiedOperatorGlobalCache final {
CheckedTruncateTaggedToWord32Operator<CheckTaggedInputMode::kNumberOrOddball>
kCheckedTruncateTaggedToWord32NumberOrOddballOperator;
- struct CheckMapValueOperator final : public Operator {
- CheckMapValueOperator()
- : Operator( // --
- IrOpcode::kCheckMapValue, // opcode
- Operator::kNoThrow | Operator::kNoWrite, // flags
- "CheckMapValue", // name
- 2, 1, 1, 0, 1, 0) {} // counts
- };
- CheckMapValueOperator kCheckMapValue;
-
template <CheckFloat64HoleMode kMode>
struct CheckFloat64HoleNaNOperator final
: public Operator1<CheckFloat64HoleMode> {
@@ -734,6 +720,16 @@ struct SimplifiedOperatorGlobalCache final {
};
EnsureWritableFastElementsOperator kEnsureWritableFastElements;
+ struct LoadFieldByIndexOperator final : public Operator {
+ LoadFieldByIndexOperator()
+ : Operator( // --
+ IrOpcode::kLoadFieldByIndex, // opcode
+ Operator::kEliminatable, // flags
+ "LoadFieldByIndex", // name
+ 2, 1, 1, 1, 1, 0) {} // counts
+ };
+ LoadFieldByIndexOperator kLoadFieldByIndex;
+
#define SPECULATIVE_NUMBER_BINOP(Name) \
template <NumberOperationHint kHint> \
struct Name##Operator final : public Operator1<NumberOperationHint> { \
@@ -786,12 +782,20 @@ PURE_OP_LIST(GET_FROM_CACHE)
CHECKED_OP_LIST(GET_FROM_CACHE)
GET_FROM_CACHE(ArrayBufferWasNeutered)
GET_FROM_CACHE(ArgumentsFrame)
-GET_FROM_CACHE(LookupHashStorageIndex)
-GET_FROM_CACHE(LoadHashMapValue)
-GET_FROM_CACHE(CheckMapValue)
-GET_FROM_CACHE(NewUnmappedArgumentsElements)
+GET_FROM_CACHE(FindOrderedHashMapEntry)
+GET_FROM_CACHE(FindOrderedHashMapEntryForInt32Key)
+GET_FROM_CACHE(LoadFieldByIndex)
#undef GET_FROM_CACHE
+const Operator* SimplifiedOperatorBuilder::RuntimeAbort(BailoutReason reason) {
+ return new (zone()) Operator1<BailoutReason>( // --
+ IrOpcode::kRuntimeAbort, // opcode
+ Operator::kNoThrow | Operator::kNoDeopt, // flags
+ "RuntimeAbort", // name
+ 0, 1, 1, 0, 1, 0, // counts
+ reason); // parameter
+}
+
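
For readers decoding the integer counts in these Operator constructors:
assuming the usual V8 ordering of value/effect/control input counts followed
by value/effect/control output counts, the RuntimeAbort instance reads as:

    // 0, 1, 1, 0, 1, 0  ==>
    //   0 value inputs (the BailoutReason travels as an operator parameter),
    //   1 effect input, 1 control input,
    //   0 value outputs, 1 effect output, 0 control outputs.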
const Operator* SimplifiedOperatorBuilder::ChangeFloat64ToTagged(
CheckForMinusZeroMode mode) {
switch (mode) {
@@ -912,13 +916,13 @@ const Operator* SimplifiedOperatorBuilder::EnsureWritableFastElements() {
}
const Operator* SimplifiedOperatorBuilder::MaybeGrowFastElements(
- GrowFastElementsFlags flags) {
- return new (zone()) Operator1<GrowFastElementsFlags>( // --
- IrOpcode::kMaybeGrowFastElements, // opcode
- Operator::kNoThrow, // flags
- "MaybeGrowFastElements", // name
- 4, 1, 1, 1, 1, 0, // counts
- flags); // parameter
+ GrowFastElementsMode mode) {
+ return new (zone()) Operator1<GrowFastElementsMode>( // --
+ IrOpcode::kMaybeGrowFastElements, // opcode
+ Operator::kNoThrow, // flags
+ "MaybeGrowFastElements", // name
+ 4, 1, 1, 1, 1, 0, // counts
+ mode); // parameter
}
const Operator* SimplifiedOperatorBuilder::TransitionElementsKind(
@@ -967,15 +971,45 @@ const Operator* SimplifiedOperatorBuilder::ArgumentsLength(
}
int FormalParameterCountOf(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kArgumentsLength);
+ DCHECK_EQ(IrOpcode::kArgumentsLength, op->opcode());
return OpParameter<ArgumentsLengthParameters>(op).formal_parameter_count;
}
bool IsRestLengthOf(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kArgumentsLength);
+ DCHECK_EQ(IrOpcode::kArgumentsLength, op->opcode());
return OpParameter<ArgumentsLengthParameters>(op).is_rest_length;
}
+const Operator* SimplifiedOperatorBuilder::NewDoubleElements(
+ PretenureFlag pretenure) {
+ return new (zone()) Operator1<PretenureFlag>( // --
+ IrOpcode::kNewDoubleElements, // opcode
+ Operator::kEliminatable, // flags
+ "NewDoubleElements", // name
+ 1, 1, 1, 1, 1, 0, // counts
+ pretenure); // parameter
+}
+
+const Operator* SimplifiedOperatorBuilder::NewSmiOrObjectElements(
+ PretenureFlag pretenure) {
+ return new (zone()) Operator1<PretenureFlag>( // --
+ IrOpcode::kNewSmiOrObjectElements, // opcode
+ Operator::kEliminatable, // flags
+ "NewSmiOrObjectElements", // name
+ 1, 1, 1, 1, 1, 0, // counts
+ pretenure); // parameter
+}
+
+const Operator* SimplifiedOperatorBuilder::NewArgumentsElements(
+ int mapped_count) {
+ return new (zone()) Operator1<int>( // --
+ IrOpcode::kNewArgumentsElements, // opcode
+ Operator::kEliminatable, // flags
+ "NewArgumentsElements", // name
+ 2, 1, 0, 1, 1, 0, // counts
+ mapped_count); // parameter
+}
+
const Operator* SimplifiedOperatorBuilder::Allocate(Type* type,
PretenureFlag pretenure) {
return new (zone()) Operator1<AllocateParameters>(
@@ -1044,6 +1078,12 @@ const Operator* SimplifiedOperatorBuilder::TransitionAndStoreElement(
1, 1, 0, 1, 0, parameters);
}
+const Operator* SimplifiedOperatorBuilder::StoreSignedSmallElement() {
+ return new (zone()) Operator(IrOpcode::kStoreSignedSmallElement,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "StoreSignedSmallElement", 3, 1, 1, 0, 1, 0);
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 8b3566ff56..6d43bcac50 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -162,20 +162,18 @@ ZoneHandleSet<Map> const& CompareMapsParametersOf(Operator const*)
WARN_UNUSED_RESULT;
// A descriptor for growing elements backing stores.
-enum class GrowFastElementsFlag : uint8_t {
- kNone = 0u,
- kArrayObject = 1u << 0, // Update JSArray::length field.
- kHoleyElements = 1u << 1, // Backing store is holey.
- kDoubleElements = 1u << 2, // Backing store contains doubles.
+enum class GrowFastElementsMode : uint8_t {
+ kDoubleElements,
+ kSmiOrObjectElements
};
-typedef base::Flags<GrowFastElementsFlag> GrowFastElementsFlags;
-DEFINE_OPERATORS_FOR_FLAGS(GrowFastElementsFlags)
+inline size_t hash_value(GrowFastElementsMode mode) {
+ return static_cast<uint8_t>(mode);
+}
-std::ostream& operator<<(std::ostream&, GrowFastElementsFlags);
+std::ostream& operator<<(std::ostream&, GrowFastElementsMode);
-GrowFastElementsFlags GrowFastElementsFlagsOf(const Operator*)
- WARN_UNUSED_RESULT;
+GrowFastElementsMode GrowFastElementsModeOf(const Operator*) WARN_UNUSED_RESULT;
// A descriptor for elements kind transitions.
class ElementsTransition final {
@@ -257,6 +255,8 @@ Type* AllocateTypeOf(const Operator* op) WARN_UNUSED_RESULT;
UnicodeEncoding UnicodeEncodingOf(const Operator*) WARN_UNUSED_RESULT;
+BailoutReason BailoutReasonOf(const Operator* op) WARN_UNUSED_RESULT;
+
// Interface for building simplified operators, which represent the
// medium-level operations of V8, including adding numbers, allocating objects,
// indexing into objects and arrays, etc.
@@ -373,8 +373,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* StringToLowerCaseIntl();
const Operator* StringToUpperCaseIntl();
- const Operator* LookupHashStorageIndex();
- const Operator* LoadHashMapValue();
+ const Operator* FindOrderedHashMapEntry();
+ const Operator* FindOrderedHashMapEntryForInt32Key();
const Operator* SpeculativeToNumber(NumberOperationHint hint);
@@ -402,7 +402,6 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckIf();
const Operator* CheckBounds();
const Operator* CheckMaps(CheckMapsFlags, ZoneHandleSet<Map>);
- const Operator* CheckMapValue();
const Operator* CompareMaps(ZoneHandleSet<Map>);
const Operator* CheckHeapObject();
@@ -436,8 +435,11 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckNotTaggedHole();
const Operator* ConvertTaggedHoleToUndefined();
+ const Operator* ObjectIsArrayBufferView();
const Operator* ObjectIsCallable();
+ const Operator* ObjectIsConstructor();
const Operator* ObjectIsDetectableCallable();
+ const Operator* ObjectIsMinusZero();
const Operator* ObjectIsNaN();
const Operator* ObjectIsNonCallable();
const Operator* ObjectIsNumber();
@@ -451,8 +453,11 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* ArgumentsLength(int formal_parameter_count,
bool is_rest_length);
- // new-unmapped-arguments-elements
- const Operator* NewUnmappedArgumentsElements();
+ const Operator* NewDoubleElements(PretenureFlag);
+ const Operator* NewSmiOrObjectElements(PretenureFlag);
+
+ // new-arguments-elements arguments-frame, arguments-length
+ const Operator* NewArgumentsElements(int mapped_count);
// array-buffer-was-neutered buffer
const Operator* ArrayBufferWasNeutered();
@@ -461,13 +466,14 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* EnsureWritableFastElements();
// maybe-grow-fast-elements object, elements, index, length
- const Operator* MaybeGrowFastElements(GrowFastElementsFlags flags);
+ const Operator* MaybeGrowFastElements(GrowFastElementsMode mode);
// transition-elements-kind object, from-map, to-map
const Operator* TransitionElementsKind(ElementsTransition transition);
const Operator* Allocate(Type* type, PretenureFlag pretenure = NOT_TENURED);
+ const Operator* LoadFieldByIndex();
const Operator* LoadField(FieldAccess const&);
const Operator* StoreField(FieldAccess const&);
@@ -480,6 +486,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
// store-element [base + index], value, only with fast arrays.
const Operator* TransitionAndStoreElement(Handle<Map> double_map,
Handle<Map> fast_map);
+ // store-element [base + index], smi value, only with fast arrays.
+ const Operator* StoreSignedSmallElement();
// load-typed-element buffer, [base + external + index]
const Operator* LoadTypedElement(ExternalArrayType const&);
@@ -487,6 +495,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
// store-typed-element buffer, [base + external + index], value
const Operator* StoreTypedElement(ExternalArrayType const&);
+ // Abort (for terminating execution on internal error).
+ const Operator* RuntimeAbort(BailoutReason reason);
+
private:
Zone* zone() const { return zone_; }
diff --git a/deps/v8/src/compiler/state-values-utils.cc b/deps/v8/src/compiler/state-values-utils.cc
index 899c91af85..30586f307c 100644
--- a/deps/v8/src/compiler/state-values-utils.cc
+++ b/deps/v8/src/compiler/state-values-utils.cc
@@ -50,7 +50,7 @@ bool StateValuesCache::IsKeysEqualToNode(StateValuesKey* key, Node* node) {
return false;
}
- DCHECK(node->opcode() == IrOpcode::kStateValues);
+ DCHECK_EQ(IrOpcode::kStateValues, node->opcode());
SparseInputMask node_mask = SparseInputMaskOf(node->op());
if (node_mask != key->mask) {
@@ -159,8 +159,8 @@ SparseInputMask::BitMaskType StateValuesCache::FillBufferWithValues(
(*values_idx)++;
}
- DCHECK(*node_count <= StateValuesCache::kMaxInputCount);
- DCHECK(virtual_node_count <= SparseInputMask::kMaxSparseInputs);
+ DCHECK_GE(StateValuesCache::kMaxInputCount, *node_count);
+ DCHECK_GE(SparseInputMask::kMaxSparseInputs, virtual_node_count);
// Add the end marker at the end of the mask.
input_mask |= SparseInputMask::kEndMarker << virtual_node_count;
@@ -232,7 +232,7 @@ namespace {
void CheckTreeContainsValues(Node* tree, Node** values, size_t count,
const BitVector* liveness, int liveness_offset) {
- CHECK_EQ(count, StateValuesAccess(tree).size());
+ DCHECK_EQ(count, StateValuesAccess(tree).size());
int i;
auto access = StateValuesAccess(tree);
@@ -240,12 +240,12 @@ void CheckTreeContainsValues(Node* tree, Node** values, size_t count,
auto itend = access.end();
for (i = 0; it != itend; ++it, ++i) {
if (liveness == nullptr || liveness->Contains(liveness_offset + i)) {
- CHECK((*it).node == values[i]);
+ DCHECK_EQ((*it).node, values[i]);
} else {
- CHECK((*it).node == nullptr);
+ DCHECK_NULL((*it).node);
}
}
- CHECK_EQ(static_cast<size_t>(i), count);
+ DCHECK_EQ(static_cast<size_t>(i), count);
}
} // namespace
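
Several hunks in this file, and in verifier.cc further down, downgrade CHECK
to DCHECK. The distinction, sketched with hypothetical MY_CHECK/MY_DCHECK
macros rather than V8's actual base/logging.h definitions: the CHECK flavor
aborts in every build, while the DCHECK flavor compiles away outside debug
builds, which is why hot verification paths prefer it.

    #include <cstdio>
    #include <cstdlib>

    #define MY_CHECK(cond)                                   \
      do {                                                   \
        if (!(cond)) {                                       \
          std::fprintf(stderr, "check failed: %s\n", #cond); \
          std::abort();                                      \
        }                                                    \
      } while (0)

    #ifdef DEBUG
    #define MY_DCHECK(cond) MY_CHECK(cond)
    #else
    #define MY_DCHECK(cond) ((void)0)  // no code in release builds
    #endif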
@@ -311,21 +311,21 @@ StateValuesAccess::iterator::iterator(Node* node) : current_depth_(0) {
}
SparseInputMask::InputIterator* StateValuesAccess::iterator::Top() {
- DCHECK(current_depth_ >= 0);
- DCHECK(current_depth_ < kMaxInlineDepth);
+ DCHECK_LE(0, current_depth_);
+ DCHECK_GT(kMaxInlineDepth, current_depth_);
return &(stack_[current_depth_]);
}
void StateValuesAccess::iterator::Push(Node* node) {
current_depth_++;
- CHECK(current_depth_ < kMaxInlineDepth);
+ CHECK_GT(kMaxInlineDepth, current_depth_);
stack_[current_depth_] =
SparseInputMaskOf(node->op()).IterateOverInputs(node);
}
void StateValuesAccess::iterator::Pop() {
- DCHECK(current_depth_ >= 0);
+ DCHECK_LE(0, current_depth_);
current_depth_--;
}
diff --git a/deps/v8/src/compiler/store-store-elimination.cc b/deps/v8/src/compiler/store-store-elimination.cc
index 71aa2110bb..8fb7b35020 100644
--- a/deps/v8/src/compiler/store-store-elimination.cc
+++ b/deps/v8/src/compiler/store-store-elimination.cc
@@ -172,7 +172,7 @@ class RedundantStoreFinder final {
// To safely cast an offset from a FieldAccess, which has a potentially wider
// range (namely int).
StoreOffset ToOffset(int offset) {
- CHECK(0 <= offset);
+ CHECK_LE(0, offset);
return static_cast<StoreOffset>(offset);
}
@@ -560,6 +560,10 @@ bool UnobservableStore::operator<(const UnobservableStore other) const {
return (id_ < other.id_) || (id_ == other.id_ && offset_ < other.offset_);
}
+#undef TRACE
+#undef CHECK_EXTRA
+#undef DCHECK_EXTRA
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index edf1cb1003..2590342d2e 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -287,8 +287,11 @@ class Typer::Visitor : public Reducer {
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
+ static Type* ObjectIsArrayBufferView(Type*, Typer*);
static Type* ObjectIsCallable(Type*, Typer*);
+ static Type* ObjectIsConstructor(Type*, Typer*);
static Type* ObjectIsDetectableCallable(Type*, Typer*);
+ static Type* ObjectIsMinusZero(Type*, Typer*);
static Type* ObjectIsNaN(Type*, Typer*);
static Type* ObjectIsNonCallable(Type*, Typer*);
static Type* ObjectIsNumber(Type*, Typer*);
@@ -414,7 +417,7 @@ Type* Typer::Visitor::FalsifyUndefined(ComparisonOutcome outcome, Typer* t) {
: t->singleton_false_;
}
// Type should be non-empty, so we know it should be true.
- DCHECK((outcome & kComparisonTrue) != 0);
+ DCHECK_NE(0, outcome & kComparisonTrue);
return t->singleton_true_;
}
@@ -509,18 +512,36 @@ Type* Typer::Visitor::ToString(Type* type, Typer* t) {
// Type checks.
+Type* Typer::Visitor::ObjectIsArrayBufferView(Type* type, Typer* t) {
+ // TODO(turbofan): Introduce a Type::ArrayBufferView?
+ if (!type->Maybe(Type::OtherObject())) return t->singleton_false_;
+ return Type::Boolean();
+}
+
Type* Typer::Visitor::ObjectIsCallable(Type* type, Typer* t) {
if (type->Is(Type::Callable())) return t->singleton_true_;
if (!type->Maybe(Type::Callable())) return t->singleton_false_;
return Type::Boolean();
}
+Type* Typer::Visitor::ObjectIsConstructor(Type* type, Typer* t) {
+ // TODO(turbofan): Introduce a Type::Constructor?
+ if (!type->Maybe(Type::Callable())) return t->singleton_false_;
+ return Type::Boolean();
+}
+
Type* Typer::Visitor::ObjectIsDetectableCallable(Type* type, Typer* t) {
if (type->Is(Type::DetectableCallable())) return t->singleton_true_;
if (!type->Maybe(Type::DetectableCallable())) return t->singleton_false_;
return Type::Boolean();
}
+Type* Typer::Visitor::ObjectIsMinusZero(Type* type, Typer* t) {
+ if (type->Is(Type::MinusZero())) return t->singleton_true_;
+ if (!type->Maybe(Type::MinusZero())) return t->singleton_false_;
+ return Type::Boolean();
+}
+
Type* Typer::Visitor::ObjectIsNaN(Type* type, Typer* t) {
if (type->Is(Type::NaN())) return t->singleton_true_;
if (!type->Maybe(Type::NaN())) return t->singleton_false_;
@@ -720,7 +741,7 @@ Type* Typer::Visitor::TypeInductionVariablePhi(Node* node) {
increment_min = increment_type->Min();
increment_max = increment_type->Max();
} else {
- DCHECK(arithmetic_type == InductionVariable::ArithmeticType::kSubtraction);
+ DCHECK_EQ(InductionVariable::ArithmeticType::kSubtraction, arithmetic_type);
increment_min = -increment_type->Max();
increment_max = -increment_type->Min();
}
@@ -1488,6 +1509,9 @@ Type* Typer::Visitor::JSCallTyper(Type* fun, Typer* t) {
case kMapIteratorNext:
case kSetIteratorNext:
return Type::OtherObject();
+ case kTypedArrayToStringTag:
+ return Type::Union(Type::InternalizedString(), Type::Undefined(),
+ t->zone());
// Array functions.
case kArrayIsArray:
@@ -1525,11 +1549,16 @@ Type* Typer::Visitor::JSCallTyper(Type* fun, Typer* t) {
case kArrayUnshift:
return t->cache_.kPositiveSafeInteger;
+ // ArrayBuffer functions.
+ case kArrayBufferIsView:
+ return Type::Boolean();
+
// Object functions.
case kObjectAssign:
return Type::Receiver();
case kObjectCreate:
return Type::OtherObject();
+ case kObjectIs:
case kObjectHasOwnProperty:
case kObjectIsPrototypeOf:
return Type::Boolean();
@@ -1672,6 +1701,9 @@ Type* Typer::Visitor::TypeJSConvertReceiver(Node* node) {
return Type::Receiver();
}
+Type* Typer::Visitor::TypeJSForInEnumerate(Node* node) {
+ return Type::OtherInternal();
+}
Type* Typer::Visitor::TypeJSForInNext(Node* node) {
return Type::Union(Type::String(), Type::Undefined(), zone());
@@ -1864,10 +1896,6 @@ Type* Typer::Visitor::TypeCheckMaps(Node* node) {
Type* Typer::Visitor::TypeCompareMaps(Node* node) { return Type::Boolean(); }
-Type* Typer::Visitor::TypeCheckMapValue(Node* node) {
- UNREACHABLE();
-}
-
Type* Typer::Visitor::TypeCheckNumber(Node* node) {
return typer_->operation_typer_.CheckNumber(Operand(node, 0));
}
@@ -1916,6 +1944,10 @@ Type* Typer::Visitor::TypeAllocate(Node* node) {
return AllocateTypeOf(node->op());
}
+Type* Typer::Visitor::TypeLoadFieldByIndex(Node* node) {
+ return Type::NonInternal();
+}
+
Type* Typer::Visitor::TypeLoadField(Node* node) {
return FieldAccessOf(node->op()).type;
}
@@ -1947,18 +1979,32 @@ Type* Typer::Visitor::TypeTransitionAndStoreElement(Node* node) {
UNREACHABLE();
}
+Type* Typer::Visitor::TypeStoreSignedSmallElement(Node* node) { UNREACHABLE(); }
+
Type* Typer::Visitor::TypeStoreTypedElement(Node* node) {
UNREACHABLE();
}
+Type* Typer::Visitor::TypeObjectIsArrayBufferView(Node* node) {
+ return TypeUnaryOp(node, ObjectIsArrayBufferView);
+}
+
Type* Typer::Visitor::TypeObjectIsCallable(Node* node) {
return TypeUnaryOp(node, ObjectIsCallable);
}
+Type* Typer::Visitor::TypeObjectIsConstructor(Node* node) {
+ return TypeUnaryOp(node, ObjectIsConstructor);
+}
+
Type* Typer::Visitor::TypeObjectIsDetectableCallable(Node* node) {
return TypeUnaryOp(node, ObjectIsDetectableCallable);
}
+Type* Typer::Visitor::TypeObjectIsMinusZero(Node* node) {
+ return TypeUnaryOp(node, ObjectIsMinusZero);
+}
+
Type* Typer::Visitor::TypeObjectIsNaN(Node* node) {
return TypeUnaryOp(node, ObjectIsNaN);
}
@@ -2001,7 +2047,15 @@ Type* Typer::Visitor::TypeArgumentsFrame(Node* node) {
return Type::ExternalPointer();
}
-Type* Typer::Visitor::TypeNewUnmappedArgumentsElements(Node* node) {
+Type* Typer::Visitor::TypeNewDoubleElements(Node* node) {
+ return Type::OtherInternal();
+}
+
+Type* Typer::Visitor::TypeNewSmiOrObjectElements(Node* node) {
+ return Type::OtherInternal();
+}
+
+Type* Typer::Visitor::TypeNewArgumentsElements(Node* node) {
return Type::OtherInternal();
}
@@ -2009,14 +2063,16 @@ Type* Typer::Visitor::TypeArrayBufferWasNeutered(Node* node) {
return Type::Boolean();
}
-Type* Typer::Visitor::TypeLookupHashStorageIndex(Node* node) {
- return Type::SignedSmall();
+Type* Typer::Visitor::TypeFindOrderedHashMapEntry(Node* node) {
+ return Type::Range(-1.0, FixedArray::kMaxLength, zone());
}
-Type* Typer::Visitor::TypeLoadHashMapValue(Node* node) {
- return Type::NonInternal();
+Type* Typer::Visitor::TypeFindOrderedHashMapEntryForInt32Key(Node* node) {
+ return Type::Range(-1.0, FixedArray::kMaxLength, zone());
}
+Type* Typer::Visitor::TypeRuntimeAbort(Node* node) { UNREACHABLE(); }
+
// Heap constants.
Type* Typer::Visitor::TypeConstant(Handle<Object> value) {
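
The widened Range(-1.0, FixedArray::kMaxLength) a few hunks up documents the
"-1 means not found" sentinel that hash-table lookups conventionally return;
the old SignedSmall type admitted the same values but said nothing about the
shape. An illustrative lookup with the same convention (generic C++, not the
actual OrderedHashMap layout):

    #include <cstddef>
    #include <utility>
    #include <vector>

    // Returns the entry index for `key`, or -1 when absent -- exactly the
    // value range the typer now assigns to FindOrderedHashMapEntry.
    int FindEntry(const std::vector<std::pair<int, int>>& table, int key) {
      for (std::size_t i = 0; i < table.size(); ++i) {
        if (table[i].first == key) return static_cast<int>(i);
      }
      return -1;
    }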
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index ffab6d26a0..568c606b2c 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -236,7 +236,7 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
- case JS_PROMISE_CAPABILITY_TYPE:
+ case PROMISE_CAPABILITY_TYPE:
case JS_PROMISE_TYPE:
case WASM_MODULE_TYPE:
case WASM_INSTANCE_TYPE:
@@ -276,6 +276,7 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case MODULE_TYPE:
case MODULE_INFO_ENTRY_TYPE:
case CELL_TYPE:
+ case BIGINT_TYPE:
return kOtherInternal;
// Remaining instance types are unsupported for now. If any of them do
@@ -580,7 +581,7 @@ bool UnionType::Wellformed() {
// 5. No element (except the bitset) is a subtype of any other.
// 6. If there is a range, then the bitset type does not contain
// plain number bits.
- DCHECK(this->Length() >= 2); // (1)
+ DCHECK_LE(2, this->Length()); // (1)
DCHECK(this->Get(0)->IsBitset()); // (2a)
for (int i = 0; i < this->Length(); ++i) {
@@ -890,7 +891,7 @@ int Type::AddToUnion(Type* type, UnionType* result, int size, Zone* zone) {
Type* Type::NormalizeUnion(Type* union_type, int size, Zone* zone) {
UnionType* unioned = union_type->AsUnion();
- DCHECK(size >= 1);
+ DCHECK_LE(1, size);
DCHECK(unioned->Get(0)->IsBitset());
// If the union has just one element, return it.
if (size == 1) {
@@ -970,7 +971,7 @@ void BitsetType::Print(std::ostream& os, // NOLINT
bits -= subset;
}
}
- DCHECK(bits == 0);
+ DCHECK_EQ(0, bits);
os << ")";
}
diff --git a/deps/v8/src/compiler/value-numbering-reducer.cc b/deps/v8/src/compiler/value-numbering-reducer.cc
index 2fd646700f..5b253ae3d0 100644
--- a/deps/v8/src/compiler/value-numbering-reducer.cc
+++ b/deps/v8/src/compiler/value-numbering-reducer.cc
@@ -29,8 +29,8 @@ Reduction ValueNumberingReducer::Reduce(Node* node) {
const size_t hash = NodeProperties::HashCode(node);
if (!entries_) {
- DCHECK(size_ == 0);
- DCHECK(capacity_ == 0);
+ DCHECK_EQ(0, size_);
+ DCHECK_EQ(0, capacity_);
// Allocate the initial entries and insert the first entry.
capacity_ = kInitialCapacity;
entries_ = temp_zone()->NewArray<Node*>(kInitialCapacity);
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index ef62d04df9..5869a0d491 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -205,9 +205,9 @@ void Verifier::Visitor::Check(Node* node) {
break;
case IrOpcode::kEnd:
// End has no outputs.
- CHECK(node->op()->ValueOutputCount() == 0);
- CHECK(node->op()->EffectOutputCount() == 0);
- CHECK(node->op()->ControlOutputCount() == 0);
+ CHECK_EQ(0, node->op()->ValueOutputCount());
+ CHECK_EQ(0, node->op()->EffectOutputCount());
+ CHECK_EQ(0, node->op()->ControlOutputCount());
// All inputs are graph terminators.
for (const Node* input : node->inputs()) {
CHECK(IrOpcode::IsGraphTerminator(input->opcode()));
@@ -726,15 +726,18 @@ void Verifier::Visitor::Check(Node* node) {
CheckTypeIs(node, Type::Any());
break;
- case IrOpcode::kJSForInPrepare: {
+ case IrOpcode::kJSForInEnumerate:
+ // Any -> OtherInternal.
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckTypeIs(node, Type::OtherInternal());
+ break;
+ case IrOpcode::kJSForInPrepare:
// TODO(bmeurer): What are the constraints on these?
CheckTypeIs(node, Type::Any());
break;
- }
- case IrOpcode::kJSForInNext: {
+ case IrOpcode::kJSForInNext:
CheckTypeIs(node, Type::Union(Type::Name(), Type::Undefined(), zone));
break;
- }
case IrOpcode::kJSLoadMessage:
case IrOpcode::kJSStoreMessage:
@@ -774,6 +777,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kDebugBreak:
case IrOpcode::kRetain:
case IrOpcode::kUnsafePointerAdd:
+ case IrOpcode::kRuntimeAbort:
CheckNotTyped(node);
break;
@@ -1001,8 +1005,11 @@ void Verifier::Visitor::Check(Node* node) {
CheckTypeIs(node, Type::Boolean());
break;
+ case IrOpcode::kObjectIsArrayBufferView:
case IrOpcode::kObjectIsCallable:
+ case IrOpcode::kObjectIsConstructor:
case IrOpcode::kObjectIsDetectableCallable:
+ case IrOpcode::kObjectIsMinusZero:
case IrOpcode::kObjectIsNaN:
case IrOpcode::kObjectIsNonCallable:
case IrOpcode::kObjectIsNumber:
@@ -1015,11 +1022,13 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::Boolean());
break;
- case IrOpcode::kLookupHashStorageIndex:
+ case IrOpcode::kFindOrderedHashMapEntry:
+ CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::SignedSmall());
break;
- case IrOpcode::kLoadHashMapValue:
- CheckValueInputIs(node, 2, Type::SignedSmall());
+ case IrOpcode::kFindOrderedHashMapEntryForInt32Key:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckValueInputIs(node, 1, Type::Signed32());
CheckTypeIs(node, Type::SignedSmall());
break;
case IrOpcode::kArgumentsLength:
@@ -1029,7 +1038,13 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kArgumentsFrame:
CheckTypeIs(node, Type::ExternalPointer());
break;
- case IrOpcode::kNewUnmappedArgumentsElements:
+ case IrOpcode::kNewDoubleElements:
+ case IrOpcode::kNewSmiOrObjectElements:
+ CheckValueInputIs(node, 0,
+ Type::Range(0.0, FixedArray::kMaxLength, zone));
+ CheckTypeIs(node, Type::OtherInternal());
+ break;
+ case IrOpcode::kNewArgumentsElements:
CheckValueInputIs(node, 0, Type::ExternalPointer());
CheckValueInputIs(node, 1, Type::Range(-Code::kMaxArguments,
Code::kMaxArguments, zone));
@@ -1196,11 +1211,6 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::Boolean());
break;
- case IrOpcode::kCheckMapValue:
- CheckValueInputIs(node, 0, Type::Any());
- CheckValueInputIs(node, 1, Type::Any());
- CheckNotTyped(node);
- break;
case IrOpcode::kCheckNumber:
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::Number());
@@ -1256,6 +1266,11 @@ void Verifier::Visitor::Check(Node* node) {
CheckTypeIs(node, Type::NonInternal());
break;
+ case IrOpcode::kLoadFieldByIndex:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckValueInputIs(node, 1, Type::SignedSmall());
+ CheckTypeIs(node, Type::NonInternal());
+ break;
case IrOpcode::kLoadField:
// Object -> fieldtype
// TODO(rossberg): activate once machine ops are typed.
@@ -1287,6 +1302,10 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kTransitionAndStoreElement:
CheckNotTyped(node);
break;
+ case IrOpcode::kStoreSignedSmallElement:
+ CheckValueInputIs(node, 1, Type::SignedSmall());
+ CheckNotTyped(node);
+ break;
case IrOpcode::kStoreTypedElement:
CheckNotTyped(node);
break;
@@ -1755,8 +1774,8 @@ void ScheduleVerifier::Run(Schedule* schedule) {
// static
void Verifier::VerifyNode(Node* node) {
- CHECK_EQ(OperatorProperties::GetTotalInputCount(node->op()),
- node->InputCount());
+ DCHECK_EQ(OperatorProperties::GetTotalInputCount(node->op()),
+ node->InputCount());
// If this node has no effect or no control outputs,
// we check that none of its uses are effect or control inputs.
bool check_no_control = node->op()->ControlOutputCount() == 0;
@@ -1766,35 +1785,35 @@ void Verifier::VerifyNode(Node* node) {
if (check_no_effect || check_no_control) {
for (Edge edge : node->use_edges()) {
Node* const user = edge.from();
- CHECK(!user->IsDead());
+ DCHECK(!user->IsDead());
if (NodeProperties::IsControlEdge(edge)) {
- CHECK(!check_no_control);
+ DCHECK(!check_no_control);
} else if (NodeProperties::IsEffectEdge(edge)) {
- CHECK(!check_no_effect);
+ DCHECK(!check_no_effect);
effect_edges++;
} else if (NodeProperties::IsFrameStateEdge(edge)) {
- CHECK(!check_no_frame_state);
+ DCHECK(!check_no_frame_state);
}
}
}
// Frame state input should be a frame state (or sentinel).
if (OperatorProperties::GetFrameStateInputCount(node->op()) > 0) {
Node* input = NodeProperties::GetFrameStateInput(node);
- CHECK(input->opcode() == IrOpcode::kFrameState ||
- input->opcode() == IrOpcode::kStart ||
- input->opcode() == IrOpcode::kDead);
+ DCHECK(input->opcode() == IrOpcode::kFrameState ||
+ input->opcode() == IrOpcode::kStart ||
+ input->opcode() == IrOpcode::kDead);
}
// Effect inputs should be effect-producing nodes (or sentinels).
for (int i = 0; i < node->op()->EffectInputCount(); i++) {
Node* input = NodeProperties::GetEffectInput(node, i);
- CHECK(input->op()->EffectOutputCount() > 0 ||
- input->opcode() == IrOpcode::kDead);
+ DCHECK(input->op()->EffectOutputCount() > 0 ||
+ input->opcode() == IrOpcode::kDead);
}
// Control inputs should be control-producing nodes (or sentinels).
for (int i = 0; i < node->op()->ControlInputCount(); i++) {
Node* input = NodeProperties::GetControlInput(node, i);
- CHECK(input->op()->ControlOutputCount() > 0 ||
- input->opcode() == IrOpcode::kDead);
+ DCHECK(input->op()->ControlOutputCount() > 0 ||
+ input->opcode() == IrOpcode::kDead);
}
}
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index fe90492e73..bc731b2bb8 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -34,10 +34,14 @@
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-text.h"
+namespace v8 {
+namespace internal {
+namespace compiler {
+
// TODO(titzer): pull WASM_64 up to a common header.
#if !V8_TARGET_ARCH_32_BIT || V8_TARGET_ARCH_X64
#define WASM_64 1
@@ -49,12 +53,10 @@
V8_Fatal(__FILE__, __LINE__, "Unsupported opcode #%d:%s", (opcode), \
wasm::WasmOpcodes::OpcodeName(opcode));
-namespace v8 {
-namespace internal {
-namespace compiler {
-
namespace {
+constexpr uint32_t kBytesPerExceptionValuesArrayElement = 2;
+
void MergeControlToEnd(JSGraph* jsgraph, Node* node) {
Graph* g = jsgraph->graph();
if (g->end()) {
@@ -69,7 +71,8 @@ void MergeControlToEnd(JSGraph* jsgraph, Node* node) {
WasmGraphBuilder::WasmGraphBuilder(
ModuleEnv* env, Zone* zone, JSGraph* jsgraph, Handle<Code> centry_stub,
wasm::FunctionSig* sig,
- compiler::SourcePositionTable* source_position_table)
+ compiler::SourcePositionTable* source_position_table,
+ RuntimeExceptionSupport exception_support)
: zone_(zone),
jsgraph_(jsgraph),
centry_stub_node_(jsgraph_->HeapConstant(centry_stub)),
@@ -79,6 +82,7 @@ WasmGraphBuilder::WasmGraphBuilder(
function_table_sizes_(zone),
cur_buffer_(def_buffer_),
cur_bufsize_(kDefaultBufferSize),
+ runtime_exception_support_(exception_support),
sig_(sig),
source_position_table_(source_position_table) {
for (size_t i = sig->parameter_count(); i > 0 && !has_simd_; --i) {
@@ -190,11 +194,15 @@ Node* WasmGraphBuilder::Int64Constant(int64_t value) {
return jsgraph()->Int64Constant(value);
}
+Node* WasmGraphBuilder::IntPtrConstant(intptr_t value) {
+ return jsgraph()->IntPtrConstant(value);
+}
+
void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position,
Node** effect, Node** control) {
// TODO(mtrofin): "!env_" happens when we generate a wrapper.
// We should factor wrappers separately from wasm codegen.
- if (FLAG_wasm_no_stack_checks || !env_ || !has_runtime_exception_support_) {
+ if (FLAG_wasm_no_stack_checks || !env_ || !runtime_exception_support_) {
return;
}
if (effect == nullptr) effect = effect_;
@@ -830,7 +838,7 @@ Node* WasmGraphBuilder::BranchExpectFalse(Node* cond, Node** true_node,
}
Builtins::Name WasmGraphBuilder::GetBuiltinIdForTrap(wasm::TrapReason reason) {
- if (!has_runtime_exception_support_) {
+ if (runtime_exception_support_ == kNoRuntimeExceptionSupport) {
// We use Builtins::builtin_count as a marker to tell the code generator
// to generate a call to a testing c-function instead of a runtime
// function. This code should only be called from a cctest.
@@ -1098,8 +1106,8 @@ Node* WasmGraphBuilder::BuildChangeEndiannessStore(Node* node,
Node* lowerByte;
Node* higherByte;
- DCHECK(shiftCount > 0);
- DCHECK((shiftCount + 8) % 16 == 0);
+ DCHECK_LT(0, shiftCount);
+ DCHECK_EQ(0, (shiftCount + 8) % 16);
if (valueSizeInBits > 32) {
shiftLower = graph()->NewNode(m->Word64Shl(), value,
@@ -1235,8 +1243,8 @@ Node* WasmGraphBuilder::BuildChangeEndiannessLoad(Node* node,
Node* lowerByte;
Node* higherByte;
- DCHECK(shiftCount > 0);
- DCHECK((shiftCount + 8) % 16 == 0);
+ DCHECK_LT(0, shiftCount);
+ DCHECK_EQ(0, (shiftCount + 8) % 16);
if (valueSizeInBits > 32) {
shiftLower = graph()->NewNode(m->Word64Shl(), value,
@@ -1459,9 +1467,8 @@ Node* WasmGraphBuilder::BuildBitCountingCall(Node* input, ExternalReference ref,
sig_builder.AddParam(MachineType::Pointer());
Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
- Node* args[] = {function, stack_slot_param};
- return BuildCCall(sig_builder.Build(), args);
+ return BuildCCall(sig_builder.Build(), function, stack_slot_param);
}
Node* WasmGraphBuilder::BuildI32Ctz(Node* input) {
@@ -1595,12 +1602,14 @@ Node* WasmGraphBuilder::BuildCFuncInstruction(ExternalReference ref,
*control_);
Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
- Node** args = Buffer(5);
- args[0] = function;
- args[1] = stack_slot_param0;
- int input_count = 1;
- if (input1 != nullptr) {
+ if (input1 == nullptr) {
+ const int input_count = 1;
+ Signature<MachineType>::Builder sig_builder(jsgraph()->zone(), 0,
+ input_count);
+ sig_builder.AddParam(MachineType::Pointer());
+ BuildCCall(sig_builder.Build(), function, stack_slot_param0);
+ } else {
Node* stack_slot_param1 = graph()->NewNode(
jsgraph()->machine()->StackSlot(type.representation()));
const Operator* store_op1 = jsgraph()->machine()->Store(
@@ -1608,17 +1617,15 @@ Node* WasmGraphBuilder::BuildCFuncInstruction(ExternalReference ref,
*effect_ = graph()->NewNode(store_op1, stack_slot_param1,
jsgraph()->Int32Constant(0), input1, *effect_,
*control_);
- args[2] = stack_slot_param1;
- ++input_count;
- }
- Signature<MachineType>::Builder sig_builder(jsgraph()->zone(), 0,
- input_count);
- sig_builder.AddParam(MachineType::Pointer());
- if (input1 != nullptr) {
+ const int input_count = 2;
+ Signature<MachineType>::Builder sig_builder(jsgraph()->zone(), 0,
+ input_count);
sig_builder.AddParam(MachineType::Pointer());
+ sig_builder.AddParam(MachineType::Pointer());
+ BuildCCall(sig_builder.Build(), function, stack_slot_param0,
+ stack_slot_param1);
}
- BuildCCall(sig_builder.Build(), args);
const Operator* load_op = jsgraph()->machine()->Load(type);
@@ -1669,8 +1676,8 @@ Node* WasmGraphBuilder::BuildIntToFloatConversionInstruction(
sig_builder.AddParam(MachineType::Pointer());
sig_builder.AddParam(MachineType::Pointer());
Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
- Node* args[] = {function, stack_slot_param, stack_slot_result};
- BuildCCall(sig_builder.Build(), args);
+ BuildCCall(sig_builder.Build(), function, stack_slot_param,
+ stack_slot_result);
const Operator* load_op = jsgraph()->machine()->Load(result_type);
Node* load =
graph()->NewNode(load_op, stack_slot_result, jsgraph()->Int32Constant(0),
@@ -1769,9 +1776,10 @@ Node* WasmGraphBuilder::BuildFloatToIntConversionInstruction(
sig_builder.AddParam(MachineType::Pointer());
sig_builder.AddParam(MachineType::Pointer());
Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
- Node* args[] = {function, stack_slot_param, stack_slot_result};
ZeroCheck32(wasm::kTrapFloatUnrepresentable,
- BuildCCall(sig_builder.Build(), args), position);
+ BuildCCall(sig_builder.Build(), function, stack_slot_param,
+ stack_slot_result),
+ position);
const Operator* load_op = jsgraph()->machine()->Load(result_type);
Node* load =
graph()->NewNode(load_op, stack_slot_result, jsgraph()->Int32Constant(0),
@@ -1806,28 +1814,161 @@ Node* WasmGraphBuilder::GrowMemory(Node* input) {
return result;
}
-Node* WasmGraphBuilder::Throw(Node* input) {
+uint32_t WasmGraphBuilder::GetExceptionEncodedSize(
+ const wasm::WasmException* exception) const {
+ const wasm::WasmExceptionSig* sig = exception->sig;
+ uint32_t encoded_size = 0;
+ for (size_t i = 0; i < sig->parameter_count(); ++i) {
+ size_t byte_size = size_t(1) << ElementSizeLog2Of(sig->GetParam(i));
+ DCHECK_EQ(byte_size % kBytesPerExceptionValuesArrayElement, 0);
+ DCHECK_LE(1, byte_size / kBytesPerExceptionValuesArrayElement);
+ encoded_size += byte_size / kBytesPerExceptionValuesArrayElement;
+ }
+ return encoded_size;
+}
+
+Node* WasmGraphBuilder::Throw(uint32_t tag,
+ const wasm::WasmException* exception,
+ const Vector<Node*> values) {
SetNeedsStackCheck();
- Node* parameters[] = {BuildChangeInt32ToSmi(input)};
- return BuildCallToRuntime(Runtime::kWasmThrow, parameters,
- arraysize(parameters));
+ uint32_t encoded_size = GetExceptionEncodedSize(exception);
+ Node* create_parameters[] = {
+ BuildChangeUint32ToSmi(ConvertExceptionTagToRuntimeId(tag)),
+ BuildChangeUint32ToSmi(Uint32Constant(encoded_size))};
+ BuildCallToRuntime(Runtime::kWasmThrowCreate, create_parameters,
+ arraysize(create_parameters));
+ uint32_t index = 0;
+ const wasm::WasmExceptionSig* sig = exception->sig;
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ for (size_t i = 0; i < sig->parameter_count(); ++i) {
+ Node* value = values[i];
+ switch (sig->GetParam(i)) {
+ case wasm::kWasmF32:
+ value = graph()->NewNode(m->BitcastFloat32ToInt32(), value);
+ // Intentionally fall through to the next case.
+ case wasm::kWasmI32:
+ BuildEncodeException32BitValue(&index, value);
+ break;
+ case wasm::kWasmF64:
+ value = graph()->NewNode(m->BitcastFloat64ToInt64(), value);
+ // Intentionally fall through to the next case.
+ case wasm::kWasmI64: {
+ Node* upper32 = graph()->NewNode(
+ m->TruncateInt64ToInt32(),
+ Binop(wasm::kExprI64ShrU, value, Int64Constant(32)));
+ BuildEncodeException32BitValue(&index, upper32);
+ Node* lower32 = graph()->NewNode(m->TruncateInt64ToInt32(), value);
+ BuildEncodeException32BitValue(&index, lower32);
+ break;
+ }
+ default:
+ CHECK(false);
+ break;
+ }
+ }
+ DCHECK_EQ(encoded_size, index);
+ return BuildCallToRuntime(Runtime::kWasmThrow, nullptr, 0);
+}
+
+void WasmGraphBuilder::BuildEncodeException32BitValue(uint32_t* index,
+ Node* value) {
+ MachineOperatorBuilder* machine = jsgraph()->machine();
+ Node* upper_parameters[] = {
+ BuildChangeUint32ToSmi(Int32Constant(*index)),
+ BuildChangeUint32ToSmi(
+ graph()->NewNode(machine->Word32Shr(), value, Int32Constant(16))),
+ };
+ BuildCallToRuntime(Runtime::kWasmExceptionSetElement, upper_parameters,
+ arraysize(upper_parameters));
+ ++(*index);
+ Node* lower_parameters[] = {
+ BuildChangeUint32ToSmi(Int32Constant(*index)),
+ BuildChangeUint32ToSmi(graph()->NewNode(machine->Word32And(), value,
+ Int32Constant(0xFFFFu))),
+ };
+ BuildCallToRuntime(Runtime::kWasmExceptionSetElement, lower_parameters,
+ arraysize(lower_parameters));
+ ++(*index);
+}
+
+Node* WasmGraphBuilder::BuildDecodeException32BitValue(Node* const* values,
+ uint32_t* index) {
+ MachineOperatorBuilder* machine = jsgraph()->machine();
+ Node* upper = BuildChangeSmiToInt32(values[*index]);
+ (*index)++;
+ upper = graph()->NewNode(machine->Word32Shl(), upper, Int32Constant(16));
+ Node* lower = BuildChangeSmiToInt32(values[*index]);
+ (*index)++;
+ Node* value = graph()->NewNode(machine->Word32Or(), upper, lower);
+ return value;
}
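
The encode/decode pair above packs every 32-bit payload into two 16-bit
halves so that each half fits in a Smi (hence
kBytesPerExceptionValuesArrayElement == 2). A standalone round trip of the
same bit manipulation, with the Smi boxing and runtime calls elided:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t value = 0xDEADBEEFu;
      // Encode: high half first, then low half
      // (mirrors BuildEncodeException32BitValue).
      uint32_t upper = value >> 16;
      uint32_t lower = value & 0xFFFFu;
      // Decode: shift the high half back and OR in the low half
      // (mirrors BuildDecodeException32BitValue).
      uint32_t decoded = (upper << 16) | lower;
      assert(decoded == value);
      return 0;
    }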
Node* WasmGraphBuilder::Rethrow() {
SetNeedsStackCheck();
- Node* result = BuildCallToRuntime(Runtime::kWasmRethrow, nullptr, 0);
+ Node* result = BuildCallToRuntime(Runtime::kWasmThrow, nullptr, 0);
return result;
}
-Node* WasmGraphBuilder::Catch(Node* input, wasm::WasmCodePosition position) {
+Node* WasmGraphBuilder::ConvertExceptionTagToRuntimeId(uint32_t tag) {
+ // TODO(kschimpf): Handle exceptions from different modules, when they are
+ // linked at runtime.
+ return Uint32Constant(tag);
+}
+
+Node* WasmGraphBuilder::GetExceptionRuntimeId() {
SetNeedsStackCheck();
- Node* parameters[] = {input}; // caught value
- Node* value = BuildCallToRuntime(Runtime::kWasmSetCaughtExceptionValue,
+ return BuildChangeSmiToInt32(
+ BuildCallToRuntime(Runtime::kWasmGetExceptionRuntimeId, nullptr, 0));
+}
+
+Node** WasmGraphBuilder::GetExceptionValues(
+ const wasm::WasmException* except_decl) {
+ // TODO(kschimpf): We need to move this code to function-body-decoder.cc
+ // in order to build landing-pad (exception) edges in case the runtime
+ // call causes an exception.
+
+ // Start by getting the encoded values from the exception.
+ uint32_t encoded_size = GetExceptionEncodedSize(except_decl);
+ Node** values = Buffer(encoded_size);
+ for (uint32_t i = 0; i < encoded_size; ++i) {
+ Node* parameters[] = {BuildChangeUint32ToSmi(Uint32Constant(i))};
+ values[i] = BuildCallToRuntime(Runtime::kWasmExceptionGetElement,
parameters, arraysize(parameters));
- parameters[0] = value;
- value = BuildCallToRuntime(Runtime::kWasmGetExceptionTag, parameters,
- arraysize(parameters));
- return BuildChangeSmiToInt32(value);
+ }
+
+ // Now convert the leading entries to the corresponding parameter values.
+ uint32_t index = 0;
+ const wasm::WasmExceptionSig* sig = except_decl->sig;
+ for (size_t i = 0; i < sig->parameter_count(); ++i) {
+ Node* value = BuildDecodeException32BitValue(values, &index);
+ switch (wasm::ValueType type = sig->GetParam(i)) {
+ case wasm::kWasmF32: {
+ value = Unop(wasm::kExprF32ReinterpretI32, value);
+ break;
+ }
+ case wasm::kWasmI32:
+ break;
+ case wasm::kWasmF64:
+ case wasm::kWasmI64: {
+ Node* upper =
+ Binop(wasm::kExprI64Shl, Unop(wasm::kExprI64UConvertI32, value),
+ Int64Constant(32));
+ Node* lower = Unop(wasm::kExprI64UConvertI32,
+ BuildDecodeException32BitValue(values, &index));
+ value = Binop(wasm::kExprI64Ior, upper, lower);
+ if (type == wasm::kWasmF64) {
+ value = Unop(wasm::kExprF64ReinterpretI64, value);
+ }
+ break;
+ }
+ default:
+ CHECK(false);
+ break;
+ }
+ values[i] = value;
+ }
+ DCHECK_EQ(index, encoded_size);
+ return values;
}
Node* WasmGraphBuilder::BuildI32DivS(Node* left, Node* right,
@@ -2147,9 +2288,8 @@ Node* WasmGraphBuilder::BuildDiv64Call(Node* left, Node* right,
sig_builder.AddParam(MachineType::Pointer());
Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
- Node* args[] = {function, stack_slot_dst, stack_slot_src};
-
- Node* call = BuildCCall(sig_builder.Build(), args);
+ Node* call =
+ BuildCCall(sig_builder.Build(), function, stack_slot_dst, stack_slot_src);
ZeroCheck32(static_cast<wasm::TrapReason>(trap_zero), call, position);
TrapIfEq32(wasm::kTrapDivUnrepresentable, call, -1, position);
@@ -2161,23 +2301,18 @@ Node* WasmGraphBuilder::BuildDiv64Call(Node* left, Node* right,
return load;
}
-Node* WasmGraphBuilder::BuildCCall(MachineSignature* sig, Node** args) {
- const size_t params = sig->parameter_count();
- const size_t extra = 2; // effect and control inputs.
- const size_t count = 1 + params + extra;
-
- // Reallocate the buffer to make space for extra inputs.
- args = Realloc(args, 1 + params, count);
-
- // Add effect and control inputs.
- args[params + 1] = *effect_;
- args[params + 2] = *control_;
+template <typename... Args>
+Node* WasmGraphBuilder::BuildCCall(MachineSignature* sig, Node* function,
+ Args... args) {
+ DCHECK_LE(sig->return_count(), 1);
+ DCHECK_EQ(sizeof...(args), sig->parameter_count());
+ Node* const call_args[] = {function, args..., *effect_, *control_};
CallDescriptor* desc =
Linkage::GetSimplifiedCDescriptor(jsgraph()->zone(), sig);
const Operator* op = jsgraph()->common()->Call(desc);
- Node* call = graph()->NewNode(op, static_cast<int>(count), args);
+ Node* call = graph()->NewNode(op, arraysize(call_args), call_args);
*effect_ = call;
return call;
}
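
The variadic rewrite removes the Realloc-based buffer shuffling: the pack
expands straight into a fixed-size array, and the argument count is checked
at compile time via sizeof.... A minimal sketch of the same pattern outside
the compiler (names are illustrative):

    #include <cstddef>
    #include <cstdio>

    template <typename... Args>
    void CallWithArgs(int function, Args... args) {
      // The pack expands into a brace-initialized array, like call_args above.
      const int call_args[] = {function, args..., /*effect*/ -1, /*control*/ -2};
      constexpr std::size_t count = sizeof(call_args) / sizeof(call_args[0]);
      static_assert(count == sizeof...(args) + 3,
                    "function + args + effect + control");
      for (int a : call_args) std::printf("%d ", a);
      std::printf("\n");
    }

    int main() {
      CallWithArgs(100, 1, 2, 3);  // prints: 100 1 2 3 -1 -2
      return 0;
    }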
@@ -2185,17 +2320,22 @@ Node* WasmGraphBuilder::BuildCCall(MachineSignature* sig, Node** args) {
Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
Node*** rets,
wasm::WasmCodePosition position) {
+ DCHECK_NOT_NULL(wasm_context_);
SetNeedsStackCheck();
const size_t params = sig->parameter_count();
- const size_t extra = 2; // effect and control inputs.
+ const size_t extra = 3; // wasm_context, effect, and control.
const size_t count = 1 + params + extra;
// Reallocate the buffer to make space for extra inputs.
args = Realloc(args, 1 + params, count);
+ // Make room for the wasm_context parameter at index 1, just after code.
+ memmove(&args[2], &args[1], params * sizeof(Node*));
+ args[1] = wasm_context_;
+
// Add effect and control inputs.
- args[params + 1] = *effect_;
- args[params + 2] = *control_;
+ args[params + 2] = *effect_;
+ args[params + 3] = *control_;
CallDescriptor* descriptor = GetWasmCallDescriptor(jsgraph()->zone(), sig);
const Operator* op = jsgraph()->common()->Call(descriptor);
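
Making room for wasm_context at index 1 is an in-place shift: everything
after the code pointer moves one slot to the right before the new parameter
is written. A hedged sketch of that buffer manipulation with plain ints in
place of Node*:

    #include <cassert>
    #include <cstring>

    int main() {
      // Buffer sized for: code, params..., plus slots for context/effect/control.
      int args[6] = {/*code*/ 7, /*params*/ 1, 2, 3, 0, 0};
      const int params = 3;
      // Shift the params from index 1 to index 2 (mirrors the memmove above).
      std::memmove(&args[2], &args[1], params * sizeof(int));
      args[1] = /*wasm_context*/ 42;
      assert(args[0] == 7 && args[1] == 42 && args[2] == 1 && args[4] == 3);
      return 0;
    }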
@@ -2441,7 +2581,7 @@ Node* WasmGraphBuilder::ToJS(Node* node, wasm::ValueType type) {
}
}
-Node* WasmGraphBuilder::BuildJavaScriptToNumber(Node* node, Node* context) {
+Node* WasmGraphBuilder::BuildJavaScriptToNumber(Node* node, Node* js_context) {
Callable callable =
Builtins::CallableFor(jsgraph()->isolate(), Builtins::kToNumber);
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -2450,7 +2590,7 @@ Node* WasmGraphBuilder::BuildJavaScriptToNumber(Node* node, Node* context) {
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* result = graph()->NewNode(jsgraph()->common()->Call(desc), stub_code,
- node, context, *effect_, *control_);
+ node, js_context, *effect_, *control_);
SetSourcePosition(result, 1);
@@ -2511,12 +2651,12 @@ Node* WasmGraphBuilder::BuildChangeTaggedToFloat64(Node* value) {
return phi;
}
-Node* WasmGraphBuilder::FromJS(Node* node, Node* context,
+Node* WasmGraphBuilder::FromJS(Node* node, Node* js_context,
wasm::ValueType type) {
DCHECK_NE(wasm::kWasmStmt, type);
// Do a JavaScript ToNumber.
- Node* num = BuildJavaScriptToNumber(node, context);
+ Node* num = BuildJavaScriptToNumber(node, js_context);
// Change representation.
SimplifiedOperatorBuilder simplified(jsgraph()->zone());
@@ -2590,11 +2730,12 @@ Node* WasmGraphBuilder::BuildAllocateHeapNumberWithValue(Node* value,
Node* control) {
MachineOperatorBuilder* machine = jsgraph()->machine();
CommonOperatorBuilder* common = jsgraph()->common();
- // The AllocateHeapNumberStub does not use the context, so we can safely pass
- // in Smi zero here.
- Callable callable = CodeFactory::AllocateHeapNumber(jsgraph()->isolate());
+ // The AllocateHeapNumber builtin does not use the js_context, so we can
+ // safely pass in Smi zero here.
+ Callable callable = Builtins::CallableFor(jsgraph()->isolate(),
+ Builtins::kAllocateHeapNumber);
Node* target = jsgraph()->HeapConstant(callable.code());
- Node* context = jsgraph()->NoContextConstant();
+ Node* js_context = jsgraph()->NoContextConstant();
Node* effect =
graph()->NewNode(common->BeginRegion(RegionObservability::kNotObservable),
graph()->start());
@@ -2605,7 +2746,7 @@ Node* WasmGraphBuilder::BuildAllocateHeapNumberWithValue(Node* value,
allocate_heap_number_operator_.set(common->Call(descriptor));
}
Node* heap_number = graph()->NewNode(allocate_heap_number_operator_.get(),
- target, context, effect, control);
+ target, js_context, effect, control);
Node* store =
graph()->NewNode(machine->Store(StoreRepresentation(
MachineRepresentation::kFloat64, kNoWriteBarrier)),
@@ -2624,9 +2765,11 @@ Node* WasmGraphBuilder::BuildHeapNumberValueIndexConstant() {
return jsgraph()->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag);
}
-void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code) {
- int wasm_count = static_cast<int>(sig_->parameter_count());
- int count = wasm_count + 3;
+void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
+ Address wasm_context_address) {
+ const int wasm_count = static_cast<int>(sig_->parameter_count());
+ const int count =
+ wasm_count + 4; // wasm_code, wasm_context, effect, and control.
Node** args = Buffer(count);
// Build the start and the JS parameter nodes.
@@ -2634,27 +2777,33 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code) {
*control_ = start;
*effect_ = start;
- // Create the context parameter
- Node* context = graph()->NewNode(
+ // Create the js_context parameter
+ Node* js_context = graph()->NewNode(
jsgraph()->common()->Parameter(
Linkage::GetJSCallContextParamIndex(wasm_count + 1), "%context"),
graph()->start());
- // Set the ThreadInWasm flag before we do the actual call.
- BuildModifyThreadInWasmFlag(true);
+ // Create the wasm_context node to pass as parameter. This must be a
+ // RelocatableIntPtrConstant because JSToWasm wrappers are compiled at module
+ // compile time and patched at instance build time.
+ DCHECK_NULL(wasm_context_);
+ wasm_context_ = jsgraph()->RelocatableIntPtrConstant(
+ reinterpret_cast<uintptr_t>(wasm_context_address),
+ RelocInfo::WASM_CONTEXT_REFERENCE);
if (!wasm::IsJSCompatibleSignature(sig_)) {
- // Throw a TypeError. Use the context of the calling javascript function
- // (passed as a parameter), such that the generated code is context
+    // Throw a TypeError. Use the js_context of the calling JavaScript function
+ // (passed as a parameter), such that the generated code is js_context
// independent.
- BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, context,
- nullptr, 0);
+ BuildCallToRuntimeWithContextFromJS(Runtime::kWasmThrowTypeError,
+ js_context, nullptr, 0);
// Add a dummy call to the wasm function so that the generated wrapper
// contains a reference to the wrapped wasm function. Without this reference
// the wasm function could not be re-imported into another wasm module.
int pos = 0;
args[pos++] = HeapConstant(wasm_code);
+ args[pos++] = wasm_context_;
args[pos++] = *effect_;
args[pos++] = *control_;
@@ -2669,14 +2818,18 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code) {
int pos = 0;
args[pos++] = HeapConstant(wasm_code);
+ args[pos++] = wasm_context_;
// Convert JS parameters to wasm numbers.
for (int i = 0; i < wasm_count; ++i) {
Node* param = Param(i + 1);
- Node* wasm_param = FromJS(param, context, sig_->GetParam(i));
+ Node* wasm_param = FromJS(param, js_context, sig_->GetParam(i));
args[pos++] = wasm_param;
}
+ // Set the ThreadInWasm flag before we do the actual call.
+ BuildModifyThreadInWasmFlag(true);
+
args[pos++] = *effect_;
args[pos++] = *control_;
@@ -2699,13 +2852,42 @@ int WasmGraphBuilder::AddParameterNodes(Node** args, int pos, int param_count,
wasm::FunctionSig* sig) {
// Convert wasm numbers to JS values.
for (int i = 0; i < param_count; ++i) {
- Node* param = Param(i);
+ Node* param = Param(i + 1); // Start from index 1 to drop the wasm_context.
args[pos++] = ToJS(param, sig->GetParam(i));
}
return pos;
}
-void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target) {
+Node* WasmGraphBuilder::LoadImportDataAtOffset(int offset, Node* table) {
+ offset = FixedArray::OffsetOfElementAt(offset) - kHeapObjectTag;
+ Node* offset_node = jsgraph()->Int32Constant(offset);
+ Node* import_data = graph()->NewNode(
+ jsgraph()->machine()->Load(LoadRepresentation::TaggedPointer()), table,
+ offset_node, *effect_, *control_);
+ *effect_ = import_data;
+ return import_data;
+}
+
+Node* WasmGraphBuilder::LoadNativeContext(Node* table) {
+  // The js_imports_table is set up so that index 0 has isolate->native_context.
+ return LoadImportDataAtOffset(0, table);
+}
+
+int OffsetForImportData(int index, WasmGraphBuilder::ImportDataType type) {
+  // The js_imports_table is set up so that index 0 has isolate->native_context
+  // and, for every import index i, 3*i+1 has the JSReceiver, 3*i+2 has the
+  // function's global proxy, and 3*i+3 has the function's context.
+ return 3 * index + type;
+}
+
+Node* WasmGraphBuilder::LoadImportData(int index, ImportDataType type,
+ Node* table) {
+ return LoadImportDataAtOffset(OffsetForImportData(index, type), table);
+}
+
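A quick worked example of the table layout encoded above, for import index 2:

    // OffsetForImportData(2, kFunction)        == 3 * 2 + 1 == 7
    // OffsetForImportData(2, kGlobalProxy)     == 3 * 2 + 2 == 8
    // OffsetForImportData(2, kFunctionContext) == 3 * 2 + 3 == 9
    // (element 0 of the table always holds the native context)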
+bool WasmGraphBuilder::BuildWasmToJSWrapper(
+ Handle<JSReceiver> target, Handle<FixedArray> global_js_imports_table,
+ int index) {
DCHECK(target->IsCallable());
int wasm_count = static_cast<int>(sig_->parameter_count());
@@ -2717,17 +2899,28 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target) {
*effect_ = start;
*control_ = start;
+  // We add the target function to a table and look it up at runtime. This
+  // ensures that if the GC kicks in, it does not need to patch the code for
+  // the JS function.
+  // js_imports_table is a fixed array held through a global handle whose
+  // lifetime is tied to the instance.
+ // TODO(aseemgarg): explore using per-import global handle instead of a table
+ Node* table_ptr = jsgraph()->IntPtrConstant(
+ reinterpret_cast<intptr_t>(global_js_imports_table.location()));
+ Node* table = graph()->NewNode(
+ jsgraph()->machine()->Load(LoadRepresentation::TaggedPointer()),
+ table_ptr, jsgraph()->IntPtrConstant(0), *effect_, *control_);
+ *effect_ = table;
+
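In plain C++ terms, the embedded constant plus the Load above compute the following double indirection (a sketch; handle internals simplified):

    FixedArray** handle_location = global_js_imports_table.location();
    FixedArray* table_object = *handle_location;  // may be moved by the GC

Only handle_location is baked into the generated code, and that slot never moves, so the GC can relocate the FixedArray freely without patching the wrapper.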
if (!wasm::IsJSCompatibleSignature(sig_)) {
- // Throw a TypeError. Embedding the context is ok here, since this code is
- // regenerated at instantiation time.
- Node* context =
- jsgraph()->HeapConstant(jsgraph()->isolate()->native_context());
- BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, context,
+ // Throw a TypeError.
+ Node* native_context = LoadNativeContext(table);
+ BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, native_context,
nullptr, 0);
// We don't need to return a value here, as the runtime call will not return
// anyway (the c entry stub will trigger stack unwinding).
ReturnVoid();
- return;
+ return false;
}
Node** args = Buffer(wasm_count + 7);
@@ -2740,12 +2933,12 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target) {
Handle<JSFunction> function = Handle<JSFunction>::cast(target);
if (function->shared()->internal_formal_parameter_count() == wasm_count) {
int pos = 0;
- args[pos++] = jsgraph()->Constant(target); // target callable.
+ args[pos++] =
+ LoadImportData(index, kFunction, table); // target callable.
// Receiver.
if (is_sloppy(function->shared()->language_mode()) &&
!function->shared()->native()) {
- args[pos++] =
- HeapConstant(handle(function->context()->global_proxy(), isolate));
+ args[pos++] = LoadImportData(index, kGlobalProxy, table);
} else {
args[pos++] = jsgraph()->Constant(
handle(isolate->heap()->undefined_value(), isolate));
@@ -2759,7 +2952,7 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target) {
args[pos++] = jsgraph()->UndefinedConstant(); // new target
args[pos++] = jsgraph()->Int32Constant(wasm_count); // argument count
- args[pos++] = HeapConstant(handle(function->context()));
+ args[pos++] = LoadImportData(index, kFunctionContext, table);
args[pos++] = *effect_;
args[pos++] = *control_;
@@ -2768,11 +2961,12 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target) {
}
// We cannot call the target directly, we have to use the Call builtin.
+ Node* native_context = nullptr;
if (!call) {
int pos = 0;
Callable callable = CodeFactory::Call(isolate);
args[pos++] = jsgraph()->HeapConstant(callable.code());
- args[pos++] = jsgraph()->Constant(target); // target callable
+ args[pos++] = LoadImportData(index, kFunction, table); // target callable.
args[pos++] = jsgraph()->Int32Constant(wasm_count); // argument count
args[pos++] = jsgraph()->Constant(
handle(isolate->heap()->undefined_value(), isolate)); // receiver
@@ -2789,7 +2983,8 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target) {
// is only needed if the target is a constructor to throw a TypeError, if
// the target is a native function, or if the target is a callable JSObject,
// which can only be constructed by the runtime.
- args[pos++] = HeapConstant(isolate->native_context());
+ native_context = LoadNativeContext(table);
+ args[pos++] = native_context;
args[pos++] = *effect_;
args[pos++] = *control_;
@@ -2804,9 +2999,12 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target) {
// Convert the return value back.
Node* val = sig_->return_count() == 0
? jsgraph()->Int32Constant(0)
- : FromJS(call, HeapConstant(isolate->native_context()),
+ : FromJS(call,
+ native_context != nullptr ? native_context
+ : LoadNativeContext(table),
sig_->GetReturn());
Return(val);
+ return true;
}
namespace {
@@ -2818,6 +3016,39 @@ bool HasInt64ParamOrReturn(wasm::FunctionSig* sig) {
}
} // namespace
+void WasmGraphBuilder::BuildWasmToWasmWrapper(Handle<Code> target,
+ Address new_context_address) {
+ int wasm_count = static_cast<int>(sig_->parameter_count());
+ int count = wasm_count + 4; // wasm_code, wasm_context, effect, and control.
+ Node** args = Buffer(count);
+
+ // Build the start node.
+ Node* start = Start(count + 1);
+ *control_ = start;
+ *effect_ = start;
+
+ int pos = 0;
+ // Add the wasm code target.
+ args[pos++] = jsgraph()->HeapConstant(target);
+ // Add the wasm_context of the other instance.
+ args[pos++] = jsgraph()->IntPtrConstant(
+ reinterpret_cast<uintptr_t>(new_context_address));
+ // Add the parameters starting from index 1 since the parameter with index 0
+ // is the old wasm_context.
+ for (int i = 0; i < wasm_count; ++i) {
+ args[pos++] = Param(i + 1);
+ }
+ args[pos++] = *effect_;
+ args[pos++] = *control_;
+
+ // Call the wasm code.
+ CallDescriptor* desc = GetWasmCallDescriptor(jsgraph()->zone(), sig_);
+ Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), count, args);
+ *effect_ = call;
+ Node* retval = sig_->return_count() == 0 ? jsgraph()->Int32Constant(0) : call;
+ Return(retval);
+}
+
void WasmGraphBuilder::BuildWasmInterpreterEntry(
uint32_t function_index, Handle<WasmInstanceObject> instance) {
int param_count = static_cast<int>(sig_->parameter_count());
@@ -2852,9 +3083,10 @@ void WasmGraphBuilder::BuildWasmInterpreterEntry(
for (int i = 0; i < param_count; ++i) {
wasm::ValueType type = sig_->GetParam(i);
- *effect_ =
- graph()->NewNode(GetSafeStoreOperator(offset, type), arg_buffer,
- Int32Constant(offset), Param(i), *effect_, *control_);
+ // Start from the parameter with index 1 to drop the wasm_context.
+ *effect_ = graph()->NewNode(GetSafeStoreOperator(offset, type), arg_buffer,
+ Int32Constant(offset), Param(i + 1), *effect_,
+ *control_);
offset += 1 << ElementSizeLog2Of(type);
}
DCHECK_EQ(args_size_bytes, offset);
@@ -2886,12 +3118,17 @@ void WasmGraphBuilder::BuildWasmInterpreterEntry(
if (HasInt64ParamOrReturn(sig_)) LowerInt64();
}
-void WasmGraphBuilder::BuildCWasmEntry() {
+void WasmGraphBuilder::BuildCWasmEntry(Address wasm_context_address) {
// Build the start and the JS parameter nodes.
Node* start = Start(CWasmEntryParameters::kNumParameters + 5);
*control_ = start;
*effect_ = start;
+ // Create the wasm_context node to pass as parameter.
+ DCHECK_NULL(wasm_context_);
+ wasm_context_ = jsgraph()->IntPtrConstant(
+ reinterpret_cast<uintptr_t>(wasm_context_address));
+
// Create parameter nodes (offset by 1 for the receiver parameter).
Node* code_obj = Param(CWasmEntryParameters::kCodeObject + 1);
Node* arg_buffer = Param(CWasmEntryParameters::kArgumentsBuffer + 1);
@@ -2900,11 +3137,12 @@ void WasmGraphBuilder::BuildCWasmEntry() {
BuildModifyThreadInWasmFlag(true);
int wasm_arg_count = static_cast<int>(sig_->parameter_count());
- int arg_count = wasm_arg_count + 3; // args + code, control, effect
+ int arg_count = wasm_arg_count + 4; // code, wasm_context, control, effect
Node** args = Buffer(arg_count);
int pos = 0;
args[pos++] = code_obj;
+ args[pos++] = wasm_context_;
int offset = 0;
for (wasm::ValueType type : sig_->parameters()) {
@@ -2955,38 +3193,60 @@ void WasmGraphBuilder::BuildCWasmEntry() {
}
}
-Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
- DCHECK_NOT_NULL(env_);
- const uintptr_t mem_start = static_cast<const uintptr_t>(env_->mem_start);
- if (offset == 0) {
- if (!mem_buffer_) {
- mem_buffer_ = jsgraph()->RelocatableIntPtrConstant(
- mem_start, RelocInfo::WASM_MEMORY_REFERENCE);
- }
- return mem_buffer_;
- } else {
- return jsgraph()->RelocatableIntPtrConstant(
- static_cast<uintptr_t>(mem_start + offset),
- RelocInfo::WASM_MEMORY_REFERENCE);
+// This function is used by WasmFullDecoder to create a node that loads the
+// mem_start variable from the WasmContext. It should not be used directly by
+// the WasmGraphBuilder. The WasmGraphBuilder should directly use mem_start_,
+// which will always contain the correct node (stored in the SsaEnv).
+Node* WasmGraphBuilder::LoadMemStart() {
+ DCHECK_NOT_NULL(wasm_context_);
+ Node* mem_buffer = graph()->NewNode(
+ jsgraph()->machine()->Load(MachineType::UintPtr()), wasm_context_,
+ jsgraph()->Int32Constant(
+ static_cast<int32_t>(offsetof(WasmContext, mem_start))),
+ *effect_, *control_);
+ *effect_ = mem_buffer;
+ return mem_buffer;
+}
+
+// This function is used by WasmFullDecoder to create a node that loads the
+// mem_size variable from the WasmContext. It should not be used directly by
+// the WasmGraphBuilder. The WasmGraphBuilder should directly use mem_size_,
+// which will always contain the correct node (stored in the SsaEnv).
+Node* WasmGraphBuilder::LoadMemSize() {
+  // Load mem_size from the WasmContext at runtime.
+ DCHECK_NOT_NULL(wasm_context_);
+ Node* mem_size = graph()->NewNode(
+ jsgraph()->machine()->Load(MachineType::Uint32()), wasm_context_,
+ jsgraph()->Int32Constant(
+ static_cast<int32_t>(offsetof(WasmContext, mem_size))),
+ *effect_, *control_);
+ *effect_ = mem_size;
+ if (jsgraph()->machine()->Is64()) {
+ mem_size = graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(),
+ mem_size);
}
+ return mem_size;
+}
+
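Both loads above address WasmContext fields via offsetof; a minimal sketch of the layout they assume (the authoritative definition lives elsewhere in this patch):

    struct WasmContext {
      byte* mem_start;    // base of linear memory, read by LoadMemStart()
      uint32_t mem_size;  // memory size in bytes, read by LoadMemSize()
    };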
+Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
+ DCHECK_NOT_NULL(*mem_start_);
+ if (offset == 0) return *mem_start_;
+ return graph()->NewNode(jsgraph()->machine()->IntAdd(), *mem_start_,
+ jsgraph()->IntPtrConstant(offset));
}
Node* WasmGraphBuilder::CurrentMemoryPages() {
   // CurrentMemoryPages cannot be called from asm.js.
DCHECK_EQ(wasm::kWasmOrigin, env_->module->origin());
- SetNeedsStackCheck();
- Node* call = BuildCallToRuntime(Runtime::kWasmMemorySize, nullptr, 0);
- Node* result = BuildChangeSmiToInt32(call);
- return result;
-}
-
-Node* WasmGraphBuilder::MemSize() {
- DCHECK_NOT_NULL(env_);
- if (mem_size_) return mem_size_;
- uint32_t size = env_->mem_size;
- mem_size_ = jsgraph()->RelocatableInt32Constant(
- size, RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
- return mem_size_;
+ DCHECK_NOT_NULL(*mem_size_);
+ Node* mem_size = *mem_size_;
+ if (jsgraph()->machine()->Is64()) {
+ mem_size = graph()->NewNode(jsgraph()->machine()->TruncateInt64ToInt32(),
+ mem_size);
+ }
+ return graph()->NewNode(
+ jsgraph()->machine()->Word32Shr(), mem_size,
+ jsgraph()->Int32Constant(WhichPowerOf2(wasm::WasmModule::kPageSize)));
}
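A quick sanity check of the shift above, with kPageSize == 64 KiB == 1 << 16:

    // mem_size == 1310720 bytes  =>  1310720 >> 16 == 20 pages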
void WasmGraphBuilder::EnsureFunctionTableNodes() {
@@ -3013,51 +3273,46 @@ void WasmGraphBuilder::EnsureFunctionTableNodes() {
Node* WasmGraphBuilder::BuildModifyThreadInWasmFlag(bool new_value) {
// TODO(eholk): generate code to modify the thread-local storage directly,
// rather than calling the runtime.
- //
- // Note that the runtime functions also toggle the wasm_execution_time
- // counters. Make sure this behavior is preserved if we avoid the runtime
- // call.
if (!trap_handler::UseTrapHandler()) {
return *control_;
}
- const Runtime::FunctionId f =
- new_value ? Runtime::kSetThreadInWasm : Runtime::kClearThreadInWasm;
- const Runtime::Function* fun = Runtime::FunctionForId(f);
- DCHECK_EQ(0, fun->nargs);
- const CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
- jsgraph()->zone(), f, fun->nargs, Operator::kNoProperties,
- CallDescriptor::kNoFlags);
- // CEntryStubConstant nodes have to be created and cached in the main
- // thread. At the moment this is only done for CEntryStubConstant(1).
- DCHECK_EQ(1, fun->result_size);
- Node* inputs[] = {centry_stub_node_,
- jsgraph()->ExternalConstant(
- ExternalReference(f, jsgraph()->isolate())), // ref
- jsgraph()->Int32Constant(fun->nargs), // arity
- jsgraph()->NoContextConstant(),
- *effect_,
- *control_};
-
- Node* node = jsgraph()->graph()->NewNode(jsgraph()->common()->Call(desc),
- arraysize(inputs), inputs);
- *effect_ = node;
- return node;
+ // Using two functions instead of taking the new value as a parameter saves
+ // one instruction on each call to set up the parameter.
+ ExternalReference ref =
+ new_value ? ExternalReference::wasm_set_thread_in_wasm_flag(
+ jsgraph()->isolate())
+ : ExternalReference::wasm_clear_thread_in_wasm_flag(
+ jsgraph()->isolate());
+ MachineSignature::Builder sig_builder(jsgraph()->zone(), 0, 0);
+ return BuildCCall(
+ sig_builder.Build(),
+ graph()->NewNode(jsgraph()->common()->ExternalConstant(ref)));
}
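The two external references are assumed to resolve to trivial C++ helpers along these lines (an illustrative sketch; the flag variable and its type are assumptions, while the function names come from the ExternalReference accessors above):

    extern thread_local int g_thread_in_wasm;  // assumed TLS flag

    void wasm_set_thread_in_wasm_flag() { g_thread_in_wasm = 1; }
    void wasm_clear_thread_in_wasm_flag() { g_thread_in_wasm = 0; }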
// Only call this function for code which is not reused across instantiations,
-// as we do not patch the embedded context.
+// as we do not patch the embedded js_context.
Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
- Node* context,
+ Node* js_context,
Node** parameters,
int parameter_count) {
- // Setting and clearing the thread-in-wasm flag should not be done as a normal
- // runtime call.
- DCHECK_NE(f, Runtime::kSetThreadInWasm);
- DCHECK_NE(f, Runtime::kClearThreadInWasm);
// We're leaving Wasm code, so clear the flag.
*control_ = BuildModifyThreadInWasmFlag(false);
+ // Since the thread-in-wasm flag is clear, it is as if we are calling from JS.
+ Node* call = BuildCallToRuntimeWithContextFromJS(f, js_context, parameters,
+ parameter_count);
+
+ // Restore the thread-in-wasm flag, since we have returned to Wasm.
+ *control_ = BuildModifyThreadInWasmFlag(true);
+
+ return call;
+}
+
+// This version of BuildCallToRuntime neither clears nor sets the
+// thread-in-wasm flag.
+Node* WasmGraphBuilder::BuildCallToRuntimeWithContextFromJS(
+ Runtime::FunctionId f, Node* js_context, Node* const* parameters,
+ int parameter_count) {
const Runtime::Function* fun = Runtime::FunctionForId(f);
CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
jsgraph()->zone(), f, fun->nargs, Operator::kNoProperties,
@@ -3065,9 +3320,9 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
// CEntryStubConstant nodes have to be created and cached in the main
// thread. At the moment this is only done for CEntryStubConstant(1).
DCHECK_EQ(1, fun->result_size);
- // At the moment we only allow 3 parameters. If more parameters are needed,
+ // At the moment we only allow 4 parameters. If more parameters are needed,
// increase this constant accordingly.
- static const int kMaxParams = 3;
+ static const int kMaxParams = 4;
DCHECK_GE(kMaxParams, parameter_count);
Node* inputs[kMaxParams + 6];
int count = 0;
@@ -3078,7 +3333,7 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
inputs[count++] = jsgraph()->ExternalConstant(
ExternalReference(f, jsgraph()->isolate())); // ref
inputs[count++] = jsgraph()->Int32Constant(fun->nargs); // arity
- inputs[count++] = context; // context
+ inputs[count++] = js_context; // js_context
inputs[count++] = *effect_;
inputs[count++] = *control_;
@@ -3086,9 +3341,6 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
count, inputs);
*effect_ = node;
- // Restore the thread-in-wasm flag, since we have returned to Wasm.
- *control_ = BuildModifyThreadInWasmFlag(true);
-
return node;
}
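The inputs array assembled above follows the usual runtime-call layout, which is where the kMaxParams + 6 sizing comes from (n == parameter_count):

    // [ centry_stub_node_, p0 .. p(n-1), ExternalConstant(f),
    //   Int32Constant(fun->nargs), js_context, *effect_, *control_ ]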
@@ -3132,39 +3384,43 @@ void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
uint32_t offset,
wasm::WasmCodePosition position) {
if (FLAG_wasm_no_bounds_checks) return;
+ DCHECK_NOT_NULL(*mem_size_);
- uint64_t min_size = static_cast<uint64_t>(env_->module->initial_pages) *
- wasm::WasmModule::kPageSize;
- uint64_t max_size = static_cast<uint64_t>(env_->module->has_maximum_pages
- ? env_->module->maximum_pages
- : wasm::kV8MaxWasmMemoryPages) *
- wasm::WasmModule::kPageSize;
+ uint32_t min_size = env_->module->initial_pages * wasm::WasmModule::kPageSize;
+ uint32_t max_size =
+ (env_->module->has_maximum_pages ? env_->module->maximum_pages
+ : wasm::kV8MaxWasmMemoryPages) *
+ wasm::WasmModule::kPageSize;
byte access_size = wasm::WasmOpcodes::MemSize(memtype);
- uint64_t end_offset = static_cast<uint64_t>(offset) + access_size;
- if (end_offset > max_size) {
+ if (access_size > max_size || offset > max_size - access_size) {
// The access will be out of bounds, even for the largest memory.
TrapIfEq32(wasm::kTrapMemOutOfBounds, jsgraph()->Int32Constant(0), 0,
position);
return;
}
+ uint32_t end_offset = offset + access_size;
if (end_offset > min_size) {
// The end offset is larger than the smallest memory.
// Dynamically check the end offset against the actual memory size, which
// is not known at compile time.
- Node* cond = graph()->NewNode(
- jsgraph()->machine()->Uint32LessThanOrEqual(),
- jsgraph()->IntPtrConstant(static_cast<uintptr_t>(end_offset)),
- jsgraph()->RelocatableInt32Constant(
- static_cast<uint32_t>(env_->mem_size),
- RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
+ Node* cond;
+ if (jsgraph()->machine()->Is32()) {
+ cond = graph()->NewNode(jsgraph()->machine()->Uint32LessThanOrEqual(),
+ jsgraph()->Int32Constant(end_offset), *mem_size_);
+ } else {
+ cond = graph()->NewNode(
+ jsgraph()->machine()->Uint64LessThanOrEqual(),
+ jsgraph()->Int64Constant(static_cast<int64_t>(end_offset)),
+ *mem_size_);
+ }
TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
} else {
// The end offset is within the bounds of the smallest memory, so only
// one check is required. Check to see if the index is also a constant.
- Uint32Matcher m(index);
+ UintPtrMatcher m(index);
if (m.HasValue()) {
uint64_t index_val = m.Value();
if ((index_val + offset + access_size) <= min_size) {
@@ -3175,11 +3431,22 @@ void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
}
}
- uint64_t effective_size = env_->mem_size - (end_offset - 1);
- Node* cond = graph()->NewNode(jsgraph()->machine()->Uint32LessThan(), index,
- jsgraph()->RelocatableInt32Constant(
- static_cast<uint32_t>(effective_size),
- RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
+ Node* effective_size;
+ if (jsgraph()->machine()->Is32()) {
+ effective_size =
+ graph()->NewNode(jsgraph()->machine()->Int32Sub(), *mem_size_,
+ jsgraph()->Int32Constant(end_offset - 1));
+ } else {
+ effective_size = graph()->NewNode(
+ jsgraph()->machine()->Int64Sub(), *mem_size_,
+ jsgraph()->Int64Constant(static_cast<int64_t>(end_offset - 1)));
+ }
+
+ const Operator* less = jsgraph()->machine()->Is32()
+ ? jsgraph()->machine()->Uint32LessThan()
+ : jsgraph()->machine()->Uint64LessThan();
+
+ Node* cond = graph()->NewNode(less, index, effective_size);
TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
}
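Concretely, for a 4-byte access at static offset 8 against a one-page minimum memory (min_size == 65536) and a non-constant index:

    // end_offset = 8 + 4 = 12 <= min_size, so only the index check is emitted:
    //   effective_size = mem_size - (end_offset - 1) = mem_size - 11
    //   trap unless index < effective_size
    // i.e. index + 12 <= mem_size, keeping the whole access in bounds.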
@@ -3204,29 +3471,50 @@ const Operator* WasmGraphBuilder::GetSafeStoreOperator(int offset,
return jsgraph()->machine()->UnalignedStore(rep);
}
+Node* WasmGraphBuilder::TraceMemoryOperation(bool is_store,
+ MachineRepresentation rep,
+ Node* index, uint32_t offset,
+ wasm::WasmCodePosition position) {
+ Node* address = graph()->NewNode(jsgraph()->machine()->Int32Add(),
+ Int32Constant(offset), index);
+ Node* addr_low = BuildChangeInt32ToSmi(graph()->NewNode(
+ jsgraph()->machine()->Word32And(), address, Int32Constant(0xffff)));
+ Node* addr_high = BuildChangeInt32ToSmi(graph()->NewNode(
+ jsgraph()->machine()->Word32Shr(), address, Int32Constant(16)));
+ int32_t rep_i = static_cast<int32_t>(rep);
+ Node* params[] = {
+ jsgraph()->SmiConstant(is_store), // is_store
+ jsgraph()->SmiConstant(rep_i), // mem rep
+ addr_low, // address lower half word
+ addr_high // address higher half word
+ };
+ Node* call =
+ BuildCallToRuntime(Runtime::kWasmTraceMemory, params, arraysize(params));
+ SetSourcePosition(call, position);
+ return call;
+}
+
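For example, a memory operation at effective address 0x12345 is reported as:

    // address   = offset + index           == 0x12345
    // addr_low  = address & 0xffff         == 0x2345  (as Smi)
    // addr_high = address >> 16 (unsigned) == 0x1     (as Smi)
    // Splitting the address keeps both halves in Smi range on all platforms.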
Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
Node* index, uint32_t offset,
uint32_t alignment,
wasm::WasmCodePosition position) {
Node* load;
+ if (jsgraph()->machine()->Is64()) {
+ index =
+ graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), index);
+ }
// Wasm semantics throw on OOB. Introduce explicit bounds check.
if (!FLAG_wasm_trap_handler || !V8_TRAP_HANDLER_SUPPORTED) {
BoundsCheckMem(memtype, index, offset, position);
}
- if (jsgraph()->machine()->Is64()) {
- index =
- graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), index);
- }
if (memtype.representation() == MachineRepresentation::kWord8 ||
jsgraph()->machine()->UnalignedLoadSupported(memtype.representation())) {
- if (FLAG_wasm_trap_handler && V8_TRAP_HANDLER_SUPPORTED) {
- DCHECK(FLAG_wasm_guard_pages);
- Node* position_node = jsgraph()->Int32Constant(position);
+ if (trap_handler::UseTrapHandler()) {
load = graph()->NewNode(jsgraph()->machine()->ProtectedLoad(memtype),
- MemBuffer(offset), index, position_node, *effect_,
- *control_);
+ MemBuffer(offset), index, *effect_, *control_);
+ SetSourcePosition(load, position);
} else {
load = graph()->NewNode(jsgraph()->machine()->Load(memtype),
MemBuffer(offset), index, *effect_, *control_);
@@ -3257,6 +3545,11 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
}
}
+ if (FLAG_wasm_trace_memory) {
+ TraceMemoryOperation(false, memtype.representation(), index, offset,
+ position);
+ }
+
return load;
}
@@ -3266,15 +3559,15 @@ Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
wasm::ValueType type) {
Node* store;
+ if (jsgraph()->machine()->Is64()) {
+ index =
+ graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), index);
+ }
// Wasm semantics throw on OOB. Introduce explicit bounds check.
if (!FLAG_wasm_trap_handler || !V8_TRAP_HANDLER_SUPPORTED) {
BoundsCheckMem(memtype, index, offset, position);
}
- if (jsgraph()->machine()->Is64()) {
- index =
- graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), index);
- }
#if defined(V8_TARGET_BIG_ENDIAN)
val = BuildChangeEndiannessStore(val, memtype, type);
#endif
@@ -3282,10 +3575,10 @@ Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
if (memtype.representation() == MachineRepresentation::kWord8 ||
jsgraph()->machine()->UnalignedStoreSupported(memtype.representation())) {
if (FLAG_wasm_trap_handler && V8_TRAP_HANDLER_SUPPORTED) {
- Node* position_node = jsgraph()->Int32Constant(position);
store = graph()->NewNode(
jsgraph()->machine()->ProtectedStore(memtype.representation()),
- MemBuffer(offset), index, val, position_node, *effect_, *control_);
+ MemBuffer(offset), index, val, *effect_, *control_);
+ SetSourcePosition(store, position);
} else {
StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
store =
@@ -3303,19 +3596,26 @@ Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
*effect_ = store;
+ if (FLAG_wasm_trace_memory) {
+ TraceMemoryOperation(true, memtype.representation(), index, offset,
+ position);
+ }
+
return store;
}
Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
// TODO(turbofan): fold bounds checks for constant asm.js loads.
// asm.js semantics use CheckedLoad (i.e. OOB reads return 0ish).
+ DCHECK_NOT_NULL(*mem_size_);
+ DCHECK_NOT_NULL(*mem_start_);
if (jsgraph()->machine()->Is64()) {
index =
graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), index);
}
const Operator* op = jsgraph()->machine()->CheckedLoad(type);
Node* load =
- graph()->NewNode(op, MemBuffer(0), index, MemSize(), *effect_, *control_);
+ graph()->NewNode(op, *mem_start_, index, *mem_size_, *effect_, *control_);
*effect_ = load;
return load;
}
@@ -3324,13 +3624,15 @@ Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
Node* val) {
// TODO(turbofan): fold bounds checks for constant asm.js stores.
// asm.js semantics use CheckedStore (i.e. ignore OOB writes).
+ DCHECK_NOT_NULL(*mem_size_);
+ DCHECK_NOT_NULL(*mem_start_);
if (jsgraph()->machine()->Is64()) {
index =
graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), index);
}
const Operator* op =
jsgraph()->machine()->CheckedStore(type.representation());
- Node* store = graph()->NewNode(op, MemBuffer(0), index, MemSize(), val,
+ Node* store = graph()->NewNode(op, *mem_start_, index, *mem_size_, val,
*effect_, *control_);
*effect_ = store;
return val;
@@ -3348,7 +3650,7 @@ Node* WasmGraphBuilder::String(const char* string) {
Graph* WasmGraphBuilder::graph() { return jsgraph()->graph(); }
void WasmGraphBuilder::LowerInt64() {
- if (!jsgraph()->machine()->Is32()) return;
+ if (jsgraph()->machine()->Is64()) return;
Int64Lowering r(jsgraph()->graph(), jsgraph()->machine(), jsgraph()->common(),
jsgraph()->zone(), sig_);
r.LowerGraph();
@@ -3793,31 +4095,66 @@ Node* WasmGraphBuilder::Simd8x16ShuffleOp(const uint8_t shuffle[16],
V(I32AtomicCompareExchange8U, CompareExchange, Uint8) \
V(I32AtomicCompareExchange16U, CompareExchange, Uint16)
+#define ATOMIC_LOAD_LIST(V) \
+ V(I32AtomicLoad, Uint32) \
+ V(I32AtomicLoad8U, Uint8) \
+ V(I32AtomicLoad16U, Uint16)
+
+#define ATOMIC_STORE_LIST(V) \
+ V(I32AtomicStore, Uint32, kWord32) \
+ V(I32AtomicStore8U, Uint8, kWord8) \
+ V(I32AtomicStore16U, Uint16, kWord16)
+
Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
+ uint32_t alignment, uint32_t offset,
wasm::WasmCodePosition position) {
+ // TODO(gdeepti): Add alignment validation, traps on misalignment
Node* node;
switch (opcode) {
-#define BUILD_ATOMIC_BINOP(Name, Operation, Type) \
- case wasm::kExpr##Name: { \
- BoundsCheckMem(MachineType::Type(), inputs[0], 0, position); \
- node = graph()->NewNode( \
- jsgraph()->machine()->Atomic##Operation(MachineType::Type()), \
- MemBuffer(0), inputs[0], inputs[1], *effect_, *control_); \
- break; \
+#define BUILD_ATOMIC_BINOP(Name, Operation, Type) \
+ case wasm::kExpr##Name: { \
+ BoundsCheckMem(MachineType::Type(), inputs[0], offset, position); \
+ node = graph()->NewNode( \
+ jsgraph()->machine()->Atomic##Operation(MachineType::Type()), \
+ MemBuffer(offset), inputs[0], inputs[1], *effect_, *control_); \
+ break; \
}
ATOMIC_BINOP_LIST(BUILD_ATOMIC_BINOP)
#undef BUILD_ATOMIC_BINOP
-#define BUILD_ATOMIC_TERNARY_OP(Name, Operation, Type) \
- case wasm::kExpr##Name: { \
- BoundsCheckMem(MachineType::Type(), inputs[0], 0, position); \
- node = graph()->NewNode( \
- jsgraph()->machine()->Atomic##Operation(MachineType::Type()), \
- MemBuffer(0), inputs[0], inputs[1], inputs[2], *effect_, *control_); \
- break; \
+#define BUILD_ATOMIC_TERNARY_OP(Name, Operation, Type) \
+ case wasm::kExpr##Name: { \
+ BoundsCheckMem(MachineType::Type(), inputs[0], offset, position); \
+ node = graph()->NewNode( \
+ jsgraph()->machine()->Atomic##Operation(MachineType::Type()), \
+ MemBuffer(offset), inputs[0], inputs[1], inputs[2], *effect_, \
+ *control_); \
+ break; \
}
ATOMIC_TERNARY_LIST(BUILD_ATOMIC_TERNARY_OP)
#undef BUILD_ATOMIC_TERNARY_OP
+
+#define BUILD_ATOMIC_LOAD_OP(Name, Type) \
+ case wasm::kExpr##Name: { \
+ BoundsCheckMem(MachineType::Type(), inputs[0], offset, position); \
+ node = graph()->NewNode( \
+ jsgraph()->machine()->AtomicLoad(MachineType::Type()), \
+ MemBuffer(offset), inputs[0], *effect_, *control_); \
+ break; \
+ }
+ ATOMIC_LOAD_LIST(BUILD_ATOMIC_LOAD_OP)
+#undef BUILD_ATOMIC_LOAD_OP
+
+#define BUILD_ATOMIC_STORE_OP(Name, Type, Rep) \
+ case wasm::kExpr##Name: { \
+ BoundsCheckMem(MachineType::Type(), inputs[0], offset, position); \
+ node = graph()->NewNode( \
+ jsgraph()->machine()->AtomicStore(MachineRepresentation::Rep), \
+ MemBuffer(offset), inputs[0], inputs[1], *effect_, *control_); \
+ break; \
+ }
+ ATOMIC_STORE_LIST(BUILD_ATOMIC_STORE_OP)
+#undef BUILD_ATOMIC_STORE_OP
default:
FATAL_UNSUPPORTED_OPCODE(opcode);
}
@@ -3825,17 +4162,28 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
return node;
}
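Each V(...) entry expands to one switch case; BUILD_ATOMIC_LOAD_OP applied to (I32AtomicLoad8U, Uint8), for instance, expands to (sketched by hand):

    case wasm::kExprI32AtomicLoad8U: {
      BoundsCheckMem(MachineType::Uint8(), inputs[0], offset, position);
      node = graph()->NewNode(
          jsgraph()->machine()->AtomicLoad(MachineType::Uint8()),
          MemBuffer(offset), inputs[0], *effect_, *control_);
      break;
    }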
-static void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
- Isolate* isolate, Handle<Code> code,
- const char* message, uint32_t index,
- const wasm::WasmName& module_name,
- const wasm::WasmName& func_name) {
- DCHECK(isolate->logger()->is_logging_code_events() ||
- isolate->is_profiling());
+#undef ATOMIC_BINOP_LIST
+#undef ATOMIC_TERNARY_LIST
+#undef ATOMIC_LOAD_LIST
+#undef ATOMIC_STORE_LIST
+
+namespace {
+bool must_record_function_compilation(Isolate* isolate) {
+ return isolate->logger()->is_logging_code_events() || isolate->is_profiling();
+}
+
+PRINTF_FORMAT(4, 5)
+void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
+ Isolate* isolate, Handle<Code> code,
+ const char* format, ...) {
+ DCHECK(must_record_function_compilation(isolate));
ScopedVector<char> buffer(128);
- SNPrintF(buffer, "%s#%d:%.*s:%.*s", message, index, module_name.length(),
- module_name.start(), func_name.length(), func_name.start());
+ va_list arguments;
+ va_start(arguments, format);
+ int len = VSNPrintF(buffer, format, arguments);
+ CHECK_LT(0, len);
+ va_end(arguments);
Handle<String> name_str =
isolate->factory()->NewStringFromAsciiChecked(buffer.start());
Handle<String> script_str =
@@ -3845,9 +4193,11 @@ static void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
PROFILE(isolate, CodeCreateEvent(tag, AbstractCode::cast(*code), *shared,
*script_str, 0, 0));
}
+} // namespace
Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
- Handle<Code> wasm_code, uint32_t index) {
+ Handle<Code> wasm_code, uint32_t index,
+ Address wasm_context_address) {
const wasm::WasmFunction* func = &module->functions[index];
//----------------------------------------------------------------------------
@@ -3869,15 +4219,13 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
std::vector<wasm::SignatureMap*>(), // signature_maps
std::vector<Handle<Code>>(), // function_code
BUILTIN_CODE(isolate, Illegal), // default_function_code
- 0,
- 0,
0};
WasmGraphBuilder builder(&env, &zone, &jsgraph,
CEntryStub(isolate, 1).GetCode(), func->sig);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
- builder.BuildJSToWasmWrapper(wasm_code);
+ builder.BuildJSToWasmWrapper(wasm_code, wasm_context_address);
//----------------------------------------------------------------------------
// Run the compilation pipeline.
@@ -3893,50 +4241,67 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
static_cast<int>(module->functions[index].sig->parameter_count());
CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
&zone, false, params + 1, CallDescriptor::kNoFlags);
- Code::Flags flags = Code::ComputeFlags(Code::JS_TO_WASM_FUNCTION);
- bool debugging =
-#if DEBUG
- true;
-#else
- FLAG_print_opt_code || FLAG_trace_turbo || FLAG_trace_turbo_graph;
-#endif
- Vector<const char> func_name = ArrayVector("js-to-wasm");
+#ifdef DEBUG
+ EmbeddedVector<char, 32> func_name;
static unsigned id = 0;
- Vector<char> buffer;
- if (debugging) {
- buffer = Vector<char>::New(128);
- int chars = SNPrintF(buffer, "js-to-wasm#%d", id);
- func_name = Vector<const char>::cast(buffer.SubVector(0, chars));
- }
+ func_name.Truncate(SNPrintF(func_name, "js-to-wasm#%d", id++));
+#else
+ Vector<const char> func_name = CStrVector("js-to-wasm");
+#endif
- CompilationInfo info(func_name, isolate, &zone, flags);
+ CompilationInfo info(func_name, isolate, &zone, Code::JS_TO_WASM_FUNCTION);
Handle<Code> code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_opt_code && !code.is_null()) {
OFStream os(stdout);
- code->Disassemble(buffer.start(), os);
+ code->Disassemble(func_name.start(), os);
}
#endif
- if (debugging) {
- buffer.Dispose();
- }
- if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
- char func_name[32];
- SNPrintF(ArrayVector(func_name), "js-to-wasm#%d", func->func_index);
+ if (must_record_function_compilation(isolate)) {
RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate, code,
- "js-to-wasm", index, wasm::WasmName("export"),
- CStrVector(func_name));
+ "%.*s", func_name.length(), func_name.start());
}
+
return code;
}
-Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
- wasm::FunctionSig* sig, uint32_t index,
- Handle<String> module_name,
- MaybeHandle<String> import_name,
- wasm::ModuleOrigin origin) {
+void ValidateImportWrapperReferencesImmovables(Handle<Code> wrapper) {
+#if !DEBUG
+ return;
+#endif
+ // We expect the only embedded objects to be those originating from
+ // a snapshot, which are immovable.
+ DisallowHeapAllocation no_gc;
+ if (wrapper.is_null()) return;
+ static constexpr int kAllGCRefs = (1 << (RelocInfo::LAST_GCED_ENUM + 1)) - 1;
+ for (RelocIterator it(*wrapper, kAllGCRefs); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ Object* target = nullptr;
+ switch (mode) {
+ case RelocInfo::CODE_TARGET:
+        // This would be either one of the stubs or builtins, because we
+        // have not linked yet.
+ target = reinterpret_cast<Object*>(it.rinfo()->target_address());
+ break;
+ case RelocInfo::EMBEDDED_OBJECT:
+ target = it.rinfo()->target_object();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ CHECK_NOT_NULL(target);
+ bool is_immovable =
+ target->IsSmi() || Heap::IsImmovable(HeapObject::cast(target));
+ CHECK(is_immovable);
+ }
+}
+
+Handle<Code> CompileWasmToJSWrapper(
+ Isolate* isolate, Handle<JSReceiver> target, wasm::FunctionSig* sig,
+ uint32_t index, wasm::ModuleOrigin origin,
+ Handle<FixedArray> global_js_imports_table) {
//----------------------------------------------------------------------------
// Create the Graph
//----------------------------------------------------------------------------
@@ -3958,10 +4323,20 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
source_position_table);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
- builder.BuildWasmToJSWrapper(target);
+ if (builder.BuildWasmToJSWrapper(target, global_js_imports_table, index)) {
+ global_js_imports_table->set(
+ OffsetForImportData(index, WasmGraphBuilder::kFunction), *target);
+ if (target->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(target);
+ global_js_imports_table->set(
+ OffsetForImportData(index, WasmGraphBuilder::kFunctionContext),
+ function->context());
+ global_js_imports_table->set(
+ OffsetForImportData(index, WasmGraphBuilder::kGlobalProxy),
+ function->context()->global_proxy());
+ }
+ }
- Handle<Code> code = Handle<Code>::null();
- {
if (FLAG_trace_turbo_graph) { // Simple textual RPO.
OFStream os(stdout);
os << "-- Graph after change lowering -- " << std::endl;
@@ -3973,48 +4348,105 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
if (machine.Is32()) {
incoming = GetI32WasmCallDescriptor(&zone, incoming);
}
- Code::Flags flags = Code::ComputeFlags(Code::WASM_TO_JS_FUNCTION);
- bool debugging =
-#if DEBUG
- true;
+
+#ifdef DEBUG
+ EmbeddedVector<char, 32> func_name;
+ static unsigned id = 0;
+ func_name.Truncate(SNPrintF(func_name, "wasm-to-js#%d", id++));
#else
- FLAG_print_opt_code || FLAG_trace_turbo || FLAG_trace_turbo_graph;
+ Vector<const char> func_name = CStrVector("wasm-to-js");
#endif
- Vector<const char> func_name = ArrayVector("wasm-to-js");
- static unsigned id = 0;
- Vector<char> buffer;
- if (debugging) {
- buffer = Vector<char>::New(128);
- int chars = SNPrintF(buffer, "wasm-to-js#%d", id);
- func_name = Vector<const char>::cast(buffer.SubVector(0, chars));
- }
- CompilationInfo info(func_name, isolate, &zone, flags);
- code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr,
- source_position_table);
+ CompilationInfo info(func_name, isolate, &zone, Code::WASM_TO_JS_FUNCTION);
+ Handle<Code> code = Pipeline::GenerateCodeForTesting(
+ &info, incoming, &graph, nullptr, source_position_table);
+ ValidateImportWrapperReferencesImmovables(code);
+ Handle<FixedArray> deopt_data =
+ isolate->factory()->NewFixedArray(2, TENURED);
+ intptr_t loc =
+ reinterpret_cast<intptr_t>(global_js_imports_table.location());
+ Handle<Object> loc_handle = isolate->factory()->NewHeapNumberFromBits(loc);
+ deopt_data->set(0, *loc_handle);
+ Handle<Object> index_handle = isolate->factory()->NewNumberFromInt(
+ OffsetForImportData(index, WasmGraphBuilder::kFunction));
+ deopt_data->set(1, *index_handle);
+ code->set_deoptimization_data(*deopt_data);
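The deopt data smuggles the (non-GCed) handle location through a HeapNumber's bit pattern; the consuming side is the inverse cast, roughly (a sketch, accessor names assumed):

    // uint64_t bits = HeapNumber::cast(deopt_data->get(0))->value_as_bits();
    // Object** table_loc =
    //     reinterpret_cast<Object**>(static_cast<intptr_t>(bits));
    // int func_slot = Smi::ToInt(deopt_data->get(1));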
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_opt_code && !code.is_null()) {
OFStream os(stdout);
- code->Disassemble(buffer.start(), os);
+ code->Disassemble(func_name.start(), os);
}
#endif
- if (debugging) {
- buffer.Dispose();
- }
+
+ if (must_record_function_compilation(isolate)) {
+ RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate, code,
+ "%.*s", func_name.length(), func_name.start());
+ }
+
+ return code;
+}
+
+Handle<Code> CompileWasmToWasmWrapper(Isolate* isolate, Handle<Code> target,
+ wasm::FunctionSig* sig, uint32_t index,
+ Address new_wasm_context_address) {
+ //----------------------------------------------------------------------------
+ // Create the Graph
+ //----------------------------------------------------------------------------
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ Graph graph(&zone);
+ CommonOperatorBuilder common(&zone);
+ MachineOperatorBuilder machine(&zone);
+ JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
+
+ Node* control = nullptr;
+ Node* effect = nullptr;
+
+ WasmGraphBuilder builder(nullptr, &zone, &jsgraph, Handle<Code>(), sig);
+ builder.set_control_ptr(&control);
+ builder.set_effect_ptr(&effect);
+ builder.BuildWasmToWasmWrapper(target, new_wasm_context_address);
+ if (HasInt64ParamOrReturn(sig)) builder.LowerInt64();
+
+ if (FLAG_trace_turbo_graph) { // Simple textual RPO.
+ OFStream os(stdout);
+ os << "-- Graph after change lowering -- " << std::endl;
+ os << AsRPO(graph);
+ }
+
+ // Schedule and compile to machine code.
+ CallDescriptor* incoming = GetWasmCallDescriptor(&zone, sig);
+ if (machine.Is32()) {
+ incoming = GetI32WasmCallDescriptor(&zone, incoming);
+ }
+ bool debugging =
+#if DEBUG
+ true;
+#else
+ FLAG_print_opt_code || FLAG_trace_turbo || FLAG_trace_turbo_graph;
+#endif
+ Vector<const char> func_name = ArrayVector("wasm-to-wasm");
+ static unsigned id = 0;
+ Vector<char> buffer;
+ if (debugging) {
+ buffer = Vector<char>::New(128);
+ int chars = SNPrintF(buffer, "wasm-to-wasm#%d", id);
+ func_name = Vector<const char>::cast(buffer.SubVector(0, chars));
+ }
+
+ CompilationInfo info(func_name, isolate, &zone, Code::WASM_FUNCTION);
+ Handle<Code> code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph);
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_opt_code && !code.is_null()) {
+ OFStream os(stdout);
+ code->Disassemble(buffer.start(), os);
+ }
+#endif
+ if (debugging) {
+ buffer.Dispose();
}
if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
- const char* function_name = nullptr;
- size_t function_name_size = 0;
- if (!import_name.is_null()) {
- Handle<String> handle = import_name.ToHandleChecked();
- function_name = handle->ToCString().get();
- function_name_size = static_cast<size_t>(handle->length());
- }
RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate, code,
- "wasm-to-js", index,
- {module_name->ToCString().get(),
- static_cast<size_t>(module_name->length())},
- {function_name, function_name_size});
+ "wasm-to-wasm#%d", index);
}
return code;
@@ -4048,7 +4480,7 @@ Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
{
if (FLAG_trace_turbo_graph) { // Simple textual RPO.
OFStream os(stdout);
- os << "-- Wasm to interpreter graph -- " << std::endl;
+ os << "-- Wasm interpreter entry graph -- " << std::endl;
os << AsRPO(graph);
}
@@ -4057,27 +4489,27 @@ Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
if (machine.Is32()) {
incoming = GetI32WasmCallDescriptor(&zone, incoming);
}
- Code::Flags flags = Code::ComputeFlags(Code::WASM_INTERPRETER_ENTRY);
- EmbeddedVector<char, 32> debug_name;
- int name_len = SNPrintF(debug_name, "wasm-to-interpreter#%d", func_index);
- DCHECK(name_len > 0 && name_len < debug_name.length());
- debug_name.Truncate(name_len);
- DCHECK_EQ('\0', debug_name.start()[debug_name.length()]);
-
- CompilationInfo info(debug_name, isolate, &zone, flags);
+#ifdef DEBUG
+ EmbeddedVector<char, 32> func_name;
+ func_name.Truncate(
+ SNPrintF(func_name, "wasm-interpreter-entry#%d", func_index));
+#else
+ Vector<const char> func_name = CStrVector("wasm-interpreter-entry");
+#endif
+
+ CompilationInfo info(func_name, isolate, &zone,
+ Code::WASM_INTERPRETER_ENTRY);
code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_opt_code && !code.is_null()) {
OFStream os(stdout);
- code->Disassemble(debug_name.start(), os);
+ code->Disassemble(func_name.start(), os);
}
#endif
- if (isolate->logger()->is_logging_code_events() ||
- isolate->is_profiling()) {
+ if (must_record_function_compilation(isolate)) {
RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate, code,
- "wasm-to-interpreter", func_index,
- wasm::WasmName("module"), debug_name);
+ "%.*s", func_name.length(), func_name.start());
}
}
@@ -4089,7 +4521,8 @@ Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
return code;
}
-Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
+Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig,
+ Address wasm_context_address) {
Zone zone(isolate->allocator(), ZONE_NAME);
Graph graph(&zone);
CommonOperatorBuilder common(&zone);
@@ -4103,7 +4536,7 @@ Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
CEntryStub(isolate, 1).GetCode(), sig);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
- builder.BuildCWasmEntry();
+ builder.BuildCWasmEntry(wasm_context_address);
if (FLAG_trace_turbo_graph) { // Simple textual RPO.
OFStream os(stdout);
@@ -4115,7 +4548,6 @@ Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
&zone, false, CWasmEntryParameters::kNumParameters + 1,
CallDescriptor::kNoFlags);
- Code::Flags flags = Code::ComputeFlags(Code::C_WASM_ENTRY);
// Build a name in the form "c-wasm-entry:<params>:<returns>".
static constexpr size_t kMaxNameLen = 128;
@@ -4134,7 +4566,7 @@ Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
debug_name[name_len] = '\0';
Vector<const char> debug_name_vec(debug_name, name_len);
- CompilationInfo info(debug_name_vec, isolate, &zone, flags);
+ CompilationInfo info(debug_name_vec, isolate, &zone, Code::C_WASM_ENTRY);
Handle<Code> code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_opt_code && !code.is_null()) {
@@ -4166,7 +4598,8 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
SourcePositionTable* source_position_table =
new (jsgraph_->zone()) SourcePositionTable(jsgraph_->graph());
WasmGraphBuilder builder(env_, jsgraph_->zone(), jsgraph_, centry_stub_,
- func_body_.sig, source_position_table);
+ func_body_.sig, source_position_table,
+ runtime_exception_support_);
graph_construction_result_ =
wasm::BuildTFGraph(isolate_->allocator(), &builder, func_body_);
@@ -4181,7 +4614,8 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
builder.LowerInt64();
- if (builder.has_simd() && !CpuFeatures::SupportsWasmSimd128()) {
+ if (builder.has_simd() &&
+ (!CpuFeatures::SupportsWasmSimd128() || lower_simd_)) {
SimdScalarLowering(jsgraph_, func_body_.sig).LowerGraph();
}
@@ -4216,51 +4650,19 @@ Vector<const char> GetDebugName(Zone* zone, wasm::WasmName name, int index) {
} // namespace
WasmCompilationUnit::WasmCompilationUnit(
- Isolate* isolate, const wasm::ModuleWireBytes& wire_bytes, ModuleEnv* env,
- const wasm::WasmFunction* function, Handle<Code> centry_stub)
- : WasmCompilationUnit(
- isolate, env,
- wasm::FunctionBody{function->sig, function->code.offset(),
- wire_bytes.start() + function->code.offset(),
- wire_bytes.start() + function->code.end_offset()},
- wire_bytes.GetNameOrNull(function), function->func_index,
- centry_stub) {}
-
-WasmCompilationUnit::WasmCompilationUnit(Isolate* isolate, ModuleEnv* env,
- wasm::FunctionBody body,
- wasm::WasmName name, int index,
- Handle<Code> centry_stub)
- : isolate_(isolate),
- env_(env),
- func_body_(body),
- func_name_(name),
- counters_(isolate->counters()),
- centry_stub_(centry_stub),
- func_index_(index) {}
-
-WasmCompilationUnit::WasmCompilationUnit(
- Isolate* isolate, const wasm::ModuleWireBytes& wire_bytes, ModuleEnv* env,
- const wasm::WasmFunction* function, Handle<Code> centry_stub,
- const std::shared_ptr<Counters>& async_counters)
- : WasmCompilationUnit(
- isolate, env,
- wasm::FunctionBody{function->sig, function->code.offset(),
- wire_bytes.start() + function->code.offset(),
- wire_bytes.start() + function->code.end_offset()},
- wire_bytes.GetNameOrNull(function), function->func_index, centry_stub,
- async_counters) {}
-
-WasmCompilationUnit::WasmCompilationUnit(
Isolate* isolate, ModuleEnv* env, wasm::FunctionBody body,
wasm::WasmName name, int index, Handle<Code> centry_stub,
- const std::shared_ptr<Counters>& async_counters)
+ Counters* counters, RuntimeExceptionSupport exception_support,
+ bool lower_simd)
: isolate_(isolate),
env_(env),
func_body_(body),
func_name_(name),
- counters_(async_counters.get()),
+ counters_(counters ? counters : isolate->counters()),
centry_stub_(centry_stub),
- func_index_(index) {}
+ func_index_(index),
+ runtime_exception_support_(exception_support),
+ lower_simd_(lower_simd) {}
void WasmCompilationUnit::ExecuteCompilation() {
auto timed_histogram = env_->module->is_wasm()
@@ -4315,8 +4717,7 @@ void WasmCompilationUnit::ExecuteCompilation() {
}
info_.reset(new CompilationInfo(
GetDebugName(compilation_zone_.get(), func_name_, func_index_),
- isolate_, compilation_zone_.get(),
- Code::ComputeFlags(Code::WASM_FUNCTION)));
+ isolate_, compilation_zone_.get(), Code::WASM_FUNCTION));
ZoneVector<trap_handler::ProtectedInstructionData> protected_instructions(
compilation_zone_.get());
@@ -4350,15 +4751,16 @@ MaybeHandle<Code> WasmCompilationUnit::FinishCompilation(
wasm::ErrorThrower* thrower) {
if (!ok_) {
if (graph_construction_result_.failed()) {
- // Add the function as another context for the exception
- ScopedVector<char> buffer(128);
+ // Add the function as another context for the exception.
+ EmbeddedVector<char, 128> message;
if (func_name_.start() == nullptr) {
- SNPrintF(buffer, "Compiling wasm function #%d failed", func_index_);
+ SNPrintF(message, "Compiling wasm function #%d failed", func_index_);
} else {
- SNPrintF(buffer, "Compiling wasm function #%d:%.*s failed", func_index_,
- func_name_.length(), func_name_.start());
+ wasm::TruncatedUserString<> trunc_name(func_name_);
+ SNPrintF(message, "Compiling wasm function #%d:%.*s failed",
+ func_index_, trunc_name.length(), trunc_name.start());
}
- thrower->CompileFailed(buffer.start(), graph_construction_result_);
+ thrower->CompileFailed(message.start(), graph_construction_result_);
}
return {};
@@ -4373,11 +4775,11 @@ MaybeHandle<Code> WasmCompilationUnit::FinishCompilation(
Handle<Code> code = info_->code();
DCHECK(!code.is_null());
- if (isolate_->logger()->is_logging_code_events() ||
- isolate_->is_profiling()) {
+ if (must_record_function_compilation(isolate_)) {
+ wasm::TruncatedUserString<> trunc_name(func_name_);
RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate_, code,
- "WASM_function", func_index_,
- wasm::WasmName("module"), func_name_);
+ "wasm_function#%d:%.*s", func_index_,
+ trunc_name.length(), trunc_name.start());
}
if (FLAG_trace_wasm_decode_time) {
@@ -4395,12 +4797,20 @@ MaybeHandle<Code> WasmCompilationUnit::CompileWasmFunction(
wasm::ErrorThrower* thrower, Isolate* isolate,
const wasm::ModuleWireBytes& wire_bytes, ModuleEnv* env,
const wasm::WasmFunction* function) {
- WasmCompilationUnit unit(isolate, wire_bytes, env, function,
- CEntryStub(isolate, 1).GetCode());
+ wasm::FunctionBody function_body{
+ function->sig, function->code.offset(),
+ wire_bytes.start() + function->code.offset(),
+ wire_bytes.start() + function->code.end_offset()};
+ WasmCompilationUnit unit(
+ isolate, env, function_body, wire_bytes.GetNameOrNull(function),
+ function->func_index, CEntryStub(isolate, 1).GetCode());
unit.ExecuteCompilation();
return unit.FinishCompilation(thrower);
}
+#undef WASM_64
+#undef FATAL_UNSUPPORTED_OPCODE
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 389013ac72..24c4a6a9f8 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -70,32 +70,26 @@ struct ModuleEnv {
const std::vector<Handle<Code>> function_code;
// If the default code is not a null handle, always use it for direct calls.
const Handle<Code> default_function_code;
- // Address of the start of memory.
- const uintptr_t mem_start;
- // Size of memory in bytes.
- const uint32_t mem_size;
// Address of the start of the globals region.
const uintptr_t globals_start;
};
+enum RuntimeExceptionSupport : bool {
+ kRuntimeExceptionSupport = true,
+ kNoRuntimeExceptionSupport = false
+};
+
class WasmCompilationUnit final {
public:
- // Use the following constructors if you know you are running on the
- // foreground thread.
- WasmCompilationUnit(Isolate* isolate, const wasm::ModuleWireBytes& wire_bytes,
- ModuleEnv* env, const wasm::WasmFunction* function,
- Handle<Code> centry_stub);
- WasmCompilationUnit(Isolate* isolate, ModuleEnv* env, wasm::FunctionBody body,
- wasm::WasmName name, int index, Handle<Code> centry_stub);
- // Use the following constructors if the compilation may run on a background
- // thread.
- WasmCompilationUnit(Isolate* isolate, const wasm::ModuleWireBytes& wire_bytes,
- ModuleEnv* env, const wasm::WasmFunction* function,
- Handle<Code> centry_stub,
- const std::shared_ptr<Counters>& async_counters);
- WasmCompilationUnit(Isolate* isolate, ModuleEnv* env, wasm::FunctionBody body,
- wasm::WasmName name, int index, Handle<Code> centry_stub,
- const std::shared_ptr<Counters>& async_counters);
+  // If constructing from a background thread, pass in a Counters*, and ensure
+  // that the Counters object lives at least as long as this compilation unit
+  // (which typically means holding a std::shared_ptr<Counters>).
+ // If no such pointer is passed, Isolate::counters() will be called. This is
+ // only allowed to happen on the foreground thread.
+ WasmCompilationUnit(Isolate*, ModuleEnv*, wasm::FunctionBody, wasm::WasmName,
+ int index, Handle<Code> centry_stub, Counters* = nullptr,
+ RuntimeExceptionSupport = kRuntimeExceptionSupport,
+ bool lower_simd = false);
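A usage sketch for the background-thread case described above (the async_counters() accessor is an assumption; the key point is that the shared_ptr must outlive the unit):

    std::shared_ptr<Counters> counters = isolate->async_counters();  // assumed
    WasmCompilationUnit unit(isolate, env, body, name, func_index,
                             CEntryStub(isolate, 1).GetCode(), counters.get());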
int func_index() const { return func_index_; }
@@ -112,6 +106,7 @@ class WasmCompilationUnit final {
private:
SourcePositionTable* BuildGraphForWasmFunction(double* decode_ms);
+ Counters* counters() { return counters_; }
Isolate* isolate_;
ModuleEnv* env_;
@@ -130,24 +125,34 @@ class WasmCompilationUnit final {
Handle<Code> centry_stub_;
int func_index_;
wasm::Result<wasm::DecodeStruct*> graph_construction_result_;
+ // See WasmGraphBuilder::runtime_exception_support_.
+ RuntimeExceptionSupport runtime_exception_support_;
bool ok_ = true;
size_t memory_cost_ = 0;
-
- Counters* counters() { return counters_; }
+ bool lower_simd_;
DISALLOW_COPY_AND_ASSIGN(WasmCompilationUnit);
};
// Wraps a JS function, producing a code object that can be called from wasm.
+// The global_js_imports_table is a global handle to a fixed array of target
+// JSReceivers, with a lifetime tied to the module. We store its location (a
+// non-GCed address) in the generated code, so that the embedded pointer can
+// reside outside the GCed heap.
Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
wasm::FunctionSig* sig, uint32_t index,
- Handle<String> module_name,
- MaybeHandle<String> import_name,
- wasm::ModuleOrigin origin);
+ wasm::ModuleOrigin origin,
+ Handle<FixedArray> global_js_imports_table);
// Wraps a given wasm code object, producing a code object.
Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
- Handle<Code> wasm_code, uint32_t index);
+ Handle<Code> wasm_code, uint32_t index,
+ Address wasm_context_address);
+
+// Wraps a wasm function, producing a code object that can be called from other
+// wasm instances (the WasmContext address must be changed).
+Handle<Code> CompileWasmToWasmWrapper(Isolate* isolate, Handle<Code> target,
+ wasm::FunctionSig* sig, uint32_t index,
+ Address new_wasm_context_address);
// Compiles a stub that redirects a call to a wasm function to the wasm
// interpreter. It's ABI compatible with the compiled wasm function.
@@ -165,17 +170,17 @@ enum CWasmEntryParameters {
// Compiles a stub with JS linkage, taking parameters as described by
// {CWasmEntryParameters}. It loads the wasm parameters from the argument
// buffer and calls the wasm function given as first parameter.
-Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig);
+Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig,
+ Address wasm_context_address);
// Abstracts details of building TurboFan graph nodes for wasm to separate
// the wasm decoder from the internal details of TurboFan.
typedef ZoneVector<Node*> NodeVector;
class WasmGraphBuilder {
public:
- WasmGraphBuilder(
- ModuleEnv* env, Zone* z, JSGraph* g, Handle<Code> centry_stub_,
- wasm::FunctionSig* sig,
- compiler::SourcePositionTable* source_position_table = nullptr);
+ WasmGraphBuilder(ModuleEnv*, Zone*, JSGraph*, Handle<Code> centry_stub_,
+ wasm::FunctionSig*, compiler::SourcePositionTable* = nullptr,
+ RuntimeExceptionSupport = kRuntimeExceptionSupport);
Node** Buffer(size_t count) {
if (count > cur_bufsize_) {
@@ -202,6 +207,7 @@ class WasmGraphBuilder {
Node* Uint32Constant(uint32_t value);
Node* Int32Constant(int32_t value);
Node* Int64Constant(int64_t value);
+ Node* IntPtrConstant(intptr_t value);
Node* Float32Constant(float value);
Node* Float64Constant(double value);
Node* HeapConstant(Handle<HeapObject> value);
@@ -210,9 +216,12 @@ class WasmGraphBuilder {
Node* Unop(wasm::WasmOpcode opcode, Node* input,
wasm::WasmCodePosition position = wasm::kNoCodePosition);
Node* GrowMemory(Node* input);
- Node* Throw(Node* input);
+ Node* Throw(uint32_t tag, const wasm::WasmException* exception,
+ const Vector<Node*> values);
Node* Rethrow();
- Node* Catch(Node* input, wasm::WasmCodePosition position);
+ Node* ConvertExceptionTagToRuntimeId(uint32_t tag);
+ Node* GetExceptionRuntimeId();
+ Node** GetExceptionValues(const wasm::WasmException* except_decl);
unsigned InputCount(Node* node);
bool IsPhiWithMerge(Node* phi, Node* merge);
bool ThrowsException(Node* node, Node** if_success, Node** if_exception);
@@ -261,14 +270,27 @@ class WasmGraphBuilder {
Node* CallIndirect(uint32_t index, Node** args, Node*** rets,
wasm::WasmCodePosition position);
- void BuildJSToWasmWrapper(Handle<Code> wasm_code);
- void BuildWasmToJSWrapper(Handle<JSReceiver> target);
+ void BuildJSToWasmWrapper(Handle<Code> wasm_code,
+ Address wasm_context_address);
+ enum ImportDataType {
+ kFunction = 1,
+ kGlobalProxy = 2,
+ kFunctionContext = 3,
+ };
+ Node* LoadImportDataAtOffset(int offset, Node* table);
+ Node* LoadNativeContext(Node* table);
+ Node* LoadImportData(int index, ImportDataType type, Node* table);
+ bool BuildWasmToJSWrapper(Handle<JSReceiver> target,
+ Handle<FixedArray> global_js_imports_table,
+ int index);
+ void BuildWasmToWasmWrapper(Handle<Code> target,
+ Address new_wasm_context_address);
void BuildWasmInterpreterEntry(uint32_t func_index,
Handle<WasmInstanceObject> instance);
- void BuildCWasmEntry();
+ void BuildCWasmEntry(Address wasm_context_address);
Node* ToJS(Node* node, wasm::ValueType type);
- Node* FromJS(Node* node, Node* context, wasm::ValueType type);
+ Node* FromJS(Node* node, Node* js_context, wasm::ValueType type);
Node* Invert(Node* node);
void EnsureFunctionTableNodes();
@@ -278,6 +300,8 @@ class WasmGraphBuilder {
Node* CurrentMemoryPages();
Node* GetGlobal(uint32_t index);
Node* SetGlobal(uint32_t index, Node* val);
+ Node* TraceMemoryOperation(bool is_store, MachineRepresentation, Node* index,
+ uint32_t offset, wasm::WasmCodePosition);
Node* LoadMem(wasm::ValueType type, MachineType memtype, Node* index,
uint32_t offset, uint32_t alignment,
wasm::WasmCodePosition position);
@@ -286,6 +310,10 @@ class WasmGraphBuilder {
wasm::ValueType type);
static void PrintDebugName(Node* node);
+ void set_wasm_context(Node* wasm_context) {
+ this->wasm_context_ = wasm_context;
+ }
+
Node* Control() { return *control_; }
Node* Effect() { return *effect_; }
@@ -293,6 +321,13 @@ class WasmGraphBuilder {
void set_effect_ptr(Node** effect) { this->effect_ = effect; }
+ Node* LoadMemSize();
+ Node* LoadMemStart();
+
+ void set_mem_size(Node** mem_size) { this->mem_size_ = mem_size; }
+
+ void set_mem_start(Node** mem_start) { this->mem_start_ = mem_start; }
+
wasm::FunctionSig* GetFunctionSignature() { return sig_; }
void LowerInt64();
@@ -316,14 +351,11 @@ class WasmGraphBuilder {
Node* Simd8x16ShuffleOp(const uint8_t shuffle[16], Node* const* inputs);
Node* AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
+ uint32_t alignment, uint32_t offset,
wasm::WasmCodePosition position);
bool has_simd() const { return has_simd_; }
- void SetRuntimeExceptionSupport(bool value) {
- has_runtime_exception_support_ = value;
- }
-
const wasm::WasmModule* module() { return env_ ? env_->module : nullptr; }
private:
@@ -333,13 +365,14 @@ class WasmGraphBuilder {
JSGraph* jsgraph_;
Node* centry_stub_node_;
ModuleEnv* env_ = nullptr;
- Node* mem_buffer_ = nullptr;
- Node* mem_size_ = nullptr;
+ Node* wasm_context_ = nullptr;
NodeVector signature_tables_;
NodeVector function_tables_;
NodeVector function_table_sizes_;
Node** control_ = nullptr;
Node** effect_ = nullptr;
+ Node** mem_size_ = nullptr;
+ Node** mem_start_ = nullptr;
Node** cur_buffer_;
size_t cur_bufsize_;
Node* def_buffer_[kDefaultBufferSize];
@@ -348,7 +381,7 @@ class WasmGraphBuilder {
// If the runtime doesn't support exception propagation,
// we won't generate stack checks, and trap handling will also
// be generated differently.
- bool has_runtime_exception_support_ = true;
+ RuntimeExceptionSupport runtime_exception_support_;
wasm::FunctionSig* sig_;
SetOncePointer<const Operator> allocate_heap_number_operator_;
@@ -360,7 +393,6 @@ class WasmGraphBuilder {
Graph* graph();
Node* String(const char* string);
- Node* MemSize();
Node* MemBuffer(uint32_t offset);
void BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset,
wasm::WasmCodePosition position);
@@ -374,7 +406,8 @@ class WasmGraphBuilder {
Node* MaskShiftCount32(Node* node);
Node* MaskShiftCount64(Node* node);
- Node* BuildCCall(MachineSignature* sig, Node** args);
+ template <typename... Args>
+ Node* BuildCCall(MachineSignature* sig, Node* function, Args... args);
Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args, Node*** rets,
wasm::WasmCodePosition position);
@@ -440,7 +473,7 @@ class WasmGraphBuilder {
MachineType result_type, int trap_zero,
wasm::WasmCodePosition position);
- Node* BuildJavaScriptToNumber(Node* node, Node* context);
+ Node* BuildJavaScriptToNumber(Node* node, Node* js_context);
Node* BuildChangeInt32ToTagged(Node* value);
Node* BuildChangeFloat64ToTagged(Node* value);
@@ -469,7 +502,11 @@ class WasmGraphBuilder {
Node* BuildAsmjsLoadMem(MachineType type, Node* index);
Node* BuildAsmjsStoreMem(MachineType type, Node* index, Node* val);
- Node** Realloc(Node** buffer, size_t old_count, size_t new_count) {
+ uint32_t GetExceptionEncodedSize(const wasm::WasmException* exception) const;
+ void BuildEncodeException32BitValue(uint32_t* index, Node* value);
+ Node* BuildDecodeException32BitValue(Node* const* values, uint32_t* index);
+
+ Node** Realloc(Node* const* buffer, size_t old_count, size_t new_count) {
Node** buf = Buffer(new_count);
if (buf != buffer) memcpy(buf, buffer, old_count * sizeof(Node*));
return buf;
@@ -487,13 +524,20 @@ class WasmGraphBuilder {
Node* BuildCallToRuntime(Runtime::FunctionId f, Node** parameters,
int parameter_count);
- Node* BuildCallToRuntimeWithContext(Runtime::FunctionId f, Node* context,
+ Node* BuildCallToRuntimeWithContext(Runtime::FunctionId f, Node* js_context,
Node** parameters, int parameter_count);
-
+ Node* BuildCallToRuntimeWithContextFromJS(Runtime::FunctionId f,
+ Node* js_context,
+ Node* const* parameters,
+ int parameter_count);
Node* BuildModifyThreadInWasmFlag(bool new_value);
Builtins::Name GetBuiltinIdForTrap(wasm::TrapReason reason);
};
+// The parameter index where the wasm_context parameter should be placed in wasm
+// call descriptors. This is used by the Int64Lowering::LowerNode method.
+constexpr int kWasmContextParameterIndex = 0;
+
V8_EXPORT_PRIVATE CallDescriptor* GetWasmCallDescriptor(Zone* zone,
wasm::FunctionSig* sig);
V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptor(
diff --git a/deps/v8/src/compiler/wasm-linkage.cc b/deps/v8/src/compiler/wasm-linkage.cc
index 268ad7ca7a..ccebecb7d4 100644
--- a/deps/v8/src/compiler/wasm-linkage.cc
+++ b/deps/v8/src/compiler/wasm-linkage.cc
@@ -5,7 +5,6 @@
#include "src/assembler-inl.h"
#include "src/base/lazy-instance.h"
#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/compiler/linkage.h"
@@ -47,7 +46,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// ===========================================================================
// == ia32 ===================================================================
// ===========================================================================
-#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi
+#define GP_PARAM_REGISTERS esi, eax, edx, ecx, ebx
#define GP_RETURN_REGISTERS eax, edx
#define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
#define FP_RETURN_REGISTERS xmm1, xmm2
@@ -56,7 +55,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// ===========================================================================
// == x64 ====================================================================
// ===========================================================================
-#define GP_PARAM_REGISTERS rax, rdx, rcx, rbx, rsi, rdi
+#define GP_PARAM_REGISTERS rsi, rax, rdx, rcx, rbx, rdi
#define GP_RETURN_REGISTERS rax, rdx
#define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
#define FP_RETURN_REGISTERS xmm1, xmm2
@@ -65,7 +64,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// ===========================================================================
// == arm ====================================================================
// ===========================================================================
-#define GP_PARAM_REGISTERS r0, r1, r2, r3
+#define GP_PARAM_REGISTERS r3, r0, r1, r2
#define GP_RETURN_REGISTERS r0, r1
#define FP_PARAM_REGISTERS d0, d1, d2, d3, d4, d5, d6, d7
#define FP_RETURN_REGISTERS d0, d1
@@ -74,7 +73,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// ===========================================================================
// == arm64 ====================================================================
// ===========================================================================
-#define GP_PARAM_REGISTERS x0, x1, x2, x3, x4, x5, x6, x7
+#define GP_PARAM_REGISTERS x7, x0, x1, x2, x3, x4, x5, x6
#define GP_RETURN_REGISTERS x0, x1
#define FP_PARAM_REGISTERS d0, d1, d2, d3, d4, d5, d6, d7
#define FP_RETURN_REGISTERS d0, d1
@@ -101,7 +100,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// ===========================================================================
// == ppc & ppc64 ============================================================
// ===========================================================================
-#define GP_PARAM_REGISTERS r3, r4, r5, r6, r7, r8, r9, r10
+#define GP_PARAM_REGISTERS r10, r3, r4, r5, r6, r7, r8, r9
#define GP_RETURN_REGISTERS r3, r4
#define FP_PARAM_REGISTERS d1, d2, d3, d4, d5, d6, d7, d8
#define FP_RETURN_REGISTERS d1, d2
@@ -110,7 +109,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// ===========================================================================
// == s390x ==================================================================
// ===========================================================================
-#define GP_PARAM_REGISTERS r2, r3, r4, r5, r6
+#define GP_PARAM_REGISTERS r6, r2, r3, r4, r5
#define GP_RETURN_REGISTERS r2, r3
#define FP_PARAM_REGISTERS d0, d2, d4, d6
#define FP_RETURN_REGISTERS d0, d2, d4, d6
@@ -119,7 +118,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// ===========================================================================
// == s390 ===================================================================
// ===========================================================================
-#define GP_PARAM_REGISTERS r2, r3, r4, r5, r6
+#define GP_PARAM_REGISTERS r6, r2, r3, r4, r5
#define GP_RETURN_REGISTERS r2, r3
#define FP_PARAM_REGISTERS d0, d2
#define FP_RETURN_REGISTERS d0, d2
@@ -170,7 +169,7 @@ struct Allocator {
// TODO(bbudge) Modify wasm linkage to allow use of all float regs.
if (type == wasm::kWasmF32) {
int float_reg_code = reg.code() * 2;
- DCHECK(float_reg_code < RegisterConfiguration::kMaxFPRegisters);
+ DCHECK_GT(RegisterConfiguration::kMaxFPRegisters, float_reg_code);
return LinkageLocation::ForRegister(
DoubleRegister::from_code(float_reg_code).code(),
MachineTypeFor(type));
@@ -223,8 +222,9 @@ static constexpr Allocator parameter_registers(kGPParamRegisters,
// General code uses the above configuration data.
CallDescriptor* GetWasmCallDescriptor(Zone* zone, wasm::FunctionSig* fsig) {
+ // The '+ 1' here is to accommodate the wasm_context as the first parameter.
LocationSignature::Builder locations(zone, fsig->return_count(),
- fsig->parameter_count());
+ fsig->parameter_count() + 1);
Allocator rets = return_registers;
@@ -237,6 +237,9 @@ CallDescriptor* GetWasmCallDescriptor(Zone* zone, wasm::FunctionSig* fsig) {
Allocator params = parameter_registers;
+ // Add parameter for the wasm_context.
+ locations.AddParam(params.Next(MachineType::PointerRepresentation()));
+
// Add register and/or stack parameter(s).
const int parameter_count = static_cast<int>(fsig->parameter_count());
for (int i = 0; i < parameter_count; i++) {
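Taken together with the register reordering above, the extra location means every wasm call descriptor now reserves slot 0 for the context pointer. A toy illustration of the resulting x64 layout (our own code, not V8's; the signature is chosen arbitrarily):

    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
      // x64 GP_PARAM_REGISTERS after the reordering: context register first.
      std::vector<std::string> gp = {"rsi", "rax", "rdx", "rcx", "rbx", "rdi"};
      const int sig_params = 2;  // e.g. an (i32, i32) -> i32 signature
      size_t next = 0;
      std::printf("slot 0 (wasm_context) -> %s\n", gp[next++].c_str());
      for (int i = 0; i < sig_params; ++i)
        std::printf("slot %d (param %d) -> %s\n", i + 1, i, gp[next++].c_str());
    }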
@@ -339,6 +342,11 @@ CallDescriptor* GetI32WasmCallDescriptorForSimd(Zone* zone,
MachineRepresentation::kWord32);
}
+#undef GP_PARAM_REGISTERS
+#undef GP_RETURN_REGISTERS
+#undef FP_PARAM_REGISTERS
+#undef FP_RETURN_REGISTERS
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index 8a1ba9c63f..f6cf21fa1c 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -6,14 +6,12 @@
#include <limits>
-#include "src/callable.h"
#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/heap/heap-inl.h"
-#include "src/wasm/wasm-module.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"
@@ -242,24 +240,6 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
mode_(mode),
zone_(gen->zone()) {}
- void SaveRegisters(RegList registers) {
- DCHECK(NumRegs(registers) > 0);
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- __ pushq(Register::from_code(i));
- }
- }
- }
-
- void RestoreRegisters(RegList registers) {
- DCHECK(NumRegs(registers) > 0);
- for (int i = Register::kNumRegisters - 1; i >= 0; --i) {
- if ((registers >> i) & 1u) {
- __ popq(Register::from_code(i));
- }
- }
- }
-
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
@@ -269,38 +249,16 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
exit());
__ leap(scratch1_, operand_);
-#ifdef V8_CSA_WRITE_BARRIER
- Callable const callable =
- Builtins::CallableFor(__ isolate(), Builtins::kRecordWrite);
- RegList registers = callable.descriptor().allocatable_registers();
-
- SaveRegisters(registers);
-
- Register object_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kObject));
- Register slot_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kSlot));
- Register isolate_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kIsolate));
-
- __ pushq(object_);
- __ pushq(scratch1_);
-
- __ popq(slot_parameter);
- __ popq(object_parameter);
-
- __ LoadAddress(isolate_parameter,
- ExternalReference::isolate_address(__ isolate()));
- __ Call(callable.code(), RelocInfo::CODE_TARGET);
-
- RestoreRegisters(registers);
-#else
RememberedSetAction const remembered_set_action =
mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
: OMIT_REMEMBERED_SET;
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+#ifdef V8_CSA_WRITE_BARRIER
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode);
+#else
__ CallStubDelayed(
new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode));
@@ -320,12 +278,12 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
class WasmOutOfLineTrap final : public OutOfLineCode {
public:
WasmOutOfLineTrap(CodeGenerator* gen, int pc, bool frame_elided,
- int32_t position)
+ Instruction* instr)
: OutOfLineCode(gen),
gen_(gen),
pc_(pc),
frame_elided_(frame_elided),
- position_(position) {}
+ instr_(instr) {}
// TODO(eholk): Refactor this method to take the code generator as a
// parameter.
@@ -336,38 +294,31 @@ class WasmOutOfLineTrap final : public OutOfLineCode {
__ EnterFrame(StackFrame::WASM_COMPILED);
}
- wasm::TrapReason trap_id = wasm::kTrapMemOutOfBounds;
- int trap_reason = wasm::WasmOpcodes::TrapReasonToMessageId(trap_id);
- __ Push(Smi::FromInt(trap_reason));
- // TODO(eholk): use AssembleSourcePosition instead of passing in position_
- // as a parameter. See AssembleArchTrap as an example. Consider sharing code
- // with AssembleArchTrap.
- __ Push(Smi::FromInt(position_));
- __ Move(rsi, Smi::kZero);
- __ CallRuntimeDelayed(gen_->zone(), Runtime::kThrowWasmError);
-
- ReferenceMap* reference_map =
- new (gen_->code()->zone()) ReferenceMap(gen_->code()->zone());
+ gen_->AssembleSourcePosition(instr_);
+ __ Call(__ isolate()->builtins()->builtin_handle(
+ Builtins::kThrowWasmTrapMemOutOfBounds),
+ RelocInfo::CODE_TARGET);
+ ReferenceMap* reference_map = new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
+ __ AssertUnreachable(kUnexpectedReturnFromWasmTrap);
}
private:
CodeGenerator* gen_;
int pc_;
bool frame_elided_;
- int32_t position_;
+ Instruction* instr_;
};
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
- InstructionCode opcode, size_t input_count,
+ InstructionCode opcode, Instruction* instr,
X64OperandConverter& i, int pc) {
const X64MemoryProtection protection =
static_cast<X64MemoryProtection>(MiscField::decode(opcode));
if (protection == X64MemoryProtection::kProtected) {
const bool frame_elided = !codegen->frame_access_state()->has_frame();
- const int32_t position = i.InputInt32(input_count - 1);
- new (zone) WasmOutOfLineTrap(codegen, pc, frame_elided, position);
+ new (zone) WasmOutOfLineTrap(codegen, pc, frame_elided, instr);
}
}
} // namespace
@@ -872,6 +823,28 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
first_unused_stack_slot);
}
+// Check if the code object is marked for deoptimization. If it is, then it
+// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we
+// need to:
+// 1. load the address of the current instruction;
+// 2. read from memory the word that contains the marked-for-deoptimization
+//    bit, which can be found in the first set of flags
+//    ({kKindSpecificFlags1Offset});
+// 3. test kMarkedForDeoptimizationBit in those flags; and
+// 4. if the bit is set, jump to the builtin.
+void CodeGenerator::BailoutIfDeoptimized() {
+ Label current;
+ // Load effective address to get the address of the current instruction into
+ // rcx.
+ __ leaq(rcx, Operand(&current));
+ __ bind(&current);
+ int pc = __ pc_offset();
+ int offset = Code::kKindSpecificFlags1Offset - (Code::kHeaderSize + pc);
+ __ testl(Operand(rcx, offset),
+ Immediate(1 << Code::kMarkedForDeoptimizationBit));
+ Handle<Code> code = isolate()->builtins()->builtin_handle(
+ Builtins::kCompileLazyDeoptimizedCode);
+ __ j(not_zero, code, RelocInfo::CODE_TARGET);
+}
+
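A worked restatement of the displacement computed above (symbolic names taken from the hunk; the sketch itself is ours, not V8 code): after the leaq/bind pair, rcx holds code_start + kHeaderSize + pc, while the flags word lives at code_start + kKindSpecificFlags1Offset, so the operand displacement is their difference.

    // rcx + offset must equal code_start + kKindSpecificFlags1Offset, hence:
    constexpr int FlagsOffsetFromPc(int flags1_offset, int header_size,
                                    int pc) {
      return flags1_offset - (header_size + pc);
    }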
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -943,13 +916,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchSaveCallerRegisters: {
+ fp_mode_ =
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
+ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// kReturnRegister0 should have been saved before entering the stub.
- __ PushCallerSaved(kSaveFPRegs, kReturnRegister0);
+ int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
+ DCHECK_EQ(0, bytes % kPointerSize);
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ DCHECK(!caller_registers_saved_);
+ caller_registers_saved_ = true;
break;
}
case kArchRestoreCallerRegisters: {
+ DCHECK(fp_mode_ ==
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
+ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// Don't overwrite the returned value.
- __ PopCallerSaved(kSaveFPRegs, kReturnRegister0);
+ int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(-(bytes / kPointerSize));
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ DCHECK(caller_registers_saved_);
+ caller_registers_saved_ = false;
break;
}
case kArchPrepareTailCall:
@@ -965,7 +953,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ CallCFunction(func, num_parameters);
}
frame_access_state()->SetFrameAccessToDefault();
+ // Ideally, we should decrement the SP delta to match the change of the
+ // stack pointer in CallCFunction. However, on certain architectures (e.g.
+ // ARM) there may be stricter alignment requirements, causing the old SP
+ // to be saved on the stack. In those cases, we cannot calculate the SP
+ // delta statically.
frame_access_state()->ClearSPDelta();
+ if (caller_registers_saved_) {
+ // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
+ // Here, we assume the sequence to be:
+ // kArchSaveCallerRegisters;
+ // kArchCallCFunction;
+ // kArchRestoreCallerRegisters;
+ int bytes =
+ __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ }
break;
}
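A toy model of the SP-delta bookkeeping across the save/call/restore triple (the struct below is our stand-in for FrameAccessState, not V8 code): the delta grows at the save, is wiped by ClearSPDelta() around the C call, is re-synced because the saved registers are still parked on the stack, and returns to zero at the restore.

    #include <cassert>

    struct SpDeltaModel {
      int slots = 0;
      void Increase(int n) { slots += n; }
      void Clear() { slots = 0; }
    };

    int main() {
      const int kPointerSize = 8;
      const int bytes = 11 * kPointerSize;  // whatever PushCallerSaved pushed
      SpDeltaModel state;
      state.Increase(bytes / kPointerSize);     // kArchSaveCallerRegisters
      state.Clear();                            // CallCFunction wipes the delta
      state.Increase(bytes / kPointerSize);     // re-sync: regs still on stack
      state.Increase(-(bytes / kPointerSize));  // kArchRestoreCallerRegisters
      assert(state.slots == 0);                 // stack is balanced again
    }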
case kArchJmp:
@@ -983,7 +986,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchDebugAbort:
- DCHECK(i.InputRegister(0).is(rdx));
+ DCHECK(i.InputRegister(0) == rdx);
if (!frame_access_state()->has_frame()) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
@@ -1062,12 +1065,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchStackSlot: {
FrameOffset offset =
frame_access_state()->GetFrameOffset(i.InputInt32(0));
- Register base;
- if (offset.from_stack_pointer()) {
- base = rsp;
- } else {
- base = rbp;
- }
+ Register base = offset.from_stack_pointer() ? rsp : rbp;
__ leaq(i.OutputRegister(), Operand(base, offset.offset()));
break;
}
@@ -1930,31 +1928,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Subsd(i.InputDoubleRegister(0), kScratchDoubleReg);
break;
case kX64Movsxbl:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
ASSEMBLE_MOVX(movsxbl);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movzxbl:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
ASSEMBLE_MOVX(movzxbl);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movsxbq:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
ASSEMBLE_MOVX(movsxbq);
break;
case kX64Movzxbq:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
ASSEMBLE_MOVX(movzxbq);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movb: {
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
@@ -1965,31 +1958,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64Movsxwl:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
ASSEMBLE_MOVX(movsxwl);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movzxwl:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
ASSEMBLE_MOVX(movzxwl);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movsxwq:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
ASSEMBLE_MOVX(movsxwq);
break;
case kX64Movzxwq:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
ASSEMBLE_MOVX(movzxwq);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movw: {
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
@@ -2000,8 +1988,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64Movl:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
if (instr->HasOutput()) {
if (instr->addressing_mode() == kMode_None) {
if (instr->InputAt(0)->IsRegister()) {
@@ -2024,13 +2011,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kX64Movsxlq:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
ASSEMBLE_MOVX(movsxlq);
break;
case kX64Movq:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
if (instr->HasOutput()) {
__ movq(i.OutputRegister(), i.MemoryOperand());
} else {
@@ -2044,8 +2029,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kX64Movss:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
if (instr->HasOutput()) {
__ movss(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
@@ -2055,8 +2039,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kX64Movsd:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
if (instr->HasOutput()) {
__ Movsd(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
@@ -2067,8 +2050,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kX64Movdqu: {
CpuFeatureScope sse_scope(tasm(), SSSE3);
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
- __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
if (instr->HasOutput()) {
__ movdqu(i.OutputSimd128Register(), i.MemoryOperand());
} else {
@@ -2111,7 +2093,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Shorten "leal" to "addl", "subl" or "shll" if the register allocation
// and addressing mode just happens to work out. The "addl"/"subl" forms
// in these cases are faster based on measurements.
- if (i.InputRegister(0).is(i.OutputRegister())) {
+ if (i.InputRegister(0) == i.OutputRegister()) {
if (mode == kMode_MRI) {
int32_t constant_summand = i.InputInt32(1);
if (constant_summand > 0) {
@@ -2120,7 +2102,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ subl(i.OutputRegister(), Immediate(-constant_summand));
}
} else if (mode == kMode_MR1) {
- if (i.InputRegister(1).is(i.OutputRegister())) {
+ if (i.InputRegister(1) == i.OutputRegister()) {
__ shll(i.OutputRegister(), Immediate(1));
} else {
__ addl(i.OutputRegister(), i.InputRegister(1));
@@ -2135,7 +2117,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ leal(i.OutputRegister(), i.MemoryOperand());
}
} else if (mode == kMode_MR1 &&
- i.InputRegister(1).is(i.OutputRegister())) {
+ i.InputRegister(1) == i.OutputRegister()) {
__ addl(i.OutputRegister(), i.InputRegister(0));
} else {
__ leal(i.OutputRegister(), i.MemoryOperand());
@@ -2148,7 +2130,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Shorten "leaq" to "addq", "subq" or "shlq" if the register allocation
// and addressing mode just happens to work out. The "addq"/"subq" forms
// in these cases are faster based on measurements.
- if (i.InputRegister(0).is(i.OutputRegister())) {
+ if (i.InputRegister(0) == i.OutputRegister()) {
if (mode == kMode_MRI) {
int32_t constant_summand = i.InputInt32(1);
if (constant_summand > 0) {
@@ -2157,7 +2139,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ subq(i.OutputRegister(), Immediate(-constant_summand));
}
} else if (mode == kMode_MR1) {
- if (i.InputRegister(1).is(i.OutputRegister())) {
+ if (i.InputRegister(1) == i.OutputRegister()) {
__ shlq(i.OutputRegister(), Immediate(1));
} else {
__ addq(i.OutputRegister(), i.InputRegister(1));
@@ -2172,7 +2154,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ leaq(i.OutputRegister(), i.MemoryOperand());
}
} else if (mode == kMode_MR1 &&
- i.InputRegister(1).is(i.OutputRegister())) {
+ i.InputRegister(1) == i.OutputRegister()) {
__ addq(i.OutputRegister(), i.InputRegister(0));
} else {
__ leaq(i.OutputRegister(), i.MemoryOperand());
@@ -2251,7 +2233,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CpuFeatureScope sse_scope(tasm(), SSSE3);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
- if (dst.is(src)) {
+ if (dst == src) {
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psignd(dst, kScratchDoubleReg);
} else {
@@ -2384,7 +2366,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CpuFeatureScope sse_scope(tasm(), SSSE3);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
- if (dst.is(src)) {
+ if (dst == src) {
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psignw(dst, kScratchDoubleReg);
} else {
@@ -2528,7 +2510,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CpuFeatureScope sse_scope(tasm(), SSSE3);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
- if (dst.is(src)) {
+ if (dst == src) {
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psignb(dst, kScratchDoubleReg);
} else {
@@ -2636,7 +2618,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64S128Not: {
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
- if (dst.is(src)) {
+ if (dst == src) {
__ movaps(kScratchDoubleReg, dst);
__ pcmpeqd(dst, dst);
__ pxor(dst, kScratchDoubleReg);
@@ -2848,6 +2830,46 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ jmp(flabel, flabel_distance);
}
+void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
+ BranchInfo* branch) {
+ Label::Distance flabel_distance =
+ branch->fallthru ? Label::kNear : Label::kFar;
+ Label* tlabel = branch->true_label;
+ Label* flabel = branch->false_label;
+ Label nodeopt;
+ if (branch->condition == kUnorderedEqual) {
+ __ j(parity_even, flabel, flabel_distance);
+ } else if (branch->condition == kUnorderedNotEqual) {
+ __ j(parity_even, tlabel);
+ }
+ __ j(FlagsConditionToCondition(branch->condition), tlabel);
+
+ if (FLAG_deopt_every_n_times > 0) {
+ ExternalReference counter =
+ ExternalReference::stress_deopt_count(isolate());
+
+ __ pushfq();
+ __ pushq(rax);
+ __ load_rax(counter);
+ __ decl(rax);
+ __ j(not_zero, &nodeopt);
+
+ __ Set(rax, FLAG_deopt_every_n_times);
+ __ store_rax(counter);
+ __ popq(rax);
+ __ popfq();
+ __ jmp(tlabel);
+
+ __ bind(&nodeopt);
+ __ store_rax(counter);
+ __ popq(rax);
+ __ popfq();
+ }
+
+ if (!branch->fallthru) {
+ __ jmp(flabel, flabel_distance);
+ }
+}
void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
@@ -2889,7 +2911,10 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
- __ Ret();
+ CallDescriptor* descriptor = gen_->linkage()->GetIncomingDescriptor();
+ size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
+ // Use rcx as a scratch register; we return immediately anyway.
+ __ Ret(static_cast<int>(pop_size), rcx);
} else {
gen_->AssembleSourcePosition(instr_);
__ Call(__ isolate()->builtins()->builtin_handle(trap_id),
@@ -3081,7 +3106,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ subp(rsp, Immediate(stack_size));
// Store the registers on the stack.
int slot_idx = 0;
- for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
+ for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
if (!((1 << i) & saves_fp)) continue;
__ movdqu(Operand(rsp, kQuadWordSize * slot_idx),
XMMRegister::from_code(i));
@@ -3115,7 +3140,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
const int stack_size = saves_fp_count * kQuadWordSize;
// Load the registers from the stack.
int slot_idx = 0;
- for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
+ for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
if (!((1 << i) & saves_fp)) continue;
__ movdqu(XMMRegister::from_code(i),
Operand(rsp, kQuadWordSize * slot_idx));
@@ -3157,7 +3182,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
__ Ret(static_cast<int>(pop_size), rcx);
} else {
Register pop_reg = g.ToRegister(pop);
- Register scratch_reg = pop_reg.is(rcx) ? rdx : rcx;
+ Register scratch_reg = pop_reg == rcx ? rdx : rcx;
__ popq(scratch_reg);
__ leaq(rsp, Operand(rsp, pop_reg, times_8, static_cast<int>(pop_size)));
__ jmp(scratch_reg);
@@ -3348,28 +3373,30 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
if (rep != MachineRepresentation::kSimd128) {
Register tmp = kScratchRegister;
__ movq(tmp, dst);
- __ pushq(src);
+ __ pushq(src); // Then use stack to copy src to destination.
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
kPointerSize);
- frame_access_state()->IncreaseSPDelta(1);
- src = g.ToOperand(source);
- __ movq(src, tmp);
- frame_access_state()->IncreaseSPDelta(-1);
- dst = g.ToOperand(destination);
__ popq(dst);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
-kPointerSize);
+ __ movq(src, tmp);
} else {
- // Use the XOR trick to swap without a temporary. The xorps may read
- // from or write to an unaligned address, causing a slowdown, but swaps
- // between slots should be rare.
- __ Movups(kScratchDoubleReg, src);
- __ Xorps(kScratchDoubleReg, dst); // scratch contains src ^ dst.
- __ Movups(src, kScratchDoubleReg);
- __ Xorps(kScratchDoubleReg, dst); // scratch contains src.
- __ Movups(dst, kScratchDoubleReg);
- __ Xorps(kScratchDoubleReg, src); // scratch contains dst.
- __ Movups(src, kScratchDoubleReg);
+ // Without AVX, misaligned reads and writes will trap. Move using the
+ // stack, in two parts.
+ __ movups(kScratchDoubleReg, dst); // Save dst in scratch register.
+ __ pushq(src); // Then use stack to copy src to destination.
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kPointerSize);
+ __ popq(dst);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ -kPointerSize);
+ __ pushq(g.ToOperand(source, kPointerSize));
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kPointerSize);
+ __ popq(g.ToOperand(destination, kPointerSize));
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ -kPointerSize);
+ __ movups(src, kScratchDoubleReg);
}
} else if (source->IsFPRegister() && destination->IsFPRegister()) {
// XMM register-register swap.
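The two-part stack move above can be mimicked in plain C++ to check the swap logic (a sketch with our own names; the real code routes the two halves through the machine stack):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    void SwapSlots(uint8_t* src, uint8_t* dst) {
      uint8_t scratch[16];
      std::memcpy(scratch, dst, 16);     // movups kScratchDoubleReg, dst
      std::memcpy(dst, src, 8);          // pushq src / popq dst (low half)
      std::memcpy(dst + 8, src + 8, 8);  // second push/pop (high half)
      std::memcpy(src, scratch, 16);     // movups src, kScratchDoubleReg
    }

    int main() {
      uint8_t a[16], b[16];
      for (int i = 0; i < 16; ++i) { a[i] = i; b[i] = 100 + i; }
      SwapSlots(a, b);
      assert(a[0] == 100 && b[15] == 15);  // contents exchanged
    }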
diff --git a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
index c5ef1e5a7a..ba775e72af 100644
--- a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
@@ -216,7 +216,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Movsxwq:
case kX64Movzxwq:
case kX64Movsxlq:
- DCHECK(instr->InputCount() >= 1);
+ DCHECK_LE(1, instr->InputCount());
return instr->InputAt(0)->IsRegister() ? kNoOpcodeFlags
: kIsLoadOperation;
@@ -226,7 +226,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Movl:
if (instr->HasOutput()) {
- DCHECK(instr->InputCount() >= 1);
+ DCHECK_LE(1, instr->InputCount());
return instr->InputAt(0)->IsRegister() ? kNoOpcodeFlags
: kIsLoadOperation;
} else {
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index b715d02999..f826b22b09 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -296,15 +296,13 @@ void InstructionSelector::VisitLoad(Node* node) {
ArchOpcode opcode = GetLoadOpcode(load_rep);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
- InstructionOperand inputs[4];
+ InstructionOperand inputs[3];
size_t input_count = 0;
AddressingMode mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(mode);
if (node->opcode() == IrOpcode::kProtectedLoad) {
code |= MiscField::encode(X64MemoryProtection::kProtected);
- // Add the source position as an input
- inputs[input_count++] = g.UseImmediate(node->InputAt(2));
}
Emit(code, 1, outputs, input_count, inputs);
}
@@ -379,12 +377,11 @@ void InstructionSelector::VisitStore(Node* node) {
void InstructionSelector::VisitProtectedStore(Node* node) {
X64OperandGenerator g(this);
Node* value = node->InputAt(2);
- Node* position = node->InputAt(3);
StoreRepresentation store_rep = StoreRepresentationOf(node->op());
ArchOpcode opcode = GetStoreOpcode(store_rep);
- InstructionOperand inputs[5];
+ InstructionOperand inputs[4];
size_t input_count = 0;
AddressingMode addressing_mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
@@ -393,7 +390,6 @@ void InstructionSelector::VisitProtectedStore(Node* node) {
InstructionOperand value_operand =
g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
inputs[input_count++] = value_operand;
- inputs[input_count++] = g.UseImmediate(position);
Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
}
@@ -1590,7 +1586,7 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
InstructionCode opcode, Node* left,
InstructionOperand right,
FlagsContinuation* cont) {
- DCHECK(left->opcode() == IrOpcode::kLoad);
+ DCHECK_EQ(IrOpcode::kLoad, left->opcode());
X64OperandGenerator g(selector);
size_t input_count = 0;
InstructionOperand inputs[6];
diff --git a/deps/v8/src/compiler/x64/unwinding-info-writer-x64.cc b/deps/v8/src/compiler/x64/unwinding-info-writer-x64.cc
index 31338bdaff..94b82ad8cf 100644
--- a/deps/v8/src/compiler/x64/unwinding-info-writer-x64.cc
+++ b/deps/v8/src/compiler/x64/unwinding-info-writer-x64.cc
@@ -20,12 +20,12 @@ void UnwindingInfoWriter::BeginInstructionBlock(int pc_offset,
const BlockInitialState* initial_state =
block_initial_states_[block->rpo_number().ToInt()];
if (initial_state) {
- if (!initial_state->register_.is(eh_frame_writer_.base_register()) &&
+ if (initial_state->register_ != eh_frame_writer_.base_register() &&
initial_state->offset_ != eh_frame_writer_.base_offset()) {
eh_frame_writer_.AdvanceLocation(pc_offset);
eh_frame_writer_.SetBaseAddressRegisterAndOffset(initial_state->register_,
initial_state->offset_);
- } else if (!initial_state->register_.is(eh_frame_writer_.base_register())) {
+ } else if (initial_state->register_ != eh_frame_writer_.base_register()) {
eh_frame_writer_.AdvanceLocation(pc_offset);
eh_frame_writer_.SetBaseAddressRegister(initial_state->register_);
} else if (initial_state->offset_ != eh_frame_writer_.base_offset()) {
@@ -54,7 +54,7 @@ void UnwindingInfoWriter::EndInstructionBlock(const InstructionBlock* block) {
// If we already had an entry for this BB, check that the values are the
// same we are trying to insert.
if (existing_state) {
- DCHECK(existing_state->register_.is(eh_frame_writer_.base_register()));
+ DCHECK(existing_state->register_ == eh_frame_writer_.base_register());
DCHECK_EQ(existing_state->offset_, eh_frame_writer_.base_offset());
DCHECK_EQ(existing_state->tracking_fp_, tracking_fp_);
} else {
diff --git a/deps/v8/src/compiler/zone-stats.cc b/deps/v8/src/compiler/zone-stats.cc
index 626ad4072c..eef13d0de6 100644
--- a/deps/v8/src/compiler/zone-stats.cc
+++ b/deps/v8/src/compiler/zone-stats.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <algorithm>
+
#include "src/compiler/zone-stats.h"
namespace v8 {
diff --git a/deps/v8/src/contexts-inl.h b/deps/v8/src/contexts-inl.h
index 59c5ca64bf..05b46e0e0b 100644
--- a/deps/v8/src/contexts-inl.h
+++ b/deps/v8/src/contexts-inl.h
@@ -12,6 +12,7 @@
#include "src/objects/map-inl.h"
#include "src/objects/regexp-match-info.h"
#include "src/objects/shared-function-info-inl.h"
+#include "src/objects/template-objects.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index a6b7b289fb..21721828b2 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -442,77 +442,6 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
return Handle<Object>::null();
}
-void Context::AddOptimizedFunction(JSFunction* function) {
- DCHECK(IsNativeContext());
-#ifdef ENABLE_SLOW_DCHECKS
- Isolate* isolate = GetIsolate();
- if (FLAG_enable_slow_asserts) {
- Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
- while (!element->IsUndefined(isolate)) {
- CHECK(element != function);
- element = JSFunction::cast(element)->next_function_link();
- }
- }
-
- // Check that the context belongs to the weak native contexts list.
- bool found = false;
- Object* context = isolate->heap()->native_contexts_list();
- while (!context->IsUndefined(isolate)) {
- if (context == this) {
- found = true;
- break;
- }
- context = Context::cast(context)->next_context_link();
- }
- CHECK(found);
-#endif
-
- DCHECK(function->next_function_link()->IsUndefined(GetIsolate()));
- function->set_next_function_link(get(OPTIMIZED_FUNCTIONS_LIST),
- UPDATE_WEAK_WRITE_BARRIER);
- set(OPTIMIZED_FUNCTIONS_LIST, function, UPDATE_WEAK_WRITE_BARRIER);
-}
-
-
-void Context::RemoveOptimizedFunction(JSFunction* function) {
- DCHECK(IsNativeContext());
- Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
- JSFunction* prev = NULL;
- Isolate* isolate = function->GetIsolate();
- while (!element->IsUndefined(isolate)) {
- JSFunction* element_function = JSFunction::cast(element);
- DCHECK(element_function->next_function_link()->IsUndefined(isolate) ||
- element_function->next_function_link()->IsJSFunction());
- if (element_function == function) {
- if (prev == NULL) {
- set(OPTIMIZED_FUNCTIONS_LIST, element_function->next_function_link(),
- UPDATE_WEAK_WRITE_BARRIER);
- } else {
- prev->set_next_function_link(element_function->next_function_link(),
- UPDATE_WEAK_WRITE_BARRIER);
- }
- element_function->set_next_function_link(GetHeap()->undefined_value(),
- UPDATE_WEAK_WRITE_BARRIER);
- return;
- }
- prev = element_function;
- element = element_function->next_function_link();
- }
- UNREACHABLE();
-}
-
-
-void Context::SetOptimizedFunctionsListHead(Object* head) {
- DCHECK(IsNativeContext());
- set(OPTIMIZED_FUNCTIONS_LIST, head, UPDATE_WEAK_WRITE_BARRIER);
-}
-
-
-Object* Context::OptimizedFunctionsListHead() {
- DCHECK(IsNativeContext());
- return get(OPTIMIZED_FUNCTIONS_LIST);
-}
-
void Context::AddOptimizedCode(Code* code) {
DCHECK(IsNativeContext());
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 169fd8ac27..f8278820d5 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -43,7 +43,6 @@ enum ContextLookupFlags {
async_function_promise_release) \
V(IS_ARRAYLIKE, JSFunction, is_arraylike) \
V(GENERATOR_NEXT_INTERNAL, JSFunction, generator_next_internal) \
- V(GET_TEMPLATE_CALL_SITE_INDEX, JSFunction, get_template_call_site) \
V(MAKE_ERROR_INDEX, JSFunction, make_error) \
V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error) \
V(MAKE_SYNTAX_ERROR_INDEX, JSFunction, make_syntax_error) \
@@ -52,7 +51,6 @@ enum ContextLookupFlags {
V(OBJECT_CREATE, JSFunction, object_create) \
V(OBJECT_DEFINE_PROPERTIES, JSFunction, object_define_properties) \
V(OBJECT_DEFINE_PROPERTY, JSFunction, object_define_property) \
- V(OBJECT_FREEZE, JSFunction, object_freeze) \
V(OBJECT_GET_PROTOTYPE_OF, JSFunction, object_get_prototype_of) \
V(OBJECT_IS_EXTENSIBLE, JSFunction, object_is_extensible) \
V(OBJECT_IS_FROZEN, JSFunction, object_is_frozen) \
@@ -226,6 +224,7 @@ enum ContextLookupFlags {
V(ASYNC_GENERATOR_RETURN_CLOSED_REJECT_SHARED_FUN, SharedFunctionInfo, \
async_generator_return_closed_reject_shared_fun) \
V(ATOMICS_OBJECT, JSObject, atomics_object) \
+ V(BIGINT_FUNCTION_INDEX, JSFunction, bigint_function) \
V(BOOLEAN_FUNCTION_INDEX, JSFunction, boolean_function) \
V(BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX, Map, \
bound_function_with_constructor_map) \
@@ -402,6 +401,7 @@ enum ContextLookupFlags {
V(WASM_MEMORY_CONSTRUCTOR_INDEX, JSFunction, wasm_memory_constructor) \
V(WASM_MODULE_CONSTRUCTOR_INDEX, JSFunction, wasm_module_constructor) \
V(WASM_TABLE_CONSTRUCTOR_INDEX, JSFunction, wasm_table_constructor) \
+ V(TEMPLATE_MAP_INDEX, HeapObject, template_map) \
V(TYPED_ARRAY_FUN_INDEX, JSFunction, typed_array_function) \
V(TYPED_ARRAY_PROTOTYPE_INDEX, JSObject, typed_array_prototype) \
V(UINT16_ARRAY_FUN_INDEX, JSFunction, uint16_array_fun) \
@@ -541,14 +541,13 @@ class Context: public FixedArray {
// Properties from here are treated as weak references by the full GC.
// Scavenge treats them as strong references.
- OPTIMIZED_FUNCTIONS_LIST, // Weak.
- OPTIMIZED_CODE_LIST, // Weak.
- DEOPTIMIZED_CODE_LIST, // Weak.
- NEXT_CONTEXT_LINK, // Weak.
+ OPTIMIZED_CODE_LIST, // Weak.
+ DEOPTIMIZED_CODE_LIST, // Weak.
+ NEXT_CONTEXT_LINK, // Weak.
// Total number of slots.
NATIVE_CONTEXT_SLOTS,
- FIRST_WEAK_SLOT = OPTIMIZED_FUNCTIONS_LIST,
+ FIRST_WEAK_SLOT = OPTIMIZED_CODE_LIST,
FIRST_JS_ARRAY_MAP_SLOT = JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX,
MIN_CONTEXT_SLOTS = GLOBAL_PROXY_INDEX,
@@ -627,12 +626,6 @@ class Context: public FixedArray {
inline bool HasSameSecurityTokenAs(Context* that) const;
- // A native context holds a list of all functions with optimized code.
- void AddOptimizedFunction(JSFunction* function);
- void RemoveOptimizedFunction(JSFunction* function);
- void SetOptimizedFunctionsListHead(Object* head);
- Object* OptimizedFunctionsListHead();
-
// The native context also stores a list of all optimized code and a
// list of all deoptimized code, which are needed by the deoptimizer.
void AddOptimizedCode(Code* code);
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index 9310ce9972..096661c64a 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -10,7 +10,6 @@
#include <stdarg.h>
#include <cmath>
#include "src/globals.h" // Required for V8_INFINITY
-#include "src/unicode-cache-inl.h"
// ----------------------------------------------------------------------------
// Extra POSIX/ANSI functions for Win32/MSVC.
@@ -20,21 +19,10 @@
#include "src/conversions.h"
#include "src/double.h"
#include "src/objects-inl.h"
-#include "src/strtod.h"
namespace v8 {
namespace internal {
-inline double JunkStringValue() {
- return bit_cast<double, uint64_t>(kQuietNaNMask);
-}
-
-
-inline double SignedZero(bool negative) {
- return negative ? uint64_to_double(Double::kSignMask) : 0.0;
-}
-
-
// The fast double-to-unsigned-int conversion routine does not guarantee
// rounding towards zero, or any reasonable value if the argument is larger
// than what fits in an unsigned 32-bit integer.
@@ -231,599 +219,6 @@ uint32_t DoubleToUint32(double x) {
return static_cast<uint32_t>(DoubleToInt32(x));
}
-
-template <class Iterator, class EndMark>
-bool SubStringEquals(Iterator* current,
- EndMark end,
- const char* substring) {
- DCHECK(**current == *substring);
- for (substring++; *substring != '\0'; substring++) {
- ++*current;
- if (*current == end || **current != *substring) return false;
- }
- ++*current;
- return true;
-}
-
-
-// Returns true if a nonspace character has been found and false if the
-// end was been reached before finding a nonspace character.
-template <class Iterator, class EndMark>
-inline bool AdvanceToNonspace(UnicodeCache* unicode_cache,
- Iterator* current,
- EndMark end) {
- while (*current != end) {
- if (!unicode_cache->IsWhiteSpaceOrLineTerminator(**current)) return true;
- ++*current;
- }
- return false;
-}
-
-
-// Parsing integers with radix 2, 4, 8, 16, 32. Assumes current != end.
-template <int radix_log_2, class Iterator, class EndMark>
-double InternalStringToIntDouble(UnicodeCache* unicode_cache,
- Iterator current,
- EndMark end,
- bool negative,
- bool allow_trailing_junk) {
- DCHECK(current != end);
-
- // Skip leading 0s.
- while (*current == '0') {
- ++current;
- if (current == end) return SignedZero(negative);
- }
-
- int64_t number = 0;
- int exponent = 0;
- const int radix = (1 << radix_log_2);
-
- do {
- int digit;
- if (*current >= '0' && *current <= '9' && *current < '0' + radix) {
- digit = static_cast<char>(*current) - '0';
- } else if (radix > 10 && *current >= 'a' && *current < 'a' + radix - 10) {
- digit = static_cast<char>(*current) - 'a' + 10;
- } else if (radix > 10 && *current >= 'A' && *current < 'A' + radix - 10) {
- digit = static_cast<char>(*current) - 'A' + 10;
- } else {
- if (allow_trailing_junk ||
- !AdvanceToNonspace(unicode_cache, &current, end)) {
- break;
- } else {
- return JunkStringValue();
- }
- }
-
- number = number * radix + digit;
- int overflow = static_cast<int>(number >> 53);
- if (overflow != 0) {
- // Overflow occurred. Need to determine which direction to round the
- // result.
- int overflow_bits_count = 1;
- while (overflow > 1) {
- overflow_bits_count++;
- overflow >>= 1;
- }
-
- int dropped_bits_mask = ((1 << overflow_bits_count) - 1);
- int dropped_bits = static_cast<int>(number) & dropped_bits_mask;
- number >>= overflow_bits_count;
- exponent = overflow_bits_count;
-
- bool zero_tail = true;
- while (true) {
- ++current;
- if (current == end || !isDigit(*current, radix)) break;
- zero_tail = zero_tail && *current == '0';
- exponent += radix_log_2;
- }
-
- if (!allow_trailing_junk &&
- AdvanceToNonspace(unicode_cache, &current, end)) {
- return JunkStringValue();
- }
-
- int middle_value = (1 << (overflow_bits_count - 1));
- if (dropped_bits > middle_value) {
- number++; // Rounding up.
- } else if (dropped_bits == middle_value) {
- // Rounding to even to consistency with decimals: half-way case rounds
- // up if significant part is odd and down otherwise.
- if ((number & 1) != 0 || !zero_tail) {
- number++; // Rounding up.
- }
- }
-
- // Rounding up may cause overflow.
- if ((number & (static_cast<int64_t>(1) << 53)) != 0) {
- exponent++;
- number >>= 1;
- }
- break;
- }
- ++current;
- } while (current != end);
-
- DCHECK(number < ((int64_t)1 << 53));
- DCHECK(static_cast<int64_t>(static_cast<double>(number)) == number);
-
- if (exponent == 0) {
- if (negative) {
- if (number == 0) return -0.0;
- number = -number;
- }
- return static_cast<double>(number);
- }
-
- DCHECK(number != 0);
- return std::ldexp(static_cast<double>(negative ? -number : number), exponent);
-}
-
-// ES6 18.2.5 parseInt(string, radix)
-template <class Iterator, class EndMark>
-double InternalStringToInt(UnicodeCache* unicode_cache,
- Iterator current,
- EndMark end,
- int radix) {
- const bool allow_trailing_junk = true;
- const double empty_string_val = JunkStringValue();
-
- if (!AdvanceToNonspace(unicode_cache, &current, end)) {
- return empty_string_val;
- }
-
- bool negative = false;
- bool leading_zero = false;
-
- if (*current == '+') {
- // Ignore leading sign; skip following spaces.
- ++current;
- if (current == end) {
- return JunkStringValue();
- }
- } else if (*current == '-') {
- ++current;
- if (current == end) {
- return JunkStringValue();
- }
- negative = true;
- }
-
- if (radix == 0) {
- // Radix detection.
- radix = 10;
- if (*current == '0') {
- ++current;
- if (current == end) return SignedZero(negative);
- if (*current == 'x' || *current == 'X') {
- radix = 16;
- ++current;
- if (current == end) return JunkStringValue();
- } else {
- leading_zero = true;
- }
- }
- } else if (radix == 16) {
- if (*current == '0') {
- // Allow "0x" prefix.
- ++current;
- if (current == end) return SignedZero(negative);
- if (*current == 'x' || *current == 'X') {
- ++current;
- if (current == end) return JunkStringValue();
- } else {
- leading_zero = true;
- }
- }
- }
-
- if (radix < 2 || radix > 36) return JunkStringValue();
-
- // Skip leading zeros.
- while (*current == '0') {
- leading_zero = true;
- ++current;
- if (current == end) return SignedZero(negative);
- }
-
- if (!leading_zero && !isDigit(*current, radix)) {
- return JunkStringValue();
- }
-
- if (base::bits::IsPowerOfTwo(radix)) {
- switch (radix) {
- case 2:
- return InternalStringToIntDouble<1>(
- unicode_cache, current, end, negative, allow_trailing_junk);
- case 4:
- return InternalStringToIntDouble<2>(
- unicode_cache, current, end, negative, allow_trailing_junk);
- case 8:
- return InternalStringToIntDouble<3>(
- unicode_cache, current, end, negative, allow_trailing_junk);
-
- case 16:
- return InternalStringToIntDouble<4>(
- unicode_cache, current, end, negative, allow_trailing_junk);
-
- case 32:
- return InternalStringToIntDouble<5>(
- unicode_cache, current, end, negative, allow_trailing_junk);
- default:
- UNREACHABLE();
- }
- }
-
- if (radix == 10) {
- // Parsing with strtod.
- const int kMaxSignificantDigits = 309; // Doubles are less than 1.8e308.
- // The buffer may contain up to kMaxSignificantDigits + 1 digits and a zero
- // end.
- const int kBufferSize = kMaxSignificantDigits + 2;
- char buffer[kBufferSize];
- int buffer_pos = 0;
- while (*current >= '0' && *current <= '9') {
- if (buffer_pos <= kMaxSignificantDigits) {
- // If the number has more than kMaxSignificantDigits it will be parsed
- // as infinity.
- DCHECK(buffer_pos < kBufferSize);
- buffer[buffer_pos++] = static_cast<char>(*current);
- }
- ++current;
- if (current == end) break;
- }
-
- if (!allow_trailing_junk &&
- AdvanceToNonspace(unicode_cache, &current, end)) {
- return JunkStringValue();
- }
-
- SLOW_DCHECK(buffer_pos < kBufferSize);
- buffer[buffer_pos] = '\0';
- Vector<const char> buffer_vector(buffer, buffer_pos);
- return negative ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0);
- }
-
- // The following code causes accumulating rounding error for numbers greater
- // than ~2^56. It's explicitly allowed in the spec: "if R is not 2, 4, 8, 10,
- // 16, or 32, then mathInt may be an implementation-dependent approximation to
- // the mathematical integer value" (15.1.2.2).
-
- int lim_0 = '0' + (radix < 10 ? radix : 10);
- int lim_a = 'a' + (radix - 10);
- int lim_A = 'A' + (radix - 10);
-
- // NOTE: The code for computing the value may seem a bit complex at
- // first glance. It is structured to use 32-bit multiply-and-add
- // loops as long as possible to avoid losing precision.
-
- double v = 0.0;
- bool done = false;
- do {
-    // Parse the longest part of the string, starting at the current
-    // position, that keeps the multiplier, and thus the part itself,
-    // within 32 bits.
- unsigned int part = 0, multiplier = 1;
- while (true) {
- int d;
- if (*current >= '0' && *current < lim_0) {
- d = *current - '0';
- } else if (*current >= 'a' && *current < lim_a) {
- d = *current - 'a' + 10;
- } else if (*current >= 'A' && *current < lim_A) {
- d = *current - 'A' + 10;
- } else {
- done = true;
- break;
- }
-
- // Update the value of the part as long as the multiplier fits
- // in 32 bits. When we can't guarantee that the next iteration
- // will not overflow the multiplier, we stop parsing the part
- // by leaving the loop.
- const unsigned int kMaximumMultiplier = 0xffffffffU / 36;
- uint32_t m = multiplier * radix;
- if (m > kMaximumMultiplier) break;
- part = part * radix + d;
- multiplier = m;
- DCHECK(multiplier > part);
-
- ++current;
- if (current == end) {
- done = true;
- break;
- }
- }
-
- // Update the value and skip the part in the string.
- v = v * multiplier + part;
- } while (!done);
-
- if (!allow_trailing_junk &&
- AdvanceToNonspace(unicode_cache, &current, end)) {
- return JunkStringValue();
- }
-
- return negative ? -v : v;
-}
-
-
-// Converts a string to a double value. Assumes the Iterator supports
-// the following operations:
-// 1. current == end and current != end (other operations are not allowed).
-// 2. *current - gets the current character in the sequence.
-// 3. ++current (advances the position).
-template <class Iterator, class EndMark>
-double InternalStringToDouble(UnicodeCache* unicode_cache,
- Iterator current,
- EndMark end,
- int flags,
- double empty_string_val) {
- // To make sure that iterator dereferencing is valid the following
- // convention is used:
-  // 1. Each '++current' statement is followed by a check for equality to 'end'.
-  // 2. If AdvanceToNonspace returned false then current == end.
-  // 3. If 'current' becomes equal to 'end' the function returns or goes to
- // 'parsing_done'.
- // 4. 'current' is not dereferenced after the 'parsing_done' label.
- // 5. Code before 'parsing_done' may rely on 'current != end'.
- if (!AdvanceToNonspace(unicode_cache, &current, end)) {
- return empty_string_val;
- }
-
- const bool allow_trailing_junk = (flags & ALLOW_TRAILING_JUNK) != 0;
-
-  // The longest form of simplified number is: "-<significant digits>.1eXXX\0".
- const int kBufferSize = kMaxSignificantDigits + 10;
- char buffer[kBufferSize]; // NOLINT: size is known at compile time.
- int buffer_pos = 0;
-
- // Exponent will be adjusted if insignificant digits of the integer part
- // or insignificant leading zeros of the fractional part are dropped.
- int exponent = 0;
- int significant_digits = 0;
- int insignificant_digits = 0;
- bool nonzero_digit_dropped = false;
-
- enum Sign {
- NONE,
- NEGATIVE,
- POSITIVE
- };
-
- Sign sign = NONE;
-
- if (*current == '+') {
- // Ignore leading sign.
- ++current;
- if (current == end) return JunkStringValue();
- sign = POSITIVE;
- } else if (*current == '-') {
- ++current;
- if (current == end) return JunkStringValue();
- sign = NEGATIVE;
- }
-
- static const char kInfinityString[] = "Infinity";
- if (*current == kInfinityString[0]) {
- if (!SubStringEquals(&current, end, kInfinityString)) {
- return JunkStringValue();
- }
-
- if (!allow_trailing_junk &&
- AdvanceToNonspace(unicode_cache, &current, end)) {
- return JunkStringValue();
- }
-
- DCHECK(buffer_pos == 0);
- return (sign == NEGATIVE) ? -V8_INFINITY : V8_INFINITY;
- }
-
- bool leading_zero = false;
- if (*current == '0') {
- ++current;
- if (current == end) return SignedZero(sign == NEGATIVE);
-
- leading_zero = true;
-
- // It could be hexadecimal value.
- if ((flags & ALLOW_HEX) && (*current == 'x' || *current == 'X')) {
- ++current;
- if (current == end || !isDigit(*current, 16) || sign != NONE) {
- return JunkStringValue(); // "0x".
- }
-
- return InternalStringToIntDouble<4>(unicode_cache,
- current,
- end,
- false,
- allow_trailing_junk);
-
- // It could be an explicit octal value.
- } else if ((flags & ALLOW_OCTAL) && (*current == 'o' || *current == 'O')) {
- ++current;
- if (current == end || !isDigit(*current, 8) || sign != NONE) {
- return JunkStringValue(); // "0o".
- }
-
- return InternalStringToIntDouble<3>(unicode_cache,
- current,
- end,
- false,
- allow_trailing_junk);
-
- // It could be a binary value.
- } else if ((flags & ALLOW_BINARY) && (*current == 'b' || *current == 'B')) {
- ++current;
- if (current == end || !isBinaryDigit(*current) || sign != NONE) {
- return JunkStringValue(); // "0b".
- }
-
- return InternalStringToIntDouble<1>(unicode_cache,
- current,
- end,
- false,
- allow_trailing_junk);
- }
-
- // Ignore leading zeros in the integer part.
- while (*current == '0') {
- ++current;
- if (current == end) return SignedZero(sign == NEGATIVE);
- }
- }
-
- bool octal = leading_zero && (flags & ALLOW_IMPLICIT_OCTAL) != 0;
-
- // Copy significant digits of the integer part (if any) to the buffer.
- while (*current >= '0' && *current <= '9') {
- if (significant_digits < kMaxSignificantDigits) {
- DCHECK(buffer_pos < kBufferSize);
- buffer[buffer_pos++] = static_cast<char>(*current);
- significant_digits++;
-      // Whether the buffer contains an octal number is checked later.
- } else {
- insignificant_digits++; // Move the digit into the exponential part.
- nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
- }
- octal = octal && *current < '8';
- ++current;
- if (current == end) goto parsing_done;
- }
-
- if (significant_digits == 0) {
- octal = false;
- }
-
- if (*current == '.') {
- if (octal && !allow_trailing_junk) return JunkStringValue();
- if (octal) goto parsing_done;
-
- ++current;
- if (current == end) {
- if (significant_digits == 0 && !leading_zero) {
- return JunkStringValue();
- } else {
- goto parsing_done;
- }
- }
-
- if (significant_digits == 0) {
- // octal = false;
- // Integer part consists of 0 or is absent. Significant digits start after
- // leading zeros (if any).
- while (*current == '0') {
- ++current;
- if (current == end) return SignedZero(sign == NEGATIVE);
- exponent--; // Move this 0 into the exponent.
- }
- }
-
- // There is a fractional part. We don't emit a '.', but adjust the exponent
- // instead.
- while (*current >= '0' && *current <= '9') {
- if (significant_digits < kMaxSignificantDigits) {
- DCHECK(buffer_pos < kBufferSize);
- buffer[buffer_pos++] = static_cast<char>(*current);
- significant_digits++;
- exponent--;
- } else {
- // Ignore insignificant digits in the fractional part.
- nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
- }
- ++current;
- if (current == end) goto parsing_done;
- }
- }
-
- if (!leading_zero && exponent == 0 && significant_digits == 0) {
-    // If leading_zero is true then the string contains zeros.
- // If exponent < 0 then string was [+-]\.0*...
- // If significant_digits != 0 the string is not equal to 0.
- // Otherwise there are no digits in the string.
- return JunkStringValue();
- }
-
- // Parse exponential part.
- if (*current == 'e' || *current == 'E') {
- if (octal) return JunkStringValue();
- ++current;
- if (current == end) {
- if (allow_trailing_junk) {
- goto parsing_done;
- } else {
- return JunkStringValue();
- }
- }
- char sign = '+';
- if (*current == '+' || *current == '-') {
- sign = static_cast<char>(*current);
- ++current;
- if (current == end) {
- if (allow_trailing_junk) {
- goto parsing_done;
- } else {
- return JunkStringValue();
- }
- }
- }
-
- if (current == end || *current < '0' || *current > '9') {
- if (allow_trailing_junk) {
- goto parsing_done;
- } else {
- return JunkStringValue();
- }
- }
-
- const int max_exponent = INT_MAX / 2;
- DCHECK(-max_exponent / 2 <= exponent && exponent <= max_exponent / 2);
- int num = 0;
- do {
- // Check overflow.
- int digit = *current - '0';
- if (num >= max_exponent / 10
- && !(num == max_exponent / 10 && digit <= max_exponent % 10)) {
- num = max_exponent;
- } else {
- num = num * 10 + digit;
- }
- ++current;
- } while (current != end && *current >= '0' && *current <= '9');
-
- exponent += (sign == '-' ? -num : num);
- }
-
- if (!allow_trailing_junk &&
- AdvanceToNonspace(unicode_cache, &current, end)) {
- return JunkStringValue();
- }
-
- parsing_done:
- exponent += insignificant_digits;
-
- if (octal) {
- return InternalStringToIntDouble<3>(unicode_cache,
- buffer,
- buffer + buffer_pos,
- sign == NEGATIVE,
- allow_trailing_junk);
- }
-
- if (nonzero_digit_dropped) {
- buffer[buffer_pos++] = '1';
- exponent--;
- }
-
- SLOW_DCHECK(buffer_pos < kBufferSize);
- buffer[buffer_pos] = '\0';
-
- double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);
- return (sign == NEGATIVE) ? -converted : converted;
-}
-
} // namespace internal
} // namespace v8
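
The generic-radix path deleted above is easy to misread: digits are gathered into 32-bit (part, multiplier) chunks, and each completed chunk is folded into the double with a single multiply-add, so precision is only lost at chunk boundaries. Below is a minimal standalone sketch of that chunking technique; it is not V8 code, ParseUnsignedInt and DigitValue are invented names, and sign/whitespace handling is deliberately omitted.

    #include <cstdint>
    #include <cstdio>

    // Hypothetical helper: maps a character to its digit value, or -1.
    static int DigitValue(char c, int radix) {
      int d;
      if (c >= '0' && c <= '9') d = c - '0';
      else if (c >= 'a' && c <= 'z') d = c - 'a' + 10;
      else if (c >= 'A' && c <= 'Z') d = c - 'A' + 10;
      else return -1;
      return d < radix ? d : -1;
    }

    // Accumulates digits in 32-bit chunks; each chunk is folded into the
    // double with one multiply-add, keeping intermediate arithmetic exact.
    static double ParseUnsignedInt(const char* str, int radix) {
      const char* p = str;
      double value = 0.0;
      bool done = false;
      while (!done) {
        uint32_t part = 0, multiplier = 1;
        while (true) {
          int d = DigitValue(*p, radix);
          if (d < 0) {  // end of string ('\0') or a non-digit character
            done = true;
            break;
          }
          // Stop the chunk before multiplier * radix can overflow 32 bits
          // (36 is the largest radix that needs to be supported).
          const uint32_t kMaximumMultiplier = 0xffffffffU / 36;
          uint32_t m = multiplier * static_cast<uint32_t>(radix);
          if (m > kMaximumMultiplier) break;  // digit re-read by next chunk
          part = part * radix + d;
          multiplier = m;
          ++p;
        }
        value = value * multiplier + part;  // fold the chunk into the result
      }
      return value;
    }

    int main() {
      std::printf("%.17g\n", ParseUnsignedInt("123456789012345", 10));  // exact
      std::printf("%.17g\n", ParseUnsignedInt("ff", 16));               // 255
    }

As in the deleted loop, a digit that would push the multiplier past 0xffffffffU / 36 is left unconsumed and re-read at the start of the next chunk.
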
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index 8956a26168..848c4f4c2b 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -12,12 +12,13 @@
#include "src/assert-scope.h"
#include "src/char-predicates-inl.h"
#include "src/codegen.h"
-#include "src/conversions-inl.h"
#include "src/dtoa.h"
#include "src/factory.h"
#include "src/handles.h"
-#include "src/list-inl.h"
+#include "src/objects-inl.h"
+#include "src/objects/bigint.h"
#include "src/strtod.h"
+#include "src/unicode-cache-inl.h"
#include "src/utils.h"
#if defined(_STLP_VENDOR_CSTD)
@@ -30,47 +31,736 @@
namespace v8 {
namespace internal {
-
namespace {
-// C++-style iterator adaptor for StringCharacterStream
-// (unlike C++ iterators the end-marker has different type).
-class StringCharacterStreamIterator {
+inline double JunkStringValue() {
+ return bit_cast<double, uint64_t>(kQuietNaNMask);
+}
+
+inline double SignedZero(bool negative) {
+ return negative ? uint64_to_double(Double::kSignMask) : 0.0;
+}
+
+inline bool isDigit(int x, int radix) {
+ return (x >= '0' && x <= '9' && x < '0' + radix) ||
+ (radix > 10 && x >= 'a' && x < 'a' + radix - 10) ||
+ (radix > 10 && x >= 'A' && x < 'A' + radix - 10);
+}
+
+inline bool isBinaryDigit(int x) { return x == '0' || x == '1'; }
+
+template <class Iterator, class EndMark>
+bool SubStringEquals(Iterator* current, EndMark end, const char* substring) {
+ DCHECK(**current == *substring);
+ for (substring++; *substring != '\0'; substring++) {
+ ++*current;
+ if (*current == end || **current != *substring) return false;
+ }
+ ++*current;
+ return true;
+}
+
+// Returns true if a nonspace character has been found and false if the
+// end was reached before finding a nonspace character.
+template <class Iterator, class EndMark>
+inline bool AdvanceToNonspace(UnicodeCache* unicode_cache, Iterator* current,
+ EndMark end) {
+ while (*current != end) {
+ if (!unicode_cache->IsWhiteSpaceOrLineTerminator(**current)) return true;
+ ++*current;
+ }
+ return false;
+}
+
+// Parsing integers with radix 2, 4, 8, 16, 32. Assumes current != end.
+template <int radix_log_2, class Iterator, class EndMark>
+double InternalStringToIntDouble(UnicodeCache* unicode_cache, Iterator current,
+ EndMark end, bool negative,
+ bool allow_trailing_junk) {
+ DCHECK(current != end);
+
+ // Skip leading 0s.
+ while (*current == '0') {
+ ++current;
+ if (current == end) return SignedZero(negative);
+ }
+
+ int64_t number = 0;
+ int exponent = 0;
+ const int radix = (1 << radix_log_2);
+
+ int lim_0 = '0' + (radix < 10 ? radix : 10);
+ int lim_a = 'a' + (radix - 10);
+ int lim_A = 'A' + (radix - 10);
+
+ do {
+ int digit;
+ if (*current >= '0' && *current < lim_0) {
+ digit = static_cast<char>(*current) - '0';
+ } else if (*current >= 'a' && *current < lim_a) {
+ digit = static_cast<char>(*current) - 'a' + 10;
+ } else if (*current >= 'A' && *current < lim_A) {
+ digit = static_cast<char>(*current) - 'A' + 10;
+ } else {
+ if (allow_trailing_junk ||
+ !AdvanceToNonspace(unicode_cache, &current, end)) {
+ break;
+ } else {
+ return JunkStringValue();
+ }
+ }
+
+ number = number * radix + digit;
+ int overflow = static_cast<int>(number >> 53);
+ if (overflow != 0) {
+ // Overflow occurred. Need to determine which direction to round the
+ // result.
+ int overflow_bits_count = 1;
+ while (overflow > 1) {
+ overflow_bits_count++;
+ overflow >>= 1;
+ }
+
+ int dropped_bits_mask = ((1 << overflow_bits_count) - 1);
+ int dropped_bits = static_cast<int>(number) & dropped_bits_mask;
+ number >>= overflow_bits_count;
+ exponent = overflow_bits_count;
+
+ bool zero_tail = true;
+ while (true) {
+ ++current;
+ if (current == end || !isDigit(*current, radix)) break;
+ zero_tail = zero_tail && *current == '0';
+ exponent += radix_log_2;
+ }
+
+ if (!allow_trailing_junk &&
+ AdvanceToNonspace(unicode_cache, &current, end)) {
+ return JunkStringValue();
+ }
+
+ int middle_value = (1 << (overflow_bits_count - 1));
+ if (dropped_bits > middle_value) {
+ number++; // Rounding up.
+ } else if (dropped_bits == middle_value) {
+        // Round to even for consistency with decimals: the half-way case
+        // rounds up if the significant part is odd and down otherwise.
+ if ((number & 1) != 0 || !zero_tail) {
+ number++; // Rounding up.
+ }
+ }
+
+ // Rounding up may cause overflow.
+ if ((number & (static_cast<int64_t>(1) << 53)) != 0) {
+ exponent++;
+ number >>= 1;
+ }
+ break;
+ }
+ ++current;
+ } while (current != end);
+
+ DCHECK(number < ((int64_t)1 << 53));
+ DCHECK(static_cast<int64_t>(static_cast<double>(number)) == number);
+
+ if (exponent == 0) {
+ if (negative) {
+ if (number == 0) return -0.0;
+ number = -number;
+ }
+ return static_cast<double>(number);
+ }
+
+ DCHECK(number != 0);
+ return std::ldexp(static_cast<double>(negative ? -number : number), exponent);
+}
+
+// ES6 18.2.5 parseInt(string, radix) (with NumberParseIntHelper subclass);
+// https://tc39.github.io/proposal-bigint/#sec-bigint-parseint-string-radix
+// (with BigIntParseIntHelper subclass).
+class StringToIntHelper {
public:
- class EndMarker {};
+ StringToIntHelper(Isolate* isolate, Handle<String> subject, int radix)
+ : isolate_(isolate), subject_(subject), radix_(radix) {
+ DCHECK(subject->IsFlat());
+ }
+ virtual ~StringToIntHelper() {}
- explicit StringCharacterStreamIterator(StringCharacterStream* stream);
+ protected:
+ // Subclasses must implement these:
+ virtual void AllocateResult() = 0;
+ virtual void ResultMultiplyAdd(uint32_t multiplier, uint32_t part) = 0;
- uint16_t operator*() const;
- void operator++();
- bool operator==(EndMarker const&) const { return end_; }
- bool operator!=(EndMarker const& m) const { return !end_; }
+ // Subclasses must call this to do all the work.
+ void ParseInt();
+
+ // Subclasses may override this.
+ virtual void HandleSpecialCases() {}
+
+ // Subclasses get access to internal state:
+ enum State { kRunning, kError, kJunk, kZero, kDone };
+
+ Isolate* isolate() { return isolate_; }
+ Handle<String> subject() { return subject_; }
+ int radix() { return radix_; }
+ int cursor() { return cursor_; }
+ int length() { return length_; }
+ bool negative() { return negative_; }
+ State state() { return state_; }
+ void set_state(State state) { state_ = state; }
private:
- StringCharacterStream* const stream_;
- uint16_t current_;
- bool end_;
+ template <class Char>
+ void DetectRadixInternal(Char current, int length);
+ template <class Char>
+ void ParseInternal(Char start);
+
+ Isolate* isolate_;
+ Handle<String> subject_;
+ int radix_;
+ int cursor_ = 0;
+ int length_ = 0;
+ bool negative_ = false;
+ bool leading_zero_ = false;
+ State state_ = kRunning;
};
+void StringToIntHelper::ParseInt() {
+ {
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat = subject_->GetFlatContent();
+ if (flat.IsOneByte()) {
+ Vector<const uint8_t> vector = flat.ToOneByteVector();
+ DetectRadixInternal(vector.start(), vector.length());
+ } else {
+ Vector<const uc16> vector = flat.ToUC16Vector();
+ DetectRadixInternal(vector.start(), vector.length());
+ }
+ }
+ if (state_ != kRunning) return;
+ AllocateResult();
+ HandleSpecialCases();
+ if (state_ != kRunning) return;
+ {
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat = subject_->GetFlatContent();
+ if (flat.IsOneByte()) {
+ Vector<const uint8_t> vector = flat.ToOneByteVector();
+ DCHECK_EQ(length_, vector.length());
+ ParseInternal(vector.start());
+ } else {
+ Vector<const uc16> vector = flat.ToUC16Vector();
+ DCHECK_EQ(length_, vector.length());
+ ParseInternal(vector.start());
+ }
+ }
+ DCHECK(state_ != kRunning);
+}
+
+template <class Char>
+void StringToIntHelper::DetectRadixInternal(Char current, int length) {
+ Char start = current;
+ length_ = length;
+ Char end = start + length;
+ UnicodeCache* unicode_cache = isolate_->unicode_cache();
+
+ if (!AdvanceToNonspace(unicode_cache, &current, end)) {
+ return set_state(kJunk);
+ }
-StringCharacterStreamIterator::StringCharacterStreamIterator(
- StringCharacterStream* stream) : stream_(stream) {
- ++(*this);
+ if (*current == '+') {
+ // Ignore leading sign; skip following spaces.
+ ++current;
+ if (current == end) {
+ return set_state(kJunk);
+ }
+ } else if (*current == '-') {
+ ++current;
+ if (current == end) {
+ return set_state(kJunk);
+ }
+ negative_ = true;
+ }
+
+ if (radix_ == 0) {
+ // Radix detection.
+ radix_ = 10;
+ if (*current == '0') {
+ ++current;
+ if (current == end) return set_state(kZero);
+ if (*current == 'x' || *current == 'X') {
+ radix_ = 16;
+ ++current;
+ if (current == end) return set_state(kJunk);
+ } else {
+ leading_zero_ = true;
+ }
+ }
+ } else if (radix_ == 16) {
+ if (*current == '0') {
+ // Allow "0x" prefix.
+ ++current;
+ if (current == end) return set_state(kZero);
+ if (*current == 'x' || *current == 'X') {
+ ++current;
+ if (current == end) return set_state(kJunk);
+ } else {
+ leading_zero_ = true;
+ }
+ }
+ }
+ // Skip leading zeros.
+ while (*current == '0') {
+ leading_zero_ = true;
+ ++current;
+ if (current == end) return set_state(kZero);
+ }
+
+ if (!leading_zero_ && !isDigit(*current, radix_)) {
+ return set_state(kJunk);
+ }
+
+ DCHECK(radix_ >= 2 && radix_ <= 36);
+ STATIC_ASSERT(String::kMaxLength <= INT_MAX);
+ cursor_ = static_cast<int>(current - start);
}
-uint16_t StringCharacterStreamIterator::operator*() const {
- return current_;
+template <class Char>
+void StringToIntHelper::ParseInternal(Char start) {
+ Char current = start + cursor_;
+ Char end = start + length_;
+
+ // The following code causes accumulating rounding error for numbers greater
+ // than ~2^56. It's explicitly allowed in the spec: "if R is not 2, 4, 8, 10,
+ // 16, or 32, then mathInt may be an implementation-dependent approximation to
+ // the mathematical integer value" (15.1.2.2).
+
+ int lim_0 = '0' + (radix_ < 10 ? radix_ : 10);
+ int lim_a = 'a' + (radix_ - 10);
+ int lim_A = 'A' + (radix_ - 10);
+
+ // NOTE: The code for computing the value may seem a bit complex at
+ // first glance. It is structured to use 32-bit multiply-and-add
+ // loops as long as possible to avoid losing precision.
+
+ bool done = false;
+ do {
+    // Parse the longest part of the string, starting at {current},
+    // that keeps the multiplier, and thus the part itself,
+    // within 32 bits.
+ uint32_t part = 0, multiplier = 1;
+ while (true) {
+ uint32_t d;
+ if (*current >= '0' && *current < lim_0) {
+ d = *current - '0';
+ } else if (*current >= 'a' && *current < lim_a) {
+ d = *current - 'a' + 10;
+ } else if (*current >= 'A' && *current < lim_A) {
+ d = *current - 'A' + 10;
+ } else {
+ done = true;
+ break;
+ }
+
+ // Update the value of the part as long as the multiplier fits
+ // in 32 bits. When we can't guarantee that the next iteration
+ // will not overflow the multiplier, we stop parsing the part
+ // by leaving the loop.
+ const uint32_t kMaximumMultiplier = 0xffffffffU / 36;
+ uint32_t m = multiplier * static_cast<uint32_t>(radix_);
+ if (m > kMaximumMultiplier) break;
+ part = part * radix_ + d;
+ multiplier = m;
+ DCHECK(multiplier > part);
+
+ ++current;
+ if (current == end) {
+ done = true;
+ break;
+ }
+ }
+
+ // Update the value and skip the part in the string.
+ ResultMultiplyAdd(multiplier, part);
+ } while (!done);
+
+ return set_state(kDone);
}
+class NumberParseIntHelper : public StringToIntHelper {
+ public:
+ NumberParseIntHelper(Isolate* isolate, Handle<String> string, int radix)
+ : StringToIntHelper(isolate, string, radix) {}
+
+ double GetResult() {
+ ParseInt();
+ switch (state()) {
+ case kJunk:
+ return JunkStringValue();
+ case kZero:
+ return SignedZero(negative());
+ case kDone:
+ return negative() ? -result_ : result_;
+ case kError:
+ case kRunning:
+ break;
+ }
+ UNREACHABLE();
+ }
-void StringCharacterStreamIterator::operator++() {
- end_ = !stream_->HasMore();
- if (!end_) {
- current_ = stream_->GetNext();
+ protected:
+ virtual void AllocateResult() {}
+ virtual void ResultMultiplyAdd(uint32_t multiplier, uint32_t part) {
+ result_ = result_ * multiplier + part;
}
+
+ private:
+ virtual void HandleSpecialCases() {
+ bool is_power_of_two = base::bits::IsPowerOfTwo(radix());
+ if (!is_power_of_two && radix() != 10) return;
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat = subject()->GetFlatContent();
+ if (flat.IsOneByte()) {
+ Vector<const uint8_t> vector = flat.ToOneByteVector();
+ DCHECK_EQ(length(), vector.length());
+ result_ = is_power_of_two ? HandlePowerOfTwoCase(vector.start())
+ : HandleBaseTenCase(vector.start());
+ } else {
+ Vector<const uc16> vector = flat.ToUC16Vector();
+ DCHECK_EQ(length(), vector.length());
+ result_ = is_power_of_two ? HandlePowerOfTwoCase(vector.start())
+ : HandleBaseTenCase(vector.start());
+ }
+ set_state(kDone);
+ }
+
+ template <class Char>
+ double HandlePowerOfTwoCase(Char start) {
+ Char current = start + cursor();
+ Char end = start + length();
+ UnicodeCache* unicode_cache = isolate()->unicode_cache();
+ const bool allow_trailing_junk = true;
+ // GetResult() will take care of the sign bit, so ignore it for now.
+ const bool negative = false;
+ switch (radix()) {
+ case 2:
+ return InternalStringToIntDouble<1>(unicode_cache, current, end,
+ negative, allow_trailing_junk);
+ case 4:
+ return InternalStringToIntDouble<2>(unicode_cache, current, end,
+ negative, allow_trailing_junk);
+ case 8:
+ return InternalStringToIntDouble<3>(unicode_cache, current, end,
+ negative, allow_trailing_junk);
+
+ case 16:
+ return InternalStringToIntDouble<4>(unicode_cache, current, end,
+ negative, allow_trailing_junk);
+
+ case 32:
+ return InternalStringToIntDouble<5>(unicode_cache, current, end,
+ negative, allow_trailing_junk);
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ template <class Char>
+ double HandleBaseTenCase(Char start) {
+ // Parsing with strtod.
+ Char current = start + cursor();
+ Char end = start + length();
+ const int kMaxSignificantDigits = 309; // Doubles are less than 1.8e308.
+    // The buffer may contain up to kMaxSignificantDigits + 1 digits and a
+    // terminating zero.
+ const int kBufferSize = kMaxSignificantDigits + 2;
+ char buffer[kBufferSize];
+ int buffer_pos = 0;
+ while (*current >= '0' && *current <= '9') {
+ if (buffer_pos <= kMaxSignificantDigits) {
+ // If the number has more than kMaxSignificantDigits it will be parsed
+ // as infinity.
+ DCHECK(buffer_pos < kBufferSize);
+ buffer[buffer_pos++] = static_cast<char>(*current);
+ }
+ ++current;
+ if (current == end) break;
+ }
+
+ SLOW_DCHECK(buffer_pos < kBufferSize);
+ buffer[buffer_pos] = '\0';
+ Vector<const char> buffer_vector(buffer, buffer_pos);
+ return Strtod(buffer_vector, 0);
+ }
+
+ double result_ = 0;
+};
+
+// Converts a string to a double value. Assumes the Iterator supports
+// the following operations:
+// 1. current == end and current != end (other operations are not allowed).
+// 2. *current - gets the current character in the sequence.
+// 3. ++current (advances the position).
+template <class Iterator, class EndMark>
+double InternalStringToDouble(UnicodeCache* unicode_cache, Iterator current,
+ EndMark end, int flags, double empty_string_val) {
+ // To make sure that iterator dereferencing is valid the following
+ // convention is used:
+  // 1. Each '++current' statement is followed by a check for equality to 'end'.
+  // 2. If AdvanceToNonspace returned false then current == end.
+  // 3. If 'current' becomes equal to 'end' the function returns or goes to
+ // 'parsing_done'.
+ // 4. 'current' is not dereferenced after the 'parsing_done' label.
+ // 5. Code before 'parsing_done' may rely on 'current != end'.
+ if (!AdvanceToNonspace(unicode_cache, &current, end)) {
+ return empty_string_val;
+ }
+
+ const bool allow_trailing_junk = (flags & ALLOW_TRAILING_JUNK) != 0;
+
+ // Maximum number of significant digits in decimal representation.
+ // The longest possible double in decimal representation is
+ // (2^53 - 1) * 2 ^ -1074 that is (2 ^ 53 - 1) * 5 ^ 1074 / 10 ^ 1074
+  // (768 digits). If we parse a number whose first digits are equal to the
+  // mean of two adjacent doubles (which can have up to 769 digits), the
+  // result must be rounded to the bigger one unless the tail consists of
+  // zeros, so we don't need to preserve all the digits.
+ const int kMaxSignificantDigits = 772;
+
+  // The longest form of simplified number is: "-<significant digits>.1eXXX\0".
+ const int kBufferSize = kMaxSignificantDigits + 10;
+ char buffer[kBufferSize]; // NOLINT: size is known at compile time.
+ int buffer_pos = 0;
+
+ // Exponent will be adjusted if insignificant digits of the integer part
+ // or insignificant leading zeros of the fractional part are dropped.
+ int exponent = 0;
+ int significant_digits = 0;
+ int insignificant_digits = 0;
+ bool nonzero_digit_dropped = false;
+
+ enum Sign { NONE, NEGATIVE, POSITIVE };
+
+ Sign sign = NONE;
+
+ if (*current == '+') {
+ // Ignore leading sign.
+ ++current;
+ if (current == end) return JunkStringValue();
+ sign = POSITIVE;
+ } else if (*current == '-') {
+ ++current;
+ if (current == end) return JunkStringValue();
+ sign = NEGATIVE;
+ }
+
+ static const char kInfinityString[] = "Infinity";
+ if (*current == kInfinityString[0]) {
+ if (!SubStringEquals(&current, end, kInfinityString)) {
+ return JunkStringValue();
+ }
+
+ if (!allow_trailing_junk &&
+ AdvanceToNonspace(unicode_cache, &current, end)) {
+ return JunkStringValue();
+ }
+
+ DCHECK(buffer_pos == 0);
+ return (sign == NEGATIVE) ? -V8_INFINITY : V8_INFINITY;
+ }
+
+ bool leading_zero = false;
+ if (*current == '0') {
+ ++current;
+ if (current == end) return SignedZero(sign == NEGATIVE);
+
+ leading_zero = true;
+
+ // It could be hexadecimal value.
+ if ((flags & ALLOW_HEX) && (*current == 'x' || *current == 'X')) {
+ ++current;
+ if (current == end || !isDigit(*current, 16) || sign != NONE) {
+ return JunkStringValue(); // "0x".
+ }
+
+ return InternalStringToIntDouble<4>(unicode_cache, current, end, false,
+ allow_trailing_junk);
+
+ // It could be an explicit octal value.
+ } else if ((flags & ALLOW_OCTAL) && (*current == 'o' || *current == 'O')) {
+ ++current;
+ if (current == end || !isDigit(*current, 8) || sign != NONE) {
+ return JunkStringValue(); // "0o".
+ }
+
+ return InternalStringToIntDouble<3>(unicode_cache, current, end, false,
+ allow_trailing_junk);
+
+ // It could be a binary value.
+ } else if ((flags & ALLOW_BINARY) && (*current == 'b' || *current == 'B')) {
+ ++current;
+ if (current == end || !isBinaryDigit(*current) || sign != NONE) {
+ return JunkStringValue(); // "0b".
+ }
+
+ return InternalStringToIntDouble<1>(unicode_cache, current, end, false,
+ allow_trailing_junk);
+ }
+
+ // Ignore leading zeros in the integer part.
+ while (*current == '0') {
+ ++current;
+ if (current == end) return SignedZero(sign == NEGATIVE);
+ }
+ }
+
+ bool octal = leading_zero && (flags & ALLOW_IMPLICIT_OCTAL) != 0;
+
+ // Copy significant digits of the integer part (if any) to the buffer.
+ while (*current >= '0' && *current <= '9') {
+ if (significant_digits < kMaxSignificantDigits) {
+ DCHECK(buffer_pos < kBufferSize);
+ buffer[buffer_pos++] = static_cast<char>(*current);
+ significant_digits++;
+      // Whether the buffer contains an octal number is checked later.
+ } else {
+ insignificant_digits++; // Move the digit into the exponential part.
+ nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
+ }
+ octal = octal && *current < '8';
+ ++current;
+ if (current == end) goto parsing_done;
+ }
+
+ if (significant_digits == 0) {
+ octal = false;
+ }
+
+ if (*current == '.') {
+ if (octal && !allow_trailing_junk) return JunkStringValue();
+ if (octal) goto parsing_done;
+
+ ++current;
+ if (current == end) {
+ if (significant_digits == 0 && !leading_zero) {
+ return JunkStringValue();
+ } else {
+ goto parsing_done;
+ }
+ }
+
+ if (significant_digits == 0) {
+ // octal = false;
+ // Integer part consists of 0 or is absent. Significant digits start after
+ // leading zeros (if any).
+ while (*current == '0') {
+ ++current;
+ if (current == end) return SignedZero(sign == NEGATIVE);
+ exponent--; // Move this 0 into the exponent.
+ }
+ }
+
+ // There is a fractional part. We don't emit a '.', but adjust the exponent
+ // instead.
+ while (*current >= '0' && *current <= '9') {
+ if (significant_digits < kMaxSignificantDigits) {
+ DCHECK(buffer_pos < kBufferSize);
+ buffer[buffer_pos++] = static_cast<char>(*current);
+ significant_digits++;
+ exponent--;
+ } else {
+ // Ignore insignificant digits in the fractional part.
+ nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
+ }
+ ++current;
+ if (current == end) goto parsing_done;
+ }
+ }
+
+ if (!leading_zero && exponent == 0 && significant_digits == 0) {
+  // If leading_zero is true then the string contains zeros.
+ // If exponent < 0 then string was [+-]\.0*...
+ // If significant_digits != 0 the string is not equal to 0.
+ // Otherwise there are no digits in the string.
+ return JunkStringValue();
+ }
+
+ // Parse exponential part.
+ if (*current == 'e' || *current == 'E') {
+ if (octal) return JunkStringValue();
+ ++current;
+ if (current == end) {
+ if (allow_trailing_junk) {
+ goto parsing_done;
+ } else {
+ return JunkStringValue();
+ }
+ }
+ char sign = '+';
+ if (*current == '+' || *current == '-') {
+ sign = static_cast<char>(*current);
+ ++current;
+ if (current == end) {
+ if (allow_trailing_junk) {
+ goto parsing_done;
+ } else {
+ return JunkStringValue();
+ }
+ }
+ }
+
+ if (current == end || *current < '0' || *current > '9') {
+ if (allow_trailing_junk) {
+ goto parsing_done;
+ } else {
+ return JunkStringValue();
+ }
+ }
+
+ const int max_exponent = INT_MAX / 2;
+ DCHECK(-max_exponent / 2 <= exponent && exponent <= max_exponent / 2);
+ int num = 0;
+ do {
+ // Check overflow.
+ int digit = *current - '0';
+ if (num >= max_exponent / 10 &&
+ !(num == max_exponent / 10 && digit <= max_exponent % 10)) {
+ num = max_exponent;
+ } else {
+ num = num * 10 + digit;
+ }
+ ++current;
+ } while (current != end && *current >= '0' && *current <= '9');
+
+ exponent += (sign == '-' ? -num : num);
+ }
+
+ if (!allow_trailing_junk && AdvanceToNonspace(unicode_cache, &current, end)) {
+ return JunkStringValue();
+ }
+
+parsing_done:
+ exponent += insignificant_digits;
+
+ if (octal) {
+ return InternalStringToIntDouble<3>(unicode_cache, buffer,
+ buffer + buffer_pos, sign == NEGATIVE,
+ allow_trailing_junk);
+ }
+
+ if (nonzero_digit_dropped) {
+ buffer[buffer_pos++] = '1';
+ exponent--;
+ }
+
+ SLOW_DCHECK(buffer_pos < kBufferSize);
+ buffer[buffer_pos] = '\0';
+
+ double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);
+ return (sign == NEGATIVE) ? -converted : converted;
}
-} // End anonymous namespace.
+} // namespace
double StringToDouble(UnicodeCache* unicode_cache,
const char* str, int flags, double empty_string_val) {
@@ -105,23 +795,65 @@ double StringToDouble(UnicodeCache* unicode_cache,
empty_string_val);
}
-
-// Converts a string into an integer.
-double StringToInt(UnicodeCache* unicode_cache,
- Vector<const uint8_t> vector,
- int radix) {
- return InternalStringToInt(
- unicode_cache, vector.start(), vector.start() + vector.length(), radix);
+double StringToInt(Isolate* isolate, Handle<String> string, int radix) {
+ NumberParseIntHelper helper(isolate, string, radix);
+ return helper.GetResult();
}
+class BigIntParseIntHelper : public StringToIntHelper {
+ public:
+ BigIntParseIntHelper(Isolate* isolate, Handle<String> string, int radix)
+ : StringToIntHelper(isolate, string, radix) {}
+
+ MaybeHandle<BigInt> GetResult() {
+ ParseInt();
+ switch (state()) {
+ case kJunk:
+ THROW_NEW_ERROR(isolate(),
+ NewSyntaxError(MessageTemplate::kBigIntInvalidString),
+ BigInt);
+ case kZero:
+ return isolate()->factory()->NewBigIntFromInt(0);
+ case kError:
+ return MaybeHandle<BigInt>();
+ case kDone:
+ result_->set_sign(negative());
+ result_->RightTrim();
+ return result_;
+ case kRunning:
+ break;
+ }
+ UNREACHABLE();
+ }
-double StringToInt(UnicodeCache* unicode_cache,
- Vector<const uc16> vector,
- int radix) {
- return InternalStringToInt(
- unicode_cache, vector.start(), vector.start() + vector.length(), radix);
-}
+ protected:
+ virtual void AllocateResult() {
+ // We have to allocate a BigInt that's big enough to fit the result.
+    // Conservatively assume that all remaining digits are significant.
+    // Optimization opportunity: Would it make sense to scan for trailing
+ // junk before allocating the result?
+ int charcount = length() - cursor();
+ MaybeHandle<BigInt> maybe =
+ BigInt::AllocateFor(isolate(), radix(), charcount);
+ if (!maybe.ToHandle(&result_)) {
+ set_state(kError);
+ }
+ }
+ virtual void ResultMultiplyAdd(uint32_t multiplier, uint32_t part) {
+ result_->InplaceMultiplyAdd(static_cast<uintptr_t>(multiplier),
+ static_cast<uintptr_t>(part));
+ }
+
+ private:
+ Handle<BigInt> result_;
+};
+
+MaybeHandle<BigInt> StringToBigInt(Isolate* isolate, Handle<String> string,
+ int radix) {
+ BigIntParseIntHelper helper(isolate, string, radix);
+ return helper.GetResult();
+}
const char* DoubleToCString(double v, Vector<char> buffer) {
switch (FPCLASSIFY_NAMESPACE::fpclassify(v)) {
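
The StringToIntHelper added to this file is a template method: the base class owns radix detection and the chunked scanning loop, and subclasses define what accumulation means (a plain double for parseInt, an in-place BigInt multiply-add for StringToBigInt). The following is a rough standalone model of that split, assuming decimal digits only and eliding whitespace, signs, and the error states; IntParserBase and DoubleParser are invented names.

    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <utility>

    // Base class: owns the chunked scanning loop (the "template method").
    class IntParserBase {
     public:
      IntParserBase(std::string subject, int radix)
          : subject_(std::move(subject)), radix_(radix) {}
      virtual ~IntParserBase() = default;

      void Parse() {
        uint32_t part = 0, multiplier = 1;
        for (char c : subject_) {
          int d = c - '0';
          if (d < 0 || d >= radix_) break;  // decimal digits only here
          const uint32_t kMaximumMultiplier = 0xffffffffU / 36;
          uint32_t m = multiplier * static_cast<uint32_t>(radix_);
          if (m > kMaximumMultiplier) {
            MultiplyAdd(multiplier, part);  // flush the full 32-bit chunk
            part = 0;
            m = static_cast<uint32_t>(radix_);
          }
          part = part * radix_ + d;
          multiplier = m;
        }
        MultiplyAdd(multiplier, part);  // flush the tail chunk
      }

     protected:
      // Subclass hook, mirroring StringToIntHelper::ResultMultiplyAdd.
      virtual void MultiplyAdd(uint32_t multiplier, uint32_t part) = 0;

     private:
      std::string subject_;
      int radix_;
    };

    // Double-valued accumulator, like NumberParseIntHelper.
    class DoubleParser : public IntParserBase {
     public:
      using IntParserBase::IntParserBase;
      double result() const { return result_; }

     protected:
      void MultiplyAdd(uint32_t multiplier, uint32_t part) override {
        result_ = result_ * multiplier + part;
      }

     private:
      double result_ = 0;
    };

    int main() {
      DoubleParser parser("123456789012345678901234567890", 10);
      parser.Parse();
      std::printf("%.17g\n", parser.result());  // ~1.2345678901234568e+29
    }

The point of the split is that a BigInt-backed subclass can reuse the identical scanning loop by overriding only the accumulation hook, which is exactly how BigIntParseIntHelper works above.
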
diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h
index e34afaa4d2..ac689c8b51 100644
--- a/deps/v8/src/conversions.h
+++ b/deps/v8/src/conversions.h
@@ -13,35 +13,15 @@
namespace v8 {
namespace internal {
+class BigInt;
template <typename T>
class Handle;
class UnicodeCache;
-// Maximum number of significant digits in decimal representation.
-// The longest possible double in decimal representation is
-// (2^53 - 1) * 2 ^ -1074 that is (2 ^ 53 - 1) * 5 ^ 1074 / 10 ^ 1074
-// (768 digits). If we parse a number whose first digits are equal to the
-// mean of two adjacent doubles (which can have up to 769 digits), the
-// result must be rounded to the bigger one unless the tail consists of
-// zeros, so we don't need to preserve all the digits.
-const int kMaxSignificantDigits = 772;
-
 // The limit for the fractionDigits/precision for toFixed, toPrecision
// and toExponential.
const int kMaxFractionDigits = 100;
-inline bool isDigit(int x, int radix) {
- return (x >= '0' && x <= '9' && x < '0' + radix)
- || (radix > 10 && x >= 'a' && x < 'a' + radix - 10)
- || (radix > 10 && x >= 'A' && x < 'A' + radix - 10);
-}
-
-
-inline bool isBinaryDigit(int x) {
- return x == '0' || x == '1';
-}
-
-
// The fast double-to-(unsigned-)int conversion routine does not guarantee
// rounding towards zero.
// If x is NaN, the result is INT_MIN. Otherwise the result is the argument x,
@@ -123,15 +103,10 @@ double StringToDouble(UnicodeCache* unicode_cache,
int flags,
double empty_string_val = 0);
-// Converts a string into an integer.
-double StringToInt(UnicodeCache* unicode_cache,
- Vector<const uint8_t> vector,
- int radix);
-
+double StringToInt(Isolate* isolate, Handle<String> string, int radix);
-double StringToInt(UnicodeCache* unicode_cache,
- Vector<const uc16> vector,
- int radix);
+MaybeHandle<BigInt> StringToBigInt(Isolate* isolate, Handle<String> string,
+ int radix);
const int kDoubleToCStringMinBufferSize = 100;
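
The isDigit and isBinaryDigit helpers removed from this header survive as file-local helpers in conversions.cc. Since the three-way range check is easy to get wrong, here is a self-contained restatement with a few spot checks; IsRadixDigit is simply a renamed copy of the removed isDigit.

    #include <cassert>

    inline bool IsRadixDigit(int x, int radix) {
      return (x >= '0' && x <= '9' && x < '0' + radix) ||
             (radix > 10 && x >= 'a' && x < 'a' + radix - 10) ||
             (radix > 10 && x >= 'A' && x < 'A' + radix - 10);
    }

    int main() {
      assert(IsRadixDigit('7', 8));    // valid octal digit
      assert(!IsRadixDigit('8', 8));   // '8' is out of range for radix 8
      assert(IsRadixDigit('f', 16));   // hex accepts a-f...
      assert(!IsRadixDigit('g', 16));  // ...but not 'g'
      assert(IsRadixDigit('z', 36));   // radix 36 uses the whole alphabet
    }
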
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index afef806788..a5709dbec9 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -420,8 +420,7 @@ void RuntimeCallTimer::Snapshot() {
Resume(now);
}
-RuntimeCallStats::RuntimeCallStats()
- : in_use_(false), thread_id_(ThreadId::Current()) {
+RuntimeCallStats::RuntimeCallStats() : in_use_(false) {
static const char* const kNames[] = {
#define CALL_BUILTIN_COUNTER(name) "GC_" #name,
FOR_EACH_GC_COUNTER(CALL_BUILTIN_COUNTER) //
@@ -477,7 +476,7 @@ const int RuntimeCallStats::counters_count =
// static
void RuntimeCallStats::Enter(RuntimeCallStats* stats, RuntimeCallTimer* timer,
CounterId counter_id) {
- DCHECK(ThreadId::Current().Equals(stats->thread_id()));
+ DCHECK(stats->IsCalledOnTheSameThread());
RuntimeCallCounter* counter = &(stats->*counter_id);
DCHECK(counter->name() != nullptr);
timer->Start(counter, stats->current_timer_.Value());
@@ -487,8 +486,16 @@ void RuntimeCallStats::Enter(RuntimeCallStats* stats, RuntimeCallTimer* timer,
// static
void RuntimeCallStats::Leave(RuntimeCallStats* stats, RuntimeCallTimer* timer) {
- DCHECK(ThreadId::Current().Equals(stats->thread_id()));
- CHECK(stats->current_timer_.Value() == timer);
+ DCHECK(stats->IsCalledOnTheSameThread());
+ if (stats->current_timer_.Value() != timer) {
+    // This branch exists to diagnose the crash reported in crbug.com/760649.
+ RuntimeCallTimer* stack_top = stats->current_timer_.Value();
+ EmbeddedVector<char, 200> text;
+ SNPrintF(text, "ERROR: Leaving counter '%s', stack top '%s'.\n",
+ timer->name(), stack_top ? stack_top->name() : "(null)");
+ USE(text);
+ CHECK(false);
+ }
stats->current_timer_.SetValue(timer->Stop());
RuntimeCallTimer* cur_timer = stats->current_timer_.Value();
stats->current_counter_.SetValue(cur_timer ? cur_timer->counter() : nullptr);
@@ -514,6 +521,13 @@ void RuntimeCallStats::CorrectCurrentCounterId(RuntimeCallStats* stats,
stats->current_counter_.SetValue(counter);
}
+bool RuntimeCallStats::IsCalledOnTheSameThread() {
+ if (!thread_id_.Equals(ThreadId::Invalid()))
+ return thread_id_.Equals(ThreadId::Current());
+ thread_id_ = ThreadId::Current();
+ return true;
+}
+
void RuntimeCallStats::Print(std::ostream& os) {
RuntimeCallStatEntries entries;
if (current_timer_.Value() != nullptr) {
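
The counters change above replaces an eager thread-id capture in the constructor with lazy binding: RuntimeCallStats now adopts whichever thread calls it first and asserts that all later calls come from that same thread. A small sketch of the idiom, outside V8 and with an invented ThreadAffine class:

    #include <cassert>
    #include <thread>

    class ThreadAffine {
     public:
      bool IsCalledOnTheSameThread() {
        // A default-constructed std::thread::id plays the role of
        // ThreadId::Invalid(): it never equals a running thread's id.
        if (bound_id_ != std::thread::id())
          return bound_id_ == std::this_thread::get_id();
        bound_id_ = std::this_thread::get_id();  // bind lazily on first call
        return true;
      }

     private:
      std::thread::id bound_id_;
    };

    int main() {
      ThreadAffine stats;  // constructed on the main thread, not yet bound
      std::thread worker([&stats] {
        assert(stats.IsCalledOnTheSameThread());  // first call binds worker
        assert(stats.IsCalledOnTheSameThread());  // later calls still pass
      });
      worker.join();
      assert(!stats.IsCalledOnTheSameThread());   // main thread now "wrong"
    }

As in the original, the check itself is not synchronized; it is intended to back a debug-only assertion (a DCHECK), not to serialize access.
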
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index 9e0f46ae40..3fa3ae0306 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -752,86 +752,84 @@ class RuntimeCallTimer final {
V(ValueDeserializer_ReadValue) \
V(ValueSerializer_WriteValue)
-#define FOR_EACH_MANUAL_COUNTER(V) \
- V(AccessorGetterCallback) \
- V(AccessorNameGetterCallback) \
- V(AccessorNameSetterCallback) \
- V(ArrayLengthGetter) \
- V(ArrayLengthSetter) \
- V(BoundFunctionNameGetter) \
- V(BoundFunctionLengthGetter) \
- V(CompileCodeLazy) \
- V(CompileDeserialize) \
- V(CompileEval) \
- V(CompileFullCode) \
- V(CompileAnalyse) \
- V(CompileBackgroundIgnition) \
- V(CompileFunction) \
- V(CompileGetFromOptimizedCodeMap) \
- V(CompileIgnition) \
- V(CompileIgnitionFinalization) \
- V(CompileInnerFunction) \
- V(CompileRenumber) \
- V(CompileRewriteReturnResult) \
- V(CompileScopeAnalysis) \
- V(CompileScript) \
- V(CompileSerialize) \
- V(CompileWaitForDispatcher) \
- V(DeoptimizeCode) \
- V(FunctionCallback) \
- V(FunctionPrototypeGetter) \
- V(FunctionPrototypeSetter) \
- V(GC_Custom_AllAvailableGarbage) \
- V(GC_Custom_IncrementalMarkingObserver) \
- V(GC_Custom_SlowAllocateRaw) \
- V(GCEpilogueCallback) \
- V(GCPrologueCallback) \
- V(GenericNamedPropertyDefinerCallback) \
- V(GenericNamedPropertyDeleterCallback) \
- V(GenericNamedPropertyDescriptorCallback) \
- V(GenericNamedPropertyQueryCallback) \
- V(GenericNamedPropertySetterCallback) \
- V(GetMoreDataCallback) \
- V(IndexedPropertyDefinerCallback) \
- V(IndexedPropertyDeleterCallback) \
- V(IndexedPropertyDescriptorCallback) \
- V(IndexedPropertyGetterCallback) \
- V(IndexedPropertyQueryCallback) \
- V(IndexedPropertySetterCallback) \
- V(InvokeApiInterruptCallbacks) \
- V(InvokeFunctionCallback) \
- V(JS_Execution) \
- V(Map_SetPrototype) \
- V(Map_TransitionToAccessorProperty) \
- V(Map_TransitionToDataProperty) \
- V(Object_DeleteProperty) \
- V(OptimizeCode) \
- V(ParseArrowFunctionLiteral) \
- V(ParseBackgroundArrowFunctionLiteral) \
- V(ParseBackgroundFunctionLiteral) \
- V(ParseEval) \
- V(ParseFunction) \
- V(ParseFunctionLiteral) \
- V(ParseProgram) \
- V(PreParseArrowFunctionLiteral) \
- V(PreParseBackgroundArrowFunctionLiteral) \
- V(PreParseBackgroundNoVariableResolution) \
- V(PreParseBackgroundWithVariableResolution) \
- V(PreParseNoVariableResolution) \
- V(PreParseWithVariableResolution) \
- V(PropertyCallback) \
- V(PrototypeMap_TransitionToAccessorProperty) \
- V(PrototypeMap_TransitionToDataProperty) \
- V(PrototypeObject_DeleteProperty) \
- V(RecompileConcurrent) \
- V(RecompileSynchronous) \
- V(ReconfigureToDataProperty) \
- V(StringLengthGetter) \
- V(TestCounter1) \
- V(TestCounter2) \
- V(TestCounter3) \
- /* Dummy counter for the unexpected stub miss. */ \
- V(UnexpectedStubMiss)
+#define FOR_EACH_MANUAL_COUNTER(V) \
+ V(AccessorGetterCallback) \
+ V(AccessorNameGetterCallback) \
+ V(AccessorNameSetterCallback) \
+ V(ArrayLengthGetter) \
+ V(ArrayLengthSetter) \
+ V(BoundFunctionNameGetter) \
+ V(BoundFunctionLengthGetter) \
+ V(CompileCodeLazy) \
+ V(CompileDeserialize) \
+ V(CompileEval) \
+ V(CompileFullCode) \
+ V(CompileAnalyse) \
+ V(CompileBackgroundIgnition) \
+ V(CompileFunction) \
+ V(CompileGetFromOptimizedCodeMap) \
+ V(CompileIgnition) \
+ V(CompileIgnitionFinalization) \
+ V(CompileInnerFunction) \
+ V(CompileRenumber) \
+ V(CompileRewriteReturnResult) \
+ V(CompileScopeAnalysis) \
+ V(CompileScript) \
+ V(CompileSerialize) \
+ V(CompileWaitForDispatcher) \
+ V(DeoptimizeCode) \
+ V(FunctionCallback) \
+ V(FunctionPrototypeGetter) \
+ V(FunctionPrototypeSetter) \
+ V(GC_Custom_AllAvailableGarbage) \
+ V(GC_Custom_IncrementalMarkingObserver) \
+ V(GC_Custom_SlowAllocateRaw) \
+ V(GCEpilogueCallback) \
+ V(GCPrologueCallback) \
+ V(GenericNamedPropertyDefinerCallback) \
+ V(GenericNamedPropertyDeleterCallback) \
+ V(GenericNamedPropertyDescriptorCallback) \
+ V(GenericNamedPropertyQueryCallback) \
+ V(GenericNamedPropertySetterCallback) \
+ V(GetMoreDataCallback) \
+ V(IndexedPropertyDefinerCallback) \
+ V(IndexedPropertyDeleterCallback) \
+ V(IndexedPropertyDescriptorCallback) \
+ V(IndexedPropertyGetterCallback) \
+ V(IndexedPropertyQueryCallback) \
+ V(IndexedPropertySetterCallback) \
+ V(InvokeApiInterruptCallbacks) \
+ V(InvokeFunctionCallback) \
+ V(JS_Execution) \
+ V(Map_SetPrototype) \
+ V(Map_TransitionToAccessorProperty) \
+ V(Map_TransitionToDataProperty) \
+ V(Object_DeleteProperty) \
+ V(OptimizeCode) \
+ V(ParseArrowFunctionLiteral) \
+ V(ParseBackgroundArrowFunctionLiteral) \
+ V(ParseBackgroundFunctionLiteral) \
+ V(ParseEval) \
+ V(ParseFunction) \
+ V(ParseFunctionLiteral) \
+ V(ParseProgram) \
+ V(PreParseArrowFunctionLiteral) \
+ V(PreParseBackgroundArrowFunctionLiteral) \
+ V(PreParseBackgroundNoVariableResolution) \
+ V(PreParseBackgroundWithVariableResolution) \
+ V(PreParseNoVariableResolution) \
+ V(PreParseWithVariableResolution) \
+ V(PropertyCallback) \
+ V(PrototypeMap_TransitionToAccessorProperty) \
+ V(PrototypeMap_TransitionToDataProperty) \
+ V(PrototypeObject_DeleteProperty) \
+ V(RecompileConcurrent) \
+ V(RecompileSynchronous) \
+ V(ReconfigureToDataProperty) \
+ V(StringLengthGetter) \
+ V(TestCounter1) \
+ V(TestCounter2) \
+ V(TestCounter3)
#define FOR_EACH_HANDLER_COUNTER(V) \
V(KeyedLoadIC_LoadIndexedStringStub) \
@@ -869,7 +867,7 @@ class RuntimeCallTimer final {
V(LoadIC_NonReceiver) \
V(LoadIC_Premonomorphic) \
V(LoadIC_SlowStub) \
- V(LoadIC_StringLengthStub) \
+ V(LoadIC_StringLength) \
V(StoreIC_HandlerCacheHit_Accessor) \
V(StoreIC_NonReceiver) \
V(StoreIC_Premonomorphic) \
@@ -939,6 +937,7 @@ class RuntimeCallStats final : public ZoneObject {
RuntimeCallTimer* current_timer() { return current_timer_.Value(); }
RuntimeCallCounter* current_counter() { return current_counter_.Value(); }
bool InUse() { return in_use_; }
+ bool IsCalledOnTheSameThread();
private:
// Top of a stack of active timers.
@@ -992,45 +991,48 @@ class RuntimeCallTimerScope {
DISALLOW_COPY_AND_ASSIGN(RuntimeCallTimerScope);
};
-#define HISTOGRAM_RANGE_LIST(HR) \
- /* Generic range histograms: HR(name, caption, min, max, num_buckets) */ \
- HR(detached_context_age_in_gc, V8.DetachedContextAgeInGC, 0, 20, 21) \
- HR(code_cache_reject_reason, V8.CodeCacheRejectReason, 1, 6, 6) \
- HR(errors_thrown_per_context, V8.ErrorsThrownPerContext, 0, 200, 20) \
- HR(debug_feature_usage, V8.DebugFeatureUsage, 1, 7, 7) \
- HR(incremental_marking_reason, V8.GCIncrementalMarkingReason, 0, 21, 22) \
- HR(incremental_marking_sum, V8.GCIncrementalMarkingSum, 0, 10000, 101) \
- HR(mark_compact_reason, V8.GCMarkCompactReason, 0, 21, 22) \
- HR(scavenge_reason, V8.GCScavengeReason, 0, 21, 22) \
- HR(young_generation_handling, V8.GCYoungGenerationHandling, 0, 2, 3) \
- /* Asm/Wasm. */ \
- HR(wasm_functions_per_asm_module, V8.WasmFunctionsPerModule.asm, 1, 100000, \
- 51) \
- HR(wasm_functions_per_wasm_module, V8.WasmFunctionsPerModule.wasm, 1, \
- 100000, 51) \
- HR(array_buffer_big_allocations, V8.ArrayBufferLargeAllocations, 0, 4096, \
- 13) \
- HR(array_buffer_new_size_failures, V8.ArrayBufferNewSizeFailures, 0, 4096, \
- 13) \
- HR(shared_array_allocations, V8.SharedArrayAllocationSizes, 0, 4096, 13) \
- HR(wasm_asm_function_size_bytes, V8.WasmFunctionSizeBytes.asm, 1, GB, 51) \
- HR(wasm_wasm_function_size_bytes, V8.WasmFunctionSizeBytes.wasm, 1, GB, 51) \
- HR(wasm_asm_module_size_bytes, V8.WasmModuleSizeBytes.asm, 1, GB, 51) \
- HR(wasm_wasm_module_size_bytes, V8.WasmModuleSizeBytes.wasm, 1, GB, 51) \
- HR(wasm_asm_min_mem_pages_count, V8.WasmMinMemPagesCount.asm, 1, 2 << 16, \
- 51) \
- HR(wasm_wasm_min_mem_pages_count, V8.WasmMinMemPagesCount.wasm, 1, 2 << 16, \
- 51) \
- HR(wasm_wasm_max_mem_pages_count, V8.WasmMaxMemPagesCount.wasm, 1, 2 << 16, \
- 51) \
- HR(wasm_decode_asm_module_peak_memory_bytes, \
- V8.WasmDecodeModulePeakMemoryBytes.asm, 1, GB, 51) \
- HR(wasm_decode_wasm_module_peak_memory_bytes, \
- V8.WasmDecodeModulePeakMemoryBytes.wasm, 1, GB, 51) \
- HR(asm_wasm_translation_peak_memory_bytes, \
- V8.AsmWasmTranslationPeakMemoryBytes, 1, GB, 51) \
- HR(wasm_compile_function_peak_memory_bytes, \
- V8.WasmCompileFunctionPeakMemoryBytes, 1, GB, 51)
+#define HISTOGRAM_RANGE_LIST(HR) \
+ /* Generic range histograms: HR(name, caption, min, max, num_buckets) */ \
+ HR(detached_context_age_in_gc, V8.DetachedContextAgeInGC, 0, 20, 21) \
+ HR(code_cache_reject_reason, V8.CodeCacheRejectReason, 1, 6, 6) \
+ HR(errors_thrown_per_context, V8.ErrorsThrownPerContext, 0, 200, 20) \
+ HR(debug_feature_usage, V8.DebugFeatureUsage, 1, 7, 7) \
+ HR(incremental_marking_reason, V8.GCIncrementalMarkingReason, 0, 21, 22) \
+ HR(incremental_marking_sum, V8.GCIncrementalMarkingSum, 0, 10000, 101) \
+ HR(mark_compact_reason, V8.GCMarkCompactReason, 0, 21, 22) \
+ HR(scavenge_reason, V8.GCScavengeReason, 0, 21, 22) \
+ HR(young_generation_handling, V8.GCYoungGenerationHandling, 0, 2, 3) \
+ /* Asm/Wasm. */ \
+ HR(wasm_functions_per_asm_module, V8.WasmFunctionsPerModule.asm, 1, 100000, \
+ 51) \
+ HR(wasm_functions_per_wasm_module, V8.WasmFunctionsPerModule.wasm, 1, \
+ 100000, 51) \
+ HR(array_buffer_big_allocations, V8.ArrayBufferLargeAllocations, 0, 4096, \
+ 13) \
+ HR(array_buffer_new_size_failures, V8.ArrayBufferNewSizeFailures, 0, 4096, \
+ 13) \
+ HR(shared_array_allocations, V8.SharedArrayAllocationSizes, 0, 4096, 13) \
+ HR(wasm_asm_function_size_bytes, V8.WasmFunctionSizeBytes.asm, 1, GB, 51) \
+ HR(wasm_wasm_function_size_bytes, V8.WasmFunctionSizeBytes.wasm, 1, GB, 51) \
+ HR(wasm_asm_module_size_bytes, V8.WasmModuleSizeBytes.asm, 1, GB, 51) \
+ HR(wasm_wasm_module_size_bytes, V8.WasmModuleSizeBytes.wasm, 1, GB, 51) \
+ HR(wasm_asm_min_mem_pages_count, V8.WasmMinMemPagesCount.asm, 1, 2 << 16, \
+ 51) \
+ HR(wasm_wasm_min_mem_pages_count, V8.WasmMinMemPagesCount.wasm, 1, 2 << 16, \
+ 51) \
+ HR(wasm_wasm_max_mem_pages_count, V8.WasmMaxMemPagesCount.wasm, 1, 2 << 16, \
+ 51) \
+ HR(wasm_decode_asm_module_peak_memory_bytes, \
+ V8.WasmDecodeModulePeakMemoryBytes.asm, 1, GB, 51) \
+ HR(wasm_decode_wasm_module_peak_memory_bytes, \
+ V8.WasmDecodeModulePeakMemoryBytes.wasm, 1, GB, 51) \
+ HR(asm_wasm_translation_peak_memory_bytes, \
+ V8.AsmWasmTranslationPeakMemoryBytes, 1, GB, 51) \
+ HR(wasm_compile_function_peak_memory_bytes, \
+ V8.WasmCompileFunctionPeakMemoryBytes, 1, GB, 51) \
+ HR(asm_module_size_bytes, V8.AsmModuleSizeBytes, 1, GB, 51) \
+ HR(asm_wasm_translation_throughput, V8.AsmWasmTranslationThroughput, 1, 100, \
+ 20)
#define HISTOGRAM_TIMER_LIST(HT) \
/* Garbage collection timers. */ \
@@ -1100,13 +1102,7 @@ class RuntimeCallTimerScope {
HP(external_fragmentation_code_space, \
V8.MemoryExternalFragmentationCodeSpace) \
HP(external_fragmentation_map_space, V8.MemoryExternalFragmentationMapSpace) \
- HP(external_fragmentation_lo_space, V8.MemoryExternalFragmentationLoSpace) \
- /* Percentages of heap committed to each space. */ \
- HP(heap_fraction_new_space, V8.MemoryHeapFractionNewSpace) \
- HP(heap_fraction_old_space, V8.MemoryHeapFractionOldSpace) \
- HP(heap_fraction_code_space, V8.MemoryHeapFractionCodeSpace) \
- HP(heap_fraction_map_space, V8.MemoryHeapFractionMapSpace) \
- HP(heap_fraction_lo_space, V8.MemoryHeapFractionLoSpace)
+ HP(external_fragmentation_lo_space, V8.MemoryExternalFragmentationLoSpace)
// Note: These use Histogram with options (min=1000, max=500000, buckets=50).
#define HISTOGRAM_LEGACY_MEMORY_LIST(HM) \
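
The reindented tables above are X-macro lists: FOR_EACH_MANUAL_COUNTER and HISTOGRAM_RANGE_LIST are expanded repeatedly with different per-entry macros so that enums, name tables, and accessors stay in lockstep. A toy illustration of the pattern; FOR_EACH_DEMO_COUNTER and its entries are invented, not one of V8's lists:

    #include <cstdio>

    #define FOR_EACH_DEMO_COUNTER(V) \
      V(ParseProgram)                \
      V(CompileScript)               \
      V(DeoptimizeCode)

    // Expansion 1: an enum with one entry per counter.
    #define DECLARE_ENUM(name) k##name,
    enum DemoCounterId { FOR_EACH_DEMO_COUNTER(DECLARE_ENUM) kCount };
    #undef DECLARE_ENUM

    // Expansion 2: a parallel table of human-readable names.
    #define DECLARE_NAME(name) #name,
    static const char* kDemoCounterNames[] = {
        FOR_EACH_DEMO_COUNTER(DECLARE_NAME)};
    #undef DECLARE_NAME

    int main() {
      for (int i = 0; i < kCount; i++) {
        std::printf("%d -> %s\n", i, kDemoCounterNames[i]);
      }
    }
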
diff --git a/deps/v8/src/d8-console.cc b/deps/v8/src/d8-console.cc
index 74b9f6f475..5848883b8c 100644
--- a/deps/v8/src/d8-console.cc
+++ b/deps/v8/src/d8-console.cc
@@ -14,16 +14,11 @@ void WriteToFile(FILE* file, Isolate* isolate,
HandleScope handle_scope(isolate);
if (i != 0) fprintf(file, " ");
- // Explicitly catch potential exceptions in toString().
- v8::TryCatch try_catch(isolate);
Local<Value> arg = args[i];
Local<String> str_obj;
if (arg->IsSymbol()) arg = Local<Symbol>::Cast(arg)->Name();
- if (!arg->ToString(isolate->GetCurrentContext()).ToLocal(&str_obj)) {
- Shell::ReportException(isolate, &try_catch);
- return;
- }
+ if (!arg->ToString(isolate->GetCurrentContext()).ToLocal(&str_obj)) return;
v8::String::Utf8Value str(isolate, str_obj);
int n = static_cast<int>(fwrite(*str, sizeof(**str), str.length(), file));
@@ -73,10 +68,7 @@ void D8Console::Time(const debug::ConsoleCallArguments& args,
Local<Value> arg = args[0];
Local<String> label;
v8::TryCatch try_catch(isolate_);
- if (!arg->ToString(isolate_->GetCurrentContext()).ToLocal(&label)) {
- Shell::ReportException(isolate_, &try_catch);
- return;
- }
+ if (!arg->ToString(isolate_->GetCurrentContext()).ToLocal(&label)) return;
v8::String::Utf8Value utf8(isolate_, label);
std::string string(*utf8);
auto find = timers_.find(string);
@@ -100,10 +92,7 @@ void D8Console::TimeEnd(const debug::ConsoleCallArguments& args,
Local<Value> arg = args[0];
Local<String> label;
v8::TryCatch try_catch(isolate_);
- if (!arg->ToString(isolate_->GetCurrentContext()).ToLocal(&label)) {
- Shell::ReportException(isolate_, &try_catch);
- return;
- }
+ if (!arg->ToString(isolate_->GetCurrentContext()).ToLocal(&label)) return;
v8::String::Utf8Value utf8(isolate_, label);
std::string string(*utf8);
auto find = timers_.find(string);
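
The d8-console edits drop the explicit TryCatch plus Shell::ReportException dance: when ToString() fails, the functions now simply return and leave the pending exception for the caller. A minimal analogue of that early-return convention, using std::optional as a stand-in for V8's MaybeLocal (ToStringMaybe and PrintValue are invented names):

    #include <cstdio>
    #include <optional>
    #include <string>

    std::optional<std::string> ToStringMaybe(int value) {
      if (value < 0) return std::nullopt;  // stands in for a thrown exception
      return std::to_string(value);
    }

    void PrintValue(int value) {
      std::optional<std::string> str = ToStringMaybe(value);
      if (!str) return;  // early return: leave the "exception" pending
      std::printf("%s\n", str->c_str());
    }

    int main() {
      PrintValue(42);  // prints "42"
      PrintValue(-1);  // silently returns, as WriteToFile now does
    }
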
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 564472a06c..8a7b922ebb 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -41,6 +41,10 @@
#include "src/utils.h"
#include "src/v8.h"
+#if defined(LEAK_SANITIZER)
+#include <sanitizer/lsan_interface.h>
+#endif
+
#if !defined(_WIN32) && !defined(_WIN64)
#include <unistd.h> // NOLINT
#else
@@ -138,7 +142,7 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
void Free(void* data, size_t length) override {
#if USE_VM
if (RoundToPageSize(&length)) {
- base::VirtualMemory::ReleaseRegion(data, length);
+ base::OS::ReleaseRegion(data, length);
return;
}
#endif
@@ -156,11 +160,14 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
}
#if USE_VM
void* VirtualMemoryAllocate(size_t length) {
- void* data = base::VirtualMemory::ReserveRegion(length, nullptr);
- if (data && !base::VirtualMemory::CommitRegion(data, length, false)) {
- base::VirtualMemory::ReleaseRegion(data, length);
+ void* data = base::OS::ReserveRegion(length, nullptr);
+ if (data && !base::OS::CommitRegion(data, length, false)) {
+ base::OS::ReleaseRegion(data, length);
return nullptr;
}
+#if defined(LEAK_SANITIZER)
+ __lsan_register_root_region(data, length);
+#endif
MSAN_MEMORY_IS_INITIALIZED(data, length);
return data;
}
@@ -228,6 +235,10 @@ class PredictablePlatform : public Platform {
return synthetic_time_in_sec_ += 0.00001;
}
+ double CurrentClockTimeMillis() override {
+ return MonotonicallyIncreasingTime() * base::Time::kMillisecondsPerSecond;
+ }
+
v8::TracingController* GetTracingController() override {
return platform_->GetTracingController();
}
@@ -391,7 +402,6 @@ static platform::tracing::TraceConfig* CreateTraceConfigFromJSON(
class PerIsolateData {
public:
explicit PerIsolateData(Isolate* isolate) : isolate_(isolate), realms_(NULL) {
- HandleScope scope(isolate);
isolate->SetData(0, this);
}
@@ -411,6 +421,25 @@ class PerIsolateData {
PerIsolateData* data_;
};
+ inline void SetTimeout(Local<Function> callback, Local<Context> context) {
+ set_timeout_callbacks_.emplace(isolate_, callback);
+ set_timeout_contexts_.emplace(isolate_, context);
+ }
+
+ inline MaybeLocal<Function> GetTimeoutCallback() {
+ if (set_timeout_callbacks_.empty()) return MaybeLocal<Function>();
+ Local<Function> result = set_timeout_callbacks_.front().Get(isolate_);
+ set_timeout_callbacks_.pop();
+ return result;
+ }
+
+ inline MaybeLocal<Context> GetTimeoutContext() {
+ if (set_timeout_contexts_.empty()) return MaybeLocal<Context>();
+ Local<Context> result = set_timeout_contexts_.front().Get(isolate_);
+ set_timeout_contexts_.pop();
+ return result;
+ }
+
private:
friend class Shell;
friend class RealmScope;
@@ -420,6 +449,8 @@ class PerIsolateData {
int realm_switch_;
Global<Context>* realms_;
Global<Value> realm_shared_;
+ std::queue<Global<Function>> set_timeout_callbacks_;
+ std::queue<Global<Context>> set_timeout_contexts_;
int RealmIndexOrThrow(const v8::FunctionCallbackInfo<v8::Value>& args,
int arg_offset);
@@ -1290,6 +1321,14 @@ void Shell::Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
}
+void Shell::SetTimeout(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ args.GetReturnValue().Set(v8::Number::New(isolate, 0));
+ if (args.Length() == 0 || !args[0]->IsFunction()) return;
+ Local<Function> callback = Local<Function>::Cast(args[0]);
+ Local<Context> context = isolate->GetCurrentContext();
+ PerIsolateData::Get(isolate)->SetTimeout(callback, context);
+}
void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
@@ -1513,7 +1552,7 @@ Counter* CounterCollection::GetNextCounter() {
void Shell::MapCounters(v8::Isolate* isolate, const char* name) {
counters_file_ = base::OS::MemoryMappedFile::create(
- name, sizeof(CounterCollection), &local_counters_);
+ name, nullptr, sizeof(CounterCollection), &local_counters_);
void* memory = (counters_file_ == NULL) ?
NULL : counters_file_->memory();
if (memory == NULL) {
@@ -1641,6 +1680,10 @@ Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
String::NewFromUtf8(isolate, "load", NewStringType::kNormal)
.ToLocalChecked(),
FunctionTemplate::New(isolate, Load));
+ global_template->Set(
+ String::NewFromUtf8(isolate, "setTimeout", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, SetTimeout));
// Some Emscripten-generated code tries to call 'quit', which in turn would
// call C's exit(). This would lead to memory leaks, because there is no way
// we can terminate cleanly then, so we need a way to hide 'quit'.
@@ -2229,7 +2272,7 @@ class InspectorFrontend final : public v8_inspector::V8Inspector::Channel {
v8::NewStringType::kNormal)
.ToLocalChecked();
Local<Value> value = exception->Get(context, key).ToLocalChecked();
- CHECK(value->StrictEquals(expected));
+ DCHECK(value->StrictEquals(expected));
}
#endif
}
@@ -2393,9 +2436,9 @@ void SourceGroup::ExecuteInThread() {
next_semaphore_.Wait();
{
Isolate::Scope iscope(isolate);
+ PerIsolateData data(isolate);
{
HandleScope scope(isolate);
- PerIsolateData data(isolate);
Local<Context> context = Shell::CreateEvaluationContext(isolate);
{
Context::Scope cscope(context);
@@ -2738,6 +2781,12 @@ bool Shell::SetOptions(int argc, char* argv[]) {
}
}
+// On x64 Linux we want to enable the Wasm trap handler by default. Setting
+// the flag here allows the command line argument to still override it.
+#if V8_OS_LINUX && V8_TARGET_ARCH_X64
+ SetFlagsFromString("--wasm-trap-handler");
+#endif
+
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
// Set up isolated source groups.
@@ -2778,10 +2827,7 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[], bool last_run) {
{
EnsureEventLoopInitialized(isolate);
if (options.lcov_file) {
- debug::Coverage::Mode mode = i::FLAG_block_coverage
- ? debug::Coverage::kBlockCount
- : debug::Coverage::kPreciseCount;
- debug::Coverage::SelectMode(isolate, mode);
+ debug::Coverage::SelectMode(isolate, debug::Coverage::kBlockCount);
}
HandleScope scope(isolate);
Local<Context> context = CreateEvaluationContext(isolate);
@@ -2844,39 +2890,48 @@ void Shell::SetWaitUntilDone(Isolate* isolate, bool value) {
}
}
-bool Shell::IsWaitUntilDone(Isolate* isolate) {
- base::LockGuard<base::Mutex> guard(isolate_status_lock_.Pointer());
- DCHECK_GT(isolate_status_.count(isolate), 0);
- return isolate_status_[isolate];
+namespace {
+void ProcessMessages(Isolate* isolate,
+ std::function<platform::MessageLoopBehavior()> behavior) {
+ Platform* platform = GetDefaultPlatform();
+ while (true) {
+ while (v8::platform::PumpMessageLoop(platform, isolate, behavior())) {
+ isolate->RunMicrotasks();
+ }
+ if (platform->IdleTasksEnabled(isolate)) {
+ v8::platform::RunIdleTasks(platform, isolate,
+ 50.0 / base::Time::kMillisecondsPerSecond);
+ }
+ HandleScope handle_scope(isolate);
+ PerIsolateData* data = PerIsolateData::Get(isolate);
+ Local<Function> callback;
+ if (!data->GetTimeoutCallback().ToLocal(&callback)) break;
+ Local<Context> context;
+ if (!data->GetTimeoutContext().ToLocal(&context)) break;
+ TryCatch try_catch(isolate);
+ try_catch.SetVerbose(true);
+ Context::Scope context_scope(context);
+ if (callback->Call(context, Undefined(isolate), 0, nullptr).IsEmpty()) {
+ Shell::ReportException(isolate, &try_catch);
+ return;
+ }
+ }
}
+} // anonymous namespace
void Shell::CompleteMessageLoop(Isolate* isolate) {
- Platform* platform = GetDefaultPlatform();
- while (v8::platform::PumpMessageLoop(
- platform, isolate,
- Shell::IsWaitUntilDone(isolate)
- ? platform::MessageLoopBehavior::kWaitForWork
- : platform::MessageLoopBehavior::kDoNotWait)) {
- isolate->RunMicrotasks();
- }
- if (platform->IdleTasksEnabled(isolate)) {
- v8::platform::RunIdleTasks(platform, isolate,
- 50.0 / base::Time::kMillisecondsPerSecond);
- }
+ ProcessMessages(isolate, [isolate]() {
+ base::LockGuard<base::Mutex> guard(isolate_status_lock_.Pointer());
+ DCHECK_GT(isolate_status_.count(isolate), 0);
+ return isolate_status_[isolate]
+ ? platform::MessageLoopBehavior::kWaitForWork
+ : platform::MessageLoopBehavior::kDoNotWait;
+ });
}
void Shell::EmptyMessageQueues(Isolate* isolate) {
- Platform* platform = GetDefaultPlatform();
- // Pump the message loop until it is empty.
- while (v8::platform::PumpMessageLoop(
- platform, isolate, platform::MessageLoopBehavior::kDoNotWait)) {
- isolate->RunMicrotasks();
- }
- // Run the idle tasks.
- if (platform->IdleTasksEnabled(isolate)) {
- v8::platform::RunIdleTasks(platform, isolate,
- 50.0 / base::Time::kMillisecondsPerSecond);
- }
+ ProcessMessages(isolate,
+ []() { return platform::MessageLoopBehavior::kDoNotWait; });
}
class Serializer : public ValueSerializer::Delegate {
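CompleteMessageLoop and EmptyMessageQueues previously duplicated the pump-and-run-microtasks loop; the refactor above parameterizes a single ProcessMessages helper with a behavior callback, so each caller supplies only its waiting policy. A reduced sketch of that parameterization, with the platform machinery stubbed out (Behavior, PumpOnce, and the task queue are stand-ins, not V8 API):

    #include <functional>
    #include <iostream>
    #include <queue>

    // Stand-ins for platform::MessageLoopBehavior and the platform task queue.
    enum class Behavior { kDoNotWait, kWaitForWork };
    std::queue<int> tasks;

    // Runs one pending task if there is one; a real pump would block for new
    // work when asked for kWaitForWork.
    bool PumpOnce(Behavior behavior) {
      (void)behavior;
      if (tasks.empty()) return false;
      std::cout << "task " << tasks.front() << "\n";
      tasks.pop();
      return true;
    }

    // The shared loop: callers differ only in how they choose the behavior.
    void ProcessMessages(std::function<Behavior()> behavior) {
      while (PumpOnce(behavior())) {
      }
    }

    int main() {
      tasks.push(1);
      tasks.push(2);
      // EmptyMessageQueues-style caller: fixed policy.
      ProcessMessages([]() { return Behavior::kDoNotWait; });
      // CompleteMessageLoop-style caller: policy recomputed every iteration.
      bool wait_until_done = false;
      ProcessMessages([&]() {
        return wait_until_done ? Behavior::kWaitForWork : Behavior::kDoNotWait;
      });
    }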
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index c0922bc595..53c498b57d 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -408,6 +408,7 @@ class Shell : public i::AllStatic {
args.GetReturnValue().Set(ReadFromStdin(args.GetIsolate()));
}
static void Load(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void SetTimeout(const v8::FunctionCallbackInfo<v8::Value>& args);
static void WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args);
static void WorkerPostMessage(
const v8::FunctionCallbackInfo<v8::Value>& args);
@@ -462,7 +463,6 @@ class Shell : public i::AllStatic {
static ArrayBuffer::Allocator* array_buffer_allocator;
static void SetWaitUntilDone(Isolate* isolate, bool value);
- static bool IsWaitUntilDone(Isolate* isolate);
static char* ReadCharsFromTcpPort(const char* name, int* size_out);
diff --git a/deps/v8/src/d8.js b/deps/v8/src/d8.js
index e49c6b7458..287571b937 100644
--- a/deps/v8/src/d8.js
+++ b/deps/v8/src/d8.js
@@ -37,11 +37,13 @@ function Stringify(x, depth) {
case "boolean":
case "number":
case "function":
+ case "symbol":
return x.toString();
case "string":
return "\"" + x.toString() + "\"";
- case "symbol":
- return x.toString();
+ case "bigint":
+ // TODO(neis): Use x.toString() once we have it.
+ return String(x) + "n";
case "object":
if (IS_NULL(x)) return "null";
if (x.constructor && x.constructor.name === "Array") {
diff --git a/deps/v8/src/date.cc b/deps/v8/src/date.cc
index 06f5f4ffb1..40a96e0190 100644
--- a/deps/v8/src/date.cc
+++ b/deps/v8/src/date.cc
@@ -310,7 +310,7 @@ int DateCache::DaylightSavingsOffsetInMs(int64_t time_ms) {
}
// Binary search for daylight savings offset change point,
- // but give up if we don't find it in four iterations.
+ // but give up if we don't find it in five iterations.
for (int i = 4; i >= 0; --i) {
int delta = after_->start_sec - before_->end_sec;
int middle_sec = (i == 0) ? time_sec : before_->end_sec + delta / 2;
@@ -332,7 +332,7 @@ int DateCache::DaylightSavingsOffsetInMs(int64_t time_ms) {
}
}
}
- UNREACHABLE();
+ return 0;
}
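The date.cc hunk above corrects the comment to match the loop (with i running from 4 down to 0 the search makes five probes) and replaces the UNREACHABLE() on exhaustion with a neutral return of 0. A self-contained sketch of a probe-bounded binary search with a fallback, assuming a simple predicate in place of timezone data:

    #include <iostream>

    // Bounded binary search: narrow [lo, hi] toward the first value where
    // pred() holds, but give up after five probes and return a fallback
    // instead of asserting.
    static bool pred(int x) { return x >= 37; }

    int FindChangePoint(int lo, int hi) {
      for (int i = 4; i >= 0; --i) {  // five probes, like the loop above
        int mid = lo + (hi - lo) / 2;
        if (pred(mid)) {
          hi = mid;
        } else {
          lo = mid + 1;
        }
        if (lo == hi) return lo;  // change point isolated
      }
      return 0;  // probe budget exhausted: neutral fallback, not UNREACHABLE
    }

    int main() {
      // Five probes cannot isolate one value in a range of 100, so the
      // fallback is taken and 0 is printed.
      std::cout << FindChangePoint(0, 100) << "\n";
    }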
diff --git a/deps/v8/src/debug/debug-coverage.cc b/deps/v8/src/debug/debug-coverage.cc
index e1ba0782ef..8fe2edc08a 100644
--- a/deps/v8/src/debug/debug-coverage.cc
+++ b/deps/v8/src/debug/debug-coverage.cc
@@ -6,6 +6,7 @@
#include "src/ast/ast.h"
#include "src/base/hashmap.h"
+#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/isolate.h"
@@ -60,15 +61,14 @@ bool CompareSharedFunctionInfo(SharedFunctionInfo* a, SharedFunctionInfo* b) {
}
bool CompareCoverageBlock(const CoverageBlock& a, const CoverageBlock& b) {
- DCHECK(a.start != kNoSourcePosition);
- DCHECK(b.start != kNoSourcePosition);
+ DCHECK_NE(kNoSourcePosition, a.start);
+ DCHECK_NE(kNoSourcePosition, b.start);
if (a.start == b.start) return a.end > b.end;
return a.start < b.start;
}
std::vector<CoverageBlock> GetSortedBlockData(Isolate* isolate,
SharedFunctionInfo* shared) {
- DCHECK(FLAG_block_coverage);
DCHECK(shared->HasCoverageInfo());
CoverageInfo* coverage_info =
@@ -82,7 +82,7 @@ std::vector<CoverageBlock> GetSortedBlockData(Isolate* isolate,
const int until_pos = coverage_info->EndSourcePosition(i);
const int count = coverage_info->BlockCount(i);
- DCHECK(start_pos != kNoSourcePosition);
+ DCHECK_NE(kNoSourcePosition, start_pos);
result.emplace_back(start_pos, until_pos, count);
}
@@ -324,7 +324,6 @@ void ClampToBinary(CoverageFunction* function) {
}
void ResetAllBlockCounts(SharedFunctionInfo* shared) {
- DCHECK(FLAG_block_coverage);
DCHECK(shared->HasCoverageInfo());
CoverageInfo* coverage_info =
@@ -348,7 +347,6 @@ bool IsBlockMode(debug::Coverage::Mode mode) {
void CollectBlockCoverage(Isolate* isolate, CoverageFunction* function,
SharedFunctionInfo* info,
debug::Coverage::Mode mode) {
- DCHECK(FLAG_block_coverage);
DCHECK(IsBlockMode(mode));
function->has_block_coverage = true;
@@ -380,9 +378,10 @@ void CollectBlockCoverage(Isolate* isolate, CoverageFunction* function,
}
} // anonymous namespace
-Coverage* Coverage::CollectPrecise(Isolate* isolate) {
+std::unique_ptr<Coverage> Coverage::CollectPrecise(Isolate* isolate) {
DCHECK(!isolate->is_best_effort_code_coverage());
- Coverage* result = Collect(isolate, isolate->code_coverage_mode());
+ std::unique_ptr<Coverage> result =
+ Collect(isolate, isolate->code_coverage_mode());
if (isolate->is_precise_binary_code_coverage() ||
isolate->is_block_binary_code_coverage()) {
// We do not have to hold onto feedback vectors for invocations we already
@@ -392,12 +391,12 @@ Coverage* Coverage::CollectPrecise(Isolate* isolate) {
return result;
}
-Coverage* Coverage::CollectBestEffort(Isolate* isolate) {
+std::unique_ptr<Coverage> Coverage::CollectBestEffort(Isolate* isolate) {
return Collect(isolate, v8::debug::Coverage::kBestEffort);
}
-Coverage* Coverage::Collect(Isolate* isolate,
- v8::debug::Coverage::Mode collectionMode) {
+std::unique_ptr<Coverage> Coverage::Collect(
+ Isolate* isolate, v8::debug::Coverage::Mode collectionMode) {
SharedToCounterMap counter_map;
const bool reset_count = collectionMode != v8::debug::Coverage::kBestEffort;
@@ -439,7 +438,7 @@ Coverage* Coverage::Collect(Isolate* isolate,
// Iterate shared function infos of every script and build a mapping
// between source ranges and invocation counts.
- Coverage* result = new Coverage();
+ std::unique_ptr<Coverage> result(new Coverage());
Script::Iterator scripts(isolate);
while (Script* script = scripts.Next()) {
if (!script->IsUserJavaScript()) continue;
@@ -491,8 +490,7 @@ Coverage* Coverage::Collect(Isolate* isolate,
Handle<String> name(info->DebugName(), isolate);
CoverageFunction function(start, end, count, name);
- if (FLAG_block_coverage && IsBlockMode(collectionMode) &&
- info->HasCoverageInfo()) {
+ if (IsBlockMode(collectionMode) && info->HasCoverageInfo()) {
CollectBlockCoverage(isolate, &function, info, collectionMode);
}
@@ -521,7 +519,7 @@ void Coverage::SelectMode(Isolate* isolate, debug::Coverage::Mode mode) {
// recording is stopped. Since we delete coverage infos at that point, any
// following coverage recording (without reloads) will be at function
// granularity.
- if (FLAG_block_coverage) isolate->debug()->RemoveAllCoverageInfos();
+ isolate->debug()->RemoveAllCoverageInfos();
isolate->SetCodeCoverageList(isolate->heap()->undefined_value());
break;
case debug::Coverage::kBlockBinary:
@@ -546,6 +544,9 @@ void Coverage::SelectMode(Isolate* isolate, debug::Coverage::Mode mode) {
if (!shared->IsSubjectToDebugging()) continue;
vector->clear_invocation_count();
vectors.emplace_back(vector, isolate);
+ } else if (current_obj->IsJSFunction()) {
+ JSFunction* function = JSFunction::cast(current_obj);
+ function->set_code(function->shared()->code());
}
}
}
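The SelectMode hunk above mutates objects found during a heap walk; elsewhere in this patch (for example TypeProfile::Collect, further down) the same job is split into two phases: collect handles to the interesting objects during the walk, then process them after the iterator is gone, since mutation mid-walk is not safe in general. A reduced sketch of the collect-then-process shape, with ints standing in for heap objects:

    #include <iostream>
    #include <vector>

    int main() {
      std::vector<int> heap = {1, -2, 3, -4};

      // Phase 1: walk and collect references; no mutation during the walk.
      std::vector<int*> vectors;
      for (int& obj : heap)
        if (obj > 0) vectors.push_back(&obj);

      // Phase 2: process the collected set once the walk is over.
      for (int* v : vectors) *v = 0;  // e.g. vector->clear_invocation_count()

      for (int obj : heap) std::cout << obj << " ";  // prints: 0 -2 0 -4
    }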
diff --git a/deps/v8/src/debug/debug-coverage.h b/deps/v8/src/debug/debug-coverage.h
index 49e3d60f21..359a813375 100644
--- a/deps/v8/src/debug/debug-coverage.h
+++ b/deps/v8/src/debug/debug-coverage.h
@@ -51,17 +51,17 @@ class Coverage : public std::vector<CoverageScript> {
// In case of kPreciseCount, an updated count since last collection is
// returned. In case of kPreciseBinary, a count of 1 is returned if a
// function has been executed for the first time since last collection.
- static Coverage* CollectPrecise(Isolate* isolate);
+ static std::unique_ptr<Coverage> CollectPrecise(Isolate* isolate);
// Collecting best effort coverage always works, but may be imprecise
// depending on selected mode. The invocation count is not reset.
- static Coverage* CollectBestEffort(Isolate* isolate);
+ static std::unique_ptr<Coverage> CollectBestEffort(Isolate* isolate);
// Select code coverage mode.
static void SelectMode(Isolate* isolate, debug::Coverage::Mode mode);
private:
- static Coverage* Collect(Isolate* isolate,
- v8::debug::Coverage::Mode collectionMode);
+ static std::unique_ptr<Coverage> Collect(
+ Isolate* isolate, v8::debug::Coverage::Mode collectionMode);
Coverage() {}
};
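Returning std::unique_ptr<Coverage> rather than a raw Coverage* makes the ownership transfer explicit at every call site and removes the need for a manual delete. A minimal sketch of the same factory shape (the payload is illustrative); note that std::make_unique cannot reach a private constructor, which is why both the diff and this sketch spell out plain new:

    #include <iostream>
    #include <memory>
    #include <vector>

    class Coverage : public std::vector<int> {
     public:
      // The factory transfers sole ownership to the caller.
      static std::unique_ptr<Coverage> Collect() {
        // std::make_unique cannot call a private constructor, hence plain new.
        std::unique_ptr<Coverage> result(new Coverage());
        result->push_back(42);
        return result;
      }

     private:
      Coverage() {}  // only the factory can construct one
    };

    int main() {
      std::unique_ptr<Coverage> c = Coverage::Collect();
      std::cout << c->size() << "\n";  // freed automatically at scope exit
    }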
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index 2cc0b25d8a..3c89809356 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -5,6 +5,7 @@
#include "src/debug/debug-evaluate.h"
#include "src/accessors.h"
+#include "src/assembler-inl.h"
#include "src/compiler.h"
#include "src/contexts.h"
#include "src/debug/debug-frames.h"
@@ -157,8 +158,8 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
Handle<StringSet> non_locals = it.GetNonLocals();
MaterializeReceiver(materialized, local_context, local_function,
non_locals);
- frame_inspector.MaterializeStackLocals(materialized, local_function);
- MaterializeArgumentsObject(materialized, local_function);
+ frame_inspector.MaterializeStackLocals(materialized, local_function,
+ true);
ContextChainElement context_chain_element;
context_chain_element.scope_info = it.CurrentScopeInfo();
context_chain_element.materialized_object = materialized;
@@ -168,7 +169,7 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
if (it.HasContext()) {
context_chain_element.wrapped_context = it.CurrentContext();
}
- context_chain_.Add(context_chain_element);
+ context_chain_.push_back(context_chain_element);
evaluation_context_ = outer_context;
break;
} else if (scope_type == ScopeIterator::ScopeTypeCatch ||
@@ -179,7 +180,7 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
if (!current_context->IsDebugEvaluateContext()) {
context_chain_element.wrapped_context = current_context;
}
- context_chain_.Add(context_chain_element);
+ context_chain_.push_back(context_chain_element);
} else if (scope_type == ScopeIterator::ScopeTypeBlock ||
scope_type == ScopeIterator::ScopeTypeEval) {
Handle<JSObject> materialized = factory->NewJSObjectWithNullProto();
@@ -191,28 +192,29 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
if (it.HasContext()) {
context_chain_element.wrapped_context = it.CurrentContext();
}
- context_chain_.Add(context_chain_element);
+ context_chain_.push_back(context_chain_element);
} else {
break;
}
}
- for (int i = context_chain_.length() - 1; i >= 0; i--) {
+ for (auto rit = context_chain_.rbegin(); rit != context_chain_.rend();
+ rit++) {
+ ContextChainElement element = *rit;
Handle<ScopeInfo> scope_info(ScopeInfo::CreateForWithScope(
isolate, evaluation_context_->IsNativeContext()
? Handle<ScopeInfo>::null()
: Handle<ScopeInfo>(evaluation_context_->scope_info())));
scope_info->SetIsDebugEvaluateScope();
evaluation_context_ = factory->NewDebugEvaluateContext(
- evaluation_context_, scope_info, context_chain_[i].materialized_object,
- context_chain_[i].wrapped_context, context_chain_[i].whitelist);
+ evaluation_context_, scope_info, element.materialized_object,
+ element.wrapped_context, element.whitelist);
}
}
void DebugEvaluate::ContextBuilder::UpdateValues() {
- for (int i = 0; i < context_chain_.length(); i++) {
- ContextChainElement element = context_chain_[i];
+ for (ContextChainElement& element : context_chain_) {
if (!element.materialized_object.is_null()) {
// Write back potential changes to materialized stack locals to the stack.
FrameInspector(frame_, inlined_jsframe_index_, isolate_)
@@ -223,24 +225,6 @@ void DebugEvaluate::ContextBuilder::UpdateValues() {
}
-void DebugEvaluate::ContextBuilder::MaterializeArgumentsObject(
- Handle<JSObject> target, Handle<JSFunction> function) {
- // Do not materialize the arguments object for eval or top-level code.
- // Skip if "arguments" is already taken.
- if (function->shared()->is_toplevel()) return;
- Maybe<bool> maybe = JSReceiver::HasOwnProperty(
- target, isolate_->factory()->arguments_string());
- DCHECK(maybe.IsJust());
- if (maybe.FromJust()) return;
-
- // FunctionGetArguments can't throw an exception.
- Handle<JSObject> arguments = Accessors::FunctionGetArguments(function);
- Handle<String> arguments_str = isolate_->factory()->arguments_string();
- JSObject::SetOwnPropertyIgnoreAttributes(target, arguments_str, arguments,
- NONE)
- .Check();
-}
-
void DebugEvaluate::ContextBuilder::MaterializeReceiver(
Handle<JSObject> target, Handle<Context> local_context,
Handle<JSFunction> local_function, Handle<StringSet> non_locals) {
@@ -268,6 +252,7 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(ToString) \
V(ToLength) \
V(ToNumber) \
+ V(NumberToString) \
/* Type checks */ \
V(IsJSReceiver) \
V(IsSmi) \
@@ -298,12 +283,16 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(NewTypeError) \
V(ThrowInvalidStringLength) \
/* Strings */ \
- V(StringCharCodeAt) \
V(StringIndexOf) \
V(StringIncludes) \
V(StringReplaceOneCharWithString) \
+ V(StringToNumber) \
+ V(StringTrim) \
V(SubString) \
V(RegExpInternalReplace) \
+ /* BigInts */ \
+ V(BigIntEqual) \
+ V(BigIntToBoolean) \
/* Literals */ \
V(CreateArrayLiteral) \
V(CreateObjectLiteral) \
@@ -311,11 +300,13 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
/* Collections */ \
V(GenericHash) \
/* Called from builtins */ \
+ V(StringAdd) \
V(StringParseFloat) \
V(StringParseInt) \
- V(StringCharCodeAtRT) \
+ V(StringCharCodeAt) \
V(StringIndexOfUnchecked) \
V(StringEqual) \
+ V(RegExpInitializeAndCompile) \
V(SymbolDescriptiveString) \
V(GenerateRandomNumbers) \
V(GlobalPrint) \
@@ -332,8 +323,10 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(ThrowIncompatibleMethodReceiver) \
V(ThrowInvalidHint) \
V(ThrowNotDateError) \
+ V(ThrowRangeError) \
+ V(ToName) \
+ V(GetOwnPropertyDescriptor) \
/* Misc. */ \
- V(ForInPrepare) \
V(Call) \
V(MaxSmi) \
V(NewObject) \
@@ -385,8 +378,10 @@ bool BytecodeHasNoSideEffect(interpreter::Bytecode bytecode) {
case Bytecode::kDivSmi:
case Bytecode::kMod:
case Bytecode::kModSmi:
+ case Bytecode::kNegate:
case Bytecode::kBitwiseAnd:
case Bytecode::kBitwiseAndSmi:
+ case Bytecode::kBitwiseNot:
case Bytecode::kBitwiseOr:
case Bytecode::kBitwiseOrSmi:
case Bytecode::kBitwiseXor:
@@ -437,6 +432,7 @@ bool BytecodeHasNoSideEffect(interpreter::Bytecode bytecode) {
case Bytecode::kToNumber:
case Bytecode::kToName:
// Misc.
+ case Bytecode::kForInEnumerate:
case Bytecode::kForInPrepare:
case Bytecode::kForInContinue:
case Bytecode::kForInNext:
@@ -465,6 +461,7 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
switch (id) {
// Whitelist for builtins.
// Object builtins.
+ case Builtins::kObjectConstructor:
case Builtins::kObjectCreate:
case Builtins::kObjectEntries:
case Builtins::kObjectGetOwnPropertyDescriptor:
@@ -531,7 +528,7 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
case Builtins::kDatePrototypeValueOf:
// Map builtins.
case Builtins::kMapConstructor:
- case Builtins::kMapGet:
+ case Builtins::kMapPrototypeGet:
case Builtins::kMapPrototypeEntries:
case Builtins::kMapPrototypeGetSize:
case Builtins::kMapPrototypeKeys:
@@ -594,18 +591,32 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
case Builtins::kStringFromCharCode:
case Builtins::kStringFromCodePoint:
case Builtins::kStringConstructor:
+ case Builtins::kStringPrototypeAnchor:
+ case Builtins::kStringPrototypeBig:
+ case Builtins::kStringPrototypeBlink:
+ case Builtins::kStringPrototypeBold:
case Builtins::kStringPrototypeCharAt:
case Builtins::kStringPrototypeCharCodeAt:
case Builtins::kStringPrototypeCodePointAt:
case Builtins::kStringPrototypeConcat:
case Builtins::kStringPrototypeEndsWith:
+ case Builtins::kStringPrototypeFixed:
+ case Builtins::kStringPrototypeFontcolor:
+ case Builtins::kStringPrototypeFontsize:
case Builtins::kStringPrototypeIncludes:
case Builtins::kStringPrototypeIndexOf:
+ case Builtins::kStringPrototypeItalics:
case Builtins::kStringPrototypeLastIndexOf:
+ case Builtins::kStringPrototypeLink:
+ case Builtins::kStringPrototypeRepeat:
case Builtins::kStringPrototypeSlice:
+ case Builtins::kStringPrototypeSmall:
case Builtins::kStringPrototypeStartsWith:
+ case Builtins::kStringPrototypeStrike:
+ case Builtins::kStringPrototypeSub:
case Builtins::kStringPrototypeSubstr:
case Builtins::kStringPrototypeSubstring:
+ case Builtins::kStringPrototypeSup:
case Builtins::kStringPrototypeToString:
#ifndef V8_INTL_SUPPORT
case Builtins::kStringPrototypeToLowerCase:
@@ -615,6 +626,7 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
case Builtins::kStringPrototypeTrimLeft:
case Builtins::kStringPrototypeTrimRight:
case Builtins::kStringPrototypeValueOf:
+ case Builtins::kStringToNumber:
// Symbol builtins.
case Builtins::kSymbolConstructor:
case Builtins::kSymbolKeyFor:
@@ -695,10 +707,16 @@ bool DebugEvaluate::FunctionHasNoSideEffect(Handle<SharedFunctionInfo> info) {
return true;
} else {
// Check built-ins against whitelist.
- int builtin_index = info->code()->builtin_index();
+ int builtin_index = info->HasLazyDeserializationBuiltinId()
+ ? info->lazy_deserialization_builtin_id()
+ : info->code()->builtin_index();
+ DCHECK_NE(Builtins::kDeserializeLazy, builtin_index);
if (builtin_index >= 0 && builtin_index < Builtins::builtin_count &&
BuiltinHasNoSideEffect(static_cast<Builtins::Name>(builtin_index))) {
#ifdef DEBUG
+ if (info->code()->builtin_index() == Builtins::kDeserializeLazy) {
+ return true; // Target builtin is not yet deserialized.
+ }
// TODO(yangguo): Check builtin-to-builtin calls too.
int mode = RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE);
bool failed = false;
@@ -712,7 +730,7 @@ bool DebugEvaluate::FunctionHasNoSideEffect(Handle<SharedFunctionInfo> info) {
Builtins::name(builtin_index), function->name);
failed = true;
}
- CHECK(!failed);
+ DCHECK(!failed);
}
#endif // DEBUG
return true;
diff --git a/deps/v8/src/debug/debug-evaluate.h b/deps/v8/src/debug/debug-evaluate.h
index ba6ca7e439..6327895d57 100644
--- a/deps/v8/src/debug/debug-evaluate.h
+++ b/deps/v8/src/debug/debug-evaluate.h
@@ -5,6 +5,8 @@
#ifndef V8_DEBUG_DEBUG_EVALUATE_H_
#define V8_DEBUG_DEBUG_EVALUATE_H_
+#include <vector>
+
#include "src/frames.h"
#include "src/objects.h"
#include "src/objects/string-table.h"
@@ -66,11 +68,6 @@ class DebugEvaluate : public AllStatic {
Handle<StringSet> whitelist;
};
- // Helper function to find or create the arguments object for
- // Runtime_DebugEvaluate.
- void MaterializeArgumentsObject(Handle<JSObject> target,
- Handle<JSFunction> function);
-
void MaterializeReceiver(Handle<JSObject> target,
Handle<Context> local_context,
Handle<JSFunction> local_function,
@@ -78,7 +75,7 @@ class DebugEvaluate : public AllStatic {
Handle<SharedFunctionInfo> outer_info_;
Handle<Context> evaluation_context_;
- List<ContextChainElement> context_chain_;
+ std::vector<ContextChainElement> context_chain_;
Isolate* isolate_;
JavaScriptFrame* frame_;
int inlined_jsframe_index_;
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index 9017bcab0e..b04f8fc1bc 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -4,9 +4,10 @@
#include "src/debug/debug-frames.h"
+#include "src/accessors.h"
#include "src/frames-inl.h"
#include "src/wasm/wasm-interpreter.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
namespace internal {
@@ -37,7 +38,7 @@ FrameInspector::FrameInspector(StandardFrame* frame, int inlined_frame_index,
// Calculate the deoptimized frame.
if (is_optimized_) {
- DCHECK(js_frame != nullptr);
+ DCHECK_NOT_NULL(js_frame);
// TODO(turbofan): Deoptimization from AstGraphBuilder is not supported.
if (js_frame->LookupCode()->is_turbofanned() &&
!js_frame->function()->shared()->HasBytecodeArray()) {
@@ -108,7 +109,8 @@ void FrameInspector::SetArgumentsFrame(StandardFrame* frame) {
// Create a plain JSObject which materializes the local scope for the specified
// frame.
void FrameInspector::MaterializeStackLocals(Handle<JSObject> target,
- Handle<ScopeInfo> scope_info) {
+ Handle<ScopeInfo> scope_info,
+ bool materialize_arguments_object) {
HandleScope scope(isolate_);
// First fill all parameters.
for (int i = 0; i < scope_info->ParameterCount(); ++i) {
@@ -139,18 +141,41 @@ void FrameInspector::MaterializeStackLocals(Handle<JSObject> target,
value = isolate_->factory()->undefined_value();
}
if (value->IsOptimizedOut(isolate_)) {
+ if (materialize_arguments_object) {
+ Handle<String> arguments_str = isolate_->factory()->arguments_string();
+ if (String::Equals(name, arguments_str)) continue;
+ }
value = isolate_->factory()->undefined_value();
}
JSObject::SetOwnPropertyIgnoreAttributes(target, name, value, NONE).Check();
}
}
-
void FrameInspector::MaterializeStackLocals(Handle<JSObject> target,
- Handle<JSFunction> function) {
+ Handle<JSFunction> function,
+ bool materialize_arguments_object) {
+ // Do not materialize the arguments object for eval or top-level code.
+ if (function->shared()->is_toplevel()) materialize_arguments_object = false;
+
Handle<SharedFunctionInfo> shared(function->shared());
Handle<ScopeInfo> scope_info(shared->scope_info());
- MaterializeStackLocals(target, scope_info);
+ MaterializeStackLocals(target, scope_info, materialize_arguments_object);
+
+ // Third materialize the arguments object.
+ if (materialize_arguments_object) {
+ // Skip if "arguments" is already taken and wasn't optimized out (which
+ // causes {MaterializeStackLocals} above to skip the local variable).
+ Handle<String> arguments_str = isolate_->factory()->arguments_string();
+ Maybe<bool> maybe = JSReceiver::HasOwnProperty(target, arguments_str);
+ DCHECK(maybe.IsJust());
+ if (maybe.FromJust()) return;
+
+ // FunctionGetArguments can't throw an exception.
+ Handle<JSObject> arguments = Accessors::FunctionGetArguments(function);
+ JSObject::SetOwnPropertyIgnoreAttributes(target, arguments_str, arguments,
+ NONE)
+ .Check();
+ }
}
diff --git a/deps/v8/src/debug/debug-frames.h b/deps/v8/src/debug/debug-frames.h
index 0a0d4329b9..96593b858d 100644
--- a/deps/v8/src/debug/debug-frames.h
+++ b/deps/v8/src/debug/debug-frames.h
@@ -49,10 +49,12 @@ class FrameInspector {
void SetArgumentsFrame(StandardFrame* frame);
void MaterializeStackLocals(Handle<JSObject> target,
- Handle<ScopeInfo> scope_info);
+ Handle<ScopeInfo> scope_info,
+ bool materialize_arguments_object = false);
void MaterializeStackLocals(Handle<JSObject> target,
- Handle<JSFunction> function);
+ Handle<JSFunction> function,
+ bool materialize_arguments_object = false);
void UpdateStackLocalsFromMaterializedObject(Handle<JSObject> object,
Handle<ScopeInfo> scope_info);
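Because materialize_arguments_object defaults to false, every pre-existing MaterializeStackLocals caller keeps compiling with the old behavior, and only the debug-evaluate path opts in. A small sketch of extending a signature behind a defaulted flag (names and the int parameter are illustrative):

    #include <iostream>

    // Extending a signature without touching existing callers: the new flag
    // defaults to the old behavior.
    void MaterializeStackLocals(int target,
                                bool materialize_arguments_object = false) {
      std::cout << "target=" << target << " arguments_object=" << std::boolalpha
                << materialize_arguments_object << "\n";
    }

    int main() {
      MaterializeStackLocals(1);        // pre-existing call site, unchanged
      MaterializeStackLocals(2, true);  // the debug-evaluate path opts in
    }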
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index 78001524b2..cc321ebfa2 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -18,9 +18,12 @@ namespace internal {
struct CoverageBlock;
struct CoverageFunction;
struct CoverageScript;
+struct TypeProfileEntry;
+struct TypeProfileScript;
class Coverage;
class Script;
-}
+class TypeProfile;
+} // namespace internal
namespace debug {
@@ -244,6 +247,8 @@ class GeneratorObject {
*/
class V8_EXPORT_PRIVATE Coverage {
public:
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(Coverage);
+
enum Mode {
// Make use of existing information in feedback vectors on the heap.
// Only return a yes/no result. Optimization and GC are not affected.
@@ -269,19 +274,27 @@ class V8_EXPORT_PRIVATE Coverage {
class V8_EXPORT_PRIVATE BlockData {
public:
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(BlockData);
+
int StartOffset() const;
int EndOffset() const;
uint32_t Count() const;
private:
- explicit BlockData(i::CoverageBlock* block) : block_(block) {}
+ explicit BlockData(i::CoverageBlock* block,
+ std::shared_ptr<i::Coverage> coverage)
+ : block_(block), coverage_(std::move(coverage)) {}
+
i::CoverageBlock* block_;
+ std::shared_ptr<i::Coverage> coverage_;
friend class v8::debug::Coverage::FunctionData;
};
class V8_EXPORT_PRIVATE FunctionData {
public:
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(FunctionData);
+
int StartOffset() const;
int EndOffset() const;
uint32_t Count() const;
@@ -291,22 +304,29 @@ class V8_EXPORT_PRIVATE Coverage {
BlockData GetBlockData(size_t i) const;
private:
- explicit FunctionData(i::CoverageFunction* function)
- : function_(function) {}
+ explicit FunctionData(i::CoverageFunction* function,
+ std::shared_ptr<i::Coverage> coverage)
+ : function_(function), coverage_(std::move(coverage)) {}
+
i::CoverageFunction* function_;
+ std::shared_ptr<i::Coverage> coverage_;
friend class v8::debug::Coverage::ScriptData;
};
class V8_EXPORT_PRIVATE ScriptData {
public:
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(ScriptData);
+
Local<debug::Script> GetScript() const;
size_t FunctionCount() const;
FunctionData GetFunctionData(size_t i) const;
private:
- explicit ScriptData(i::CoverageScript* script) : script_(script) {}
+ explicit ScriptData(size_t index, std::shared_ptr<i::Coverage> c);
+
i::CoverageScript* script_;
+ std::shared_ptr<i::Coverage> coverage_;
friend class v8::debug::Coverage;
};
@@ -320,11 +340,72 @@ class V8_EXPORT_PRIVATE Coverage {
ScriptData GetScriptData(size_t i) const;
bool IsEmpty() const { return coverage_ == nullptr; }
- ~Coverage();
+ private:
+ explicit Coverage(std::shared_ptr<i::Coverage> coverage)
+ : coverage_(std::move(coverage)) {}
+ std::shared_ptr<i::Coverage> coverage_;
+};
+
+/*
+ * Provide API layer between inspector and type profile.
+ */
+class V8_EXPORT_PRIVATE TypeProfile {
+ public:
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(TypeProfile);
+
+ enum Mode {
+ kNone,
+ kCollect,
+ };
+ class ScriptData; // Forward declaration.
+
+ class V8_EXPORT_PRIVATE Entry {
+ public:
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(Entry);
+
+ int SourcePosition() const;
+ std::vector<MaybeLocal<String>> Types() const;
+
+ private:
+ explicit Entry(const i::TypeProfileEntry* entry,
+ std::shared_ptr<i::TypeProfile> type_profile)
+ : entry_(entry), type_profile_(std::move(type_profile)) {}
+
+ const i::TypeProfileEntry* entry_;
+ std::shared_ptr<i::TypeProfile> type_profile_;
+
+ friend class v8::debug::TypeProfile::ScriptData;
+ };
+
+ class V8_EXPORT_PRIVATE ScriptData {
+ public:
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(ScriptData);
+
+ Local<debug::Script> GetScript() const;
+ std::vector<Entry> Entries() const;
+
+ private:
+ explicit ScriptData(size_t index,
+ std::shared_ptr<i::TypeProfile> type_profile);
+
+ i::TypeProfileScript* script_;
+ std::shared_ptr<i::TypeProfile> type_profile_;
+
+ friend class v8::debug::TypeProfile;
+ };
+
+ static TypeProfile Collect(Isolate* isolate);
+
+ static void SelectMode(Isolate* isolate, Mode mode);
+
+ size_t ScriptCount() const;
+ ScriptData GetScriptData(size_t i) const;
private:
- explicit Coverage(i::Coverage* coverage) : coverage_(coverage) {}
- i::Coverage* coverage_;
+ explicit TypeProfile(std::shared_ptr<i::TypeProfile> type_profile)
+ : type_profile_(std::move(type_profile)) {}
+
+ std::shared_ptr<i::TypeProfile> type_profile_;
};
class ScopeIterator {
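With these changes, BlockData, FunctionData, ScriptData, and their TypeProfile counterparts become views that pair a raw pointer into the collection with a shared_ptr to the i::Coverage or i::TypeProfile that owns it, so any surviving view keeps the backing data alive even after the outer object is destroyed. A reduced sketch of that lifetime pattern, with ints standing in for coverage records:

    #include <iostream>
    #include <memory>
    #include <vector>

    using Store = std::vector<int>;

    // A view holds a raw pointer into the store plus a shared_ptr that keeps
    // the store alive for as long as any view exists.
    class View {
     public:
      View(const int* item, std::shared_ptr<Store> store)
          : item_(item), store_(std::move(store)) {}
      int value() const { return *item_; }

     private:
      const int* item_;
      std::shared_ptr<Store> store_;
    };

    View GetView(const std::shared_ptr<Store>& store, size_t i) {
      return View(&(*store)[i], store);
    }

    int main() {
      View v = GetView(std::make_shared<Store>(Store{10, 20, 30}), 1);
      // The temporary shared_ptr is gone, but the view co-owns the store.
      std::cout << v.value() << "\n";  // safely prints 20
    }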
diff --git a/deps/v8/src/debug/debug-scope-iterator.cc b/deps/v8/src/debug/debug-scope-iterator.cc
index 62c622853a..2e06dccab6 100644
--- a/deps/v8/src/debug/debug-scope-iterator.cc
+++ b/deps/v8/src/debug/debug-scope-iterator.cc
@@ -4,20 +4,33 @@
#include "src/debug/debug-scope-iterator.h"
+#include "src/api.h"
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
#include "src/frames-inl.h"
#include "src/isolate.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
std::unique_ptr<debug::ScopeIterator> debug::ScopeIterator::CreateForFunction(
v8::Isolate* v8_isolate, v8::Local<v8::Function> v8_func) {
+ internal::Handle<internal::JSReceiver> receiver =
+ internal::Handle<internal::JSReceiver>::cast(Utils::OpenHandle(*v8_func));
+
+ // Besides JSFunction and JSBoundFunction, {v8_func} could be an
+ // ObjectTemplate with a CallAsFunctionHandler. We only handle plain
+ // JSFunctions.
+ if (!receiver->IsJSFunction()) return nullptr;
+
+ internal::Handle<internal::JSFunction> function =
+ internal::Handle<internal::JSFunction>::cast(receiver);
+
+  // Blink has function objects whose map is callable
+  // (JS_SPECIAL_API_OBJECT_TYPE) but which have no context on the heap.
+ if (!function->has_context()) return nullptr;
return std::unique_ptr<debug::ScopeIterator>(new internal::DebugScopeIterator(
- reinterpret_cast<internal::Isolate*>(v8_isolate),
- internal::Handle<internal::JSFunction>::cast(
- Utils::OpenHandle(*v8_func))));
+ reinterpret_cast<internal::Isolate*>(v8_isolate), function));
}
std::unique_ptr<debug::ScopeIterator>
@@ -26,7 +39,6 @@ debug::ScopeIterator::CreateForGeneratorObject(
internal::Handle<internal::Object> generator =
Utils::OpenHandle(*v8_generator);
DCHECK(generator->IsJSGeneratorObject());
-
return std::unique_ptr<debug::ScopeIterator>(new internal::DebugScopeIterator(
reinterpret_cast<internal::Isolate*>(v8_isolate),
internal::Handle<internal::JSGeneratorObject>::cast(generator)));
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index 5c6775ccf9..0fcb20a645 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -23,7 +23,6 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
ScopeIterator::Option option)
: isolate_(isolate),
frame_inspector_(frame_inspector),
- nested_scope_chain_(4),
seen_script_scope_(false) {
if (!frame_inspector->GetContext()->IsContext()) {
// Optimized frame, context or function cannot be materialized. Give up.
@@ -83,9 +82,9 @@ void ScopeIterator::TryParseAndRetrieveScopes(ScopeIterator::Option option) {
}
}
if (scope_info->scope_type() == FUNCTION_SCOPE) {
- nested_scope_chain_.Add(ExtendedScopeInfo(scope_info,
- shared_info->start_position(),
- shared_info->end_position()));
+ nested_scope_chain_.emplace_back(scope_info,
+ shared_info->start_position(),
+ shared_info->end_position());
}
if (!collect_non_locals) return;
}
@@ -245,7 +244,7 @@ void ScopeIterator::Next() {
}
if (HasNestedScopeChain()) {
DCHECK_EQ(LastNestedScopeChain().scope_info->scope_type(), SCRIPT_SCOPE);
- nested_scope_chain_.RemoveLast();
+ nested_scope_chain_.pop_back();
DCHECK(!HasNestedScopeChain());
}
CHECK(context_->IsNativeContext());
@@ -257,7 +256,7 @@ void ScopeIterator::Next() {
DCHECK(context_->previous() != NULL);
context_ = Handle<Context>(context_->previous(), isolate_);
}
- nested_scope_chain_.RemoveLast();
+ nested_scope_chain_.pop_back();
if (!HasNestedScopeChain()) break;
// Repeat to skip hidden scopes.
} while (LastNestedScopeChain().is_hidden());
@@ -331,7 +330,7 @@ MaybeHandle<JSObject> ScopeIterator::ScopeObject() {
return MaterializeScriptScope();
case ScopeIterator::ScopeTypeLocal:
// Materialize the content of the local scope into a JSObject.
- DCHECK(nested_scope_chain_.length() == 1);
+ DCHECK_EQ(1, nested_scope_chain_.size());
return MaterializeLocalScope();
case ScopeIterator::ScopeTypeWith:
return WithContextExtension();
@@ -409,7 +408,7 @@ Handle<Context> ScopeIterator::CurrentContext() {
} else if (LastNestedScopeChain().scope_info->HasContext()) {
return context_;
} else {
- return Handle<Context>();
+ return Handle<Context>::null();
}
}
@@ -965,10 +964,10 @@ void ScopeIterator::GetNestedScopeChain(Isolate* isolate, Scope* scope,
if (scope->is_hidden()) {
// We need to add this chain element in case the scope has a context
// associated. We need to keep the scope chain and context chain in sync.
- nested_scope_chain_.Add(ExtendedScopeInfo(scope->scope_info()));
+ nested_scope_chain_.emplace_back(scope->scope_info());
} else {
- nested_scope_chain_.Add(ExtendedScopeInfo(
- scope->scope_info(), scope->start_position(), scope->end_position()));
+ nested_scope_chain_.emplace_back(
+ scope->scope_info(), scope->start_position(), scope->end_position());
}
for (Scope* inner_scope = scope->inner_scope(); inner_scope != nullptr;
inner_scope = inner_scope->sibling()) {
@@ -983,12 +982,12 @@ void ScopeIterator::GetNestedScopeChain(Isolate* isolate, Scope* scope,
}
bool ScopeIterator::HasNestedScopeChain() {
- return !nested_scope_chain_.is_empty();
+ return !nested_scope_chain_.empty();
}
ScopeIterator::ExtendedScopeInfo& ScopeIterator::LastNestedScopeChain() {
DCHECK(HasNestedScopeChain());
- return nested_scope_chain_.last();
+ return nested_scope_chain_.back();
}
} // namespace internal
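The conversions in this file follow the mechanical List-to-std::vector mapping used across the patch (here and in debug.cc below): Add becomes push_back or emplace_back, RemoveLast becomes pop_back, is_empty becomes empty, last becomes back, and length becomes size. A compact sketch, assuming nothing beyond the standard library:

    #include <cassert>
    #include <vector>

    int main() {
      std::vector<int> chain;      // was: List<int> chain(4);
      chain.emplace_back(1);       // was: chain.Add(1);
      chain.push_back(2);
      assert(!chain.empty());      // was: !chain.is_empty()
      assert(chain.back() == 2);   // was: chain.last()
      assert(chain.size() == 2u);  // was: chain.length() == 2
      chain.pop_back();            // was: chain.RemoveLast();
      assert(chain.size() == 1u);
      return 0;
    }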
diff --git a/deps/v8/src/debug/debug-scopes.h b/deps/v8/src/debug/debug-scopes.h
index 76e083fe87..9321b8f995 100644
--- a/deps/v8/src/debug/debug-scopes.h
+++ b/deps/v8/src/debug/debug-scopes.h
@@ -5,6 +5,8 @@
#ifndef V8_DEBUG_DEBUG_SCOPES_H_
#define V8_DEBUG_DEBUG_SCOPES_H_
+#include <vector>
+
#include "src/debug/debug-frames.h"
#include "src/frames.h"
@@ -101,7 +103,7 @@ class ScopeIterator {
FrameInspector* const frame_inspector_ = nullptr;
Handle<JSGeneratorObject> generator_;
Handle<Context> context_;
- List<ExtendedScopeInfo> nested_scope_chain_;
+ std::vector<ExtendedScopeInfo> nested_scope_chain_;
Handle<StringSet> non_locals_;
bool seen_script_scope_;
diff --git a/deps/v8/src/debug/debug-stack-trace-iterator.cc b/deps/v8/src/debug/debug-stack-trace-iterator.cc
index 74ba3373d4..867436d1de 100644
--- a/deps/v8/src/debug/debug-stack-trace-iterator.cc
+++ b/deps/v8/src/debug/debug-stack-trace-iterator.cc
@@ -4,6 +4,7 @@
#include "src/debug/debug-stack-trace-iterator.h"
+#include "src/api.h"
#include "src/debug/debug-evaluate.h"
#include "src/debug/debug-scope-iterator.h"
#include "src/debug/debug.h"
diff --git a/deps/v8/src/debug/debug-type-profile.cc b/deps/v8/src/debug/debug-type-profile.cc
new file mode 100644
index 0000000000..ef4f5ba3d7
--- /dev/null
+++ b/deps/v8/src/debug/debug-type-profile.cc
@@ -0,0 +1,102 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/debug/debug-type-profile.h"
+
+#include "src/feedback-vector.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+std::unique_ptr<TypeProfile> TypeProfile::Collect(Isolate* isolate) {
+ std::unique_ptr<TypeProfile> result(new TypeProfile());
+
+ // Collect existing feedback vectors.
+ std::vector<Handle<FeedbackVector>> feedback_vectors;
+ {
+ HeapIterator heap_iterator(isolate->heap());
+ while (HeapObject* current_obj = heap_iterator.next()) {
+ if (current_obj->IsFeedbackVector()) {
+ FeedbackVector* vector = FeedbackVector::cast(current_obj);
+ SharedFunctionInfo* shared = vector->shared_function_info();
+ if (!shared->IsSubjectToDebugging()) continue;
+ feedback_vectors.emplace_back(vector, isolate);
+ }
+ }
+ }
+
+ Script::Iterator scripts(isolate);
+
+ while (Script* script = scripts.Next()) {
+ if (!script->IsUserJavaScript()) {
+ continue;
+ }
+
+ Handle<Script> script_handle(script, isolate);
+
+ TypeProfileScript type_profile_script(script_handle);
+ std::vector<TypeProfileEntry>* entries = &type_profile_script.entries;
+
+ for (const auto& vector : feedback_vectors) {
+ SharedFunctionInfo* info = vector->shared_function_info();
+ DCHECK(info->IsSubjectToDebugging());
+
+ // Match vectors with script.
+ if (script != info->script()) {
+ continue;
+ }
+ if (info->feedback_metadata()->is_empty() ||
+ !info->feedback_metadata()->HasTypeProfileSlot()) {
+ continue;
+ }
+ FeedbackSlot slot = vector->GetTypeProfileSlot();
+ CollectTypeProfileNexus nexus(vector, slot);
+ Handle<String> name(info->DebugName(), isolate);
+ std::vector<int> source_positions = nexus.GetSourcePositions();
+ for (int position : source_positions) {
+ DCHECK_GE(position, 0);
+ entries->emplace_back(position, nexus.GetTypesForSourcePositions(
+ static_cast<uint32_t>(position)));
+ }
+
+      // Release type profile data collected so far.
+ nexus.Clear();
+ }
+ if (!entries->empty()) {
+ result->emplace_back(type_profile_script);
+ }
+ }
+ return result;
+}
+
+void TypeProfile::SelectMode(Isolate* isolate, debug::TypeProfile::Mode mode) {
+ isolate->set_type_profile_mode(mode);
+ HandleScope handle_scope(isolate);
+
+ if (mode == debug::TypeProfile::Mode::kNone) {
+ // Release type profile data collected so far.
+ {
+ HeapIterator heap_iterator(isolate->heap());
+ while (HeapObject* current_obj = heap_iterator.next()) {
+ if (current_obj->IsFeedbackVector()) {
+ FeedbackVector* vector = FeedbackVector::cast(current_obj);
+ SharedFunctionInfo* info = vector->shared_function_info();
+ if (!info->IsSubjectToDebugging() ||
+ info->feedback_metadata()->is_empty() ||
+ !info->feedback_metadata()->HasTypeProfileSlot())
+ continue;
+ FeedbackSlot slot = vector->GetTypeProfileSlot();
+ CollectTypeProfileNexus nexus(vector, slot);
+ nexus.Clear();
+ }
+ }
+ }
+ }
+}
+
+} // namespace internal
+} // namespace v8
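The structures behind this new file are plain aggregates: TypeProfile is a vector of TypeProfileScript, each carrying a script handle plus its list of TypeProfileEntry records. A standalone model of walking a collected profile, with std::string standing in for Handle<String> and Handle<Script> so the sketch runs without an isolate:

    #include <iostream>
    #include <string>
    #include <vector>

    // Standalone model of the aggregates above.
    struct TypeProfileEntry {
      int position;
      std::vector<std::string> types;
    };
    struct TypeProfileScript {
      std::string script;
      std::vector<TypeProfileEntry> entries;
    };
    using TypeProfile = std::vector<TypeProfileScript>;

    int main() {
      TypeProfile profile = {
          {"a.js", {{10, {"number", "string"}}, {42, {"Object"}}}}};
      for (const TypeProfileScript& script : profile) {
        for (const TypeProfileEntry& entry : script.entries) {
          std::cout << script.script << ":" << entry.position << " ->";
          for (const std::string& type : entry.types) std::cout << " " << type;
          std::cout << "\n";
        }
      }
    }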
diff --git a/deps/v8/src/debug/debug-type-profile.h b/deps/v8/src/debug/debug-type-profile.h
new file mode 100644
index 0000000000..de18951381
--- /dev/null
+++ b/deps/v8/src/debug/debug-type-profile.h
@@ -0,0 +1,45 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEBUG_DEBUG_TYPE_PROFILE_H_
+#define V8_DEBUG_DEBUG_TYPE_PROFILE_H_
+
+#include <vector>
+
+#include "src/debug/debug-interface.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declaration.
+class Isolate;
+
+struct TypeProfileEntry {
+ explicit TypeProfileEntry(
+ int pos, std::vector<v8::internal::Handle<internal::String>> t)
+ : position(pos), types(std::move(t)) {}
+ int position;
+ std::vector<v8::internal::Handle<internal::String>> types;
+};
+
+struct TypeProfileScript {
+ explicit TypeProfileScript(Handle<Script> s) : script(s) {}
+ Handle<Script> script;
+ std::vector<TypeProfileEntry> entries;
+};
+
+class TypeProfile : public std::vector<TypeProfileScript> {
+ public:
+ static std::unique_ptr<TypeProfile> Collect(Isolate* isolate);
+ static void SelectMode(Isolate* isolate, debug::TypeProfile::Mode mode);
+
+ private:
+ TypeProfile() {}
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_DEBUG_DEBUG_TYPE_PROFILE_H_
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 966be62e63..1d50226e72 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -24,13 +24,11 @@
#include "src/globals.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
-#include "src/list.h"
#include "src/log.h"
#include "src/messages.h"
#include "src/objects/debug-objects-inl.h"
#include "src/snapshot/natives.h"
-#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-objects-inl.h"
#include "include/v8-debug.h"
@@ -64,9 +62,9 @@ BreakLocation BreakLocation::FromFrame(Handle<DebugInfo> debug_info,
return it.GetBreakLocation();
}
-void BreakLocation::AllAtCurrentStatement(Handle<DebugInfo> debug_info,
- JavaScriptFrame* frame,
- List<BreakLocation>* result_out) {
+void BreakLocation::AllAtCurrentStatement(
+ Handle<DebugInfo> debug_info, JavaScriptFrame* frame,
+ std::vector<BreakLocation>* result_out) {
auto summary = FrameSummary::GetTop(frame).AsJavaScript();
int offset = summary.code_offset();
Handle<AbstractCode> abstract_code = summary.abstract_code();
@@ -79,7 +77,7 @@ void BreakLocation::AllAtCurrentStatement(Handle<DebugInfo> debug_info,
}
for (BreakIterator it(debug_info); !it.Done(); it.Next()) {
if (it.statement_position() == statement_position) {
- result_out->Add(it.GetBreakLocation());
+ result_out->push_back(it.GetBreakLocation());
}
}
}
@@ -170,8 +168,8 @@ void BreakIterator::Next() {
if (source_position_iterator_.is_statement()) {
statement_position_ = position_;
}
- DCHECK(position_ >= 0);
- DCHECK(statement_position_ >= 0);
+ DCHECK_LE(0, position_);
+ DCHECK_LE(0, statement_position_);
DebugBreakType type = GetDebugBreakType();
if (type != NOT_DEBUG_BREAK) break;
@@ -338,7 +336,7 @@ bool Debug::Load() {
void Debug::Unload() {
ClearAllBreakPoints();
ClearStepping();
- if (FLAG_block_coverage) RemoveAllCoverageInfos();
+ RemoveAllCoverageInfos();
RemoveDebugDelegate();
// Return debugger is not loaded.
@@ -469,10 +467,10 @@ bool Debug::IsMutedAtCurrentLocation(JavaScriptFrame* frame) {
// Enter the debugger.
DebugScope debug_scope(this);
if (debug_scope.failed()) return false;
- List<BreakLocation> break_locations;
+ std::vector<BreakLocation> break_locations;
BreakLocation::AllAtCurrentStatement(debug_info, frame, &break_locations);
bool has_break_points_at_all = false;
- for (int i = 0; i < break_locations.length(); i++) {
+ for (size_t i = 0; i < break_locations.size(); i++) {
bool has_break_points;
MaybeHandle<FixedArray> check_result =
CheckBreakPoints(debug_info, &break_locations[i], &has_break_points);
@@ -557,16 +555,16 @@ bool Debug::SetBreakPoint(Handle<JSFunction> function,
// Make sure the function is compiled and has set up the debug info.
Handle<SharedFunctionInfo> shared(function->shared());
if (!EnsureBreakInfo(shared)) return true;
- CHECK(PrepareFunctionForBreakPoints(shared));
+ PrepareFunctionForBreakPoints(shared);
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
// Source positions starts with zero.
- DCHECK(*source_position >= 0);
+ DCHECK_LE(0, *source_position);
// Find the break point and change it.
*source_position = FindBreakablePosition(debug_info, *source_position);
DebugInfo::SetBreakPoint(debug_info, *source_position, break_point_object);
// At least one active break point now.
- DCHECK(debug_info->GetBreakPointCount() > 0);
+ DCHECK_LT(0, debug_info->GetBreakPointCount());
ClearBreakPoints(debug_info);
ApplyBreakPoints(debug_info);
@@ -595,7 +593,7 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
// Make sure the function has set up the debug info.
Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>::cast(result);
if (!EnsureBreakInfo(shared)) return false;
- CHECK(PrepareFunctionForBreakPoints(shared));
+ PrepareFunctionForBreakPoints(shared);
// Find position within function. The script position might be before the
// source position of the first function.
@@ -609,7 +607,7 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
*source_position = FindBreakablePosition(debug_info, *source_position);
DebugInfo::SetBreakPoint(debug_info, *source_position, break_point_object);
// At least one active break point now.
- DCHECK(debug_info->GetBreakPointCount() > 0);
+ DCHECK_LT(0, debug_info->GetBreakPointCount());
ClearBreakPoints(debug_info);
ApplyBreakPoints(debug_info);
@@ -700,7 +698,7 @@ void Debug::FloodWithOneShot(Handle<SharedFunctionInfo> shared,
if (IsBlackboxed(shared)) return;
// Make sure the function is compiled and has set up the debug info.
if (!EnsureBreakInfo(shared)) return;
- CHECK(PrepareFunctionForBreakPoints(shared));
+ PrepareFunctionForBreakPoints(shared);
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
// Flood the function with break points.
DCHECK(debug_info->HasDebugBytecodeArray());
@@ -1041,10 +1039,8 @@ class RedirectActiveFunctions : public ThreadVisitor {
for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
JSFunction* function = frame->function();
- if (frame->is_optimized()) continue;
- if (!function->Inlines(shared_)) continue;
-
- DCHECK(frame->is_interpreted());
+ if (!frame->is_interpreted()) continue;
+ if (function->shared() != shared_) continue;
InterpretedFrame* interpreted_frame =
reinterpret_cast<InterpretedFrame*>(frame);
BytecodeArray* debug_copy = shared_->GetDebugInfo()->DebugBytecodeArray();
@@ -1057,17 +1053,9 @@ class RedirectActiveFunctions : public ThreadVisitor {
DisallowHeapAllocation no_gc_;
};
-
-bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
- // To prepare bytecode for debugging, we already need to have the debug
- // info (containing the debug copy) upfront, but since we do not recompile,
- // preparing for break points cannot fail.
- DCHECK(shared->is_compiled());
- DCHECK(shared->HasDebugInfo());
- DCHECK(shared->HasBreakInfo());
- Handle<DebugInfo> debug_info = GetOrCreateDebugInfo(shared);
- if (debug_info->IsPreparedForBreakpoints()) return true;
-
+void Debug::DeoptimizeFunction(Handle<SharedFunctionInfo> shared) {
+ // Deoptimize all code compiled from this shared function info including
+ // inlining.
if (isolate_->concurrent_recompilation_enabled()) {
isolate_->optimizing_compile_dispatcher()->Flush(
OptimizingCompileDispatcher::BlockingBehavior::kBlock);
@@ -1077,28 +1065,32 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
GarbageCollectionReason::kDebugger);
- DCHECK(shared->is_compiled());
- {
- // TODO(yangguo): with bytecode, we still walk the heap to find all
- // optimized code for the function to deoptimize. We can probably be
- // smarter here and avoid the heap walk.
- HeapIterator iterator(isolate_->heap());
- HeapObject* obj;
-
- while ((obj = iterator.next()) != nullptr) {
- if (obj->IsJSFunction()) {
- JSFunction* function = JSFunction::cast(obj);
- if (!function->Inlines(*shared)) continue;
- if (function->has_feedback_vector()) {
- function->ClearOptimizedCodeSlot("Prepare for breakpoints");
- }
- if (function->code()->kind() == Code::OPTIMIZED_FUNCTION) {
- Deoptimizer::DeoptimizeFunction(function);
- }
- }
+ bool found_something = false;
+ Code::OptimizedCodeIterator iterator(isolate_);
+ while (Code* code = iterator.Next()) {
+ if (code->Inlines(*shared)) {
+ code->set_marked_for_deoptimization(true);
+ found_something = true;
}
}
+ if (found_something) {
+ // Only go through with the deoptimization if something was found.
+ Deoptimizer::DeoptimizeMarkedCode(isolate_);
+ }
+}
+
+void Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
+ // To prepare bytecode for debugging, we already need to have the debug
+ // info (containing the debug copy) upfront, but since we do not recompile,
+ // preparing for break points cannot fail.
+ DCHECK(shared->is_compiled());
+ DCHECK(shared->HasDebugInfo());
+ DCHECK(shared->HasBreakInfo());
+ Handle<DebugInfo> debug_info = GetOrCreateDebugInfo(shared);
+ if (debug_info->IsPreparedForBreakpoints()) return;
+
+ DeoptimizeFunction(shared);
// Update PCs on the stack to point to recompiled code.
RedirectActiveFunctions redirect_visitor(*shared);
redirect_visitor.VisitThread(isolate_, isolate_->thread_local_top());
@@ -1106,7 +1098,6 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
debug_info->set_flags(debug_info->flags() |
DebugInfo::kPreparedForBreakpoints);
- return true;
}
namespace {
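The new Debug::DeoptimizeFunction above (also reused by liveedit.cc later in this diff) works in two phases: mark every optimized Code object that inlines the target, then make a single DeoptimizeMarkedCode call, and only if anything was actually marked. A standalone sketch of that mark-then-sweep shape, with Code reduced to a struct and an int standing in for the SharedFunctionInfo:

    #include <iostream>
    #include <vector>

    struct Code {
      int inlined_target;  // stand-in for Code::Inlines(shared)
      bool marked_for_deoptimization = false;
    };

    void DeoptimizeMarkedCode(std::vector<Code>& codes) {
      for (Code& code : codes)
        if (code.marked_for_deoptimization) std::cout << "deopt\n";
    }

    void DeoptimizeFunction(std::vector<Code>& codes, int shared) {
      bool found_something = false;
      for (Code& code : codes) {  // phase 1: mark matching code objects
        if (code.inlined_target == shared) {
          code.marked_for_deoptimization = true;
          found_something = true;
        }
      }
      if (found_something) {  // phase 2: one batched deopt, only if needed
        DeoptimizeMarkedCode(codes);
      }
    }

    int main() {
      std::vector<Code> codes = {{1}, {2}, {1}};
      DeoptimizeFunction(codes, 1);  // prints "deopt" twice
    }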
@@ -1353,7 +1344,6 @@ Handle<DebugInfo> Debug::GetOrCreateDebugInfo(
void Debug::InstallCoverageInfo(Handle<SharedFunctionInfo> shared,
Handle<CoverageInfo> coverage_info) {
- DCHECK(FLAG_block_coverage);
DCHECK(!coverage_info.is_null());
Handle<DebugInfo> debug_info = GetOrCreateDebugInfo(shared);
@@ -1365,7 +1355,6 @@ void Debug::InstallCoverageInfo(Handle<SharedFunctionInfo> shared,
}
void Debug::RemoveAllCoverageInfos() {
- DCHECK(FLAG_block_coverage);
ClearAllDebugInfos(
[=](Handle<DebugInfo> info) { return info->ClearCoverageInfo(); });
}
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index 9601fa7899..154c381729 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -5,6 +5,8 @@
#ifndef V8_DEBUG_DEBUG_H_
#define V8_DEBUG_DEBUG_H_
+#include <vector>
+
#include "src/allocation.h"
#include "src/assembler.h"
#include "src/base/atomicops.h"
@@ -70,7 +72,7 @@ class BreakLocation {
static void AllAtCurrentStatement(Handle<DebugInfo> debug_info,
JavaScriptFrame* frame,
- List<BreakLocation>* result_out);
+ std::vector<BreakLocation>* result_out);
inline bool IsReturn() const { return type_ == DEBUG_BREAK_SLOT_AT_RETURN; }
inline bool IsCall() const { return type_ == DEBUG_BREAK_SLOT_AT_CALL; }
@@ -250,7 +252,8 @@ class Debug {
void ClearStepping();
void ClearStepOut();
- bool PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared);
+ void DeoptimizeFunction(Handle<SharedFunctionInfo> shared);
+ void PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared);
bool GetPossibleBreakpoints(Handle<Script> script, int start_position,
int end_position, bool restrict_to_function,
std::vector<BreakLocation>* locations);
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index ac5229b419..e20e56cd75 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -163,7 +163,7 @@ class Differencer {
// Each cell keeps a value plus direction. Value is multiplied by 4.
void set_value4_and_dir(int i1, int i2, int value4, Direction dir) {
- DCHECK((value4 & kDirectionMask) == 0);
+ DCHECK_EQ(0, value4 & kDirectionMask);
get_cell(i1, i2) = value4 | dir;
}
@@ -806,41 +806,6 @@ class FeedbackVectorFixer {
};
-// Marks code that shares the same shared function info or has inlined
-// code that shares the same function info.
-class DependentFunctionMarker: public OptimizedFunctionVisitor {
- public:
- SharedFunctionInfo* shared_info_;
- bool found_;
-
- explicit DependentFunctionMarker(SharedFunctionInfo* shared_info)
- : shared_info_(shared_info), found_(false) { }
-
- virtual void VisitFunction(JSFunction* function) {
- // It should be guaranteed by the iterator that everything is optimized.
- DCHECK(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
- if (function->Inlines(shared_info_)) {
- // Mark the code for deoptimization.
- function->code()->set_marked_for_deoptimization(true);
- found_ = true;
- }
- }
-};
-
-
-static void DeoptimizeDependentFunctions(SharedFunctionInfo* function_info) {
- DisallowHeapAllocation no_allocation;
- DependentFunctionMarker marker(function_info);
- // TODO(titzer): need to traverse all optimized code to find OSR code here.
- Deoptimizer::VisitAllOptimizedFunctions(function_info->GetIsolate(), &marker);
-
- if (marker.found_) {
- // Only go through with the deoptimization if something was found.
- Deoptimizer::DeoptimizeMarkedCode(function_info->GetIsolate());
- }
-}
-
-
void LiveEdit::ReplaceFunctionCode(
Handle<JSArray> new_compile_info_array,
Handle<JSArray> shared_info_array) {
@@ -889,8 +854,7 @@ void LiveEdit::ReplaceFunctionCode(
FeedbackVectorFixer::PatchFeedbackVector(&compile_info_wrapper, shared_info,
isolate);
- DeoptimizeDependentFunctions(*shared_info);
- isolate->compilation_cache()->Remove(shared_info);
+ isolate->debug()->DeoptimizeFunction(shared_info);
}
void LiveEdit::FunctionSourceUpdated(Handle<JSArray> shared_info_array,
@@ -899,8 +863,7 @@ void LiveEdit::FunctionSourceUpdated(Handle<JSArray> shared_info_array,
Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
shared_info->set_function_literal_id(new_function_literal_id);
- DeoptimizeDependentFunctions(*shared_info);
- shared_info_array->GetIsolate()->compilation_cache()->Remove(shared_info);
+ shared_info_array->GetIsolate()->debug()->DeoptimizeFunction(shared_info);
}
void LiveEdit::FixupScript(Handle<Script> script, int max_function_literal_id) {
@@ -1022,11 +985,6 @@ void LiveEdit::PatchFunctionPositions(Handle<JSArray> shared_info_array,
Handle<AbstractCode>(AbstractCode::cast(info->bytecode_array())),
position_change_array);
}
- if (info->code()->kind() == Code::FUNCTION) {
- TranslateSourcePositionTable(
- Handle<AbstractCode>(AbstractCode::cast(info->code())),
- position_change_array);
- }
if (info->HasBreakInfo()) {
// Existing break points will be re-applied. Reset the debug info here.
info->GetIsolate()->debug()->RemoveBreakInfoAndMaybeFree(
@@ -1126,7 +1084,9 @@ static bool CheckActivation(Handle<JSArray> shared_info_array,
Handle<SharedFunctionInfo> shared =
UnwrapSharedFunctionInfoFromJSValue(jsvalue);
- if (function->Inlines(*shared)) {
+ if (function->shared() == *shared ||
+ (function->code()->is_optimized_code() &&
+ function->code()->Inlines(*shared))) {
SetElementSloppy(result, i, Handle<Smi>(Smi::FromInt(status), isolate));
return true;
}
diff --git a/deps/v8/src/deoptimize-reason.h b/deps/v8/src/deoptimize-reason.h
index d9ac7af7ef..62bd4f68cf 100644
--- a/deps/v8/src/deoptimize-reason.h
+++ b/deps/v8/src/deoptimize-reason.h
@@ -28,16 +28,14 @@ namespace internal {
"Insufficient type feedback for construct") \
V(FastPathFailed, "Falling off the fast path") \
V(InsufficientTypeFeedbackForForIn, "Insufficient type feedback for for-in") \
- V(InsufficientTypeFeedbackForCombinedTypeOfBinaryOperation, \
- "Insufficient type feedback for combined type of binary operation") \
+ V(InsufficientTypeFeedbackForBinaryOperation, \
+ "Insufficient type feedback for binary operation") \
+ V(InsufficientTypeFeedbackForCompareOperation, \
+ "Insufficient type feedback for compare operation") \
V(InsufficientTypeFeedbackForGenericNamedAccess, \
"Insufficient type feedback for generic named access") \
V(InsufficientTypeFeedbackForGenericKeyedAccess, \
"Insufficient type feedback for generic keyed access") \
- V(InsufficientTypeFeedbackForLHSOfBinaryOperation, \
- "Insufficient type feedback for LHS of binary operation") \
- V(InsufficientTypeFeedbackForRHSOfBinaryOperation, \
- "Insufficient type feedback for RHS of binary operation") \
V(KeyIsNegative, "key is negative") \
V(LostPrecision, "lost precision") \
V(LostPrecisionOrNaN, "lost precision or NaN") \
@@ -68,7 +66,6 @@ namespace internal {
V(UnexpectedCellContentsInGlobalStore, \
"Unexpected cell contents in global store") \
V(UnexpectedObject, "unexpected object") \
- V(UnexpectedRHSOfBinaryOperation, "Unexpected RHS of binary operation") \
V(UnknownMapInPolymorphicAccess, "Unknown map in polymorphic access") \
V(UnknownMapInPolymorphicCall, "Unknown map in polymorphic call") \
V(UnknownMapInPolymorphicElementAccess, \
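Aside: deoptimize-reason.h drives both an enum and its human-readable messages from the single V(Name, message) list patched above. A minimal, self-contained sketch of that X-macro expansion (a simplified two-entry list, not V8's actual definitions):

    #define DEOPT_REASON_LIST(V)                             \
      V(InsufficientTypeFeedbackForBinaryOperation,          \
        "Insufficient type feedback for binary operation")   \
      V(InsufficientTypeFeedbackForCompareOperation,         \
        "Insufficient type feedback for compare operation")

    // Expand the list once into enumerators...
    enum class DeoptimizeReason : unsigned char {
    #define DEOPT_REASON(Name, message) k##Name,
      DEOPT_REASON_LIST(DEOPT_REASON)
    #undef DEOPT_REASON
    };

    // ...and once more into the matching message table.
    inline const char* DeoptimizeReasonToString(DeoptimizeReason reason) {
      static const char* const kMessages[] = {
    #define DEOPT_REASON(Name, message) message,
          DEOPT_REASON_LIST(DEOPT_REASON)
    #undef DEOPT_REASON
      };
      return kMessages[static_cast<int>(reason)];
    }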
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 93a21a7b3a..125ca932f7 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -144,129 +144,59 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
generator.Generate();
}
-void Deoptimizer::VisitAllOptimizedFunctionsForContext(
- Context* context, OptimizedFunctionVisitor* visitor) {
- DisallowHeapAllocation no_allocation;
-
- CHECK(context->IsNativeContext());
-
- // Visit the list of optimized functions, removing elements that
- // no longer refer to optimized code.
- JSFunction* prev = NULL;
- Object* element = context->OptimizedFunctionsListHead();
- Isolate* isolate = context->GetIsolate();
- while (!element->IsUndefined(isolate)) {
- JSFunction* function = JSFunction::cast(element);
- Object* next = function->next_function_link();
- if (function->code()->kind() != Code::OPTIMIZED_FUNCTION ||
- (visitor->VisitFunction(function),
- function->code()->kind() != Code::OPTIMIZED_FUNCTION)) {
- // The function no longer refers to optimized code, or the visitor
- // changed the code to which it refers to no longer be optimized code.
- // Remove the function from this list.
- if (prev != NULL) {
- prev->set_next_function_link(next, UPDATE_WEAK_WRITE_BARRIER);
- } else {
- context->SetOptimizedFunctionsListHead(next);
- }
- // The visitor should not alter the link directly.
- CHECK_EQ(function->next_function_link(), next);
- // Set the next function link to undefined to indicate it is no longer
- // in the optimized functions list.
- function->set_next_function_link(context->GetHeap()->undefined_value(),
- SKIP_WRITE_BARRIER);
- } else {
- // The visitor should not alter the link directly.
- CHECK_EQ(function->next_function_link(), next);
- // preserve this element.
- prev = function;
- }
- element = next;
+namespace {
+class ActivationsFinder : public ThreadVisitor {
+ public:
+ explicit ActivationsFinder(std::set<Code*>* codes,
+ Code* topmost_optimized_code,
+ bool safe_to_deopt_topmost_optimized_code)
+ : codes_(codes) {
+#ifdef DEBUG
+ topmost_ = topmost_optimized_code;
+ safe_to_deopt_ = safe_to_deopt_topmost_optimized_code;
+#endif
}
-}
-void Deoptimizer::UnlinkOptimizedCode(Code* code, Context* native_context) {
- class CodeUnlinker : public OptimizedFunctionVisitor {
- public:
- explicit CodeUnlinker(Code* code) : code_(code) {}
-
- virtual void VisitFunction(JSFunction* function) {
- if (function->code() == code_) {
- if (FLAG_trace_deopt) {
- PrintF("[removing optimized code for: ");
- function->ShortPrint();
- PrintF("]\n");
+ // Find the frames with activations of code objects marked for
+ // deoptimization, look up the trampoline to the deoptimizer call for each
+ // such code object, and use it to replace the current pc on the stack.
+ void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
+ for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
+ if (it.frame()->type() == StackFrame::OPTIMIZED) {
+ Code* code = it.frame()->LookupCode();
+ if (code->kind() == Code::OPTIMIZED_FUNCTION &&
+ code->marked_for_deoptimization()) {
+ codes_->erase(code);
+ // Obtain the trampoline to the deoptimizer call.
+ SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
+ int trampoline_pc = safepoint.trampoline_pc();
+ DCHECK_IMPLIES(code == topmost_, safe_to_deopt_);
+ // Replace the current pc on the stack with the trampoline.
+ it.frame()->set_pc(code->instruction_start() + trampoline_pc);
}
- function->set_code(function->shared()->code());
}
}
+ }
- private:
- Code* code_;
- };
- CodeUnlinker unlinker(code);
- VisitAllOptimizedFunctionsForContext(native_context, &unlinker);
-}
-
-
-void Deoptimizer::VisitAllOptimizedFunctions(
- Isolate* isolate,
- OptimizedFunctionVisitor* visitor) {
- DisallowHeapAllocation no_allocation;
+ private:
+ std::set<Code*>* codes_;
- // Run through the list of all native contexts.
- Object* context = isolate->heap()->native_contexts_list();
- while (!context->IsUndefined(isolate)) {
- VisitAllOptimizedFunctionsForContext(Context::cast(context), visitor);
- context = Context::cast(context)->next_context_link();
- }
-}
+#ifdef DEBUG
+ Code* topmost_;
+ bool safe_to_deopt_;
+#endif
+};
+} // namespace
-// Unlink functions referring to code marked for deoptimization, then move
-// marked code from the optimized code list to the deoptimized code list,
+// Move marked code from the optimized code list to the deoptimized code list,
// and replace pc on the stack for codes marked for deoptimization.
void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
DisallowHeapAllocation no_allocation;
- // A "closure" that unlinks optimized code that is going to be
- // deoptimized from the functions that refer to it.
- class SelectedCodeUnlinker: public OptimizedFunctionVisitor {
- public:
- virtual void VisitFunction(JSFunction* function) {
- // The code in the function's optimized code feedback vector slot might
- // be different from the code on the function - evict it if necessary.
- function->feedback_vector()->EvictOptimizedCodeMarkedForDeoptimization(
- function->shared(), "unlinking code marked for deopt");
-
- Code* code = function->code();
- if (!code->marked_for_deoptimization()) return;
-
- // Unlink this function.
- if (!code->deopt_already_counted()) {
- function->feedback_vector()->increment_deopt_count();
- code->set_deopt_already_counted(true);
- }
-
- function->set_code(function->shared()->code());
-
- if (FLAG_trace_deopt) {
- CodeTracer::Scope scope(code->GetHeap()->isolate()->GetCodeTracer());
- PrintF(scope.file(), "[deoptimizer unlinked: ");
- function->PrintName(scope.file());
- PrintF(scope.file(),
- " / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
- }
- }
- };
-
- // Unlink all functions that refer to marked code.
- SelectedCodeUnlinker unlinker;
- VisitAllOptimizedFunctionsForContext(context, &unlinker);
-
Isolate* isolate = context->GetHeap()->isolate();
-#ifdef DEBUG
Code* topmost_optimized_code = NULL;
bool safe_to_deopt_topmost_optimized_code = false;
+#ifdef DEBUG
// Make sure all activations of optimized code can deopt at their current PC.
// The topmost optimized code has special handling because it cannot be
// deoptimized due to weak object dependency.
@@ -294,8 +224,8 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
deopt_index != Safepoint::kNoDeoptimizationIndex ||
is_non_deoptimizing_asm_code;
bool is_builtin_code = code->kind() == Code::BUILTIN;
- CHECK(topmost_optimized_code == NULL || safe_if_deopt_triggered ||
- is_non_deoptimizing_asm_code || is_builtin_code);
+ DCHECK(topmost_optimized_code == NULL || safe_if_deopt_triggered ||
+ is_non_deoptimizing_asm_code || is_builtin_code);
if (topmost_optimized_code == NULL) {
topmost_optimized_code = code;
safe_to_deopt_topmost_optimized_code = safe_if_deopt_triggered;
@@ -304,6 +234,10 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
}
#endif
+ // We will use this set to track the Code objects that are marked for
+ // deoptimization but have not been found in any stack frame.
+ std::set<Code*> codes;
+
// Move marked code from the optimized code list to the deoptimized
// code list.
// Walk over all optimized code objects in this native context.
@@ -317,6 +251,8 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
if (code->marked_for_deoptimization()) {
// Make sure that this object does not point to any garbage.
code->InvalidateEmbeddedObjects();
+ codes.insert(code);
+
if (prev != NULL) {
// Skip this code in the optimized code list.
prev->set_next_code_link(next);
@@ -335,24 +271,22 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
element = next;
}
- // Finds the with activations of codes marked for deoptimization, search for
- // the trampoline to the deoptimizer call respective to each code, and use it
- // to replace the current pc on the stack.
- for (StackFrameIterator it(isolate, isolate->thread_local_top()); !it.done();
- it.Advance()) {
- if (it.frame()->type() == StackFrame::OPTIMIZED) {
- Code* code = it.frame()->LookupCode();
- if (code->kind() == Code::OPTIMIZED_FUNCTION &&
- code->marked_for_deoptimization()) {
- // Obtain the trampoline to the deoptimizer call.
- SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
- int trampoline_pc = safepoint.trampoline_pc();
- DCHECK_IMPLIES(code == topmost_optimized_code,
- safe_to_deopt_topmost_optimized_code);
- // Replace the current pc on the stack with the trampoline.
- it.frame()->set_pc(code->instruction_start() + trampoline_pc);
- }
- }
+ ActivationsFinder visitor(&codes, topmost_optimized_code,
+ safe_to_deopt_topmost_optimized_code);
+ // Iterate over the stack of this thread.
+ visitor.VisitThread(isolate, isolate->thread_local_top());
+ // In addition to iterating over the stack of this thread, we also
+ // need to consider all the other threads, as they may also use
+ // the code currently being deoptimized.
+ isolate->thread_manager()->IterateArchivedThreads(&visitor);
+
+ // If a code object has no activation on any stack, then we can remove its
+ // deoptimization data. We do this to ensure that Code objects that will be
+ // unlinked won't be kept alive.
+ std::set<Code*>::iterator it;
+ for (it = codes.begin(); it != codes.end(); ++it) {
+ Code* code = *it;
+ code->set_deoptimization_data(isolate->heap()->empty_fixed_array());
}
}
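Aside: the net effect of the rewritten DeoptimizeMarkedCodeForContext is simple set bookkeeping: collect every marked Code object, erase the ones still active on some thread's stack (patching their return pc to the deopt trampoline), and strip the deoptimization data from the rest. A hedged sketch of that bookkeeping with stand-in types (not V8's real API):

    #include <set>
    #include <vector>

    // Stand-in for v8::internal::Code; the fields are illustrative only.
    struct Code {
      bool marked_for_deoptimization = false;
      bool active_on_some_stack = false;  // what ActivationsFinder discovers
      bool has_deopt_data = true;
    };

    void DeoptimizeMarkedCode(std::vector<Code*>& optimized_code_list) {
      // Collect all code objects marked for deoptimization.
      std::set<Code*> codes;
      for (Code* code : optimized_code_list) {
        if (code->marked_for_deoptimization) codes.insert(code);
      }
      // The per-thread stack walks erase live entries here; the real code
      // also patches each live frame's pc to the deopt trampoline.
      for (Code* code : optimized_code_list) {
        if (code->active_on_some_stack) codes.erase(code);
      }
      // Whatever remains has no activation anywhere, so its deoptimization
      // data can be dropped without breaking an in-flight deopt.
      for (Code* code : codes) {
        code->has_deopt_data = false;
      }
    }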
@@ -397,7 +331,6 @@ void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
}
}
-
void Deoptimizer::MarkAllCodeForContext(Context* context) {
Object* element = context->OptimizedCodeListHead();
Isolate* isolate = context->GetIsolate();
@@ -422,6 +355,14 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function, Code* code) {
// refer to that code. The code cannot be shared across native contexts,
// so we only need to search one.
code->set_marked_for_deoptimization(true);
+ // The code in the function's optimized code feedback vector slot might
+ // be different from the code on the function - evict it if necessary.
+ function->feedback_vector()->EvictOptimizedCodeMarkedForDeoptimization(
+ function->shared(), "unlinking code marked for deopt");
+ if (!code->deopt_already_counted()) {
+ function->feedback_vector()->increment_deopt_count();
+ code->set_deopt_already_counted(true);
+ }
DeoptimizeMarkedCodeForContext(function->context()->native_context());
}
}
@@ -489,18 +430,13 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
DCHECK(from != nullptr);
compiled_code_ = FindOptimizedCode();
-#if DEBUG
DCHECK(compiled_code_ != NULL);
- if (type == EAGER || type == SOFT || type == LAZY) {
- DCHECK(compiled_code_->kind() != Code::FUNCTION);
- }
-#endif
DCHECK(function->IsJSFunction());
trace_scope_ =
FLAG_trace_deopt ? new CodeTracer::Scope(isolate->GetCodeTracer()) : NULL;
#ifdef DEBUG
- CHECK(AllowHeapAllocation::IsAllowed());
+ DCHECK(AllowHeapAllocation::IsAllowed());
disallow_heap_allocation_ = new DisallowHeapAllocation();
#endif // DEBUG
if (compiled_code_->kind() != Code::OPTIMIZED_FUNCTION ||
@@ -546,6 +482,12 @@ void Deoptimizer::PrintFunctionName() {
}
}
+Handle<JSFunction> Deoptimizer::function() const {
+ return Handle<JSFunction>(function_);
+}
+Handle<Code> Deoptimizer::compiled_code() const {
+ return Handle<Code>(compiled_code_);
+}
Deoptimizer::~Deoptimizer() {
DCHECK(input_ == NULL && output_ == NULL);
@@ -563,8 +505,8 @@ void Deoptimizer::DeleteFrameDescriptions() {
input_ = NULL;
output_ = NULL;
#ifdef DEBUG
- CHECK(!AllowHeapAllocation::IsAllowed());
- CHECK(disallow_heap_allocation_ != NULL);
+ DCHECK(!AllowHeapAllocation::IsAllowed());
+ DCHECK(disallow_heap_allocation_ != NULL);
delete disallow_heap_allocation_;
disallow_heap_allocation_ = NULL;
#endif // DEBUG
@@ -615,7 +557,9 @@ int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
while (!element->IsUndefined(isolate)) {
Code* code = Code::cast(element);
DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
- length++;
+ if (!code->marked_for_deoptimization()) {
+ length++;
+ }
element = code->next_code_link();
}
context = Context::cast(context)->next_context_link();
@@ -629,9 +573,8 @@ int LookupCatchHandler(TranslatedFrame* translated_frame, int* data_out) {
switch (translated_frame->kind()) {
case TranslatedFrame::kInterpretedFunction: {
int bytecode_offset = translated_frame->node_id().ToInt();
- JSFunction* function =
- JSFunction::cast(translated_frame->begin()->GetRawValue());
- BytecodeArray* bytecode = function->shared()->bytecode_array();
+ BytecodeArray* bytecode =
+ translated_frame->raw_shared_info()->bytecode_array();
HandlerTable* table = HandlerTable::cast(bytecode->handler_table());
return table->LookupRange(bytecode_offset, data_out, nullptr);
}
@@ -773,11 +716,9 @@ void Deoptimizer::DoComputeOutputFrames() {
PrintFunctionName();
PrintF(trace_scope_->file(),
" @%d => node=%d, pc=0x%08" V8PRIxPTR ", caller sp=0x%08" V8PRIxPTR
- ", state=%s, took %0.3f ms]\n",
+ ", took %0.3f ms]\n",
bailout_id_, node_id.ToInt(), output_[index]->GetPc(),
- caller_frame_top_, BailoutStateToString(static_cast<BailoutState>(
- output_[index]->GetState()->value())),
- ms);
+ caller_frame_top_, ms);
}
}
@@ -792,16 +733,20 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
int input_index = 0;
int bytecode_offset = translated_frame->node_id().ToInt();
- unsigned height = translated_frame->height();
- unsigned height_in_bytes = height * kPointerSize;
+ int height = translated_frame->height();
+ int register_count = height - 1; // Exclude accumulator.
+ int register_stack_slot_count =
+ InterpreterFrameConstants::RegisterStackSlotCount(register_count);
+ int height_in_bytes = register_stack_slot_count * kPointerSize;
- // All tranlations for interpreted frames contain the accumulator and hence
- // are assumed to be in bailout state {BailoutState::TOS_REGISTER}. However
- // such a state is only supported for the topmost frame. We need to skip
- // pushing the accumulator for any non-topmost frame.
- if (!is_topmost) height_in_bytes -= kPointerSize;
+ // The topmost frame will contain the accumulator.
+ if (is_topmost) {
+ height_in_bytes += kPointerSize;
+ if (PadTopOfStackRegister()) height_in_bytes += kPointerSize;
+ }
- JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
+ TranslatedFrame::iterator function_iterator = value_iterator;
+ Object* function = value_iterator->GetRawValue();
value_iterator++;
input_index++;
if (trace_scope_ != NULL) {
@@ -941,6 +886,12 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
WriteValueToOutput(function, 0, frame_index, output_offset, "function ");
+ if (function == isolate_->heap()->arguments_marker()) {
+ Address output_address =
+ reinterpret_cast<Address>(output_[frame_index]->GetTop()) +
+ output_offset;
+ values_to_materialize_.push_back({output_address, function_iterator});
+ }
// Set the bytecode array pointer.
output_offset -= kPointerSize;
@@ -968,17 +919,32 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
}
// Translate the rest of the interpreter registers in the frame.
- for (unsigned i = 0; i < height - 1; ++i) {
+ for (int i = 0; i < register_count; ++i) {
output_offset -= kPointerSize;
WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
output_offset);
}
+ int register_slots_written = register_count;
+ DCHECK_LE(register_slots_written, register_stack_slot_count);
+ // Some architectures must pad the stack frame with extra stack slots
+ // to ensure the stack frame is aligned. Do this now.
+ while (register_slots_written < register_stack_slot_count) {
+ register_slots_written++;
+ output_offset -= kPointerSize;
+ WriteValueToOutput(isolate()->heap()->the_hole_value(), 0, frame_index,
+ output_offset, "padding ");
+ }
+
// Translate the accumulator register (depending on frame position).
if (is_topmost) {
- // For topmost frame, put the accumulator on the stack. The bailout state
- // for interpreted frames is always set to {BailoutState::TOS_REGISTER} and
- // the {NotifyDeoptimized} builtin pops it off the topmost frame (possibly
+ if (PadTopOfStackRegister()) {
+ output_offset -= kPointerSize;
+ WriteValueToOutput(isolate()->heap()->the_hole_value(), 0, frame_index,
+ output_offset, "padding ");
+ }
+ // For the topmost frame, put the accumulator on the stack. The
+ // {NotifyDeoptimized} builtin pops it off the topmost frame (possibly
// after materialization).
output_offset -= kPointerSize;
if (goto_catch_handler) {
@@ -1011,9 +977,6 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
? builtins->builtin(Builtins::kInterpreterEnterBytecodeAdvance)
: builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
output_frame->SetPc(reinterpret_cast<intptr_t>(dispatch_builtin->entry()));
- // Restore accumulator (TOS) register.
- output_frame->SetState(
- Smi::FromInt(static_cast<int>(BailoutState::TOS_REGISTER)));
// Update constant pool.
if (FLAG_enable_embedded_constant_pool) {
@@ -1036,13 +999,6 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
output_frame->SetRegister(context_reg.code(), context_value);
// Set the continuation for the topmost frame.
Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
- if (bailout_type_ == LAZY) {
- continuation = builtins->builtin(Builtins::kNotifyLazyDeoptimized);
- } else if (bailout_type_ == SOFT) {
- continuation = builtins->builtin(Builtins::kNotifySoftDeoptimized);
- } else {
- CHECK_EQ(bailout_type_, EAGER);
- }
output_frame->SetContinuation(
reinterpret_cast<intptr_t>(continuation->entry()));
}
@@ -1056,7 +1012,8 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
unsigned height = translated_frame->height();
unsigned height_in_bytes = height * kPointerSize;
- JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
+ TranslatedFrame::iterator function_iterator = value_iterator;
+ Object* function = value_iterator->GetRawValue();
value_iterator++;
input_index++;
if (trace_scope_ != NULL) {
@@ -1142,6 +1099,12 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
WriteValueToOutput(function, 0, frame_index, output_offset, "function ");
+ if (function == isolate_->heap()->arguments_marker()) {
+ Address output_address =
+ reinterpret_cast<Address>(output_[frame_index]->GetTop()) +
+ output_offset;
+ values_to_materialize_.push_back({output_address, function_iterator});
+ }
// Number of incoming arguments.
output_offset -= kPointerSize;
@@ -1190,10 +1153,11 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
// If the construct frame appears to be topmost we should ensure that the
// value of result register is preserved during continuation execution.
// We do this here by "pushing" the result of the constructor function to the
- // top of the reconstructed stack and then using the
- // BailoutState::TOS_REGISTER machinery.
+ // top of the reconstructed stack and popping it in
+ // {Builtins::kNotifyDeoptimized}.
if (is_topmost) {
height_in_bytes += kPointerSize;
+ if (PadTopOfStackRegister()) height_in_bytes += kPointerSize;
}
JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
@@ -1308,15 +1272,17 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
}
if (is_topmost) {
+ if (PadTopOfStackRegister()) {
+ output_offset -= kPointerSize;
+ WriteValueToOutput(isolate()->heap()->the_hole_value(), 0, frame_index,
+ output_offset, "padding ");
+ }
// Ensure the result is restored back when we return to the stub.
output_offset -= kPointerSize;
Register result_reg = kReturnRegister0;
value = input_->GetRegister(result_reg.code());
output_frame->SetFrameSlot(output_offset, value);
DebugPrintOutputSlot(value, frame_index, output_offset, "subcall result\n");
-
- output_frame->SetState(
- Smi::FromInt(static_cast<int>(BailoutState::TOS_REGISTER)));
}
CHECK_EQ(0u, output_offset);
@@ -1356,7 +1322,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
if (is_topmost) {
Builtins* builtins = isolate_->builtins();
DCHECK_EQ(LAZY, bailout_type_);
- Code* continuation = builtins->builtin(Builtins::kNotifyLazyDeoptimized);
+ Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
output_frame->SetContinuation(
reinterpret_cast<intptr_t>(continuation->entry()));
}
@@ -1385,13 +1351,14 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslatedFrame* translated_frame,
// If the accessor frame appears to be topmost we should ensure that the
// value of result register is preserved during continuation execution.
// We do this here by "pushing" the result of the accessor function to the
- // top of the reconstructed stack and then using the
- // BailoutState::TOS_REGISTER machinery.
- // We don't need to restore the result in case of a setter call because we
- // have to return the stored value but not the result of the setter function.
- bool should_preserve_result = is_topmost && !is_setter_stub_frame;
- if (should_preserve_result) {
+ // top of the reconstructed stack and then popping it in
+ // {Builtins::kNotifyDeoptimized}.
+ // For setter calls, since the result register is going to be overwritten
+ // anyway in the stub, we store a dummy value to pop into the result register
+ // to keep the code simpler.
+ if (is_topmost) {
height_in_bytes += kPointerSize;
+ if (PadTopOfStackRegister()) height_in_bytes += kPointerSize;
}
const char* kind = is_setter_stub_frame ? "setter" : "getter";
@@ -1491,7 +1458,12 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslatedFrame* translated_frame,
output_offset);
}
- if (should_preserve_result) {
+ if (is_topmost) {
+ if (PadTopOfStackRegister()) {
+ output_offset -= kPointerSize;
+ WriteValueToOutput(isolate()->heap()->the_hole_value(), 0, frame_index,
+ output_offset, "padding ");
+ }
// Ensure the result is restored back when we return to the stub.
output_offset -= kPointerSize;
Register result_reg = kReturnRegister0;
@@ -1499,12 +1471,6 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslatedFrame* translated_frame,
output_frame->SetFrameSlot(output_offset, value);
DebugPrintOutputSlot(value, frame_index, output_offset,
"accessor result\n");
-
- output_frame->SetState(
- Smi::FromInt(static_cast<int>(BailoutState::TOS_REGISTER)));
- } else {
- output_frame->SetState(
- Smi::FromInt(static_cast<int>(BailoutState::NO_REGISTERS)));
}
CHECK_EQ(0u, output_offset);
@@ -1541,7 +1507,7 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslatedFrame* translated_frame,
if (is_topmost) {
Builtins* builtins = isolate_->builtins();
DCHECK_EQ(LAZY, bailout_type_);
- Code* continuation = builtins->builtin(Builtins::kNotifyLazyDeoptimized);
+ Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
output_frame->SetContinuation(
reinterpret_cast<intptr_t>(continuation->entry()));
}
@@ -1667,20 +1633,19 @@ void Deoptimizer::DoComputeBuiltinContinuation(
}
output_frame->SetTop(top_address);
- output_frame->SetState(
- Smi::FromInt(static_cast<int>(BailoutState::NO_REGISTERS)));
-
// Get the possible JSFunction for the case that
intptr_t maybe_function =
reinterpret_cast<intptr_t>(value_iterator->GetRawValue());
+ ++input_index;
++value_iterator;
- std::vector<intptr_t> register_values;
+ struct RegisterValue {
+ Object* raw_value_;
+ TranslatedFrame::iterator iterator_;
+ };
+ std::vector<RegisterValue> register_values;
int total_registers = config->num_general_registers();
- register_values.resize(total_registers, 0);
- for (int i = 0; i < total_registers; ++i) {
- register_values[i] = 0;
- }
+ register_values.resize(total_registers, {Smi::kZero, value_iterator});
intptr_t value;
@@ -1709,9 +1674,9 @@ void Deoptimizer::DoComputeBuiltinContinuation(
}
for (int i = 0; i < register_parameter_count; ++i) {
- value = reinterpret_cast<intptr_t>(value_iterator->GetRawValue());
+ Object* object = value_iterator->GetRawValue();
int code = continuation_descriptor.GetRegisterParameter(i).code();
- register_values[code] = value;
+ register_values[code] = {object, value_iterator};
++input_index;
++value_iterator;
}
@@ -1721,8 +1686,9 @@ void Deoptimizer::DoComputeBuiltinContinuation(
// sure that it's harvested from the translation and copied into the register
// set (it was automatically added at the end of the FrameState by the
// instruction selector).
- value = reinterpret_cast<intptr_t>(value_iterator->GetRawValue());
- register_values[kContextRegister.code()] = value;
+ Object* context = value_iterator->GetRawValue();
+ value = reinterpret_cast<intptr_t>(context);
+ register_values[kContextRegister.code()] = {context, value_iterator};
output_frame->SetContext(value);
output_frame->SetRegister(kContextRegister.code(), value);
++input_index;
@@ -1792,7 +1758,8 @@ void Deoptimizer::DoComputeBuiltinContinuation(
for (int i = 0; i < allocatable_register_count; ++i) {
output_frame_offset -= kPointerSize;
int code = config->GetAllocatableGeneralCode(i);
- value = register_values[code];
+ Object* object = register_values[code].raw_value_;
+ value = reinterpret_cast<intptr_t>(object);
output_frame->SetFrameSlot(output_frame_offset, value);
if (trace_scope_ != nullptr) {
ScopedVector<char> str(128);
@@ -1809,6 +1776,22 @@ void Deoptimizer::DoComputeBuiltinContinuation(
DebugPrintOutputSlot(value, frame_index, output_frame_offset,
str.start());
}
+ if (object == isolate_->heap()->arguments_marker()) {
+ Address output_address =
+ reinterpret_cast<Address>(output_[frame_index]->GetTop()) +
+ output_frame_offset;
+ values_to_materialize_.push_back(
+ {output_address, register_values[code].iterator_});
+ }
+ }
+
+ // Clear the context register. The context might be a de-materialized object
+ // and will be materialized by {Runtime_NotifyStubFailure}. For additional
+ // safety we use Smi(0) instead of the potential {arguments_marker} here.
+ if (is_topmost) {
+ intptr_t context_value = reinterpret_cast<intptr_t>(Smi::kZero);
+ Register context_reg = JavaScriptFrame::context_register();
+ output_frame->SetRegister(context_reg.code(), context_value);
}
// Ensure the frame pointer register points to the callee's frame. The builtin
@@ -1837,12 +1820,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
reinterpret_cast<intptr_t>(continuation->entry()));
}
-void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
- // Walk to the last JavaScript output frame to find out if it has
- // adapted arguments.
- for (int frame_index = 0; frame_index < jsframe_count(); ++frame_index) {
- if (frame_index != 0) it->Advance();
- }
+void Deoptimizer::MaterializeHeapObjects() {
translated_state_.Prepare(reinterpret_cast<Address>(stack_fp_));
for (auto& materialization : values_to_materialize_) {
@@ -2044,6 +2022,10 @@ void TranslationBuffer::Add(int32_t value) {
} while (bits != 0);
}
+TranslationIterator::TranslationIterator(ByteArray* buffer, int index)
+ : buffer_(buffer), index_(index) {
+ DCHECK(index >= 0 && index < buffer->length());
+}
int32_t TranslationIterator::Next() {
// Run through the bytes until we reach one with a least significant
@@ -2061,6 +2043,7 @@ int32_t TranslationIterator::Next() {
return is_negative ? -result : result;
}
+bool TranslationIterator::HasNext() const { return index_ < buffer_->length(); }
Handle<ByteArray> TranslationBuffer::CreateByteArray(Factory* factory) {
Handle<ByteArray> result = factory->NewByteArray(CurrentIndex(), TENURED);
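Aside: TranslationBuffer::Add and TranslationIterator::Next (shown only partially across the hunks above) speak a small varint format: the sign lands in the least significant payload bit, then 7 payload bits go out per byte, with each byte's own least significant bit flagging whether another byte follows. A self-contained sketch of that scheme, assuming only what is visible in the surrounding context (kMinInt is excluded, as in the real buffer):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Encode: sign in bit 0 of the payload, 7 payload bits per byte,
    // byte bit 0 = "more bytes follow".
    void Add(std::vector<uint8_t>* out, int32_t value) {
      bool is_negative = value < 0;
      uint32_t bits = (static_cast<uint32_t>(is_negative ? -value : value) << 1) |
                      static_cast<uint32_t>(is_negative);
      do {
        uint32_t next = bits >> 7;
        out->push_back(static_cast<uint8_t>(((bits << 1) & 0xFF) | (next != 0)));
        bits = next;
      } while (bits != 0);
    }

    // Decode: run through the bytes until one has a cleared continuation bit.
    int32_t Next(const std::vector<uint8_t>& in, size_t* index) {
      uint32_t bits = 0;
      for (int shift = 0; true; shift += 7) {
        uint8_t byte = in[(*index)++];
        bits |= static_cast<uint32_t>(byte >> 1) << shift;
        if ((byte & 1) == 0) break;
      }
      bool is_negative = (bits & 1) != 0;
      int32_t result = static_cast<int32_t>(bits >> 1);
      return is_negative ? -result : result;
    }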
@@ -2121,15 +2104,14 @@ void Translation::BeginInterpretedFrame(BailoutId bytecode_offset,
buffer_->Add(height);
}
-
-void Translation::ArgumentsElements(bool is_rest) {
+void Translation::ArgumentsElements(CreateArgumentsType type) {
buffer_->Add(ARGUMENTS_ELEMENTS);
- buffer_->Add(is_rest);
+ buffer_->Add(static_cast<uint8_t>(type));
}
-void Translation::ArgumentsLength(bool is_rest) {
+void Translation::ArgumentsLength(CreateArgumentsType type) {
buffer_->Add(ARGUMENTS_LENGTH);
- buffer_->Add(is_rest);
+ buffer_->Add(static_cast<uint8_t>(type));
}
void Translation::BeginCapturedObject(int length) {
@@ -2229,6 +2211,8 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
case GETTER_STUB_FRAME:
case SETTER_STUB_FRAME:
case DUPLICATED_OBJECT:
+ case ARGUMENTS_ELEMENTS:
+ case ARGUMENTS_LENGTH:
case CAPTURED_OBJECT:
case REGISTER:
case INT32_REGISTER:
@@ -2252,9 +2236,6 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
case BUILTIN_CONTINUATION_FRAME:
case JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME:
return 3;
- case ARGUMENTS_ELEMENTS:
- case ARGUMENTS_LENGTH:
- return 1;
}
FATAL("Unexpected translation type");
return -1;
@@ -2290,8 +2271,8 @@ void MaterializedObjectStore::Set(Address fp,
Handle<FixedArray> materialized_objects) {
int index = StackIdToIndex(fp);
if (index == -1) {
- index = frame_fps_.length();
- frame_fps_.Add(fp);
+ index = static_cast<int>(frame_fps_.size());
+ frame_fps_.push_back(fp);
}
Handle<FixedArray> array = EnsureStackEntries(index + 1);
@@ -2300,30 +2281,28 @@ void MaterializedObjectStore::Set(Address fp,
bool MaterializedObjectStore::Remove(Address fp) {
- int index = StackIdToIndex(fp);
- if (index == -1) {
- return false;
- }
- CHECK_GE(index, 0);
+ auto it = std::find(frame_fps_.begin(), frame_fps_.end(), fp);
+ if (it == frame_fps_.end()) return false;
+ int index = static_cast<int>(std::distance(frame_fps_.begin(), it));
- frame_fps_.Remove(index);
+ frame_fps_.erase(it);
FixedArray* array = isolate()->heap()->materialized_objects();
+
CHECK_LT(index, array->length());
- for (int i = index; i < frame_fps_.length(); i++) {
+ int fps_size = static_cast<int>(frame_fps_.size());
+ for (int i = index; i < fps_size; i++) {
array->set(i, array->get(i + 1));
}
- array->set(frame_fps_.length(), isolate()->heap()->undefined_value());
+ array->set(fps_size, isolate()->heap()->undefined_value());
return true;
}
int MaterializedObjectStore::StackIdToIndex(Address fp) {
- for (int i = 0; i < frame_fps_.length(); i++) {
- if (frame_fps_[i] == fp) {
- return i;
- }
- }
- return -1;
+ auto it = std::find(frame_fps_.begin(), frame_fps_.end(), fp);
+ return it == frame_fps_.end()
+ ? -1
+ : static_cast<int>(std::distance(frame_fps_.begin(), it));
}
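Aside: the List&lt;Address&gt; to std::vector&lt;Address&gt; migration above trades the hand-rolled index loop for the standard find-and-distance idiom. A minimal sketch with the same contract as the rewritten StackIdToIndex (Address here is an assumed stand-in for V8's typedef):

    #include <algorithm>
    #include <cstdint>
    #include <iterator>
    #include <vector>

    using Address = uintptr_t;  // stand-in for V8's Address typedef

    // Returns -1 when fp is absent, otherwise its zero-based position.
    int StackIdToIndex(const std::vector<Address>& frame_fps, Address fp) {
      auto it = std::find(frame_fps.begin(), frame_fps.end(), fp);
      return it == frame_fps.end()
                 ? -1
                 : static_cast<int>(std::distance(frame_fps.begin(), it));
    }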
@@ -2361,7 +2340,7 @@ Handle<Object> GetValueForDebugger(TranslatedFrame::iterator it,
Isolate* isolate) {
if (it->GetRawValue() == isolate->heap()->arguments_marker()) {
if (!it->IsMaterializableByDebugger()) {
- return isolate->factory()->undefined_value();
+ return isolate->factory()->optimized_out();
}
}
return it->GetValue();
@@ -2780,8 +2759,7 @@ void TranslatedValue::Handlify() {
TranslatedFrame TranslatedFrame::InterpretedFrame(
BailoutId bytecode_offset, SharedFunctionInfo* shared_info, int height) {
- TranslatedFrame frame(kInterpretedFunction, shared_info->GetIsolate(),
- shared_info, height);
+ TranslatedFrame frame(kInterpretedFunction, shared_info, height);
frame.node_id_ = bytecode_offset;
return frame;
}
@@ -2790,36 +2768,32 @@ TranslatedFrame TranslatedFrame::InterpretedFrame(
TranslatedFrame TranslatedFrame::AccessorFrame(
Kind kind, SharedFunctionInfo* shared_info) {
DCHECK(kind == kSetter || kind == kGetter);
- return TranslatedFrame(kind, shared_info->GetIsolate(), shared_info);
+ return TranslatedFrame(kind, shared_info);
}
TranslatedFrame TranslatedFrame::ArgumentsAdaptorFrame(
SharedFunctionInfo* shared_info, int height) {
- return TranslatedFrame(kArgumentsAdaptor, shared_info->GetIsolate(),
- shared_info, height);
+ return TranslatedFrame(kArgumentsAdaptor, shared_info, height);
}
TranslatedFrame TranslatedFrame::ConstructStubFrame(
BailoutId bailout_id, SharedFunctionInfo* shared_info, int height) {
- TranslatedFrame frame(kConstructStub, shared_info->GetIsolate(), shared_info,
- height);
+ TranslatedFrame frame(kConstructStub, shared_info, height);
frame.node_id_ = bailout_id;
return frame;
}
TranslatedFrame TranslatedFrame::BuiltinContinuationFrame(
BailoutId bailout_id, SharedFunctionInfo* shared_info, int height) {
- TranslatedFrame frame(kBuiltinContinuation, shared_info->GetIsolate(),
- shared_info, height);
+ TranslatedFrame frame(kBuiltinContinuation, shared_info, height);
frame.node_id_ = bailout_id;
return frame;
}
TranslatedFrame TranslatedFrame::JavaScriptBuiltinContinuationFrame(
BailoutId bailout_id, SharedFunctionInfo* shared_info, int height) {
- TranslatedFrame frame(kJavaScriptBuiltinContinuation,
- shared_info->GetIsolate(), shared_info, height);
+ TranslatedFrame frame(kJavaScriptBuiltinContinuation, shared_info, height);
frame.node_id_ = bailout_id;
return frame;
}
@@ -3014,7 +2988,8 @@ void TranslatedFrame::AdvanceIterator(
}
Address TranslatedState::ComputeArgumentsPosition(Address input_frame_pointer,
- bool is_rest, int* length) {
+ CreateArgumentsType type,
+ int* length) {
Address parent_frame_pointer = *reinterpret_cast<Address*>(
input_frame_pointer + StandardFrameConstants::kCallerFPOffset);
intptr_t parent_frame_type = Memory::intptr_at(
@@ -3034,7 +3009,7 @@ Address TranslatedState::ComputeArgumentsPosition(Address input_frame_pointer,
arguments_frame = input_frame_pointer;
}
- if (is_rest) {
+ if (type == CreateArgumentsType::kRestParameter) {
// If the actual number of arguments is less than the number of formal
// parameters, we have zero rest parameters.
if (length) *length = std::max(0, *length - formal_parameter_count_);
@@ -3044,24 +3019,23 @@ Address TranslatedState::ComputeArgumentsPosition(Address input_frame_pointer,
}
// Creates translated values for an arguments backing store, or the backing
-// store for the rest parameters if {is_rest} is true. The TranslatedValue
+// store for rest parameters depending on the given {type}. The TranslatedValue
// objects for the fields are not read from the TranslationIterator, but instead
// created on-the-fly based on dynamic information in the optimized frame.
void TranslatedState::CreateArgumentsElementsTranslatedValues(
- int frame_index, Address input_frame_pointer, bool is_rest,
+ int frame_index, Address input_frame_pointer, CreateArgumentsType type,
FILE* trace_file) {
TranslatedFrame& frame = frames_[frame_index];
int length;
Address arguments_frame =
- ComputeArgumentsPosition(input_frame_pointer, is_rest, &length);
+ ComputeArgumentsPosition(input_frame_pointer, type, &length);
int object_index = static_cast<int>(object_positions_.size());
int value_index = static_cast<int>(frame.values_.size());
if (trace_file != nullptr) {
- PrintF(trace_file,
- "arguments elements object #%d (is_rest = %d, length = %d)",
- object_index, is_rest, length);
+ PrintF(trace_file, "arguments elements object #%d (type = %d, length = %d)",
+ object_index, static_cast<uint8_t>(type), length);
}
object_positions_.push_back({frame_index, value_index});
frame.Add(TranslatedValue::NewDeferredObject(
@@ -3071,7 +3045,17 @@ void TranslatedState::CreateArgumentsElementsTranslatedValues(
TranslatedValue::NewTagged(this, isolate_->heap()->fixed_array_map()));
frame.Add(TranslatedValue::NewInt32(this, length));
- for (int i = length - 1; i >= 0; --i) {
+ int number_of_holes = 0;
+ if (type == CreateArgumentsType::kMappedArguments) {
+ // If the actual number of arguments is less than the number of formal
+ // parameters, we have fewer holes to fill to not overshoot the length.
+ number_of_holes = Min(formal_parameter_count_, length);
+ }
+ for (int i = 0; i < number_of_holes; ++i) {
+ frame.Add(
+ TranslatedValue::NewTagged(this, isolate_->heap()->the_hole_value()));
+ }
+ for (int i = length - number_of_holes - 1; i >= 0; --i) {
Address argument_slot = arguments_frame +
CommonFrameConstants::kFixedFrameSizeAboveFp +
i * kPointerSize;
@@ -3124,19 +3108,21 @@ int TranslatedState::CreateNextTranslatedValue(
}
case Translation::ARGUMENTS_ELEMENTS: {
- bool is_rest = iterator->Next();
- CreateArgumentsElementsTranslatedValues(frame_index, fp, is_rest,
+ CreateArgumentsType arguments_type =
+ static_cast<CreateArgumentsType>(iterator->Next());
+ CreateArgumentsElementsTranslatedValues(frame_index, fp, arguments_type,
trace_file);
return 0;
}
case Translation::ARGUMENTS_LENGTH: {
- bool is_rest = iterator->Next();
+ CreateArgumentsType arguments_type =
+ static_cast<CreateArgumentsType>(iterator->Next());
int length;
- ComputeArgumentsPosition(fp, is_rest, &length);
+ ComputeArgumentsPosition(fp, arguments_type, &length);
if (trace_file != nullptr) {
- PrintF(trace_file, "arguments length field (is_rest = %d, length = %d)",
- is_rest, length);
+ PrintF(trace_file, "arguments length field (type = %d, length = %d)",
+ static_cast<uint8_t>(arguments_type), length);
}
frame.Add(TranslatedValue::NewInt32(this, length));
return 0;
@@ -3366,10 +3352,6 @@ int TranslatedState::CreateNextTranslatedValue(
}
FATAL("We should never get here - unexpected deopt info.");
- TranslatedValue translated_value =
- TranslatedValue(nullptr, TranslatedValue::kInvalid);
- frame.Add(translated_value);
- return translated_value.GetChildrenCount();
}
TranslatedState::TranslatedState(const JavaScriptFrame* frame)
@@ -3475,10 +3457,32 @@ class TranslatedState::CapturedObjectMaterializer {
int field_count)
: state_(state), frame_index_(frame_index), field_count_(field_count) {}
+ // Ensure the properties never contain mutable heap numbers. This is necessary
+ // because the deoptimizer generalizes all maps to tagged representation
+ // fields (so mutable heap numbers are not allowed).
+ static void EnsurePropertiesGeneralized(Handle<Object> properties_or_hash) {
+ if (properties_or_hash->IsPropertyArray()) {
+ Handle<PropertyArray> properties =
+ Handle<PropertyArray>::cast(properties_or_hash);
+ int length = properties->length();
+ for (int i = 0; i < length; i++) {
+ if (properties->get(i)->IsMutableHeapNumber()) {
+ Handle<HeapObject> box(HeapObject::cast(properties->get(i)));
+ box->set_map(properties->GetIsolate()->heap()->heap_number_map());
+ }
+ }
+ }
+ }
+
Handle<Object> FieldAt(int* value_index) {
CHECK(field_count_ > 0);
--field_count_;
- return state_->MaterializeAt(frame_index_, value_index);
+ Handle<Object> object = state_->MaterializeAt(frame_index_, value_index);
+ // This is a big hammer to make sure that the materialized objects do not
+ // have property arrays with mutable heap numbers (mutable heap numbers are
+ // bad because we generalize maps for all materialized objects).
+ EnsurePropertiesGeneralized(object);
+ return object;
}
~CapturedObjectMaterializer() { CHECK_EQ(0, field_count_); }
@@ -3633,6 +3637,12 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
object->set_raw_properties_or_hash(*properties);
object->set_elements(FixedArrayBase::cast(*elements));
object->set_length(*array_length);
+ int in_object_properties = map->GetInObjectProperties();
+ for (int i = 0; i < in_object_properties; ++i) {
+ Handle<Object> value = materializer.FieldAt(value_index);
+ FieldIndex index = FieldIndex::ForPropertyIndex(object->map(), i);
+ object->FastPropertyAtPut(index, *value);
+ }
return object;
}
case JS_BOUND_FUNCTION_TYPE: {
@@ -3652,6 +3662,38 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
object->set_bound_arguments(FixedArray::cast(*bound_arguments));
return object;
}
+ case JS_FUNCTION_TYPE: {
+ Handle<JSFunction> object = isolate_->factory()->NewFunction(
+ map, handle(isolate_->object_function()->shared()),
+ handle(isolate_->context()), NOT_TENURED);
+ slot->value_ = object;
+ // We temporarily allocated a JSFunction for the {Object} function
+ // within the current context, to break cycles in the object graph.
+ // The correct function and context will be set below once available.
+ Handle<Object> properties = materializer.FieldAt(value_index);
+ Handle<Object> elements = materializer.FieldAt(value_index);
+ Handle<Object> prototype = materializer.FieldAt(value_index);
+ Handle<Object> shared = materializer.FieldAt(value_index);
+ Handle<Object> context = materializer.FieldAt(value_index);
+ Handle<Object> vector_cell = materializer.FieldAt(value_index);
+ Handle<Object> code = materializer.FieldAt(value_index);
+ object->set_map(*map);
+ object->set_raw_properties_or_hash(*properties);
+ object->set_elements(FixedArrayBase::cast(*elements));
+ object->set_prototype_or_initial_map(*prototype);
+ object->set_shared(SharedFunctionInfo::cast(*shared));
+ object->set_context(Context::cast(*context));
+ object->set_feedback_vector_cell(Cell::cast(*vector_cell));
+ object->set_code(Code::cast(*code));
+ int in_object_properties = map->GetInObjectProperties();
+ for (int i = 0; i < in_object_properties; ++i) {
+ Handle<Object> value = materializer.FieldAt(value_index);
+ FieldIndex index = FieldIndex::ForPropertyIndex(object->map(), i);
+ object->FastPropertyAtPut(index, *value);
+ }
+ return object;
+ }
+ case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE: {
Handle<JSGeneratorObject> object = Handle<JSGeneratorObject>::cast(
isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
@@ -3674,6 +3716,15 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
object->set_resume_mode(Smi::ToInt(*resume_mode));
object->set_continuation(Smi::ToInt(*continuation_offset));
object->set_register_file(FixedArray::cast(*register_file));
+
+ if (object->IsJSAsyncGeneratorObject()) {
+ auto generator = Handle<JSAsyncGeneratorObject>::cast(object);
+ Handle<Object> queue = materializer.FieldAt(value_index);
+ Handle<Object> awaited_promise = materializer.FieldAt(value_index);
+ generator->set_queue(HeapObject::cast(*queue));
+ generator->set_awaited_promise(HeapObject::cast(*awaited_promise));
+ }
+
int in_object_properties = map->GetInObjectProperties();
for (int i = 0; i < in_object_properties; ++i) {
Handle<Object> value = materializer.FieldAt(value_index);
@@ -3813,11 +3864,9 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
case JS_API_OBJECT_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
case JS_VALUE_TYPE:
- case JS_FUNCTION_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
case JS_DATE_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_MODULE_NAMESPACE_TYPE:
case JS_ARRAY_BUFFER_TYPE:
case JS_TYPED_ARRAY_TYPE:
@@ -3826,7 +3875,7 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
case JS_MAP_TYPE:
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
- case JS_PROMISE_CAPABILITY_TYPE:
+ case PROMISE_CAPABILITY_TYPE:
case JS_PROMISE_TYPE:
case JS_PROXY_TYPE:
case MAP_TYPE:
@@ -3843,6 +3892,7 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
case SCRIPT_TYPE:
case CODE_TYPE:
case PROPERTY_CELL_TYPE:
+ case BIGINT_TYPE:
case MODULE_TYPE:
case MODULE_INFO_ENTRY_TYPE:
case FREE_SPACE_TYPE:
@@ -3946,10 +3996,25 @@ Handle<Object> TranslatedState::MaterializeObjectAt(int object_index) {
return MaterializeAt(pos.frame_index_, &(pos.value_index_));
}
+TranslatedFrame* TranslatedState::GetFrameFromJSFrameIndex(int jsframe_index) {
+ for (size_t i = 0; i < frames_.size(); i++) {
+ if (frames_[i].kind() == TranslatedFrame::kInterpretedFunction ||
+ frames_[i].kind() == TranslatedFrame::kJavaScriptBuiltinContinuation) {
+ if (jsframe_index > 0) {
+ jsframe_index--;
+ } else {
+ return &(frames_[i]);
+ }
+ }
+ }
+ return nullptr;
+}
+
TranslatedFrame* TranslatedState::GetArgumentsInfoFromJSFrameIndex(
int jsframe_index, int* args_count) {
for (size_t i = 0; i < frames_.size(); i++) {
- if (frames_[i].kind() == TranslatedFrame::kInterpretedFunction) {
+ if (frames_[i].kind() == TranslatedFrame::kInterpretedFunction ||
+ frames_[i].kind() == TranslatedFrame::kJavaScriptBuiltinContinuation) {
if (jsframe_index > 0) {
jsframe_index--;
} else {
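Aside: GetFrameFromJSFrameIndex and GetArgumentsInfoFromJSFrameIndex above both use the same skip-count pattern: walk the frames, decrement the index on each match, and return the frame when the index hits zero. The generic form, as a sketch:

    #include <vector>

    // Return a pointer to the n-th element satisfying `pred` (0-based),
    // or nullptr when there are fewer than n+1 matches.
    template <typename T, typename Pred>
    T* NthMatching(std::vector<T>& items, int n, Pred pred) {
      for (T& item : items) {
        if (!pred(item)) continue;
        if (n-- == 0) return &item;
      }
      return nullptr;
    }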
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 5bf36a3d3e..dcc5619812 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -5,7 +5,10 @@
#ifndef V8_DEOPTIMIZER_H_
#define V8_DEOPTIMIZER_H_
+#include <vector>
+
#include "src/allocation.h"
+#include "src/base/macros.h"
#include "src/boxed-float.h"
#include "src/deoptimize-reason.h"
#include "src/frame-constants.h"
@@ -202,14 +205,12 @@ class TranslatedFrame {
static void AdvanceIterator(std::deque<TranslatedValue>::iterator* iter);
- TranslatedFrame(Kind kind, Isolate* isolate,
- SharedFunctionInfo* shared_info = nullptr, int height = 0)
+ TranslatedFrame(Kind kind, SharedFunctionInfo* shared_info = nullptr,
+ int height = 0)
: kind_(kind),
node_id_(BailoutId::None()),
raw_shared_info_(shared_info),
- height_(height),
- isolate_(isolate) {}
-
+ height_(height) {}
void Add(const TranslatedValue& value) { values_.push_back(value); }
void Handlify();
@@ -219,7 +220,6 @@ class TranslatedFrame {
SharedFunctionInfo* raw_shared_info_;
Handle<SharedFunctionInfo> shared_info_;
int height_;
- Isolate* isolate_;
typedef std::deque<TranslatedValue> ValuesContainer;
@@ -262,6 +262,7 @@ class TranslatedState {
std::vector<TranslatedFrame>& frames() { return frames_; }
+ TranslatedFrame* GetFrameFromJSFrameIndex(int jsframe_index);
TranslatedFrame* GetArgumentsInfoFromJSFrameIndex(int jsframe_index,
int* arguments_count);
@@ -281,11 +282,12 @@ class TranslatedState {
int CreateNextTranslatedValue(int frame_index, TranslationIterator* iterator,
FixedArray* literal_array, Address fp,
RegisterValues* registers, FILE* trace_file);
- Address ComputeArgumentsPosition(Address input_frame_pointer, bool is_rest,
- int* length);
+ Address ComputeArgumentsPosition(Address input_frame_pointer,
+ CreateArgumentsType type, int* length);
void CreateArgumentsElementsTranslatedValues(int frame_index,
Address input_frame_pointer,
- bool is_rest, FILE* trace_file);
+ CreateArgumentsType type,
+ FILE* trace_file);
void UpdateFromPreviouslyMaterializedObjects();
Handle<Object> MaterializeAt(int frame_index, int* value_index);
@@ -321,21 +323,6 @@ class Deoptimizer : public Malloced {
public:
enum BailoutType { EAGER, LAZY, SOFT, kLastBailoutType = SOFT };
- enum class BailoutState {
- NO_REGISTERS,
- TOS_REGISTER,
- };
-
- static const char* BailoutStateToString(BailoutState state) {
- switch (state) {
- case BailoutState::NO_REGISTERS:
- return "NO_REGISTERS";
- case BailoutState::TOS_REGISTER:
- return "TOS_REGISTER";
- }
- UNREACHABLE();
- }
-
struct DeoptInfo {
DeoptInfo(SourcePosition position, DeoptimizeReason deopt_reason,
int deopt_id)
@@ -378,8 +365,8 @@ class Deoptimizer : public Malloced {
int output_count() const { return output_count_; }
- Handle<JSFunction> function() const { return Handle<JSFunction>(function_); }
- Handle<Code> compiled_code() const { return Handle<Code>(compiled_code_); }
+ Handle<JSFunction> function() const;
+ Handle<Code> compiled_code() const;
BailoutType bailout_type() const { return bailout_type_; }
// Number of created JS frames. Not all created frames are necessarily JS.
@@ -413,15 +400,9 @@ class Deoptimizer : public Malloced {
// refer to that code.
static void DeoptimizeMarkedCode(Isolate* isolate);
- // Visit all the known optimized functions in a given isolate.
- static void VisitAllOptimizedFunctions(
- Isolate* isolate, OptimizedFunctionVisitor* visitor);
-
- static void UnlinkOptimizedCode(Code* code, Context* native_context);
-
~Deoptimizer();
- void MaterializeHeapObjects(JavaScriptFrameIterator* it);
+ void MaterializeHeapObjects();
static void ComputeOutputFrames(Deoptimizer* deoptimizer);
@@ -535,13 +516,13 @@ class Deoptimizer : public Malloced {
// Marks all the code in the given context for deoptimization.
static void MarkAllCodeForContext(Context* native_context);
- // Visit all the known optimized functions in a given context.
- static void VisitAllOptimizedFunctionsForContext(
- Context* context, OptimizedFunctionVisitor* visitor);
-
// Deoptimizes all code marked in the given context.
static void DeoptimizeMarkedCodeForContext(Context* native_context);
+ // Some architectures need to push padding together with the TOS register
+ // in order to maintain stack alignment.
+ static bool PadTopOfStackRegister();
+
// Searches the list of known deoptimizing code for a Code object
// containing the given address (which is supposedly faster than
// searching all code objects).
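Aside: PadTopOfStackRegister() (declared above) exists because some targets, arm64 in particular, require the stack pointer to stay 16-byte aligned, so pushing a lone top-of-stack value needs a companion padding slot. A sketch of the slot-count rounding this implies; the constants are assumptions modeled on a 64-bit target, not V8's actual definitions:

    constexpr int kPointerSize = 8;      // assumed 64-bit target
    constexpr int kStackAlignment = 16;  // e.g. the arm64 ABI requirement
    constexpr int kSlotsPerUnit = kStackAlignment / kPointerSize;  // 2

    // Round a slot count up to the alignment boundary, mirroring what
    // RegisterStackSlotCount() and PadTopOfStackRegister() arrange together.
    constexpr int PaddedSlotCount(int slot_count) {
      return ((slot_count + kSlotsPerUnit - 1) / kSlotsPerUnit) * kSlotsPerUnit;
    }

    static_assert(PaddedSlotCount(3) == 4, "odd counts gain a padding slot");
    static_assert(PaddedSlotCount(4) == 4, "aligned counts are unchanged");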
@@ -643,8 +624,8 @@ class RegisterValues {
static_assert(sizeof(Float64) == kDoubleSize, "size mismatch");
intptr_t registers_[Register::kNumRegisters];
- Float32 float_registers_[FloatRegister::kMaxNumRegisters];
- Float64 double_registers_[DoubleRegister::kMaxNumRegisters];
+ Float32 float_registers_[FloatRegister::kNumRegisters];
+ Float64 double_registers_[DoubleRegister::kNumRegisters];
};
@@ -667,6 +648,7 @@ class FrameDescription {
}
uint32_t GetFrameSize() const {
+ USE(frame_content_);
DCHECK(static_cast<uint32_t>(frame_size_) == frame_size_);
return static_cast<uint32_t>(frame_size_);
}
@@ -726,9 +708,6 @@ class FrameDescription {
constant_pool_ = constant_pool;
}
- Smi* GetState() const { return state_; }
- void SetState(Smi* state) { state_ = state; }
-
void SetContinuation(intptr_t pc) { continuation_ = pc; }
// Argument count, including receiver.
@@ -752,8 +731,6 @@ class FrameDescription {
static int pc_offset() { return offsetof(FrameDescription, pc_); }
- static int state_offset() { return offsetof(FrameDescription, state_); }
-
static int continuation_offset() {
return offsetof(FrameDescription, continuation_);
}
@@ -776,7 +753,6 @@ class FrameDescription {
intptr_t fp_;
intptr_t context_;
intptr_t constant_pool_;
- Smi* state_;
// Continuation is the PC where the execution continues after
// deoptimizing.
@@ -828,14 +804,11 @@ class TranslationBuffer BASE_EMBEDDED {
class TranslationIterator BASE_EMBEDDED {
public:
- TranslationIterator(ByteArray* buffer, int index)
- : buffer_(buffer), index_(index) {
- DCHECK(index >= 0 && index < buffer->length());
- }
+ TranslationIterator(ByteArray* buffer, int index);
int32_t Next();
- bool HasNext() const { return index_ < buffer_->length(); }
+ bool HasNext() const;
void Skip(int n) {
for (int i = 0; i < n; i++) Next();
@@ -906,8 +879,8 @@ class Translation BASE_EMBEDDED {
int literal_id, unsigned height);
void BeginGetterStubFrame(int literal_id);
void BeginSetterStubFrame(int literal_id);
- void ArgumentsElements(bool is_rest);
- void ArgumentsLength(bool is_rest);
+ void ArgumentsElements(CreateArgumentsType type);
+ void ArgumentsLength(CreateArgumentsType type);
void BeginCapturedObject(int length);
void DuplicateObject(int object_index);
void StoreRegister(Register reg);
@@ -957,7 +930,7 @@ class MaterializedObjectStore {
int StackIdToIndex(Address fp);
Isolate* isolate_;
- List<Address> frame_fps_;
+ std::vector<Address> frame_fps_;
};
diff --git a/deps/v8/src/detachable-vector.h b/deps/v8/src/detachable-vector.h
new file mode 100644
index 0000000000..4609ebf0e1
--- /dev/null
+++ b/deps/v8/src/detachable-vector.h
@@ -0,0 +1,73 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DETACHABLE_VECTOR_H_
+#define V8_DETACHABLE_VECTOR_H_
+
+#include <vector>
+
+namespace v8 {
+namespace internal {
+
+// This class wraps a std::vector and provides a few of the common member
+// functions for accessing the data. It acts as a lazy wrapper of the vector,
+// not initializing the backing store until push_back() is first called. Two
+// extra methods are also provided: free() and detach(), which allow for manual
+// control of the backing store. This is currently required for use in the
+// HandleScopeImplementer. Any other class should just use a std::vector
+// directly.
+template <typename T>
+class DetachableVector {
+ public:
+ DetachableVector() : vector_(nullptr) {}
+
+ ~DetachableVector() { delete vector_; }
+
+ void push_back(const T& value) {
+ ensureAttached();
+ vector_->push_back(value);
+ }
+
+ // Free the backing store and clear our reference to it.
+ void free() {
+ delete vector_;
+ vector_ = nullptr;
+ }
+
+ // Clear our reference to the backing store. Does not delete it!
+ void detach() { vector_ = nullptr; }
+
+ T& at(typename std::vector<T>::size_type i) const { return vector_->at(i); }
+
+ T& back() const { return vector_->back(); }
+
+ T& front() const { return vector_->front(); }
+
+ void pop_back() { vector_->pop_back(); }
+
+ typename std::vector<T>::size_type size() const {
+ if (vector_) return vector_->size();
+ return 0;
+ }
+
+ bool empty() const {
+ if (vector_) return vector_->empty();
+ return true;
+ }
+
+ private:
+ std::vector<T>* vector_;
+
+ // Attach a vector backing store if not present.
+ void ensureAttached() {
+ if (vector_ == nullptr) {
+ vector_ = new std::vector<T>();
+ }
+ }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_DETACHABLE_VECTOR_H_
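Aside: a small usage sketch for the new DetachableVector (Example() is illustrative only, not part of the header):

    #include "src/detachable-vector.h"

    void Example() {
      v8::internal::DetachableVector<int> entries;
      // No backing std::vector exists yet: size() is 0, empty() is true.
      entries.push_back(1);  // first push_back lazily allocates the store
      entries.push_back(2);
      int last = entries.back();  // 2
      entries.pop_back();
      (void)last;
      // free() deletes the backing store; detach() instead just forgets it,
      // which is only safe when another owner (in practice, the
      // HandleScopeImplementer machinery) already holds the raw storage.
      entries.free();
    }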
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index fb73ec2c2a..10b8c1637e 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -5,6 +5,7 @@
#include "src/disassembler.h"
#include <memory>
+#include <vector>
#include "src/assembler-inl.h"
#include "src/code-stubs.h"
@@ -116,9 +117,7 @@ static void PrintRelocInfo(StringBuilder* out, Isolate* isolate,
out->AddFormatted(" ;; code:");
Code* code = Code::GetCodeFromTargetAddress(relocinfo->target_address());
Code::Kind kind = code->kind();
- if (code->is_inline_cache_stub()) {
- out->AddFormatted(" %s", Code::Kind2String(kind));
- } else if (kind == Code::STUB || kind == Code::HANDLER) {
+ if (kind == Code::STUB) {
// Get the STUB key and extract major and minor key.
uint32_t key = code->stub_key();
uint32_t minor_key = CodeStub::MinorKeyFromKey(key);
@@ -207,27 +206,28 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
}
// Collect RelocInfo for this instruction (prev_pc .. pc-1)
- List<const char*> comments(4);
- List<byte*> pcs(1);
- List<RelocInfo::Mode> rmodes(1);
- List<intptr_t> datas(1);
+ std::vector<const char*> comments;
+ std::vector<byte*> pcs;
+ std::vector<RelocInfo::Mode> rmodes;
+ std::vector<intptr_t> datas;
if (it != NULL) {
while (!it->done() && it->rinfo()->pc() < pc) {
if (RelocInfo::IsComment(it->rinfo()->rmode())) {
// For comments just collect the text.
- comments.Add(reinterpret_cast<const char*>(it->rinfo()->data()));
+ comments.push_back(
+ reinterpret_cast<const char*>(it->rinfo()->data()));
} else {
// For other reloc info collect all data.
- pcs.Add(it->rinfo()->pc());
- rmodes.Add(it->rinfo()->rmode());
- datas.Add(it->rinfo()->data());
+ pcs.push_back(it->rinfo()->pc());
+ rmodes.push_back(it->rinfo()->rmode());
+ datas.push_back(it->rinfo()->data());
}
it->next();
}
}
// Comments.
- for (int i = 0; i < comments.length(); i++) {
+ for (size_t i = 0; i < comments.size(); i++) {
out.AddFormatted(" %s", comments[i]);
DumpBuffer(os, &out);
}
@@ -240,7 +240,7 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
out.AddFormatted("%s", decode_buffer.start());
// Print all the reloc info for this instruction which are not comments.
- for (int i = 0; i < pcs.length(); i++) {
+ for (size_t i = 0; i < pcs.size(); i++) {
// Put together the reloc info
RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], converter.code());
@@ -252,7 +252,7 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
// If this is a constant pool load and we haven't found any RelocInfo
// already, check if we can find some RelocInfo for the target address in
// the constant pool.
- if (pcs.is_empty() && converter.code() != nullptr) {
+ if (pcs.empty() && converter.code() != nullptr) {
RelocInfo dummy_rinfo(prev_pc, RelocInfo::NONE32, 0, nullptr);
if (dummy_rinfo.IsInConstantPool()) {
byte* constant_pool_entry_address =
diff --git a/deps/v8/src/dtoa.cc b/deps/v8/src/dtoa.cc
index 7d5f4258ef..6d12994e55 100644
--- a/deps/v8/src/dtoa.cc
+++ b/deps/v8/src/dtoa.cc
@@ -68,7 +68,6 @@ void DoubleToAscii(double v, DtoaMode mode, int requested_digits,
break;
default:
UNREACHABLE();
- fast_worked = false;
}
if (fast_worked) return;
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 184cd386e8..8c692ecab8 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -615,6 +615,16 @@ class ElementsAccessorBase : public ElementsAccessor {
filter) != kMaxUInt32;
}
+ bool HasEntry(JSObject* holder, uint32_t entry) final {
+ return Subclass::HasEntryImpl(holder->GetIsolate(), holder->elements(),
+ entry);
+ }
+
+ static bool HasEntryImpl(Isolate* isolate, FixedArrayBase* backing_store,
+ uint32_t entry) {
+ UNIMPLEMENTED();
+ }
+
bool HasAccessors(JSObject* holder) final {
return Subclass::HasAccessorsImpl(holder, holder->elements());
}
@@ -1010,13 +1020,14 @@ class ElementsAccessorBase : public ElementsAccessor {
}
Object* CopyElements(Handle<JSReceiver> source, Handle<JSObject> destination,
- size_t length) final {
- return Subclass::CopyElementsHandleImpl(source, destination, length);
+ size_t length, uint32_t offset) final {
+ return Subclass::CopyElementsHandleImpl(source, destination, length,
+ offset);
}
static Object* CopyElementsHandleImpl(Handle<JSReceiver> source,
Handle<JSObject> destination,
- size_t length) {
+ size_t length, uint32_t offset) {
UNREACHABLE();
}
@@ -1317,13 +1328,15 @@ class ElementsAccessorBase : public ElementsAccessor {
return Subclass::GetDetailsImpl(holder, entry);
}
- Handle<FixedArray> CreateListFromArray(Isolate* isolate,
- Handle<JSArray> array) final {
- return Subclass::CreateListFromArrayImpl(isolate, array);
+ Handle<FixedArray> CreateListFromArrayLike(Isolate* isolate,
+ Handle<JSObject> object,
+ uint32_t length) final {
+ return Subclass::CreateListFromArrayLikeImpl(isolate, object, length);
};
- static Handle<FixedArray> CreateListFromArrayImpl(Isolate* isolate,
- Handle<JSArray> array) {
+ static Handle<FixedArray> CreateListFromArrayLikeImpl(Isolate* isolate,
+ Handle<JSObject> object,
+ uint32_t length) {
UNREACHABLE();
}
@@ -2366,14 +2379,13 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
}
}
- static Handle<FixedArray> CreateListFromArrayImpl(Isolate* isolate,
- Handle<JSArray> array) {
- uint32_t length = 0;
- array->length()->ToArrayLength(&length);
+ static Handle<FixedArray> CreateListFromArrayLikeImpl(Isolate* isolate,
+ Handle<JSObject> object,
+ uint32_t length) {
Handle<FixedArray> result = isolate->factory()->NewFixedArray(length);
- Handle<FixedArrayBase> elements(array->elements(), isolate);
+ Handle<FixedArrayBase> elements(object->elements(), isolate);
for (uint32_t i = 0; i < length; i++) {
- if (!Subclass::HasElementImpl(isolate, *array, i, *elements)) continue;
+ if (!Subclass::HasElementImpl(isolate, *object, i, *elements)) continue;
Handle<Object> value;
value = Subclass::GetImpl(isolate, *elements, i);
if (value->IsName()) {
@@ -3084,6 +3096,20 @@ class TypedElementsAccessor
std::reverse(data, data + len);
}
+ static Handle<FixedArray> CreateListFromArrayLikeImpl(Isolate* isolate,
+ Handle<JSObject> object,
+ uint32_t length) {
+ DCHECK(!WasNeutered(*object));
+ DCHECK(object->IsJSTypedArray());
+ Handle<FixedArray> result = isolate->factory()->NewFixedArray(length);
+ Handle<BackingStore> elements(BackingStore::cast(object->elements()));
+ for (uint32_t i = 0; i < length; i++) {
+ Handle<Object> value = AccessorClass::GetImpl(isolate, *elements, i);
+ result->set(i, *value);
+ }
+ return result;
+ }
+
static Handle<JSObject> SliceWithResultImpl(Handle<JSObject> receiver,
uint32_t start, uint32_t end,
Handle<JSObject> result) {
@@ -3143,28 +3169,30 @@ class TypedElementsAccessor
template <typename SourceTraits>
static void CopyBetweenBackingStores(FixedTypedArrayBase* source,
- BackingStore* dest, size_t length) {
+ BackingStore* dest, size_t length,
+ uint32_t offset) {
FixedTypedArray<SourceTraits>* source_fta =
FixedTypedArray<SourceTraits>::cast(source);
for (uint32_t i = 0; i < length; i++) {
typename SourceTraits::ElementType elem = source_fta->get_scalar(i);
- dest->set(i, dest->from(elem));
+ dest->set(offset + i, dest->from(elem));
}
}
- static void CopyElementsHandleFromTypedArray(Handle<JSTypedArray> source,
- Handle<JSTypedArray> destination,
- size_t length) {
+ static void CopyElementsFromTypedArray(JSTypedArray* source,
+ JSTypedArray* destination,
+ size_t length, uint32_t offset) {
// The source is a typed array, so we don't need to worry about ToNumber
// side effects: the source elements will always be a number or
// undefined.
DisallowHeapAllocation no_gc;
- Handle<FixedTypedArrayBase> source_elements(
- FixedTypedArrayBase::cast(source->elements()));
- Handle<BackingStore> destination_elements(
- BackingStore::cast(destination->elements()));
+ FixedTypedArrayBase* source_elements =
+ FixedTypedArrayBase::cast(source->elements());
+ BackingStore* destination_elements =
+ BackingStore::cast(destination->elements());
+ DCHECK_LE(offset + source->length(), destination->length());
DCHECK_GE(destination->length(), source->length());
DCHECK(source->length()->IsSmi());
DCHECK_EQ(Smi::FromInt(static_cast<int>(length)), source->length());
@@ -3194,15 +3222,16 @@ class TypedElementsAccessor
// which have special conversion operations.
if (same_type || (same_size && both_are_simple)) {
size_t element_size = source->element_size();
- std::memcpy(dest_data, source_data, length * element_size);
+ std::memcpy(dest_data + offset * element_size, source_data,
+ length * element_size);
} else {
// We use scalar accessors below to avoid boxing/unboxing, so there are
// no allocations.
switch (source->GetElementsKind()) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: \
- CopyBetweenBackingStores<Type##ArrayTraits>( \
- *source_elements, *destination_elements, length); \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS: \
+ CopyBetweenBackingStores<Type##ArrayTraits>( \
+ source_elements, destination_elements, length, offset); \
break;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
default:
@@ -3213,23 +3242,27 @@ class TypedElementsAccessor
}
}
- static bool HoleyPrototypeLookupRequired(Isolate* isolate,
- Handle<JSArray> source) {
+ static bool HoleyPrototypeLookupRequired(Isolate* isolate, Context* context,
+ JSArray* source) {
+ DisallowHeapAllocation no_gc;
+ DisallowJavascriptExecution no_js(isolate);
+
Object* source_proto = source->map()->prototype();
+
// Null prototypes are OK - we don't need to do prototype chain lookups on
// them.
if (source_proto->IsNull(isolate)) return false;
if (source_proto->IsJSProxy()) return true;
- DCHECK(source_proto->IsJSObject());
- if (!isolate->is_initial_array_prototype(JSObject::cast(source_proto))) {
+ if (!context->is_initial_array_prototype(JSObject::cast(source_proto))) {
return true;
}
- return !isolate->IsFastArrayConstructorPrototypeChainIntact();
+
+ return !isolate->IsFastArrayConstructorPrototypeChainIntact(context);
}
- static bool TryCopyElementsHandleFastNumber(Handle<JSArray> source,
- Handle<JSTypedArray> destination,
- size_t length) {
+ static bool TryCopyElementsFastNumber(Context* context, JSArray* source,
+ JSTypedArray* destination,
+ size_t length, uint32_t offset) {
Isolate* isolate = source->GetIsolate();
DisallowHeapAllocation no_gc;
DisallowJavascriptExecution no_js(isolate);
@@ -3242,7 +3275,7 @@ class TypedElementsAccessor
// When the array has the original array prototype, and that prototype has
// not been changed in a way that would affect lookups, we can just convert
// the hole into undefined.
- if (HoleyPrototypeLookupRequired(isolate, source)) return false;
+ if (HoleyPrototypeLookupRequired(isolate, context, source)) return false;
Object* undefined = isolate->heap()->undefined_value();
@@ -3254,19 +3287,19 @@ class TypedElementsAccessor
Object* elem = source_store->get(i);
DCHECK(elem->IsSmi());
int int_value = Smi::ToInt(elem);
- dest->set(i, dest->from(int_value));
+ dest->set(offset + i, dest->from(int_value));
}
return true;
} else if (kind == HOLEY_SMI_ELEMENTS) {
FixedArray* source_store = FixedArray::cast(source->elements());
for (uint32_t i = 0; i < length; i++) {
if (source_store->is_the_hole(isolate, i)) {
- dest->SetValue(i, undefined);
+ dest->SetValue(offset + i, undefined);
} else {
Object* elem = source_store->get(i);
DCHECK(elem->IsSmi());
int int_value = Smi::ToInt(elem);
- dest->set(i, dest->from(int_value));
+ dest->set(offset + i, dest->from(int_value));
}
}
return true;
@@ -3280,7 +3313,7 @@ class TypedElementsAccessor
// Use the from_double conversion for this specific TypedArray type,
// rather than relying on C++ to convert elem.
double elem = source_store->get_scalar(i);
- dest->set(i, dest->from(elem));
+ dest->set(offset + i, dest->from(elem));
}
return true;
} else if (kind == HOLEY_DOUBLE_ELEMENTS) {
@@ -3288,10 +3321,10 @@ class TypedElementsAccessor
FixedDoubleArray::cast(source->elements());
for (uint32_t i = 0; i < length; i++) {
if (source_store->is_the_hole(i)) {
- dest->SetValue(i, undefined);
+ dest->SetValue(offset + i, undefined);
} else {
double elem = source_store->get_scalar(i);
- dest->set(i, dest->from(elem));
+ dest->set(offset + i, dest->from(elem));
}
}
return true;
@@ -3301,7 +3334,7 @@ class TypedElementsAccessor
static Object* CopyElementsHandleSlow(Handle<JSReceiver> source,
Handle<JSTypedArray> destination,
- size_t length) {
+ size_t length, uint32_t offset) {
Isolate* isolate = source->GetIsolate();
Handle<BackingStore> destination_elements(
BackingStore::cast(destination->elements()));
@@ -3311,13 +3344,21 @@ class TypedElementsAccessor
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem,
Object::GetProperty(&it));
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem, Object::ToNumber(elem));
- // We don't need to check for buffer neutering here, because the
- // source cannot be a TypedArray.
+
+ if (V8_UNLIKELY(destination->WasNeutered())) {
+ const char* op = "set";
+ const MessageTemplate::Template message =
+ MessageTemplate::kDetachedOperation;
+ Handle<String> operation =
+ isolate->factory()->NewStringFromAsciiChecked(op);
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate,
+ NewTypeError(message, operation));
+ }
// The spec says we store the length, then get each element, so we don't
// need to check changes to length.
- destination_elements->SetValue(i, *elem);
+ destination_elements->SetValue(offset + i, *elem);
}
- return Smi::kZero;
+ return *isolate->factory()->undefined_value();
}
// This doesn't guarantee that the destination array will be completely
@@ -3325,28 +3366,32 @@ class TypedElementsAccessor
// that is required.
static Object* CopyElementsHandleImpl(Handle<JSReceiver> source,
Handle<JSObject> destination,
- size_t length) {
+ size_t length, uint32_t offset) {
+ Isolate* isolate = destination->GetIsolate();
Handle<JSTypedArray> destination_ta =
Handle<JSTypedArray>::cast(destination);
+ DCHECK_LE(offset + length, destination_ta->length_value());
+
+ if (length == 0) return *isolate->factory()->undefined_value();
// All conversions from TypedArrays can be done without allocation.
if (source->IsJSTypedArray()) {
Handle<JSTypedArray> source_ta = Handle<JSTypedArray>::cast(source);
- CopyElementsHandleFromTypedArray(source_ta, destination_ta, length);
- return Smi::kZero;
+ CopyElementsFromTypedArray(*source_ta, *destination_ta, length, offset);
+ return *isolate->factory()->undefined_value();
}
// Fast cases for packed numbers kinds where we don't need to allocate.
if (source->IsJSArray()) {
Handle<JSArray> source_array = Handle<JSArray>::cast(source);
- if (TryCopyElementsHandleFastNumber(source_array, destination_ta,
- length)) {
- return Smi::kZero;
+ if (TryCopyElementsFastNumber(isolate->context(), *source_array,
+ *destination_ta, length, offset)) {
+ return *isolate->factory()->undefined_value();
}
}
// Final generic case that handles prototype chain lookups, getters, proxies
// and observable side effects via valueOf, etc.
- return CopyElementsHandleSlow(source, destination_ta, length);
+ return CopyElementsHandleSlow(source, destination_ta, length, offset);
}
};
@@ -4282,6 +4327,43 @@ MaybeHandle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
return array;
}
+void CopyFastNumberJSArrayElementsToTypedArray(Context* context,
+ JSArray* source,
+ JSTypedArray* destination,
+ uintptr_t length,
+ uintptr_t offset) {
+ DCHECK(context->IsContext());
+ DCHECK(source->IsJSArray());
+ DCHECK(destination->IsJSTypedArray());
+
+ switch (destination->GetElementsKind()) {
+#define TYPED_ARRAYS_CASE(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS: \
+ CHECK(Fixed##Type##ElementsAccessor::TryCopyElementsFastNumber( \
+ context, source, destination, length, static_cast<uint32_t>(offset))); \
+ break;
+ TYPED_ARRAYS(TYPED_ARRAYS_CASE)
+#undef TYPED_ARRAYS_CASE
+ default:
+ UNREACHABLE();
+ }
+}
+
+void CopyTypedArrayElementsToTypedArray(JSTypedArray* source,
+ JSTypedArray* destination,
+ uintptr_t length, uintptr_t offset) {
+ switch (destination->GetElementsKind()) {
+#define TYPED_ARRAYS_CASE(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS: \
+ Fixed##Type##ElementsAccessor::CopyElementsFromTypedArray( \
+ source, destination, length, static_cast<uint32_t>(offset)); \
+ break;
+ TYPED_ARRAYS(TYPED_ARRAYS_CASE)
+#undef TYPED_ARRAYS_CASE
+ default:
+ UNREACHABLE();
+ }
+}
void ElementsAccessor::InitializeOncePerProcess() {
static ElementsAccessor* accessor_array[] = {
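
The recurring `dest->set(offset + i, ...)` edits above are all one change: every typed-array copy path now takes a destination offset, so callers (e.g. the new CopyTypedArrayElementsToTypedArray entry point) can write into the middle of the target. A standalone sketch of the indexing these loops implement, with the DCHECKs mirrored as asserts:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Copy `length` elements of src into dst starting at dst[offset].
    void CopyWithOffset(const std::vector<double>& src, std::vector<double>& dst,
                        size_t length, uint32_t offset) {
      assert(length <= src.size());
      assert(offset + length <= dst.size());  // cf. DCHECK_LE(offset + length, ...)
      for (size_t i = 0; i < length; i++) {
        dst[offset + i] = src[i];  // the diff's dest->set(offset + i, elem)
      }
    }

    int main() {
      std::vector<double> src = {1, 2, 3}, dst(5, 0);
      CopyWithOffset(src, dst, 3, 2);  // dst == {0, 0, 1, 2, 3}
    }
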
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index 9e64764bb0..3f81be0c51 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -50,6 +50,10 @@ class ElementsAccessor {
return HasElement(holder, index, holder->elements(), filter);
}
+ // Note: this is currently not implemented for string wrapper and
+ // typed array elements.
+ virtual bool HasEntry(JSObject* holder, uint32_t entry) = 0;
+
virtual Handle<Object> Get(Handle<JSObject> holder, uint32_t entry) = 0;
virtual PropertyDetails GetDetails(JSObject* holder, uint32_t entry) = 0;
@@ -188,10 +192,12 @@ class ElementsAccessor {
Handle<FixedArrayBase> destination, int size) = 0;
virtual Object* CopyElements(Handle<JSReceiver> source,
- Handle<JSObject> destination, size_t length) = 0;
+ Handle<JSObject> destination, size_t length,
+ uint32_t offset = 0) = 0;
- virtual Handle<FixedArray> CreateListFromArray(Isolate* isolate,
- Handle<JSArray> array) = 0;
+ virtual Handle<FixedArray> CreateListFromArrayLike(Isolate* isolate,
+ Handle<JSObject> object,
+ uint32_t length) = 0;
protected:
friend class LookupIterator;
@@ -231,6 +237,17 @@ MUST_USE_RESULT MaybeHandle<Object> ArrayConstructInitializeElements(
Handle<JSArray> array,
Arguments* args);
+// Called directly from CSA.
+class JSTypedArray;
+void CopyFastNumberJSArrayElementsToTypedArray(Context* context,
+ JSArray* source,
+ JSTypedArray* destination,
+ uintptr_t length,
+ uintptr_t offset);
+void CopyTypedArrayElementsToTypedArray(JSTypedArray* source,
+ JSTypedArray* destination,
+ uintptr_t length, uintptr_t offset);
+
} // namespace internal
} // namespace v8
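
Note that only the virtual CopyElements declaration carries `offset = 0`; the overrides restate the parameter without the default. That is the usual C++ convention, since default arguments are bound through the static type at the call site. A minimal sketch with hypothetical names:

    #include <cstdint>

    struct Accessor {
      virtual ~Accessor() = default;
      // The default argument lives on the interface declaration only.
      virtual int Copy(int length, uint32_t offset = 0) = 0;
    };

    struct FastAccessor final : Accessor {
      int Copy(int length, uint32_t offset) override {  // no default restated
        return length + static_cast<int>(offset);
      }
    };

    int main() {
      FastAccessor impl;
      Accessor* a = &impl;
      return a->Copy(4);  // default offset = 0 is bound via Accessor
    }
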
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index 17e63ff83b..d560512e07 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -4,6 +4,7 @@
#include "src/execution.h"
+#include "src/api.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
@@ -466,32 +467,73 @@ Object* StackGuard::HandleInterrupts() {
isolate_->heap()->MonotonicallyIncreasingTimeInMs();
}
+ bool any_interrupt_handled = false;
+ if (FLAG_trace_interrupts) {
+ PrintF("[Handling interrupts: ");
+ }
+
if (CheckAndClearInterrupt(GC_REQUEST)) {
+ if (FLAG_trace_interrupts) {
+ PrintF("GC_REQUEST");
+ any_interrupt_handled = true;
+ }
isolate_->heap()->HandleGCRequest();
}
if (CheckDebugBreak()) {
+ if (FLAG_trace_interrupts) {
+ if (any_interrupt_handled) PrintF(", ");
+ PrintF("DEBUG_BREAK");
+ any_interrupt_handled = true;
+ }
isolate_->debug()->HandleDebugBreak(kIgnoreIfTopFrameBlackboxed);
}
if (CheckAndClearInterrupt(TERMINATE_EXECUTION)) {
+ if (FLAG_trace_interrupts) {
+ if (any_interrupt_handled) PrintF(", ");
+ PrintF("TERMINATE_EXECUTION");
+ any_interrupt_handled = true;
+ }
return isolate_->TerminateExecution();
}
if (CheckAndClearInterrupt(DEOPT_MARKED_ALLOCATION_SITES)) {
+ if (FLAG_trace_interrupts) {
+ if (any_interrupt_handled) PrintF(", ");
+ PrintF("DEOPT_MARKED_ALLOCATION_SITES");
+ any_interrupt_handled = true;
+ }
isolate_->heap()->DeoptMarkedAllocationSites();
}
if (CheckAndClearInterrupt(INSTALL_CODE)) {
+ if (FLAG_trace_interrupts) {
+ if (any_interrupt_handled) PrintF(", ");
+ PrintF("INSTALL_CODE");
+ any_interrupt_handled = true;
+ }
DCHECK(isolate_->concurrent_recompilation_enabled());
isolate_->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
}
if (CheckAndClearInterrupt(API_INTERRUPT)) {
+ if (FLAG_trace_interrupts) {
+ if (any_interrupt_handled) PrintF(", ");
+ PrintF("API_INTERRUPT");
+ any_interrupt_handled = true;
+ }
// Callbacks must be invoked outside of the ExecutionAccess lock.
isolate_->InvokeApiInterruptCallbacks();
}
+ if (FLAG_trace_interrupts) {
+ if (!any_interrupt_handled) {
+ PrintF("No interrupt flags set");
+ }
+ PrintF("]\n");
+ }
+
isolate_->counters()->stack_interrupts()->Increment();
isolate_->counters()->runtime_profiler_ticks()->Increment();
isolate_->runtime_profiler()->MarkCandidatesForOptimization();
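
Assembled from the PrintF format strings in this hunk, a run with --trace-interrupts would emit one bracketed line per HandleInterrupts() call, along the lines of (illustrative):

    [Handling interrupts: GC_REQUEST, INSTALL_CODE]
    [Handling interrupts: No interrupt flags set]

The comma separation comes from the `if (any_interrupt_handled) PrintF(", ")` guard before every flag name except the first.
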
diff --git a/deps/v8/src/external-reference-table.cc b/deps/v8/src/external-reference-table.cc
index 18edc4289b..eeb668a25f 100644
--- a/deps/v8/src/external-reference-table.cc
+++ b/deps/v8/src/external-reference-table.cc
@@ -7,9 +7,8 @@
#include "src/accessors.h"
#include "src/assembler.h"
#include "src/counters.h"
-#include "src/deoptimizer.h"
#include "src/ic/stub-cache.h"
-#include "src/objects-inl.h"
+#include "src/trap-handler/trap-handler.h"
#if defined(DEBUG) && defined(V8_OS_LINUX) && !defined(V8_OS_ANDROID)
#define SYMBOLIZE_FUNCTION
@@ -60,6 +59,10 @@ const char* ExternalReferenceTable::ResolveSymbol(void* address) {
#endif // SYMBOLIZE_FUNCTION
}
+void ExternalReferenceTable::Add(Address address, const char* name) {
+ refs_.emplace_back(address, name);
+}
+
void ExternalReferenceTable::AddReferences(Isolate* isolate) {
// Miscellaneous
Add(ExternalReference::roots_array_start(isolate).address(),
@@ -89,6 +92,7 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate) {
Add(ExternalReference::address_of_one_half().address(),
"LDoubleConstant::one_half");
Add(ExternalReference::isolate_address(isolate).address(), "isolate");
+ Add(ExternalReference::builtins_address(isolate).address(), "builtins");
Add(ExternalReference::interpreter_dispatch_table_address(isolate).address(),
"Interpreter::dispatch_table_address");
Add(ExternalReference::bytecode_size_table_address(isolate).address(),
@@ -212,6 +216,15 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate) {
"wasm::word32_popcnt");
Add(ExternalReference::wasm_word64_popcnt(isolate).address(),
"wasm::word64_popcnt");
+ // If the trap handler is not supported, the optimizer will remove these
+ // runtime functions. In this case, the ARM simulator will break if we add
+ // them to the external reference table.
+#ifdef V8_TARGET_ARCH_X64
+ Add(ExternalReference::wasm_set_thread_in_wasm_flag(isolate).address(),
+ "wasm::set_thread_in_wasm_flag");
+ Add(ExternalReference::wasm_clear_thread_in_wasm_flag(isolate).address(),
+ "wasm::clear_thread_in_wasm_flag");
+#endif
Add(ExternalReference::f64_acos_wrapper_function(isolate).address(),
"f64_acos_wrapper");
Add(ExternalReference::f64_asin_wrapper_function(isolate).address(),
@@ -255,6 +268,13 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate) {
"orderedhashmap_gethash_raw");
Add(ExternalReference::get_or_create_hash_raw(isolate).address(),
"get_or_create_hash_raw");
+ Add(ExternalReference::copy_fast_number_jsarray_elements_to_typed_array(
+ isolate)
+ .address(),
+ "copy_fast_number_jsarray_elements_to_typed_array");
+ Add(ExternalReference::copy_typed_array_elements_to_typed_array(isolate)
+ .address(),
+ "copy_typed_array_elements_to_typed_array");
Add(ExternalReference::log_enter_external_function(isolate).address(),
"Logger::EnterExternal");
Add(ExternalReference::log_leave_external_function(isolate).address(),
diff --git a/deps/v8/src/external-reference-table.h b/deps/v8/src/external-reference-table.h
index 3b0c6cd899..b2275049a0 100644
--- a/deps/v8/src/external-reference-table.h
+++ b/deps/v8/src/external-reference-table.h
@@ -22,7 +22,7 @@ class ExternalReferenceTable {
public:
static ExternalReferenceTable* instance(Isolate* isolate);
- uint32_t size() const { return static_cast<uint32_t>(refs_.length()); }
+ uint32_t size() const { return static_cast<uint32_t>(refs_.size()); }
Address address(uint32_t i) { return refs_[i].address; }
const char* name(uint32_t i) { return refs_[i].name; }
@@ -32,14 +32,14 @@ class ExternalReferenceTable {
struct ExternalReferenceEntry {
Address address;
const char* name;
+
+ ExternalReferenceEntry(Address address, const char* name)
+ : address(address), name(name) {}
};
explicit ExternalReferenceTable(Isolate* isolate);
- void Add(Address address, const char* name) {
- ExternalReferenceEntry entry = {address, name};
- refs_.Add(entry);
- }
+ void Add(Address address, const char* name);
void AddReferences(Isolate* isolate);
void AddBuiltins(Isolate* isolate);
@@ -48,7 +48,7 @@ class ExternalReferenceTable {
void AddAccessors(Isolate* isolate);
void AddStubCache(Isolate* isolate);
- List<ExternalReferenceEntry> refs_;
+ std::vector<ExternalReferenceEntry> refs_;
DISALLOW_COPY_AND_ASSIGN(ExternalReferenceTable);
};
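
The new ExternalReferenceEntry constructor is what lets the out-of-line Add use `refs_.emplace_back(address, name)`: before C++20, emplace_back cannot aggregate-initialize a plain struct, so emplacement needs a real constructor. The pattern in miniature:

    #include <vector>

    struct Entry {
      const void* address;
      const char* name;
      // Without this constructor, refs.emplace_back(ptr, str) would not
      // compile under C++11/14/17 (aggregates cannot be emplaced until C++20).
      Entry(const void* address, const char* name)
          : address(address), name(name) {}
    };

    int main() {
      std::vector<Entry> refs;
      refs.emplace_back(nullptr, "isolate");  // constructs Entry in place
    }
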
diff --git a/deps/v8/src/factory-inl.h b/deps/v8/src/factory-inl.h
new file mode 100644
index 0000000000..a3c7a48e6e
--- /dev/null
+++ b/deps/v8/src/factory-inl.h
@@ -0,0 +1,137 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_FACTORY_INL_H_
+#define V8_FACTORY_INL_H_
+
+#include "src/factory.h"
+
+#include "src/handles-inl.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+#define ROOT_ACCESSOR(type, name, camel_name) \
+ Handle<type> Factory::name() { \
+ return Handle<type>(bit_cast<type**>( \
+ &isolate()->heap()->roots_[Heap::k##camel_name##RootIndex])); \
+ }
+ROOT_LIST(ROOT_ACCESSOR)
+#undef ROOT_ACCESSOR
+
+#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
+ Handle<Map> Factory::name##_map() { \
+ return Handle<Map>(bit_cast<Map**>( \
+ &isolate()->heap()->roots_[Heap::k##Name##MapRootIndex])); \
+ }
+STRUCT_LIST(STRUCT_MAP_ACCESSOR)
+#undef STRUCT_MAP_ACCESSOR
+
+#define STRING_ACCESSOR(name, str) \
+ Handle<String> Factory::name() { \
+ return Handle<String>(bit_cast<String**>( \
+ &isolate()->heap()->roots_[Heap::k##name##RootIndex])); \
+ }
+INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
+#undef STRING_ACCESSOR
+
+#define SYMBOL_ACCESSOR(name) \
+ Handle<Symbol> Factory::name() { \
+ return Handle<Symbol>(bit_cast<Symbol**>( \
+ &isolate()->heap()->roots_[Heap::k##name##RootIndex])); \
+ }
+PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
+#undef SYMBOL_ACCESSOR
+
+#define SYMBOL_ACCESSOR(name, description) \
+ Handle<Symbol> Factory::name() { \
+ return Handle<Symbol>(bit_cast<Symbol**>( \
+ &isolate()->heap()->roots_[Heap::k##name##RootIndex])); \
+ }
+PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
+WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
+#undef SYMBOL_ACCESSOR
+
+Handle<String> Factory::InternalizeString(Handle<String> string) {
+ if (string->IsInternalizedString()) return string;
+ return StringTable::LookupString(isolate(), string);
+}
+
+Handle<Name> Factory::InternalizeName(Handle<Name> name) {
+ if (name->IsUniqueName()) return name;
+ return StringTable::LookupString(isolate(), Handle<String>::cast(name));
+}
+
+Handle<String> Factory::NewSubString(Handle<String> str, int begin, int end) {
+ if (begin == 0 && end == str->length()) return str;
+ return NewProperSubString(str, begin, end);
+}
+
+Handle<Object> Factory::NewNumberFromSize(size_t value,
+ PretenureFlag pretenure) {
+ // We can't use Smi::IsValid() here because that operates on a signed
+ // intptr_t, and casting from size_t could create a bogus sign bit.
+ if (value <= static_cast<size_t>(Smi::kMaxValue)) {
+ return Handle<Object>(Smi::FromIntptr(static_cast<intptr_t>(value)),
+ isolate());
+ }
+ return NewNumber(static_cast<double>(value), pretenure);
+}
+
+Handle<Object> Factory::NewNumberFromInt64(int64_t value,
+ PretenureFlag pretenure) {
+ if (value <= std::numeric_limits<int32_t>::max() &&
+ value >= std::numeric_limits<int32_t>::min() &&
+ Smi::IsValid(static_cast<int32_t>(value))) {
+ return Handle<Object>(Smi::FromInt(static_cast<int32_t>(value)), isolate());
+ }
+ return NewNumber(static_cast<double>(value), pretenure);
+}
+
+Handle<HeapNumber> Factory::NewHeapNumber(double value, MutableMode mode,
+ PretenureFlag pretenure) {
+ Handle<HeapNumber> heap_number = NewHeapNumber(mode, pretenure);
+ heap_number->set_value(value);
+ return heap_number;
+}
+
+Handle<HeapNumber> Factory::NewHeapNumberFromBits(uint64_t bits,
+ MutableMode mode,
+ PretenureFlag pretenure) {
+ Handle<HeapNumber> heap_number = NewHeapNumber(mode, pretenure);
+ heap_number->set_value_as_bits(bits);
+ return heap_number;
+}
+
+Handle<HeapNumber> Factory::NewMutableHeapNumber(PretenureFlag pretenure) {
+ return NewHeapNumberFromBits(kHoleNanInt64, MUTABLE, pretenure);
+}
+
+Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
+ ElementsKind elements_kind,
+ PretenureFlag pretenure) {
+ return NewJSArrayWithElements(elements, elements_kind, elements->length(),
+ pretenure);
+}
+
+Handle<Object> Factory::NewURIError() {
+ return NewError(isolate()->uri_error_function(),
+ MessageTemplate::kURIMalformed);
+}
+
+Handle<String> Factory::Uint32ToString(uint32_t value) {
+ Handle<String> result = NumberToString(NewNumberFromUint(value));
+
+ if (result->length() <= String::kMaxArrayIndexSize) {
+ uint32_t field = StringHasher::MakeArrayIndexHash(value, result->length());
+ result->set_hash_field(field);
+ }
+ return result;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_FACTORY_INL_H_
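
The new factory-inl.h follows V8's usual header split: the class definition in factory.h keeps only declarations (see the factory.h hunks below, where each inline body is replaced by an `inline` declaration), and translation units that actually call the accessors include the -inl header instead, as feedback-vector-inl.h now does. A schematic of the split with hypothetical names:

    // widget.h -- cheap to include everywhere; declarations only.
    class Widget {
     public:
      inline int size() const;
     private:
      int size_ = 0;
    };

    // widget-inl.h -- include only where size() is actually called.
    inline int Widget::size() const { return size_; }

    int main() { return Widget().size(); }

The payoff is compile time: widely included headers stay small, and the heavy objects-inl.h/handles-inl.h dependencies are pulled in only by the few files that need the definitions.
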
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 27a0541bb4..7710b0c788 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -14,10 +14,13 @@
#include "src/conversions.h"
#include "src/isolate-inl.h"
#include "src/macro-assembler.h"
+#include "src/objects/bigint-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/frame-array-inl.h"
#include "src/objects/module.h"
#include "src/objects/scope-info.h"
+#include "src/unicode-cache.h"
+#include "src/unicode-decoder.h"
namespace v8 {
namespace internal {
@@ -94,7 +97,7 @@ Handle<HeapObject> Factory::NewFillerObject(int size,
Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
Handle<PrototypeInfo> result =
- Handle<PrototypeInfo>::cast(NewStruct(PROTOTYPE_INFO_TYPE));
+ Handle<PrototypeInfo>::cast(NewStruct(PROTOTYPE_INFO_TYPE, TENURED));
result->set_prototype_users(WeakFixedArray::Empty());
result->set_registry_slot(PrototypeInfo::UNREGISTERED);
result->set_validity_cell(Smi::kZero);
@@ -102,17 +105,25 @@ Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
return result;
}
-Handle<Tuple2> Factory::NewTuple2(Handle<Object> value1,
- Handle<Object> value2) {
- Handle<Tuple2> result = Handle<Tuple2>::cast(NewStruct(TUPLE2_TYPE));
+Handle<EnumCache> Factory::NewEnumCache(Handle<FixedArray> keys,
+ Handle<FixedArray> indices) {
+ return Handle<EnumCache>::cast(NewTuple2(keys, indices, TENURED));
+}
+
+Handle<Tuple2> Factory::NewTuple2(Handle<Object> value1, Handle<Object> value2,
+ PretenureFlag pretenure) {
+ Handle<Tuple2> result =
+ Handle<Tuple2>::cast(NewStruct(TUPLE2_TYPE, pretenure));
result->set_value1(*value1);
result->set_value2(*value2);
return result;
}
Handle<Tuple3> Factory::NewTuple3(Handle<Object> value1, Handle<Object> value2,
- Handle<Object> value3) {
- Handle<Tuple3> result = Handle<Tuple3>::cast(NewStruct(TUPLE3_TYPE));
+ Handle<Object> value3,
+ PretenureFlag pretenure) {
+ Handle<Tuple3> result =
+ Handle<Tuple3>::cast(NewStruct(TUPLE3_TYPE, pretenure));
result->set_value1(*value1);
result->set_value2(*value2);
result->set_value3(*value3);
@@ -121,8 +132,8 @@ Handle<Tuple3> Factory::NewTuple3(Handle<Object> value1, Handle<Object> value2,
Handle<ContextExtension> Factory::NewContextExtension(
Handle<ScopeInfo> scope_info, Handle<Object> extension) {
- Handle<ContextExtension> result =
- Handle<ContextExtension>::cast(NewStruct(CONTEXT_EXTENSION_TYPE));
+ Handle<ContextExtension> result = Handle<ContextExtension>::cast(
+ NewStruct(CONTEXT_EXTENSION_TYPE, TENURED));
result->set_scope_info(*scope_info);
result->set_extension(*extension);
return result;
@@ -131,12 +142,25 @@ Handle<ContextExtension> Factory::NewContextExtension(
Handle<ConstantElementsPair> Factory::NewConstantElementsPair(
ElementsKind elements_kind, Handle<FixedArrayBase> constant_values) {
Handle<ConstantElementsPair> result =
- Handle<ConstantElementsPair>::cast(NewStruct(TUPLE2_TYPE));
+ Handle<ConstantElementsPair>::cast(NewStruct(TUPLE2_TYPE, TENURED));
result->set_elements_kind(elements_kind);
result->set_constant_values(*constant_values);
return result;
}
+Handle<TemplateObjectDescription> Factory::NewTemplateObjectDescription(
+ int hash, Handle<FixedArray> raw_strings,
+ Handle<FixedArray> cooked_strings) {
+ DCHECK_EQ(raw_strings->length(), cooked_strings->length());
+ DCHECK_LT(0, raw_strings->length());
+ Handle<TemplateObjectDescription> result =
+ Handle<TemplateObjectDescription>::cast(NewStruct(TUPLE3_TYPE, TENURED));
+ result->set_hash(hash);
+ result->set_raw_strings(*raw_strings);
+ result->set_cooked_strings(*cooked_strings);
+ return result;
+}
+
Handle<Oddball> Factory::NewOddball(Handle<Map> map, const char* to_string,
Handle<Object> to_number,
const char* type_of, byte kind) {
@@ -157,6 +181,7 @@ Handle<FixedArray> Factory::NewFixedArray(int size, PretenureFlag pretenure) {
Handle<PropertyArray> Factory::NewPropertyArray(int size,
PretenureFlag pretenure) {
DCHECK_LE(0, size);
+ if (size == 0) return empty_property_array();
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->AllocatePropertyArray(size, pretenure),
PropertyArray);
@@ -288,7 +313,7 @@ Handle<OrderedHashMap> Factory::NewOrderedHashMap() {
Handle<AccessorPair> Factory::NewAccessorPair() {
Handle<AccessorPair> accessors =
- Handle<AccessorPair>::cast(NewStruct(ACCESSOR_PAIR_TYPE));
+ Handle<AccessorPair>::cast(NewStruct(ACCESSOR_PAIR_TYPE, TENURED));
accessors->set_getter(*null_value(), SKIP_WRITE_BARRIER);
accessors->set_setter(*null_value(), SKIP_WRITE_BARRIER);
return accessors;
@@ -297,7 +322,7 @@ Handle<AccessorPair> Factory::NewAccessorPair() {
Handle<TypeFeedbackInfo> Factory::NewTypeFeedbackInfo() {
Handle<TypeFeedbackInfo> info =
- Handle<TypeFeedbackInfo>::cast(NewStruct(TUPLE3_TYPE));
+ Handle<TypeFeedbackInfo>::cast(NewStruct(TUPLE3_TYPE, TENURED));
info->initialize_storage();
return info;
}
@@ -811,7 +836,7 @@ Handle<String> Factory::NewProperSubString(Handle<String> str,
return MakeOrFindTwoCharacterString(isolate(), c1, c2);
}
- if (!FLAG_string_slices || length < SlicedString::kMinLength) {
+ if (length < SlicedString::kMinLength) {
if (str->IsOneByteRepresentation()) {
Handle<SeqOneByteString> result =
NewRawOneByteString(length).ToHandleChecked();
@@ -1104,17 +1129,15 @@ Handle<Context> Factory::NewBlockContext(Handle<JSFunction> function,
return context;
}
-Handle<Struct> Factory::NewStruct(InstanceType type) {
+Handle<Struct> Factory::NewStruct(InstanceType type, PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateStruct(type),
- Struct);
+ isolate(), isolate()->heap()->AllocateStruct(type, pretenure), Struct);
}
Handle<AliasedArgumentsEntry> Factory::NewAliasedArgumentsEntry(
int aliased_context_slot) {
Handle<AliasedArgumentsEntry> entry = Handle<AliasedArgumentsEntry>::cast(
- NewStruct(ALIASED_ARGUMENTS_ENTRY_TYPE));
+ NewStruct(ALIASED_ARGUMENTS_ENTRY_TYPE, NOT_TENURED));
entry->set_aliased_context_slot(aliased_context_slot);
return entry;
}
@@ -1122,7 +1145,7 @@ Handle<AliasedArgumentsEntry> Factory::NewAliasedArgumentsEntry(
Handle<AccessorInfo> Factory::NewAccessorInfo() {
Handle<AccessorInfo> info =
- Handle<AccessorInfo>::cast(NewStruct(ACCESSOR_INFO_TYPE));
+ Handle<AccessorInfo>::cast(NewStruct(ACCESSOR_INFO_TYPE, TENURED));
info->set_flag(0); // Must clear the flag, it was initialized as undefined.
info->set_is_sloppy(true);
return info;
@@ -1132,7 +1155,7 @@ Handle<AccessorInfo> Factory::NewAccessorInfo() {
Handle<Script> Factory::NewScript(Handle<String> source) {
// Create and initialize script object.
Heap* heap = isolate()->heap();
- Handle<Script> script = Handle<Script>::cast(NewStruct(SCRIPT_TYPE));
+ Handle<Script> script = Handle<Script>::cast(NewStruct(SCRIPT_TYPE, TENURED));
script->set_source(*source);
script->set_name(heap->undefined_value());
script->set_id(isolate()->heap()->NextScriptId());
@@ -1309,6 +1332,7 @@ Handle<FixedArray> Factory::CopyFixedArrayAndGrow(Handle<FixedArray> array,
Handle<PropertyArray> Factory::CopyPropertyArrayAndGrow(
Handle<PropertyArray> array, int grow_by, PretenureFlag pretenure) {
+ DCHECK_LE(0, grow_by);
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->CopyArrayAndGrow(*array, grow_by, pretenure),
@@ -1389,6 +1413,34 @@ Handle<HeapNumber> Factory::NewHeapNumber(MutableMode mode,
HeapNumber);
}
+Handle<BigInt> Factory::NewBigInt(int length, PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateBigInt(length, true, pretenure),
+ BigInt);
+}
+
+Handle<BigInt> Factory::NewBigIntRaw(int length, PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(
+ isolate(), isolate()->heap()->AllocateBigInt(length, false, pretenure),
+ BigInt);
+}
+
+Handle<BigInt> Factory::NewBigIntFromInt(int value, PretenureFlag pretenure) {
+ if (value == 0) return NewBigInt(0);
+ Handle<BigInt> result = NewBigIntRaw(1);
+ if (value > 0) {
+ result->set_digit(0, value);
+ } else if (value == kMinInt) {
+ STATIC_ASSERT(kMinInt == -kMaxInt - 1);
+ result->set_digit(0, static_cast<BigInt::digit_t>(kMaxInt) + 1);
+ result->set_sign(true);
+ } else {
+ result->set_digit(0, -value);
+ result->set_sign(true);
+ }
+ return result;
+}
+
Handle<Object> Factory::NewError(Handle<JSFunction> constructor,
MessageTemplate::Template template_index,
Handle<Object> arg0, Handle<Object> arg1,
@@ -1482,7 +1534,6 @@ Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
function->set_context(*context_or_undefined);
function->set_prototype_or_initial_map(*the_hole_value());
function->set_feedback_vector_cell(*undefined_cell());
- function->set_next_function_link(*undefined_value(), SKIP_WRITE_BARRIER);
isolate()->heap()->InitializeJSObjectBody(*function, *map, JSFunction::kSize);
return function;
}
@@ -1572,7 +1623,7 @@ Handle<JSFunction> Factory::NewFunction(Handle<String> name, Handle<Code> code,
NewFunction(name, code, prototype, language_mode, prototype_mutability);
ElementsKind elements_kind =
- type == JS_ARRAY_TYPE ? PACKED_SMI_ELEMENTS : HOLEY_SMI_ELEMENTS;
+ type == JS_ARRAY_TYPE ? PACKED_SMI_ELEMENTS : TERMINAL_FAST_ELEMENTS_KIND;
Handle<Map> initial_map = NewMap(type, instance_size, elements_kind);
// TODO(littledan): Why do we have this is_generator test when
// NewFunctionPrototype already handles finding an appropriately
@@ -1709,8 +1760,8 @@ Handle<ModuleInfo> Factory::NewModuleInfo() {
Handle<PreParsedScopeData> Factory::NewPreParsedScopeData() {
Handle<PreParsedScopeData> result =
- Handle<PreParsedScopeData>::cast(NewStruct(TUPLE2_TYPE));
- result->set_scope_data(PodArray<uint32_t>::cast(*empty_byte_array()));
+ Handle<PreParsedScopeData>::cast(NewStruct(TUPLE2_TYPE, TENURED));
+ result->set_scope_data(PodArray<uint8_t>::cast(*empty_byte_array()));
result->set_child_data(*empty_fixed_array());
return result;
}
@@ -1729,9 +1780,8 @@ Handle<Code> Factory::NewCodeRaw(int object_size, bool immovable) {
Code);
}
-Handle<Code> Factory::NewCode(const CodeDesc& desc, Code::Flags flags,
- Handle<Object> self_ref, bool immovable,
- int prologue_offset) {
+Handle<Code> Factory::NewCode(const CodeDesc& desc, Code::Kind kind,
+ Handle<Object> self_ref, bool immovable) {
Handle<ByteArray> reloc_info = NewByteArray(desc.reloc_size, TENURED);
bool has_unwinding_info = desc.unwinding_info != nullptr;
@@ -1758,7 +1808,7 @@ Handle<Code> Factory::NewCode(const CodeDesc& desc, Code::Flags flags,
DisallowHeapAllocation no_gc;
code->set_instruction_size(desc.instr_size);
code->set_relocation_info(*reloc_info);
- code->set_flags(flags);
+ code->initialize_flags(kind);
code->set_has_unwinding_info(has_unwinding_info);
code->set_raw_kind_specific_flags1(0);
code->set_raw_kind_specific_flags2(0);
@@ -1768,7 +1818,6 @@ Handle<Code> Factory::NewCode(const CodeDesc& desc, Code::Flags flags,
code->set_next_code_link(*undefined_value(), SKIP_WRITE_BARRIER);
code->set_handler_table(*empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_source_position_table(*empty_byte_array(), SKIP_WRITE_BARRIER);
- code->set_prologue_offset(prologue_offset);
code->set_constant_pool_offset(desc.instr_size - desc.constant_pool_size);
code->set_builtin_index(-1);
code->set_trap_handler_index(Smi::FromInt(-1));
@@ -1804,6 +1853,10 @@ Handle<Code> Factory::NewCode(const CodeDesc& desc, Code::Flags flags,
return code;
}
+Handle<Code> Factory::NewCodeForDeserialization(uint32_t size) {
+ const bool kNotImmovable = false;
+ return NewCodeRaw(size, kNotImmovable);
+}
Handle<Code> Factory::CopyCode(Handle<Code> code) {
CALL_HEAP_FUNCTION(isolate(),
@@ -2031,7 +2084,7 @@ Handle<Module> Factory::NewModule(Handle<SharedFunctionInfo> code) {
requested_modules_length > 0 ? NewFixedArray(requested_modules_length)
: empty_fixed_array();
- Handle<Module> module = Handle<Module>::cast(NewStruct(MODULE_TYPE));
+ Handle<Module> module = Handle<Module>::cast(NewStruct(MODULE_TYPE, TENURED));
module->set_code(*code);
module->set_exports(*exports);
module->set_regular_exports(*regular_exports);
@@ -2516,11 +2569,15 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->set_raw_name(has_shared_name
? *shared_name
: SharedFunctionInfo::kNoSharedNameSentinel);
- share->set_function_data(*undefined_value(), SKIP_WRITE_BARRIER);
Handle<Code> code;
if (!maybe_code.ToHandle(&code)) {
code = BUILTIN_CODE(isolate(), Illegal);
}
+ Object* function_data =
+ (code->is_builtin() && Builtins::IsLazy(code->builtin_index()))
+ ? Smi::FromInt(code->builtin_index())
+ : Object::cast(*undefined_value());
+ share->set_function_data(function_data, SKIP_WRITE_BARRIER);
share->set_code(*code);
share->set_scope_info(ScopeInfo::Empty(isolate()));
share->set_outer_scope_info(*the_hole_value());
@@ -2640,7 +2697,7 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
Heap* heap = isolate()->heap();
Handle<DebugInfo> debug_info =
- Handle<DebugInfo>::cast(NewStruct(DEBUG_INFO_TYPE));
+ Handle<DebugInfo>::cast(NewStruct(DEBUG_INFO_TYPE, TENURED));
debug_info->set_flags(DebugInfo::kNone);
debug_info->set_shared(*shared);
debug_info->set_debugger_hints(shared->debugger_hints());
@@ -2671,7 +2728,7 @@ Handle<CoverageInfo> Factory::NewCoverageInfo(
Handle<BreakPointInfo> Factory::NewBreakPointInfo(int source_position) {
Handle<BreakPointInfo> new_break_point_info =
- Handle<BreakPointInfo>::cast(NewStruct(TUPLE2_TYPE));
+ Handle<BreakPointInfo>::cast(NewStruct(TUPLE2_TYPE, TENURED));
new_break_point_info->set_source_position(source_position);
new_break_point_info->set_break_point_objects(*undefined_value());
return new_break_point_info;
@@ -2679,15 +2736,15 @@ Handle<BreakPointInfo> Factory::NewBreakPointInfo(int source_position) {
Handle<BreakPoint> Factory::NewBreakPoint(int id, Handle<String> condition) {
Handle<BreakPoint> new_break_point =
- Handle<BreakPoint>::cast(NewStruct(TUPLE2_TYPE));
+ Handle<BreakPoint>::cast(NewStruct(TUPLE2_TYPE, TENURED));
new_break_point->set_id(id);
new_break_point->set_condition(*condition);
return new_break_point;
}
Handle<StackFrameInfo> Factory::NewStackFrameInfo() {
- Handle<StackFrameInfo> stack_frame_info =
- Handle<StackFrameInfo>::cast(NewStruct(STACK_FRAME_INFO_TYPE));
+ Handle<StackFrameInfo> stack_frame_info = Handle<StackFrameInfo>::cast(
+ NewStruct(STACK_FRAME_INFO_TYPE, NOT_TENURED));
stack_frame_info->set_line_number(0);
stack_frame_info->set_column_number(0);
stack_frame_info->set_script_id(0);
@@ -2705,7 +2762,7 @@ Factory::NewSourcePositionTableWithFrameCache(
Handle<SourcePositionTableWithFrameCache>
source_position_table_with_frame_cache =
Handle<SourcePositionTableWithFrameCache>::cast(
- NewStruct(TUPLE2_TYPE));
+ NewStruct(TUPLE2_TYPE, TENURED));
source_position_table_with_frame_cache->set_source_position_table(
*source_position_table);
source_position_table_with_frame_cache->set_stack_frame_cache(
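
The kMinInt special case in NewBigIntFromInt exists because `-value` overflows for the most negative int: with 32-bit two's-complement ints, kMinInt is -2147483648 and its magnitude is not representable as an int, so the diff spells the digit as kMaxInt + 1. A standalone check of that arithmetic:

    #include <cstdint>
    #include <limits>

    int main() {
      constexpr int kMinInt = std::numeric_limits<int>::min();  // -2147483648
      constexpr int kMaxInt = std::numeric_limits<int>::max();  //  2147483647
      static_assert(kMinInt == -kMaxInt - 1, "two's complement");
      // The magnitude of kMinInt, computed without negating kMinInt itself
      // (which would be undefined behavior):
      constexpr uint64_t digit = static_cast<uint64_t>(kMaxInt) + 1;
      static_assert(digit == 2147483648ull, "");
      return 0;
    }
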
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 6e2a25fc1f..3fe2a79d86 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -18,7 +18,9 @@
namespace v8 {
namespace internal {
+// Forward declarations.
class AliasedArgumentsEntry;
+class BigInt;
class BreakPointInfo;
class BreakPoint;
class BoilerplateDescription;
@@ -28,6 +30,7 @@ class DebugInfo;
class JSModuleNamespace;
struct SourceRange;
class PreParsedScopeData;
+class TemplateObjectDescription;
enum FunctionMode {
kWithNameBit = 1 << 0,
@@ -126,12 +129,17 @@ class V8_EXPORT_PRIVATE Factory final {
// Create a new PrototypeInfo struct.
Handle<PrototypeInfo> NewPrototypeInfo();
+ // Create a new EnumCache struct.
+ Handle<EnumCache> NewEnumCache(Handle<FixedArray> keys,
+ Handle<FixedArray> indices);
+
// Create a new Tuple2 struct.
- Handle<Tuple2> NewTuple2(Handle<Object> value1, Handle<Object> value2);
+ Handle<Tuple2> NewTuple2(Handle<Object> value1, Handle<Object> value2,
+ PretenureFlag pretenure);
// Create a new Tuple3 struct.
Handle<Tuple3> NewTuple3(Handle<Object> value1, Handle<Object> value2,
- Handle<Object> value3);
+ Handle<Object> value3, PretenureFlag pretenure);
// Create a new ContextExtension struct.
Handle<ContextExtension> NewContextExtension(Handle<ScopeInfo> scope_info,
@@ -141,6 +149,11 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<ConstantElementsPair> NewConstantElementsPair(
ElementsKind elements_kind, Handle<FixedArrayBase> constant_values);
+ // Create a new TemplateObjectDescription struct.
+ Handle<TemplateObjectDescription> NewTemplateObjectDescription(
+ int hash, Handle<FixedArray> raw_strings,
+ Handle<FixedArray> cooked_strings);
+
// Create a pre-tenured empty AccessorPair.
Handle<AccessorPair> NewAccessorPair();
@@ -164,15 +177,9 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<String> InternalizeStringWithKey(StringTableKey* key);
// Internalized strings are created in the old generation (data space).
- Handle<String> InternalizeString(Handle<String> string) {
- if (string->IsInternalizedString()) return string;
- return StringTable::LookupString(isolate(), string);
- }
+ inline Handle<String> InternalizeString(Handle<String> string);
- Handle<Name> InternalizeName(Handle<Name> name) {
- if (name->IsUniqueName()) return name;
- return StringTable::LookupString(isolate(), Handle<String>::cast(name));
- }
+ inline Handle<Name> InternalizeName(Handle<Name> name);
// String creation functions. Most of the string creation functions take
// a Heap::PretenureFlag argument to optionally request that they be
@@ -289,10 +296,7 @@ class V8_EXPORT_PRIVATE Factory final {
int end);
// Create a new string object which holds a substring of a string.
- Handle<String> NewSubString(Handle<String> str, int begin, int end) {
- if (begin == 0 && end == str->length()) return str;
- return NewProperSubString(str, begin, end);
- }
+ inline Handle<String> NewSubString(Handle<String> str, int begin, int end);
// Creates a new external String object. There are two String encodings
// in the system: one-byte and two-byte. Unlike other String types, it does
@@ -355,9 +359,8 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<Context> previous,
Handle<ScopeInfo> scope_info);
- // Allocate a new struct. The struct is pretenured (allocated directly in
- // the old generation).
- Handle<Struct> NewStruct(InstanceType type);
+ Handle<Struct> NewStruct(InstanceType type,
+ PretenureFlag pretenure = NOT_TENURED);
Handle<AliasedArgumentsEntry> NewAliasedArgumentsEntry(
int aliased_context_slot);
@@ -460,49 +463,32 @@ class V8_EXPORT_PRIVATE Factory final {
PretenureFlag pretenure = NOT_TENURED);
Handle<Object> NewNumberFromUint(uint32_t value,
PretenureFlag pretenure = NOT_TENURED);
- Handle<Object> NewNumberFromSize(size_t value,
- PretenureFlag pretenure = NOT_TENURED) {
- // We can't use Smi::IsValid() here because that operates on a signed
- // intptr_t, and casting from size_t could create a bogus sign bit.
- if (value <= static_cast<size_t>(Smi::kMaxValue)) {
- return Handle<Object>(Smi::FromIntptr(static_cast<intptr_t>(value)),
- isolate());
- }
- return NewNumber(static_cast<double>(value), pretenure);
- }
- Handle<Object> NewNumberFromInt64(int64_t value,
- PretenureFlag pretenure = NOT_TENURED) {
- if (value <= std::numeric_limits<int32_t>::max() &&
- value >= std::numeric_limits<int32_t>::min() &&
- Smi::IsValid(static_cast<int32_t>(value))) {
- return Handle<Object>(Smi::FromInt(static_cast<int32_t>(value)),
- isolate());
- }
- return NewNumber(static_cast<double>(value), pretenure);
- }
- Handle<HeapNumber> NewHeapNumber(double value, MutableMode mode = IMMUTABLE,
- PretenureFlag pretenure = NOT_TENURED) {
- Handle<HeapNumber> heap_number = NewHeapNumber(mode, pretenure);
- heap_number->set_value(value);
- return heap_number;
- }
- Handle<HeapNumber> NewHeapNumberFromBits(
+ inline Handle<Object> NewNumberFromSize(
+ size_t value, PretenureFlag pretenure = NOT_TENURED);
+ inline Handle<Object> NewNumberFromInt64(
+ int64_t value, PretenureFlag pretenure = NOT_TENURED);
+ inline Handle<HeapNumber> NewHeapNumber(
+ double value, MutableMode mode = IMMUTABLE,
+ PretenureFlag pretenure = NOT_TENURED);
+ inline Handle<HeapNumber> NewHeapNumberFromBits(
uint64_t bits, MutableMode mode = IMMUTABLE,
- PretenureFlag pretenure = NOT_TENURED) {
- Handle<HeapNumber> heap_number = NewHeapNumber(mode, pretenure);
- heap_number->set_value_as_bits(bits);
- return heap_number;
- }
+ PretenureFlag pretenure = NOT_TENURED);
// Creates a mutable heap number object with its value field set to the hole NaN.
- Handle<HeapNumber> NewMutableHeapNumber(
- PretenureFlag pretenure = NOT_TENURED) {
- return NewHeapNumberFromBits(kHoleNanInt64, MUTABLE, pretenure);
- }
+ inline Handle<HeapNumber> NewMutableHeapNumber(
+ PretenureFlag pretenure = NOT_TENURED);
// Creates a heap number object whose value field is not yet set.
Handle<HeapNumber> NewHeapNumber(MutableMode mode,
PretenureFlag pretenure = NOT_TENURED);
+ // Allocates a new BigInt with {length} digits and zero-initializes them.
+ Handle<BigInt> NewBigInt(int length, PretenureFlag pretenure = NOT_TENURED);
+ // Initializes length and sign fields, but leaves digits uninitialized.
+ Handle<BigInt> NewBigIntRaw(int length,
+ PretenureFlag pretenure = NOT_TENURED);
+ Handle<BigInt> NewBigIntFromInt(int value,
+ PretenureFlag pretenure = NOT_TENURED);
+
Handle<JSWeakMap> NewJSWeakMap();
Handle<JSObject> NewArgumentsObject(Handle<JSFunction> callee, int length);
@@ -553,13 +539,10 @@ class V8_EXPORT_PRIVATE Factory final {
ElementsKind elements_kind, int length,
PretenureFlag pretenure = NOT_TENURED);
- Handle<JSArray> NewJSArrayWithElements(
+ inline Handle<JSArray> NewJSArrayWithElements(
Handle<FixedArrayBase> elements,
ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
- PretenureFlag pretenure = NOT_TENURED) {
- return NewJSArrayWithElements(elements, elements_kind, elements->length(),
- pretenure);
- }
+ PretenureFlag pretenure = NOT_TENURED);
void NewJSArrayStorage(
Handle<JSArray> array,
@@ -689,9 +672,12 @@ class V8_EXPORT_PRIVATE Factory final {
// The reference to the Code object is stored in self_reference.
// This allows generated code to reference its own Code object
// by containing this handle.
- Handle<Code> NewCode(const CodeDesc& desc, Code::Flags flags,
- Handle<Object> self_reference, bool immovable = false,
- int prologue_offset = Code::kPrologueOffsetNotSet);
+ Handle<Code> NewCode(const CodeDesc& desc, Code::Kind kind,
+ Handle<Object> self_reference, bool immovable = false);
+
+ // Allocates a new, empty code object for use by builtin deserialization. The
+ // given {size} argument specifies the size of the entire code object.
+ Handle<Code> NewCodeForDeserialization(uint32_t size);
Handle<Code> CopyCode(Handle<Code> code);
@@ -703,10 +689,7 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<Object> NewInvalidStringLengthError();
- Handle<Object> NewURIError() {
- return NewError(isolate()->uri_error_function(),
- MessageTemplate::kURIMalformed);
- }
+ inline Handle<Object> NewURIError();
Handle<Object> NewError(Handle<JSFunction> constructor,
MessageTemplate::Template template_index,
@@ -733,56 +716,27 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<String> NumberToString(Handle<Object> number,
bool check_number_string_cache = true);
- Handle<String> Uint32ToString(uint32_t value) {
- Handle<String> result = NumberToString(NewNumberFromUint(value));
-
- if (result->length() <= String::kMaxArrayIndexSize) {
- uint32_t field =
- StringHasher::MakeArrayIndexHash(value, result->length());
- result->set_hash_field(field);
- }
- return result;
- }
+ inline Handle<String> Uint32ToString(uint32_t value);
Handle<JSFunction> InstallMembers(Handle<JSFunction> function);
-#define ROOT_ACCESSOR(type, name, camel_name) \
- inline Handle<type> name() { \
- return Handle<type>(bit_cast<type**>( \
- &isolate()->heap()->roots_[Heap::k##camel_name##RootIndex])); \
- }
+#define ROOT_ACCESSOR(type, name, camel_name) inline Handle<type> name();
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
-#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
- inline Handle<Map> name##_map() { \
- return Handle<Map>(bit_cast<Map**>( \
- &isolate()->heap()->roots_[Heap::k##Name##MapRootIndex])); \
- }
+#define STRUCT_MAP_ACCESSOR(NAME, Name, name) inline Handle<Map> name##_map();
STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR
-#define STRING_ACCESSOR(name, str) \
- inline Handle<String> name() { \
- return Handle<String>(bit_cast<String**>( \
- &isolate()->heap()->roots_[Heap::k##name##RootIndex])); \
- }
+#define STRING_ACCESSOR(name, str) inline Handle<String> name();
INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
#undef STRING_ACCESSOR
-#define SYMBOL_ACCESSOR(name) \
- inline Handle<Symbol> name() { \
- return Handle<Symbol>(bit_cast<Symbol**>( \
- &isolate()->heap()->roots_[Heap::k##name##RootIndex])); \
- }
+#define SYMBOL_ACCESSOR(name) inline Handle<Symbol> name();
PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR
-#define SYMBOL_ACCESSOR(name, description) \
- inline Handle<Symbol> name() { \
- return Handle<Symbol>(bit_cast<Symbol**>( \
- &isolate()->heap()->roots_[Heap::k##name##RootIndex])); \
- }
+#define SYMBOL_ACCESSOR(name, description) inline Handle<Symbol> name();
PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR
@@ -886,7 +840,7 @@ class V8_EXPORT_PRIVATE Factory final {
PretenureFlag pretenure);
// Creates a code object that is not yet fully initialized.
- inline Handle<Code> NewCodeRaw(int object_size, bool immovable);
+ Handle<Code> NewCodeRaw(int object_size, bool immovable);
// Attempt to find the number in a small cache. If we find it, return
// the string representation of the number. Otherwise return undefined.
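
The TENURED/NOT_TENURED churn across the factory hunks traces back to one signature: NewStruct now takes a PretenureFlag defaulting to NOT_TENURED, so each call site states whether the struct is long-lived (TENURED, old space) or short-lived (the default, new space). A toy sketch of the defaulted-flag pattern (hypothetical allocator, not the V8 heap API):

    #include <cstdio>

    enum PretenureFlag { NOT_TENURED, TENURED };

    // Hypothetical allocator: the flag picks the target space.
    const char* AllocateIn(PretenureFlag pretenure = NOT_TENURED) {
      return pretenure == TENURED ? "old space" : "new space";
    }

    int main() {
      std::printf("%s\n", AllocateIn());         // new space (default)
      std::printf("%s\n", AllocateIn(TENURED));  // old space
    }
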
diff --git a/deps/v8/src/feedback-vector-inl.h b/deps/v8/src/feedback-vector-inl.h
index b304442136..54d287a3b7 100644
--- a/deps/v8/src/feedback-vector-inl.h
+++ b/deps/v8/src/feedback-vector-inl.h
@@ -5,7 +5,7 @@
#ifndef V8_FEEDBACK_VECTOR_INL_H_
#define V8_FEEDBACK_VECTOR_INL_H_
-#include "src/factory.h"
+#include "src/factory-inl.h"
#include "src/feedback-vector.h"
#include "src/globals.h"
#include "src/heap/heap-inl.h"
@@ -18,26 +18,6 @@
namespace v8 {
namespace internal {
-template <typename Derived>
-FeedbackSlot FeedbackVectorSpecBase<Derived>::AddSlot(FeedbackSlotKind kind) {
- int slot = This()->slots();
- int entries_per_slot = FeedbackMetadata::GetSlotSize(kind);
- This()->append(kind);
- for (int i = 1; i < entries_per_slot; i++) {
- This()->append(FeedbackSlotKind::kInvalid);
- }
- return FeedbackSlot(slot);
-}
-
-template <typename Derived>
-FeedbackSlot FeedbackVectorSpecBase<Derived>::AddTypeProfileSlot() {
- DCHECK(FLAG_type_profile);
- FeedbackSlot slot = AddSlot(FeedbackSlotKind::kTypeProfile);
- CHECK_EQ(FeedbackVectorSpec::kTypeProfileSlotIndex,
- FeedbackVector::GetIndex(slot));
- return slot;
-}
-
// static
FeedbackMetadata* FeedbackMetadata::cast(Object* obj) {
DCHECK(obj->IsFeedbackMetadata());
@@ -63,7 +43,7 @@ FeedbackVector* FeedbackVector::cast(Object* obj) {
int FeedbackMetadata::GetSlotSize(FeedbackSlotKind kind) {
switch (kind) {
- case FeedbackSlotKind::kGeneral:
+ case FeedbackSlotKind::kForIn:
case FeedbackSlotKind::kCompareOp:
case FeedbackSlotKind::kBinaryOp:
case FeedbackSlotKind::kLiteral:
@@ -189,6 +169,9 @@ BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback) {
return BinaryOperationHint::kNumberOrOddball;
case BinaryOperationFeedback::kString:
return BinaryOperationHint::kString;
+ case BinaryOperationFeedback::kBigInt:
+ // TODO(jarin/jkummerow/neis): Support BigInts in TF.
+ // Fall through for now.
case BinaryOperationFeedback::kAny:
default:
return BinaryOperationHint::kAny;
@@ -221,9 +204,23 @@ CompareOperationHint CompareOperationHintFromFeedback(int type_feedback) {
UNREACHABLE();
}
+// Helper function to transform the feedback into a ForInHint.
+ForInHint ForInHintFromFeedback(int type_feedback) {
+ switch (type_feedback) {
+ case ForInFeedback::kNone:
+ return ForInHint::kNone;
+ case ForInFeedback::kEnumCacheKeys:
+ return ForInHint::kEnumCacheKeys;
+ case ForInFeedback::kEnumCacheKeysAndIndices:
+ return ForInHint::kEnumCacheKeysAndIndices;
+ default:
+ return ForInHint::kAny;
+ }
+ UNREACHABLE();
+}
+
void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
- int* vector_ic_count,
- bool code_is_interpreted) {
+ int* vector_ic_count) {
Object* megamorphic_sentinel =
*FeedbackVector::MegamorphicSentinel(GetIsolate());
int with = 0;
@@ -237,11 +234,6 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
Object* const obj = Get(slot);
switch (kind) {
case FeedbackSlotKind::kCall:
- // If we are not running interpreted code, we need to ignore the special
- // IC slots for call/construct used by the interpreter.
- // TODO(mvstanton): Remove code_is_interpreted when full code is retired
- // from service.
- if (!code_is_interpreted) break;
case FeedbackSlotKind::kLoadProperty:
case FeedbackSlotKind::kLoadGlobalInsideTypeof:
case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
@@ -259,34 +251,24 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
with++;
} else if (obj == megamorphic_sentinel) {
gen++;
- if (code_is_interpreted) with++;
+ with++;
}
total++;
break;
}
- case FeedbackSlotKind::kBinaryOp:
- // If we are not running interpreted code, we need to ignore the special
- // IC slots for binaryop/compare used by the interpreter.
- // TODO(mvstanton): Remove code_is_interpreted when full code is retired
- // from service.
- if (code_is_interpreted) {
- int const feedback = Smi::ToInt(obj);
- BinaryOperationHint hint = BinaryOperationHintFromFeedback(feedback);
- if (hint == BinaryOperationHint::kAny) {
- gen++;
- }
- if (hint != BinaryOperationHint::kNone) {
- with++;
- }
- total++;
+ case FeedbackSlotKind::kBinaryOp: {
+ int const feedback = Smi::ToInt(obj);
+ BinaryOperationHint hint = BinaryOperationHintFromFeedback(feedback);
+ if (hint == BinaryOperationHint::kAny) {
+ gen++;
}
+ if (hint != BinaryOperationHint::kNone) {
+ with++;
+ }
+ total++;
break;
+ }
case FeedbackSlotKind::kCompareOp: {
- // If we are not running interpreted code, we need to ignore the special
- // IC slots for binaryop/compare used by the interpreter.
- // TODO(mvstanton): Remove code_is_interpreted when full code is retired
- // from service.
- if (code_is_interpreted) {
int const feedback = Smi::ToInt(obj);
CompareOperationHint hint =
CompareOperationHintFromFeedback(feedback);
@@ -297,11 +279,21 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
with++;
}
total++;
+ break;
+ }
+ case FeedbackSlotKind::kForIn: {
+ int const feedback = Smi::ToInt(obj);
+ ForInHint hint = ForInHintFromFeedback(feedback);
+ if (hint == ForInHint::kAny) {
+ gen++;
}
+ if (hint != ForInHint::kNone) {
+ with++;
+ }
+ total++;
break;
}
case FeedbackSlotKind::kCreateClosure:
- case FeedbackSlotKind::kGeneral:
case FeedbackSlotKind::kLiteral:
break;
case FeedbackSlotKind::kInvalid:
diff --git a/deps/v8/src/feedback-vector.cc b/deps/v8/src/feedback-vector.cc
index 5e2f3c7003..c105effd77 100644
--- a/deps/v8/src/feedback-vector.cc
+++ b/deps/v8/src/feedback-vector.cc
@@ -12,6 +12,35 @@
namespace v8 {
namespace internal {
+template <typename Derived>
+FeedbackSlot FeedbackVectorSpecBase<Derived>::AddSlot(FeedbackSlotKind kind) {
+ int slot = This()->slots();
+ int entries_per_slot = FeedbackMetadata::GetSlotSize(kind);
+ This()->append(kind);
+ for (int i = 1; i < entries_per_slot; i++) {
+ This()->append(FeedbackSlotKind::kInvalid);
+ }
+ return FeedbackSlot(slot);
+}
+
+template FeedbackSlot FeedbackVectorSpecBase<FeedbackVectorSpec>::AddSlot(
+ FeedbackSlotKind kind);
+template FeedbackSlot FeedbackVectorSpecBase<StaticFeedbackVectorSpec>::AddSlot(
+ FeedbackSlotKind kind);
+
+template <typename Derived>
+FeedbackSlot FeedbackVectorSpecBase<Derived>::AddTypeProfileSlot() {
+ FeedbackSlot slot = AddSlot(FeedbackSlotKind::kTypeProfile);
+ CHECK_EQ(FeedbackVectorSpec::kTypeProfileSlotIndex,
+ FeedbackVector::GetIndex(slot));
+ return slot;
+}
+
+template FeedbackSlot
+FeedbackVectorSpecBase<FeedbackVectorSpec>::AddTypeProfileSlot();
+template FeedbackSlot
+FeedbackVectorSpecBase<StaticFeedbackVectorSpec>::AddTypeProfileSlot();
+
bool FeedbackVectorSpec::HasTypeProfileSlot() const {
FeedbackSlot slot =
FeedbackVector::ToSlot(FeedbackVectorSpec::kTypeProfileSlotIndex);
@@ -162,8 +191,8 @@ const char* FeedbackMetadata::Kind2String(FeedbackSlotKind kind) {
return "Literal";
case FeedbackSlotKind::kTypeProfile:
return "TypeProfile";
- case FeedbackSlotKind::kGeneral:
- return "General";
+ case FeedbackSlotKind::kForIn:
+ return "ForIn";
case FeedbackSlotKind::kKindsNumber:
break;
}
@@ -173,7 +202,8 @@ const char* FeedbackMetadata::Kind2String(FeedbackSlotKind kind) {
bool FeedbackMetadata::HasTypeProfileSlot() const {
FeedbackSlot slot =
FeedbackVector::ToSlot(FeedbackVectorSpec::kTypeProfileSlotIndex);
- return GetKind(slot) == FeedbackSlotKind::kTypeProfile;
+ return slot.ToInt() < this->length() &&
+ GetKind(slot) == FeedbackSlotKind::kTypeProfile;
}
FeedbackSlotKind FeedbackVector::GetKind(FeedbackSlot slot) const {
@@ -224,6 +254,7 @@ Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
vector->set(index, isolate->heap()->empty_weak_cell(),
SKIP_WRITE_BARRIER);
break;
+ case FeedbackSlotKind::kForIn:
case FeedbackSlotKind::kCompareOp:
case FeedbackSlotKind::kBinaryOp:
vector->set(index, Smi::kZero, SKIP_WRITE_BARRIER);
@@ -250,7 +281,6 @@ Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
case FeedbackSlotKind::kStoreKeyedSloppy:
case FeedbackSlotKind::kStoreKeyedStrict:
case FeedbackSlotKind::kStoreDataPropertyInLiteral:
- case FeedbackSlotKind::kGeneral:
case FeedbackSlotKind::kTypeProfile:
vector->set(index, *uninitialized_sentinel, SKIP_WRITE_BARRIER);
break;
@@ -258,7 +288,6 @@ Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
case FeedbackSlotKind::kInvalid:
case FeedbackSlotKind::kKindsNumber:
UNREACHABLE();
- vector->set(index, Smi::kZero, SKIP_WRITE_BARRIER);
break;
}
for (int j = 1; j < entry_size; j++) {
@@ -341,9 +370,7 @@ void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
}
}
-void FeedbackVector::ClearSlots(JSFunction* host_function) {
- Isolate* isolate = GetIsolate();
-
+bool FeedbackVector::ClearSlots(Isolate* isolate) {
Object* uninitialized_sentinel =
FeedbackVector::RawUninitializedSentinel(isolate);
@@ -410,6 +437,7 @@ void FeedbackVector::ClearSlots(JSFunction* host_function) {
}
break;
}
+ case FeedbackSlotKind::kForIn:
case FeedbackSlotKind::kBinaryOp:
case FeedbackSlotKind::kCompareOp: {
DCHECK(Get(slot)->IsSmi());
@@ -421,20 +449,6 @@ void FeedbackVector::ClearSlots(JSFunction* host_function) {
case FeedbackSlotKind::kTypeProfile:
break;
}
- case FeedbackSlotKind::kGeneral: {
- if (obj->IsHeapObject()) {
- InstanceType instance_type =
- HeapObject::cast(obj)->map()->instance_type();
- // AllocationSites are exempt from clearing. They don't store Maps
- // or Code pointers which can cause memory leaks if not cleared
- // regularly.
- if (instance_type != ALLOCATION_SITE_TYPE) {
- Set(slot, uninitialized_sentinel, SKIP_WRITE_BARRIER);
- feedback_updated = true;
- }
- }
- break;
- }
case FeedbackSlotKind::kLiteral: {
Set(slot, Smi::kZero, SKIP_WRITE_BARRIER);
feedback_updated = true;
@@ -455,9 +469,7 @@ void FeedbackVector::ClearSlots(JSFunction* host_function) {
}
}
}
- if (feedback_updated) {
- IC::OnFeedbackChanged(isolate, this, host_function);
- }
+ return feedback_updated;
}
Handle<FixedArray> FeedbackNexus::EnsureArrayOfSize(int length) {
@@ -691,7 +703,7 @@ void FeedbackNexus::ConfigureMonomorphic(Handle<Name> name,
void FeedbackNexus::ConfigurePolymorphic(Handle<Name> name,
MapHandles const& maps,
- List<Handle<Object>>* handlers) {
+ ObjectHandles* handlers) {
int receiver_count = static_cast<int>(maps.size());
DCHECK(receiver_count > 1);
Handle<FixedArray> array;
@@ -782,8 +794,7 @@ MaybeHandle<Object> FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
return MaybeHandle<Code>();
}
-bool FeedbackNexus::FindHandlers(List<Handle<Object>>* code_list,
- int length) const {
+bool FeedbackNexus::FindHandlers(ObjectHandles* code_list, int length) const {
Object* feedback = GetFeedback();
Isolate* isolate = GetIsolate();
int count = 0;
@@ -801,7 +812,7 @@ bool FeedbackNexus::FindHandlers(List<Handle<Object>>* code_list,
if (!cell->cleared()) {
Object* code = array->get(i + increment - 1);
DCHECK(IC::IsHandler(code));
- code_list->Add(handle(code, isolate));
+ code_list->push_back(handle(code, isolate));
count++;
}
}
@@ -810,7 +821,7 @@ bool FeedbackNexus::FindHandlers(List<Handle<Object>>* code_list,
if (!cell->cleared()) {
Object* code = GetFeedbackExtra();
DCHECK(IC::IsHandler(code));
- code_list->Add(handle(code, isolate));
+ code_list->push_back(handle(code, isolate));
count++;
}
}
@@ -836,15 +847,14 @@ Name* KeyedStoreICNexus::FindFirstName() const {
KeyedAccessStoreMode KeyedStoreICNexus::GetKeyedAccessStoreMode() const {
KeyedAccessStoreMode mode = STANDARD_STORE;
MapHandles maps;
- List<Handle<Object>> handlers;
+ ObjectHandles handlers;
if (GetKeyType() == PROPERTY) return mode;
ExtractMaps(&maps);
FindHandlers(&handlers, static_cast<int>(maps.size()));
- for (int i = 0; i < handlers.length(); i++) {
+ for (const Handle<Object>& maybe_code_handler : handlers) {
// The first handler that isn't the slow handler will have the bits we need.
- Handle<Object> maybe_code_handler = handlers.at(i);
Handle<Code> handler;
if (maybe_code_handler->IsTuple3()) {
// Elements transition.
@@ -854,9 +864,14 @@ KeyedAccessStoreMode KeyedStoreICNexus::GetKeyedAccessStoreMode() const {
// Element store with prototype chain check.
Handle<Tuple2> data_handler = Handle<Tuple2>::cast(maybe_code_handler);
handler = handle(Code::cast(data_handler->value2()));
+ } else if (maybe_code_handler->IsSmi()) {
+ // Skip proxy handlers.
+ DCHECK_EQ(*maybe_code_handler, *StoreHandler::StoreProxy(GetIsolate()));
+ continue;
} else {
// Element store without prototype chain check.
handler = Handle<Code>::cast(maybe_code_handler);
+ if (handler->is_builtin()) continue;
}
CodeStub::Major major_key = CodeStub::MajorKeyFromKey(handler->stub_key());
uint32_t minor_key = CodeStub::MinorKeyFromKey(handler->stub_key());
@@ -923,13 +938,18 @@ CompareOperationHint CompareICNexus::GetCompareOperationFeedback() const {
}
InlineCacheState ForInICNexus::StateFromFeedback() const {
- Object* feedback = GetFeedback();
- if (feedback == *FeedbackVector::UninitializedSentinel(GetIsolate())) {
+ ForInHint hint = GetForInFeedback();
+ if (hint == ForInHint::kNone) {
return UNINITIALIZED;
- } else if (feedback == *FeedbackVector::MegamorphicSentinel(GetIsolate())) {
- return MEGAMORPHIC;
+ } else if (hint == ForInHint::kAny) {
+ return GENERIC;
}
- return GENERIC;
+ return MONOMORPHIC;
+}
+
+ForInHint ForInICNexus::GetForInFeedback() const {
+ int feedback = Smi::ToInt(GetFeedback());
+ return ForInHintFromFeedback(feedback);
}
InlineCacheState StoreDataPropertyInLiteralICNexus::StateFromFeedback() const {
@@ -964,6 +984,19 @@ InlineCacheState CollectTypeProfileNexus::StateFromFeedback() const {
return MONOMORPHIC;
}
+namespace {
+
+bool InList(Handle<ArrayList> types, Handle<String> type) {
+ for (int i = 0; i < types->Length(); i++) {
+ Object* obj = types->Get(i);
+ if (String::cast(obj)->Equals(*type)) {
+ return true;
+ }
+ }
+ return false;
+}
+} // anonymous namespace
+
void CollectTypeProfileNexus::Collect(Handle<String> type, int position) {
DCHECK_GE(position, 0);
Isolate* isolate = GetIsolate();
@@ -984,16 +1017,76 @@ void CollectTypeProfileNexus::Collect(Handle<String> type, int position) {
int entry = types->FindEntry(position);
if (entry == UnseededNumberDictionary::kNotFound) {
position_specific_types = ArrayList::New(isolate, 1);
+ types = UnseededNumberDictionary::Set(
+ types, position, ArrayList::Add(position_specific_types, type));
} else {
DCHECK(types->ValueAt(entry)->IsArrayList());
position_specific_types = handle(ArrayList::cast(types->ValueAt(entry)));
+ if (!InList(position_specific_types, type)) { // Add type
+ types = UnseededNumberDictionary::Set(
+ types, position, ArrayList::Add(position_specific_types, type));
+ }
}
-
- types = UnseededNumberDictionary::Set(
- types, position, ArrayList::Add(position_specific_types, type));
SetFeedback(*types);
}
+void CollectTypeProfileNexus::Clear() {
+ SetFeedback(*FeedbackVector::UninitializedSentinel(GetIsolate()));
+}
+
+std::vector<int> CollectTypeProfileNexus::GetSourcePositions() const {
+ std::vector<int> source_positions;
+ Isolate* isolate = GetIsolate();
+
+ Object* const feedback = GetFeedback();
+
+ if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
+ return source_positions;
+ }
+
+ Handle<UnseededNumberDictionary> types = Handle<UnseededNumberDictionary>(
+ UnseededNumberDictionary::cast(feedback), isolate);
+
+ for (int index = UnseededNumberDictionary::kElementsStartIndex;
+ index < types->length(); index += UnseededNumberDictionary::kEntrySize) {
+ int key_index = index + UnseededNumberDictionary::kEntryKeyIndex;
+ Object* key = types->get(key_index);
+ if (key->IsSmi()) {
+ int position = Smi::cast(key)->value();
+ source_positions.push_back(position);
+ }
+ }
+ return source_positions;
+}
+
+std::vector<Handle<String>> CollectTypeProfileNexus::GetTypesForSourcePositions(
+ uint32_t position) const {
+ Isolate* isolate = GetIsolate();
+
+ Object* const feedback = GetFeedback();
+ std::vector<Handle<String>> types_for_position;
+ if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
+ return types_for_position;
+ }
+
+ Handle<UnseededNumberDictionary> types = Handle<UnseededNumberDictionary>(
+ UnseededNumberDictionary::cast(feedback), isolate);
+
+ int entry = types->FindEntry(position);
+ if (entry == UnseededNumberDictionary::kNotFound) {
+ return types_for_position;
+ }
+ DCHECK(types->ValueAt(entry)->IsArrayList());
+ Handle<ArrayList> position_specific_types =
+ Handle<ArrayList>(ArrayList::cast(types->ValueAt(entry)));
+ for (int i = 0; i < position_specific_types->Length(); i++) {
+ Object* t = position_specific_types->Get(i);
+ types_for_position.push_back(Handle<String>(String::cast(t), isolate));
+ }
+
+ return types_for_position;
+}
+
namespace {
Handle<JSObject> ConvertToJSObject(Isolate* isolate,
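
The type-profile plumbing added above keys an UnseededNumberDictionary by source position and stores an ArrayList of type-name strings per position, deduplicated via the new InList helper. A rough sketch of the same shape using standard containers (std::map and std::vector stand in for the V8 dictionary and ArrayList):

// Shape of the type-profile feedback, mirrored with standard containers:
// source position -> list of distinct type names seen at that position.
#include <algorithm>
#include <map>
#include <string>
#include <vector>

using TypeProfile = std::map<int, std::vector<std::string>>;

void Collect(TypeProfile* profile, int position, const std::string& type) {
  std::vector<std::string>& types = (*profile)[position];
  if (std::find(types.begin(), types.end(), type) == types.end())
    types.push_back(type);  // the InList check keeps entries unique
}

std::vector<int> GetSourcePositions(const TypeProfile& profile) {
  std::vector<int> positions;
  for (const auto& entry : profile) positions.push_back(entry.first);
  return positions;
}

int main() {
  TypeProfile profile;
  Collect(&profile, 42, "Object");
  Collect(&profile, 42, "Object");  // duplicate, not re-added
  Collect(&profile, 42, "number");
  return GetSourcePositions(profile).size() == 1 ? 0 : 1;
}
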
diff --git a/deps/v8/src/feedback-vector.h b/deps/v8/src/feedback-vector.h
index 715aaaa0d2..efa1cf5924 100644
--- a/deps/v8/src/feedback-vector.h
+++ b/deps/v8/src/feedback-vector.h
@@ -25,17 +25,21 @@ enum class FeedbackSlotKind {
// There must be no such slots in the system.
kInvalid,
+ // Sloppy kinds come first, for easy language mode testing.
+ kStoreGlobalSloppy,
+ kStoreNamedSloppy,
+ kStoreKeyedSloppy,
+ kLastSloppyKind = kStoreKeyedSloppy,
+
+ // Strict and language mode unaware kinds.
kCall,
kLoadProperty,
kLoadGlobalNotInsideTypeof,
kLoadGlobalInsideTypeof,
kLoadKeyed,
- kStoreGlobalSloppy,
kStoreGlobalStrict,
- kStoreNamedSloppy,
kStoreNamedStrict,
kStoreOwnNamed,
- kStoreKeyedSloppy,
kStoreKeyedStrict,
kBinaryOp,
kCompareOp,
@@ -43,8 +47,7 @@ enum class FeedbackSlotKind {
kTypeProfile,
kCreateClosure,
kLiteral,
- // This is a general purpose slot that occupies one feedback vector element.
- kGeneral,
+ kForIn,
kKindsNumber // Last value indicating number of kinds.
};
@@ -99,15 +102,19 @@ inline TypeofMode GetTypeofModeFromSlotKind(FeedbackSlotKind kind) {
inline LanguageMode GetLanguageModeFromSlotKind(FeedbackSlotKind kind) {
DCHECK(IsStoreICKind(kind) || IsStoreOwnICKind(kind) ||
IsStoreGlobalICKind(kind) || IsKeyedStoreICKind(kind));
- return (kind == FeedbackSlotKind::kStoreNamedSloppy ||
- kind == FeedbackSlotKind::kStoreGlobalSloppy ||
- kind == FeedbackSlotKind::kStoreKeyedSloppy)
- ? SLOPPY
- : STRICT;
+ STATIC_ASSERT(FeedbackSlotKind::kStoreGlobalSloppy <=
+ FeedbackSlotKind::kLastSloppyKind);
+ STATIC_ASSERT(FeedbackSlotKind::kStoreKeyedSloppy <=
+ FeedbackSlotKind::kLastSloppyKind);
+ STATIC_ASSERT(FeedbackSlotKind::kStoreNamedSloppy <=
+ FeedbackSlotKind::kLastSloppyKind);
+ return (kind <= FeedbackSlotKind::kLastSloppyKind) ? SLOPPY : STRICT;
}
std::ostream& operator<<(std::ostream& os, FeedbackSlotKind kind);
+typedef std::vector<Handle<Object>> ObjectHandles;
+
class FeedbackMetadata;
// A FeedbackVector has a fixed header with:
@@ -123,7 +130,7 @@ class FeedbackVector : public HeapObject {
static inline FeedbackVector* cast(Object* obj);
inline void ComputeCounts(int* with_type_info, int* generic,
- int* vector_ic_count, bool code_is_interpreted);
+ int* vector_ic_count);
inline bool is_empty() const;
@@ -223,8 +230,8 @@ class FeedbackVector : public HeapObject {
DECL_PRINTER(FeedbackVector)
DECL_VERIFIER(FeedbackVector)
- // Clears the vector slots.
- void ClearSlots(JSFunction* host_function);
+  // Clears the vector slots. Returns true if feedback has changed.
+ bool ClearSlots(Isolate* isolate);
// The object that indicates an uninitialized cache.
static inline Handle<Symbol> UninitializedSentinel(Isolate* isolate);
@@ -332,7 +339,7 @@ class FeedbackVectorSpecBase {
return AddSlot(FeedbackSlotKind::kCompareOp);
}
- FeedbackSlot AddGeneralSlot() { return AddSlot(FeedbackSlotKind::kGeneral); }
+ FeedbackSlot AddForInSlot() { return AddSlot(FeedbackSlotKind::kForIn); }
FeedbackSlot AddLiteralSlot() { return AddSlot(FeedbackSlotKind::kLiteral); }
@@ -350,7 +357,7 @@ class FeedbackVectorSpecBase {
DECL_PRINTER(FeedbackVectorSpec)
private:
- inline FeedbackSlot AddSlot(FeedbackSlotKind kind);
+ FeedbackSlot AddSlot(FeedbackSlotKind kind);
Derived* This() { return static_cast<Derived*>(this); }
};
@@ -451,6 +458,8 @@ class FeedbackMetadata : public FixedArray {
bool HasTypeProfileSlot() const;
private:
+ friend class AccessorAssembler;
+
static const int kFeedbackSlotKindBits = 5;
STATIC_ASSERT(static_cast<int>(FeedbackSlotKind::kKindsNumber) <
(1 << kFeedbackSlotKindBits));
@@ -554,8 +563,7 @@ class FeedbackNexus {
virtual InlineCacheState StateFromFeedback() const = 0;
virtual int ExtractMaps(MapHandles* maps) const;
virtual MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const;
- virtual bool FindHandlers(List<Handle<Object>>* code_list,
- int length = -1) const;
+ virtual bool FindHandlers(ObjectHandles* code_list, int length = -1) const;
virtual Name* FindFirstName() const { return NULL; }
bool IsCleared() {
@@ -577,7 +585,7 @@ class FeedbackNexus {
Handle<Object> handler);
void ConfigurePolymorphic(Handle<Name> name, MapHandles const& maps,
- List<Handle<Object>>* handlers);
+ ObjectHandles* handlers);
protected:
inline void SetFeedback(Object* feedback,
@@ -620,8 +628,7 @@ class CallICNexus final : public FeedbackNexus {
MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
return MaybeHandle<Code>();
}
- bool FindHandlers(List<Handle<Object>>* code_list,
- int length = -1) const final {
+ bool FindHandlers(ObjectHandles* code_list, int length = -1) const final {
return length == 0;
}
@@ -666,8 +673,7 @@ class LoadGlobalICNexus : public FeedbackNexus {
MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
return MaybeHandle<Code>();
}
- bool FindHandlers(List<Handle<Object>>* code_list,
- int length = -1) const final {
+ bool FindHandlers(ObjectHandles* code_list, int length = -1) const final {
return length == 0;
}
@@ -759,8 +765,7 @@ class BinaryOpICNexus final : public FeedbackNexus {
MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
return MaybeHandle<Code>();
}
- bool FindHandlers(List<Handle<Object>>* code_list,
- int length = -1) const final {
+ bool FindHandlers(ObjectHandles* code_list, int length = -1) const final {
return length == 0;
}
};
@@ -786,8 +791,7 @@ class CompareICNexus final : public FeedbackNexus {
MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
return MaybeHandle<Code>();
}
- bool FindHandlers(List<Handle<Object>>* code_list,
- int length = -1) const final {
+ bool FindHandlers(ObjectHandles* code_list, int length = -1) const final {
return length == 0;
}
};
@@ -796,17 +800,21 @@ class ForInICNexus final : public FeedbackNexus {
public:
ForInICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackSlotKind::kGeneral, vector->GetKind(slot));
+ DCHECK_EQ(FeedbackSlotKind::kForIn, vector->GetKind(slot));
+ }
+ ForInICNexus(FeedbackVector* vector, FeedbackSlot slot)
+ : FeedbackNexus(vector, slot) {
+ DCHECK_EQ(FeedbackSlotKind::kForIn, vector->GetKind(slot));
}
InlineCacheState StateFromFeedback() const final;
+ ForInHint GetForInFeedback() const;
int ExtractMaps(MapHandles* maps) const final { return 0; }
MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
return MaybeHandle<Code>();
}
- bool FindHandlers(List<Handle<Object>>* code_list,
- int length = -1) const final {
+ bool FindHandlers(ObjectHandles* code_list, int length = -1) const final {
return length == 0;
}
};
@@ -847,11 +855,17 @@ class CollectTypeProfileNexus : public FeedbackNexus {
void Collect(Handle<String> type, int position);
JSObject* GetTypeProfile() const;
+ std::vector<int> GetSourcePositions() const;
+ std::vector<Handle<String>> GetTypesForSourcePositions(uint32_t pos) const;
+
+ void Clear() override;
+
InlineCacheState StateFromFeedback() const override;
};
inline BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback);
inline CompareOperationHint CompareOperationHintFromFeedback(int type_feedback);
+inline ForInHint ForInHintFromFeedback(int type_feedback);
} // namespace internal
} // namespace v8
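
The enum reordering above is what enables the new GetLanguageModeFromSlotKind: once every sloppy kind sorts at or below kLastSloppyKind, the language mode falls out of a single comparison, with STATIC_ASSERTs guarding the ordering invariant. A compilable miniature of the same trick (the enumerators are illustrative, not the full FeedbackSlotKind list):

// Minimal sketch of the "sloppy kinds first" trick: one compare decides
// the language mode, and static_asserts keep the ordering honest.
#include <cassert>

enum class SlotKind {
  kStoreGlobalSloppy,
  kStoreNamedSloppy,
  kStoreKeyedSloppy,
  kLastSloppyKind = kStoreKeyedSloppy,
  kStoreGlobalStrict,
  kStoreNamedStrict,
};

enum LanguageMode { SLOPPY, STRICT };

LanguageMode GetLanguageMode(SlotKind kind) {
  static_assert(SlotKind::kStoreNamedSloppy <= SlotKind::kLastSloppyKind, "");
  return kind <= SlotKind::kLastSloppyKind ? SLOPPY : STRICT;
}

int main() {
  assert(GetLanguageMode(SlotKind::kStoreNamedSloppy) == SLOPPY);
  assert(GetLanguageMode(SlotKind::kStoreGlobalStrict) == STRICT);
}
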
diff --git a/deps/v8/src/ffi/OWNERS b/deps/v8/src/ffi/OWNERS
deleted file mode 100644
index f78789f5b5..0000000000
--- a/deps/v8/src/ffi/OWNERS
+++ /dev/null
@@ -1,4 +0,0 @@
-mattloring@google.com
-ofrobots@google.com
-
-# COMPONENT: Blink>JavaScript>Compiler
diff --git a/deps/v8/src/ffi/ffi-compiler.cc b/deps/v8/src/ffi/ffi-compiler.cc
deleted file mode 100644
index e442b66cbe..0000000000
--- a/deps/v8/src/ffi/ffi-compiler.cc
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/ffi/ffi-compiler.h"
-#include "src/api.h"
-#include "src/code-factory.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void InstallFFIMap(Isolate* isolate) {
- Handle<Context> context(isolate->context());
- DCHECK(!context->get(Context::NATIVE_FUNCTION_MAP_INDEX)->IsMap());
- Handle<Map> prev_map = Handle<Map>(context->sloppy_function_map(), isolate);
-
- InstanceType instance_type = prev_map->instance_type();
- int embedder_fields = JSObject::GetEmbedderFieldCount(*prev_map);
- CHECK_EQ(0, embedder_fields);
- int pre_allocated =
- prev_map->GetInObjectProperties() - prev_map->unused_property_fields();
- int instance_size;
- int in_object_properties;
- JSFunction::CalculateInstanceSizeHelper(
- instance_type, embedder_fields, 0, &instance_size, &in_object_properties);
- int unused_property_fields = in_object_properties - pre_allocated;
- Handle<Map> map = Map::CopyInitialMap(
- prev_map, instance_size, in_object_properties, unused_property_fields);
- context->set_native_function_map(*map);
-}
-
-namespace ffi {
-
-class FFIAssembler : public CodeStubAssembler {
- public:
- explicit FFIAssembler(CodeAssemblerState* state) : CodeStubAssembler(state) {}
-
- Node* ToJS(Node* node, Node* context, FFIType type) {
- switch (type) {
- case FFIType::kInt32:
- return ChangeInt32ToTagged(node);
- }
- UNREACHABLE();
- }
-
- Node* FromJS(Node* node, Node* context, FFIType type) {
- switch (type) {
- case FFIType::kInt32:
- return TruncateTaggedToWord32(context, node);
- }
- UNREACHABLE();
- }
-
- MachineType FFIToMachineType(FFIType type) {
- switch (type) {
- case FFIType::kInt32:
- return MachineType::Int32();
- }
- UNREACHABLE();
- }
-
- Signature<MachineType>* FFIToMachineSignature(FFISignature* sig) {
- Signature<MachineType>::Builder sig_builder(zone(), sig->return_count(),
- sig->parameter_count());
- for (size_t i = 0; i < sig->return_count(); i++) {
- sig_builder.AddReturn(FFIToMachineType(sig->GetReturn(i)));
- }
- for (size_t j = 0; j < sig->parameter_count(); j++) {
- sig_builder.AddParam(FFIToMachineType(sig->GetParam(j)));
- }
- return sig_builder.Build();
- }
-
- void GenerateJSToNativeWrapper(NativeFunction* func) {
- int params = static_cast<int>(func->sig->parameter_count());
- int returns = static_cast<int>(func->sig->return_count());
- ApiFunction api_func(func->start);
- ExternalReference ref(&api_func, ExternalReference::BUILTIN_CALL,
- isolate());
-
- Node* context_param = GetJSContextParameter();
-
- Node** inputs = zone()->NewArray<Node*>(params + 1);
- int input_count = 0;
- inputs[input_count++] = ExternalConstant(ref);
- for (int i = 0; i < params; i++) {
- inputs[input_count++] =
- FromJS(Parameter(i), context_param, func->sig->GetParam(i));
- }
-
- Node* call =
- CallCFunctionN(FFIToMachineSignature(func->sig), input_count, inputs);
- Node* return_val = UndefinedConstant();
- if (returns == 1) {
- return_val = ToJS(call, context_param, func->sig->GetReturn());
- }
- Return(return_val);
- }
-};
-
-Handle<JSFunction> CompileJSToNativeWrapper(Isolate* isolate,
- Handle<String> name,
- NativeFunction func) {
- int params = static_cast<int>(func.sig->parameter_count());
- Zone zone(isolate->allocator(), ZONE_NAME);
- CodeAssemblerState state(isolate, &zone, params,
- Code::ComputeFlags(Code::BUILTIN), "js-to-native");
- FFIAssembler assembler(&state);
- assembler.GenerateJSToNativeWrapper(&func);
- Handle<Code> code = assembler.GenerateCode(&state);
-
- Handle<SharedFunctionInfo> shared =
- isolate->factory()->NewSharedFunctionInfo(name, code, false);
- shared->set_length(params);
- shared->set_internal_formal_parameter_count(params);
- Handle<JSFunction> function = isolate->factory()->NewFunction(
- isolate->native_function_map(), name, code);
- function->set_shared(*shared);
- return function;
-}
-
-} // namespace ffi
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/ffi/ffi-compiler.h b/deps/v8/src/ffi/ffi-compiler.h
deleted file mode 100644
index 2825f4f0af..0000000000
--- a/deps/v8/src/ffi/ffi-compiler.h
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SRC_FFI_FFI_COMPILER_H_
-#define SRC_FFI_FFI_COMPILER_H_
-
-#include "src/code-stub-assembler.h"
-#include "src/machine-type.h"
-
-namespace v8 {
-namespace internal {
-
-typedef compiler::Node Node;
-typedef compiler::CodeAssemblerState CodeAssemblerState;
-
-void InstallFFIMap(Isolate* isolate);
-
-namespace ffi {
-
-enum class FFIType : uint8_t { kInt32 };
-
-typedef Signature<FFIType> FFISignature;
-
-struct NativeFunction {
- FFISignature* sig;
- uint8_t* start;
-};
-
-Handle<JSFunction> CompileJSToNativeWrapper(Isolate* isolate,
- Handle<String> name,
- NativeFunction func);
-} // namespace ffi
-} // namespace internal
-} // namespace v8
-
-#endif // SRC_FFI_FFI_COMPILER_H_
diff --git a/deps/v8/src/find-and-replace-pattern.h b/deps/v8/src/find-and-replace-pattern.h
deleted file mode 100644
index aad9bb3bbf..0000000000
--- a/deps/v8/src/find-and-replace-pattern.h
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_FIND_AND_REPLACE_PATTERN_H_
-#define V8_FIND_AND_REPLACE_PATTERN_H_
-
-#include "src/handles.h"
-
-namespace v8 {
-namespace internal {
-
-class Map;
-class Object;
-
-class FindAndReplacePattern {
- public:
- FindAndReplacePattern() : count_(0) {}
- void Add(Handle<Map> map_to_find, Handle<HeapObject> obj_to_replace) {
- DCHECK(count_ < kMaxCount);
- find_[count_] = map_to_find;
- replace_[count_] = obj_to_replace;
- ++count_;
- }
-
- private:
- static const int kMaxCount = 4;
- int count_;
- Handle<Map> find_[kMaxCount];
- Handle<HeapObject> replace_[kMaxCount];
- friend class Code;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_FIND_AND_REPLACE_PATTERN_H_
diff --git a/deps/v8/src/fixed-dtoa.cc b/deps/v8/src/fixed-dtoa.cc
index a33ba1744f..15797aae86 100644
--- a/deps/v8/src/fixed-dtoa.cc
+++ b/deps/v8/src/fixed-dtoa.cc
@@ -168,8 +168,7 @@ static void FillDigits64(uint64_t number, Vector<char> buffer, int* length) {
}
}
-
-static void RoundUp(Vector<char> buffer, int* length, int* decimal_point) {
+static void DtoaRoundUp(Vector<char> buffer, int* length, int* decimal_point) {
// An empty buffer represents 0.
if (*length == 0) {
buffer[0] = '1';
@@ -242,7 +241,7 @@ static void FillFractionals(uint64_t fractionals, int exponent,
}
// If the first bit after the point is set we have to round up.
if (((fractionals >> (point - 1)) & 1) == 1) {
- RoundUp(buffer, length, decimal_point);
+ DtoaRoundUp(buffer, length, decimal_point);
}
} else { // We need 128 bits.
DCHECK(64 < -exponent && -exponent <= 128);
@@ -261,7 +260,7 @@ static void FillFractionals(uint64_t fractionals, int exponent,
(*length)++;
}
if (fractionals128.BitAt(point - 1) == 1) {
- RoundUp(buffer, length, decimal_point);
+ DtoaRoundUp(buffer, length, decimal_point);
}
}
}
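
The RoundUp -> DtoaRoundUp rename avoids a symbol clash without changing behavior. From the visible context, the helper bumps the last decimal digit and propagates carries, growing into a leading '1' when every digit overflows. A self-contained sketch of that round-up, assuming this is the full extent of the logic:

// Decimal round-up over a digit buffer, in the spirit of DtoaRoundUp:
// bump the last digit, carry through '9's, and if the carry falls out of
// the top digit the number becomes "1" with the decimal point shifted.
#include <cstdio>

static void RoundUpDigits(char* buffer, int* length, int* decimal_point) {
  if (*length == 0) {  // an empty buffer represents 0
    buffer[0] = '1';
    *decimal_point = 1;
    *length = 1;
    return;
  }
  buffer[*length - 1]++;
  for (int i = *length - 1; i > 0; --i) {
    if (buffer[i] != '9' + 1) return;  // no carry left
    buffer[i] = '0';
    buffer[i - 1]++;
  }
  if (buffer[0] == '9' + 1) {  // carried out of the top digit
    buffer[0] = '1';
    (*decimal_point)++;
  }
}

int main() {
  char buf[8] = "199";
  int len = 3, point = 3;
  RoundUpDigits(buf, &len, &point);
  std::printf("%.*s point=%d\n", len, buf, point);  // prints "200 point=3"
}
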
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index bcb5a2c982..50a1e660c0 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -187,27 +187,20 @@ DEFINE_BOOL(es_staging, false,
DEFINE_BOOL(harmony, false, "enable all completed harmony features")
DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
DEFINE_IMPLICATION(es_staging, harmony)
+// Enabling import.meta requires also enabling import()
+DEFINE_IMPLICATION(harmony_import_meta, harmony_dynamic_import)
// Features that are still work in progress (behind individual flags).
-#define HARMONY_INPROGRESS_BASE(V) \
+#define HARMONY_INPROGRESS(V) \
+ V(harmony_import_meta, "harmony import.meta property") \
V(harmony_array_prototype_values, "harmony Array.prototype.values") \
V(harmony_function_sent, "harmony function.sent") \
V(harmony_do_expressions, "harmony do-expressions") \
V(harmony_class_fields, "harmony public fields in class literals") \
- V(harmony_promise_finally, "harmony Promise.prototype.finally")
-
-#ifdef V8_INTL_SUPPORT
-#define HARMONY_INPROGRESS(V) \
- HARMONY_INPROGRESS_BASE(V) \
- V(harmony_number_format_to_parts, \
- "Intl.NumberFormat.prototype.formatToParts") \
- V(harmony_plural_rules, "Intl.PluralRules")
-#else
-#define HARMONY_INPROGRESS(V) HARMONY_INPROGRESS_BASE(V)
-#endif
+ V(harmony_bigint, "harmony arbitrary precision integers")
// Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED(V) \
+#define HARMONY_STAGED_BASE(V) \
V(harmony_function_tostring, "harmony Function.prototype.toString") \
V(harmony_regexp_named_captures, "harmony regexp named captures") \
V(harmony_regexp_property, "harmony Unicode regexp property classes") \
@@ -215,10 +208,19 @@ DEFINE_IMPLICATION(es_staging, harmony)
"harmony disallow non undefined primitive return value from class " \
"constructor") \
V(harmony_dynamic_import, "harmony dynamic import") \
- V(harmony_async_iteration, "harmony async iteration") \
+
+#ifdef V8_INTL_SUPPORT
+#define HARMONY_STAGED(V) \
+ HARMONY_STAGED_BASE(V) \
+ V(harmony_number_format_to_parts, \
+ "Intl.NumberFormat.prototype." \
+ "formatToParts")
+#else
+#define HARMONY_STAGED(V) HARMONY_STAGED_BASE(V)
+#endif
// Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING(V) \
+#define HARMONY_SHIPPING_BASE(V) \
V(harmony_strict_legacy_accessor_builtins, \
"treat __defineGetter__ and related functions as strict") \
V(harmony_restrictive_generators, \
@@ -227,8 +229,18 @@ DEFINE_IMPLICATION(es_staging, harmony)
V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
V(harmony_regexp_dotall, "harmony regexp dotAll flag") \
V(harmony_regexp_lookbehind, "harmony regexp lookbehind") \
+ V(harmony_async_iteration, "harmony async iteration") \
V(harmony_template_escapes, \
- "harmony invalid escapes in tagged template literals")
+ "harmony invalid escapes in tagged template literals") \
+ V(harmony_promise_finally, "harmony Promise.prototype.finally")
+
+#ifdef V8_INTL_SUPPORT
+#define HARMONY_SHIPPING(V) \
+ HARMONY_SHIPPING_BASE(V) \
+ V(harmony_plural_rules, "Intl.PluralRules")
+#else
+#define HARMONY_SHIPPING(V) HARMONY_SHIPPING_BASE(V)
+#endif
// Once a shipping feature has proved stable in the wild, it will be dropped
// from HARMONY_SHIPPING, all occurrences of the FLAG_ variable are removed,
@@ -268,6 +280,7 @@ DEFINE_BOOL(future, FUTURE_BOOL,
"not-too-far future")
DEFINE_IMPLICATION(future, preparser_scope_analysis)
+DEFINE_IMPLICATION(future, lazy_deserialization)
// Flags for experimental implementation features.
DEFINE_BOOL(allocation_site_pretenuring, true,
@@ -289,11 +302,8 @@ DEFINE_IMPLICATION(track_computed_fields, track_fields)
DEFINE_BOOL(track_field_types, true, "track field types")
DEFINE_IMPLICATION(track_field_types, track_fields)
DEFINE_IMPLICATION(track_field_types, track_heap_object_fields)
-DEFINE_BOOL(type_profile, false, "collect type information")
-DEFINE_BOOL(block_coverage, true, "enable block code coverage")
DEFINE_BOOL(trace_block_coverage, false,
"trace collected block coverage information")
-DEFINE_IMPLICATION(trace_block_coverage, block_coverage)
DEFINE_BOOL(feedback_normalization, false,
"feed back normalization to constructors")
// TODO(jkummerow): This currently adds too much load on the stub cache.
@@ -309,7 +319,6 @@ DEFINE_VALUE_IMPLICATION(optimize_for_size, max_semi_space_size, 1)
// Flags for data representation optimizations
DEFINE_BOOL(unbox_double_arrays, true, "automatically unbox arrays of doubles")
-DEFINE_BOOL(string_slices, true, "use string slices")
// Flags for Ignition.
DEFINE_BOOL(ignition_elide_noneffectful_bytecodes, true,
@@ -366,6 +375,8 @@ DEFINE_BOOL(turbo_preprocess_ranges, true,
DEFINE_STRING(turbo_filter, "*", "optimization filter for TurboFan compiler")
DEFINE_BOOL(trace_turbo, false, "trace generated TurboFan IR")
DEFINE_BOOL(trace_turbo_graph, false, "trace generated TurboFan graphs")
+DEFINE_BOOL(trace_turbo_scheduled, false, "trace TurboFan IR with schedule")
+DEFINE_IMPLICATION(trace_turbo_scheduled, trace_turbo_graph)
DEFINE_STRING(trace_turbo_cfg_file, NULL,
"trace turbo cfg graph (for C1 visualizer) to a given file name")
DEFINE_BOOL(trace_turbo_types, true, "trace TurboFan's types")
@@ -404,9 +415,6 @@ DEFINE_BOOL(turbo_inlining, true, "enable inlining in TurboFan")
DEFINE_INT(max_inlining_levels, 5, "maximum number of inlining levels")
DEFINE_INT(max_inlined_bytecode_size, 500,
"maximum size of bytecode for a single inlining")
-DEFINE_INT(max_inlined_bytecode_size_absolute, 4000,
- "maximum absolute size of bytecode considered for inlining "
- "(incl. small functions)")
DEFINE_INT(max_inlined_bytecode_size_cumulative, 1000,
"maximum cumulative size of bytecode considered for inlining")
DEFINE_FLOAT(reserve_inline_budget_scale_factor, 1.2,
@@ -415,10 +423,18 @@ DEFINE_INT(max_inlined_bytecode_size_small, 30,
"maximum size of bytecode considered for small function inlining")
DEFINE_FLOAT(min_inlining_frequency, 0.15, "minimum frequency for inlining")
DEFINE_BOOL(polymorphic_inlining, true, "polymorphic inlining")
+DEFINE_BOOL(stress_inline, false,
+ "set high thresholds for inlining to inline as much as possible")
+DEFINE_VALUE_IMPLICATION(stress_inline, max_inlining_levels, 999999)
+DEFINE_VALUE_IMPLICATION(stress_inline, max_inlined_bytecode_size, 999999)
+DEFINE_VALUE_IMPLICATION(stress_inline, max_inlined_bytecode_size_cumulative,
+ 999999)
+DEFINE_VALUE_IMPLICATION(stress_inline, min_inlining_frequency, 0)
+DEFINE_VALUE_IMPLICATION(stress_inline, polymorphic_inlining, true)
DEFINE_BOOL(trace_turbo_inlining, false, "trace TurboFan inlining")
DEFINE_BOOL(inline_accessors, true, "inline JavaScript accessors")
DEFINE_BOOL(inline_into_try, true, "inline into try blocks")
-DEFINE_BOOL(turbo_inline_array_builtins, false,
+DEFINE_BOOL(turbo_inline_array_builtins, true,
"inline array builtins in TurboFan code")
DEFINE_BOOL(use_osr, true, "use on-stack replacement")
DEFINE_BOOL(trace_osr, false, "trace on-stack replacement")
@@ -439,8 +455,6 @@ DEFINE_BOOL(turbo_loop_variable, true, "Turbofan loop variable optimization")
DEFINE_BOOL(turbo_cf_optimization, true, "optimize control flow in TurboFan")
DEFINE_BOOL(turbo_frame_elision, true, "elide frames in TurboFan")
DEFINE_BOOL(turbo_escape, true, "enable escape analysis")
-DEFINE_BOOL(turbo_new_escape, true,
- "enable new implementation of escape analysis")
DEFINE_BOOL(turbo_instruction_scheduling, false,
"enable instruction scheduling in TurboFan")
DEFINE_BOOL(turbo_stress_instruction_scheduling, false,
@@ -470,6 +484,12 @@ DEFINE_INT(wasm_num_compilation_tasks, 10,
"number of parallel compilation tasks for wasm")
DEFINE_BOOL(wasm_async_compilation, false,
"enable actual asynchronous compilation for WebAssembly.compile")
+DEFINE_BOOL(wasm_stream_compilation, false,
+ "enable streaming compilation for WebAssembly")
+DEFINE_IMPLICATION(wasm_stream_compilation, wasm_async_compilation)
+DEFINE_BOOL(wasm_test_streaming, false,
+ "use streaming compilation instead of async compilation for tests")
+DEFINE_IMPLICATION(wasm_test_streaming, wasm_stream_compilation)
// Parallel compilation confuses turbo_stats, force single threaded.
DEFINE_VALUE_IMPLICATION(turbo_stats, wasm_num_compilation_tasks, 0)
DEFINE_UINT(wasm_max_mem_pages, v8::internal::wasm::kV8MaxWasmMemoryPages,
@@ -480,12 +500,16 @@ DEFINE_BOOL(trace_wasm_decoder, false, "trace decoding of wasm code")
DEFINE_BOOL(trace_wasm_decode_time, false, "trace decoding time of wasm code")
DEFINE_BOOL(trace_wasm_compiler, false, "trace compiling of wasm code")
DEFINE_BOOL(trace_wasm_interpreter, false, "trace interpretation of wasm code")
+DEFINE_BOOL(trace_wasm_streaming, false,
+ "trace streaming compilation of wasm code")
DEFINE_INT(trace_wasm_ast_start, 0,
"start function for wasm AST trace (inclusive)")
DEFINE_INT(trace_wasm_ast_end, 0, "end function for wasm AST trace (exclusive)")
DEFINE_UINT(skip_compiling_wasm_funcs, 0, "start compiling at function N")
DEFINE_BOOL(wasm_break_on_decoder_error, false,
"debug break when wasm decoder encounters an error")
+DEFINE_BOOL(wasm_trace_memory, false,
+ "print all memory updates performed in wasm code")
DEFINE_BOOL(validate_asm, true, "validate asm.js modules before compiling")
DEFINE_BOOL(suppress_asm_messages, false,
@@ -499,9 +523,6 @@ DEFINE_BOOL(stress_validate_asm, false, "try to validate everything as asm.js")
DEFINE_BOOL(dump_wasm_module, false, "dump wasm module bytes")
DEFINE_STRING(dump_wasm_module_path, NULL, "directory to dump wasm modules to")
-DEFINE_INT(typed_array_max_size_in_heap, 64,
- "threshold for in-heap typed array")
-
DEFINE_BOOL(experimental_wasm_simd, false,
"enable prototype simd opcodes for wasm")
DEFINE_BOOL(experimental_wasm_eh, false,
@@ -520,10 +541,6 @@ DEFINE_BOOL(wasm_no_stack_checks, false,
DEFINE_BOOL(wasm_trap_handler, false,
"use signal handlers to catch out of bounds memory access in wasm"
" (experimental, currently Linux x86_64 only)")
-DEFINE_BOOL(wasm_guard_pages, false,
- "add guard pages to the end of WebWassembly memory"
- " (experimental, no effect on 32-bit)")
-DEFINE_IMPLICATION(wasm_trap_handler, wasm_guard_pages)
DEFINE_BOOL(wasm_code_fuzzer_gen_test, false,
"Generate a test case when running the wasm-code fuzzer")
DEFINE_BOOL(print_wasm_code, false, "Print WebAssembly code")
@@ -540,18 +557,12 @@ DEFINE_NEG_IMPLICATION(wasm_interpret_all, wasm_lazy_compilation)
// Profiler flags.
DEFINE_INT(frame_count, 1, "number of stack frames inspected by the profiler")
-// 0x1800 fits in the immediate field of an ARM instruction.
-DEFINE_INT(interrupt_budget, 0x1800,
- "execution budget before interrupt is triggered")
DEFINE_INT(type_info_threshold, 25,
"percentage of ICs that must have type info to allow optimization")
DEFINE_INT(generic_ic_threshold, 30,
"max percentage of megamorphic/generic ICs to allow optimization")
DEFINE_INT(self_opt_count, 130, "call count before self-optimization")
-DEFINE_BOOL(trace_opt_verbose, false, "extra verbose compilation tracing")
-DEFINE_IMPLICATION(trace_opt_verbose, trace_opt)
-
// Garbage collections flags.
DEFINE_INT(min_semi_space_size, 0,
"min size of a semi-space (in MBytes), the new space consists of two"
@@ -654,8 +665,6 @@ DEFINE_BOOL(always_compact, false, "Perform compaction on every full GC")
DEFINE_BOOL(never_compact, false,
"Never perform compaction on full GC - testing only")
DEFINE_BOOL(compact_code_space, true, "Compact code space on full collections")
-DEFINE_BOOL(cleanup_code_caches_at_gc, true,
- "Flush code caches in maps during mark compact cycle.")
DEFINE_BOOL(use_marking_progress_bar, true,
"Use a progress bar to scan large objects in increments when "
"incremental marking is active.")
@@ -748,17 +757,18 @@ DEFINE_BOOL(trace, false, "trace function calls")
// codegen.cc
DEFINE_BOOL(lazy, true, "use lazy compilation")
DEFINE_BOOL(trace_opt, false, "trace lazy optimization")
+DEFINE_BOOL(trace_opt_verbose, false, "extra verbose compilation tracing")
+DEFINE_IMPLICATION(trace_opt_verbose, trace_opt)
DEFINE_BOOL(trace_opt_stats, false, "trace lazy optimization statistics")
+DEFINE_BOOL(trace_deopt, false, "trace optimize function deoptimization")
DEFINE_BOOL(trace_file_names, false,
"include file names in trace-opt/trace-deopt output")
+DEFINE_BOOL(trace_interrupts, false, "trace interrupts when they are handled")
DEFINE_BOOL(opt, true, "use adaptive optimizations")
DEFINE_BOOL(always_opt, false, "always try to optimize functions")
DEFINE_BOOL(always_osr, false, "always try to OSR functions")
DEFINE_BOOL(prepare_always_opt, false, "prepare for turning on always opt")
-DEFINE_BOOL(trace_deopt, false, "trace optimize function deoptimization")
-DEFINE_BOOL(serialize_toplevel, true, "enable caching of toplevel scripts")
-DEFINE_BOOL(serialize_eager, false, "compile eagerly when caching scripts")
DEFINE_BOOL(trace_serializer, false, "print code serializer trace")
#ifdef DEBUG
DEFINE_BOOL(external_reference_stats, false,
@@ -868,7 +878,7 @@ DEFINE_BOOL(lazy_inner_functions, true, "enable lazy parsing inner functions")
DEFINE_BOOL(aggressive_lazy_inner_functions, false,
"even lazier inner function parsing")
DEFINE_IMPLICATION(aggressive_lazy_inner_functions, lazy_inner_functions)
-DEFINE_BOOL(preparser_scope_analysis, false,
+DEFINE_BOOL(preparser_scope_analysis, true,
"perform scope analysis for preparsed inner functions")
DEFINE_IMPLICATION(preparser_scope_analysis, aggressive_lazy_inner_functions)
@@ -931,6 +941,9 @@ DEFINE_INT(runtime_stats, 0,
DEFINE_VALUE_IMPLICATION(runtime_call_stats, runtime_stats, 1)
// snapshot-common.cc
+DEFINE_BOOL(lazy_deserialization, false,
+ "Deserialize code lazily from the snapshot.")
+DEFINE_BOOL(trace_lazy_deserialization, false, "Trace lazy deserialization.")
DEFINE_BOOL(profile_deserialization, false,
"Print the time it takes to deserialize the snapshot.")
DEFINE_BOOL(serialization_statistics, false,
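
Several of the flag changes above lean on DEFINE_IMPLICATION / DEFINE_VALUE_IMPLICATION, e.g. --stress-inline forcing the inlining thresholds to extreme values. A toy model of how such value implications behave once flags are resolved (plain struct fields stand in for the real flag machinery):

// Sketch of a value implication like
//   DEFINE_VALUE_IMPLICATION(stress_inline, max_inlining_levels, 999999)
// once the source flag is set, dependent flags take the implied values.
#include <cstdio>

struct Flags {
  bool stress_inline = false;
  int max_inlining_levels = 5;
  int max_inlined_bytecode_size = 500;
  bool polymorphic_inlining = true;
};

void ResolveImplications(Flags* f) {
  if (f->stress_inline) {             // source flag triggers...
    f->max_inlining_levels = 999999;  // ...the implied values
    f->max_inlined_bytecode_size = 999999;
    f->polymorphic_inlining = true;
  }
}

int main() {
  Flags f;
  f.stress_inline = true;  // e.g. passing --stress-inline
  ResolveImplications(&f);
  std::printf("levels=%d\n", f.max_inlining_levels);  // levels=999999
}
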
diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc
index 9fdc5d04be..0aeb3f91dc 100644
--- a/deps/v8/src/flags.cc
+++ b/deps/v8/src/flags.cc
@@ -12,7 +12,6 @@
#include "src/assembler.h"
#include "src/base/functional.h"
#include "src/base/platform/platform.h"
-#include "src/list-inl.h"
#include "src/ostreams.h"
#include "src/utils.h"
#include "src/wasm/wasm-limits.h"
@@ -249,8 +248,8 @@ std::ostream& operator<<(std::ostream& os, const Flag& flag) { // NOLINT
// static
-List<const char*>* FlagList::argv() {
- List<const char*>* args = new List<const char*>(8);
+std::vector<const char*>* FlagList::argv() {
+ std::vector<const char*>* args = new std::vector<const char*>(8);
Flag* args_flag = NULL;
for (size_t i = 0; i < num_flags; ++i) {
Flag* f = &flags[i];
@@ -264,22 +263,22 @@ List<const char*>* FlagList::argv() {
bool disabled = f->type() == Flag::TYPE_BOOL && !*f->bool_variable();
std::ostringstream os;
os << (disabled ? "--no" : "--") << f->name();
- args->Add(StrDup(os.str().c_str()));
+ args->push_back(StrDup(os.str().c_str()));
}
if (f->type() != Flag::TYPE_BOOL) {
std::ostringstream os;
os << *f;
- args->Add(StrDup(os.str().c_str()));
+ args->push_back(StrDup(os.str().c_str()));
}
}
}
if (args_flag != NULL) {
std::ostringstream os;
os << "--" << args_flag->name();
- args->Add(StrDup(os.str().c_str()));
+ args->push_back(StrDup(os.str().c_str()));
JSArguments jsargs = *args_flag->args_variable();
for (int j = 0; j < jsargs.argc; j++) {
- args->Add(StrDup(jsargs[j]));
+ args->push_back(StrDup(jsargs[j]));
}
}
return args;
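
The List -> std::vector migration in FlagList::argv() keeps the documented ownership contract: the caller disposes the container and every StrDup'd element. A small sketch of a conforming caller, with a local Dup standing in for V8's StrDup:

// Ownership contract for argv(): free each duplicated element, then
// delete the vector itself. Dup is an assumed stand-in for StrDup.
#include <cstdlib>
#include <cstring>
#include <vector>

static const char* Dup(const char* s) {
  char* copy = static_cast<char*>(std::malloc(std::strlen(s) + 1));
  std::strcpy(copy, s);
  return copy;
}

std::vector<const char*>* MakeArgs() {
  auto* args = new std::vector<const char*>();
  args->push_back(Dup("--no-lazy"));
  return args;
}

int main() {
  std::vector<const char*>* args = MakeArgs();
  for (const char* arg : *args) std::free(const_cast<char*>(arg));
  delete args;  // dispose the list as well as every element
}
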
diff --git a/deps/v8/src/flags.h b/deps/v8/src/flags.h
index 74a8df0014..7613759343 100644
--- a/deps/v8/src/flags.h
+++ b/deps/v8/src/flags.h
@@ -5,6 +5,8 @@
#ifndef V8_FLAGS_H_
#define V8_FLAGS_H_
+#include <vector>
+
#include "src/globals.h"
namespace v8 {
@@ -24,7 +26,7 @@ class V8_EXPORT_PRIVATE FlagList {
//
// The caller is responsible for disposing the list, as well
// as every element of it.
- static List<const char*>* argv();
+ static std::vector<const char*>* argv();
// Set the flag values by parsing the command line. If remove_flags is
// set, the flags and associated values are removed from (argc,
diff --git a/deps/v8/src/frame-constants.h b/deps/v8/src/frame-constants.h
index 6e67b84056..fa5921aef9 100644
--- a/deps/v8/src/frame-constants.h
+++ b/deps/v8/src/frame-constants.h
@@ -235,13 +235,6 @@ class InternalFrameConstants : public TypedFrameConstants {
DEFINE_TYPED_FRAME_SIZES(1);
};
-class FrameDropperFrameConstants : public InternalFrameConstants {
- public:
- // FP-relative.
- static const int kFunctionOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
- DEFINE_TYPED_FRAME_SIZES(2);
-};
-
class ConstructFrameConstants : public TypedFrameConstants {
public:
// FP-relative.
@@ -300,6 +293,11 @@ class InterpreterFrameConstants : public AllStatic {
static const int kBytecodeArrayExpressionIndex = -2;
static const int kBytecodeOffsetExpressionIndex = -1;
static const int kRegisterFileExpressionIndex = 0;
+
+ // Returns the number of stack slots needed for 'register_count' registers.
+ // This is needed because some architectures must pad the stack frame with
+ // additional stack slots to ensure the stack pointer is aligned.
+ static int RegisterStackSlotCount(int register_count);
};
inline static int FPOffsetToFrameSlot(int frame_offset) {
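
RegisterStackSlotCount is declared above without its definition; per its comment, some targets must pad the interpreter's register file so the stack pointer stays aligned. A plausible (assumed) implementation is a simple round-up, e.g. to an even slot count for 16-byte alignment with 8-byte slots:

// Assumed shape of RegisterStackSlotCount: round the register count up
// so the register file keeps the stack pointer aligned. Illustrative
// only; the per-architecture rule lives in the real implementation.
#include <cassert>

constexpr int kSlotSize = 8;         // pointer size, assumed 64-bit
constexpr int kStackAlignment = 16;  // required SP alignment, assumed

int RegisterStackSlotCount(int register_count) {
  const int slots_per_unit = kStackAlignment / kSlotSize;  // 2
  return (register_count + slots_per_unit - 1) / slots_per_unit *
         slots_per_unit;
}

int main() {
  assert(RegisterStackSlotCount(3) == 4);  // padded to an even count
  assert(RegisterStackSlotCount(4) == 4);  // already aligned
}
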
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index 5edd95d2f4..adfff5a9dc 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -153,34 +153,6 @@ Address JavaScriptFrame::GetParameterSlot(int index) const {
return caller_sp() + parameter_offset;
}
-inline Address JavaScriptFrame::GetOperandSlot(int index) const {
- Address base = fp() + JavaScriptFrameConstants::kLocal0Offset;
- DCHECK(IsAddressAligned(base, kPointerSize));
- DCHECK_EQ(type(), JAVA_SCRIPT);
- DCHECK_LT(index, ComputeOperandsCount());
- DCHECK_LE(0, index);
- // Operand stack grows down.
- return base - index * kPointerSize;
-}
-
-
-inline Object* JavaScriptFrame::GetOperand(int index) const {
- return Memory::Object_at(GetOperandSlot(index));
-}
-
-
-inline int JavaScriptFrame::ComputeOperandsCount() const {
- Address base = fp() + JavaScriptFrameConstants::kLocal0Offset;
- // Base points to low address of first operand and stack grows down, so add
- // kPointerSize to get the actual stack size.
- intptr_t stack_size_in_bytes = (base + kPointerSize) - sp();
- DCHECK(IsAligned(stack_size_in_bytes, kPointerSize));
- DCHECK(type() == JAVA_SCRIPT);
- DCHECK(stack_size_in_bytes >= 0);
- return static_cast<int>(stack_size_in_bytes >> kPointerSizeLog2);
-}
-
-
inline void JavaScriptFrame::set_receiver(Object* value) {
Memory::Object_at(GetParameterSlot(-1)) = value;
}
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 37f5b4d9cf..d578a64ed3 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -16,8 +16,7 @@
#include "src/string-stream.h"
#include "src/visitors.h"
#include "src/vm-state-inl.h"
-#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
namespace internal {
@@ -137,25 +136,6 @@ void JavaScriptFrameIterator::Advance() {
} while (!iterator_.done() && !iterator_.frame()->is_java_script());
}
-void JavaScriptFrameIterator::AdvanceToArgumentsFrame() {
- if (!frame()->has_adapted_arguments()) return;
- iterator_.Advance();
- DCHECK(iterator_.frame()->is_arguments_adaptor());
-}
-
-void JavaScriptFrameIterator::AdvanceWhileDebugContext(Debug* debug) {
- if (!debug->in_debug_scope()) return;
-
- while (!done()) {
- Context* context = Context::cast(frame()->context());
- if (context->native_context() == *debug->debug_context()) {
- Advance();
- } else {
- break;
- }
- }
-}
-
// -------------------------------------------------------------------------
StackTraceFrameIterator::StackTraceFrameIterator(Isolate* isolate)
@@ -185,12 +165,6 @@ bool StackTraceFrameIterator::IsValidFrame(StackFrame* frame) const {
return frame->is_wasm();
}
-void StackTraceFrameIterator::AdvanceToArgumentsFrame() {
- if (!is_javascript() || !javascript_frame()->has_adapted_arguments()) return;
- iterator_.Advance();
- DCHECK(iterator_.frame()->is_arguments_adaptor());
-}
-
// -------------------------------------------------------------------------
namespace {
@@ -266,9 +240,9 @@ SafeStackFrameIterator::SafeStackFrameIterator(
advance_frame = true;
}
} else {
- // Mark the frame as JAVA_SCRIPT if we cannot determine its type.
+ // Mark the frame as OPTIMIZED if we cannot determine its type.
      // The frame will be skipped anyway.
- type = StackFrame::JAVA_SCRIPT;
+ type = StackFrame::OPTIMIZED;
// Top frame is incomplete so we cannot reliably determine its type.
top_frame_type_ = StackFrame::NONE;
}
@@ -455,7 +429,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
*(state->pc_address))) {
return INTERPRETED;
} else {
- return JAVA_SCRIPT;
+ return OPTIMIZED;
}
}
} else {
@@ -477,8 +451,6 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
return OPTIMIZED;
}
return BUILTIN;
- case Code::FUNCTION:
- return JAVA_SCRIPT;
case Code::OPTIMIZED_FUNCTION:
return OPTIMIZED;
case Code::WASM_FUNCTION:
@@ -517,7 +489,6 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
case WASM_COMPILED:
return candidate;
case JS_TO_WASM:
- case JAVA_SCRIPT:
case OPTIMIZED:
case INTERPRETED:
default:
@@ -768,8 +739,7 @@ void StandardFrame::ComputeCallerState(State* state) const {
bool StandardFrame::IsConstructor() const { return false; }
-void StandardFrame::Summarize(std::vector<FrameSummary>* functions,
- FrameSummary::Mode mode) const {
+void StandardFrame::Summarize(std::vector<FrameSummary>* functions) const {
// This should only be called on frames which override this method.
UNREACHABLE();
}
@@ -820,7 +790,6 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
case C_WASM_ENTRY:
frame_header_size = TypedFrameConstants::kFixedFrameSizeFromFp;
break;
- case JAVA_SCRIPT:
case OPTIMIZED:
case INTERPRETED:
case BUILTIN:
@@ -993,15 +962,14 @@ void JavaScriptFrame::GetFunctions(
}
}
-void JavaScriptFrame::Summarize(std::vector<FrameSummary>* functions,
- FrameSummary::Mode mode) const {
+void JavaScriptFrame::Summarize(std::vector<FrameSummary>* functions) const {
DCHECK(functions->empty());
Code* code = LookupCode();
int offset = static_cast<int>(pc() - code->instruction_start());
AbstractCode* abstract_code = AbstractCode::cast(code);
FrameSummary::JavaScriptFrameSummary summary(isolate(), receiver(),
function(), abstract_code,
- offset, IsConstructor(), mode);
+ offset, IsConstructor());
functions->push_back(summary);
}
@@ -1166,9 +1134,8 @@ bool IsNonDeoptimizingAsmCode(Code* code, JSFunction* function) {
FrameSummary::JavaScriptFrameSummary::JavaScriptFrameSummary(
Isolate* isolate, Object* receiver, JSFunction* function,
- AbstractCode* abstract_code, int code_offset, bool is_constructor,
- Mode mode)
- : FrameSummaryBase(isolate, JAVA_SCRIPT),
+ AbstractCode* abstract_code, int code_offset, bool is_constructor)
+ : FrameSummaryBase(isolate, FrameSummary::JAVA_SCRIPT),
receiver_(receiver, isolate),
function_(function, isolate),
abstract_code_(abstract_code, isolate),
@@ -1176,8 +1143,7 @@ FrameSummary::JavaScriptFrameSummary::JavaScriptFrameSummary(
is_constructor_(is_constructor) {
DCHECK(abstract_code->IsBytecodeArray() ||
Code::cast(abstract_code)->kind() != Code::OPTIMIZED_FUNCTION ||
- IsNonDeoptimizingAsmCode(Code::cast(abstract_code), function) ||
- mode == kApproximateSummary);
+ IsNonDeoptimizingAsmCode(Code::cast(abstract_code), function));
}
bool FrameSummary::JavaScriptFrameSummary::is_subject_to_debugging() const {
@@ -1230,16 +1196,11 @@ WASM_SUMMARY_DISPATCH(int, byte_offset)
#undef WASM_SUMMARY_DISPATCH
int FrameSummary::WasmFrameSummary::SourcePosition() const {
- int offset = byte_offset();
Handle<WasmCompiledModule> compiled_module(wasm_instance()->compiled_module(),
isolate());
- if (compiled_module->is_asm_js()) {
- offset = WasmCompiledModule::GetAsmJsSourcePosition(
- compiled_module, function_index(), offset, at_to_number_conversion());
- } else {
- offset += compiled_module->GetFunctionOffset(function_index());
- }
- return offset;
+ return WasmCompiledModule::GetSourcePosition(compiled_module,
+ function_index(), byte_offset(),
+ at_to_number_conversion());
}
Handle<Script> FrameSummary::WasmFrameSummary::script() const {
@@ -1353,8 +1314,7 @@ FRAME_SUMMARY_DISPATCH(Handle<Context>, native_context)
#undef FRAME_SUMMARY_DISPATCH
-void OptimizedFrame::Summarize(std::vector<FrameSummary>* frames,
- FrameSummary::Mode mode) const {
+void OptimizedFrame::Summarize(std::vector<FrameSummary>* frames) const {
DCHECK(frames->empty());
DCHECK(is_optimized());
@@ -1370,9 +1330,6 @@ void OptimizedFrame::Summarize(std::vector<FrameSummary>* frames,
DeoptimizationInputData* const data = GetDeoptimizationData(&deopt_index);
if (deopt_index == Safepoint::kNoDeoptimizationIndex) {
CHECK_NULL(data);
- if (mode == FrameSummary::kApproximateSummary) {
- return JavaScriptFrame::Summarize(frames, mode);
- }
FATAL("Missing deoptimization information for OptimizedFrame::Summarize.");
}
@@ -1631,8 +1588,7 @@ void InterpretedFrame::WriteInterpreterRegister(int register_index,
return SetExpression(index + register_index, value);
}
-void InterpretedFrame::Summarize(std::vector<FrameSummary>* functions,
- FrameSummary::Mode mode) const {
+void InterpretedFrame::Summarize(std::vector<FrameSummary>* functions) const {
DCHECK(functions->empty());
AbstractCode* abstract_code =
AbstractCode::cast(function()->shared()->bytecode_array());
@@ -1712,7 +1668,7 @@ Address WasmCompiledFrame::GetCallerStackPointer() const {
}
WasmInstanceObject* WasmCompiledFrame::wasm_instance() const {
- WasmInstanceObject* obj = wasm::GetOwningWasmInstance(LookupCode());
+ WasmInstanceObject* obj = WasmInstanceObject::GetOwningInstance(LookupCode());
// This is a live stack frame; it must have a live instance.
DCHECK_NOT_NULL(obj);
return obj;
@@ -1730,8 +1686,7 @@ int WasmCompiledFrame::position() const {
return FrameSummary::GetSingle(this).SourcePosition();
}
-void WasmCompiledFrame::Summarize(std::vector<FrameSummary>* functions,
- FrameSummary::Mode mode) const {
+void WasmCompiledFrame::Summarize(std::vector<FrameSummary>* functions) const {
DCHECK(functions->empty());
Handle<Code> code(LookupCode(), isolate());
int offset = static_cast<int>(pc() - code->instruction_start());
@@ -1777,8 +1732,8 @@ void WasmInterpreterEntryFrame::Print(StringStream* accumulator, PrintMode mode,
if (mode != OVERVIEW) accumulator->Add("\n");
}
-void WasmInterpreterEntryFrame::Summarize(std::vector<FrameSummary>* functions,
- FrameSummary::Mode mode) const {
+void WasmInterpreterEntryFrame::Summarize(
+ std::vector<FrameSummary>* functions) const {
Handle<WasmInstanceObject> instance(wasm_instance(), isolate());
std::vector<std::pair<uint32_t, int>> interpreted_stack =
instance->debug_info()->GetInterpretedStack(fp());
@@ -1795,7 +1750,7 @@ Code* WasmInterpreterEntryFrame::unchecked_code() const {
}
WasmInstanceObject* WasmInterpreterEntryFrame::wasm_instance() const {
- WasmInstanceObject* ret = wasm::GetOwningWasmInstance(LookupCode());
+ WasmInstanceObject* ret = WasmInstanceObject::GetOwningInstance(LookupCode());
// This is a live stack frame, there must be a live wasm instance available.
DCHECK_NOT_NULL(ret);
return ret;
@@ -1862,13 +1817,7 @@ void JavaScriptFrame::Print(StringStream* accumulator,
accumulator->PrintName(script->name());
Address pc = this->pc();
- if (code != NULL && code->kind() == Code::FUNCTION &&
- pc >= code->instruction_start() && pc < code->instruction_end()) {
- int offset = static_cast<int>(pc - code->instruction_start());
- int source_pos = AbstractCode::cast(code)->SourcePosition(offset);
- int line = script->GetLineNumber(source_pos) + 1;
- accumulator->Add(":%d] [pc=%p]", line, pc);
- } else if (is_interpreted()) {
+ if (is_interpreted()) {
const InterpretedFrame* iframe =
reinterpret_cast<const InterpretedFrame*>(this);
BytecodeArray* bytecodes = iframe->GetBytecodeArray();
@@ -1937,10 +1886,10 @@ void JavaScriptFrame::Print(StringStream* accumulator,
Context* context = NULL;
if (this->context() != NULL && this->context()->IsContext()) {
context = Context::cast(this->context());
- }
- while (context->IsWithContext()) {
- context = context->previous();
- DCHECK(context != NULL);
+ while (context->IsWithContext()) {
+ context = context->previous();
+ DCHECK(context != NULL);
+ }
}
// Print heap-allocated local variables.
@@ -2037,24 +1986,8 @@ void InternalFrame::Iterate(RootVisitor* v) const {
if (code->has_tagged_params()) IterateExpressions(v);
}
-
-// -------------------------------------------------------------------------
-
-
-JavaScriptFrame* StackFrameLocator::FindJavaScriptFrame(int n) {
- DCHECK(n >= 0);
- for (int i = 0; i <= n; i++) {
- while (!iterator_.frame()->is_java_script()) iterator_.Advance();
- if (i == n) return JavaScriptFrame::cast(iterator_.frame());
- iterator_.Advance();
- }
- UNREACHABLE();
-}
-
-
// -------------------------------------------------------------------------
-
static Map* GcSafeMapOfCodeSpaceObject(HeapObject* object) {
MapWord map_word = object->map_word();
return map_word.IsForwardingAddress() ?
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 24f7dcd7f8..ebb7f0c3fd 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -88,7 +88,6 @@ class StackHandler BASE_EMBEDDED {
V(ENTRY, EntryFrame) \
V(CONSTRUCT_ENTRY, ConstructEntryFrame) \
V(EXIT, ExitFrame) \
- V(JAVA_SCRIPT, JavaScriptFrame) \
V(OPTIMIZED, OptimizedFrame) \
V(WASM_COMPILED, WasmCompiledFrame) \
V(WASM_TO_JS, WasmToJsFrame) \
@@ -218,8 +217,7 @@ class StackFrame BASE_EMBEDDED {
bool is_java_script() const {
Type type = this->type();
- return (type == JAVA_SCRIPT) || (type == OPTIMIZED) ||
- (type == INTERPRETED) || (type == BUILTIN) ||
+ return (type == OPTIMIZED) || (type == INTERPRETED) || (type == BUILTIN) ||
(type == JAVA_SCRIPT_BUILTIN_CONTINUATION);
}
bool is_wasm() const {
@@ -453,13 +451,6 @@ class StandardFrame;
class FrameSummary BASE_EMBEDDED {
public:
- // Mode for JavaScriptFrame::Summarize. Exact summary is required to produce
- // an exact stack trace. It will trigger an assertion failure if that is not
- // possible, e.g., because of missing deoptimization information. The
- // approximate mode should produce a summary even without deoptimization
- // information, but it might miss frames.
- enum Mode { kExactSummary, kApproximateSummary };
-
// Subclasses for the different summary kinds:
#define FRAME_SUMMARY_VARIANTS(F) \
F(JAVA_SCRIPT, JavaScriptFrameSummary, java_script_summary_, JavaScript) \
@@ -488,8 +479,7 @@ class FrameSummary BASE_EMBEDDED {
public:
JavaScriptFrameSummary(Isolate* isolate, Object* receiver,
JSFunction* function, AbstractCode* abstract_code,
- int code_offset, bool is_constructor,
- Mode mode = kExactSummary);
+ int code_offset, bool is_constructor);
Handle<Object> receiver() const { return receiver_; }
Handle<JSFunction> function() const { return function_; }
@@ -635,9 +625,7 @@ class StandardFrame : public StackFrame {
// Build a list with summaries for this frame including all inlined frames.
// The functions are ordered bottom-to-top (i.e. summaries.last() is the
// top-most activation; caller comes before callee).
- virtual void Summarize(
- std::vector<FrameSummary>* frames,
- FrameSummary::Mode mode = FrameSummary::kExactSummary) const;
+ virtual void Summarize(std::vector<FrameSummary>* frames) const;
static StandardFrame* cast(StackFrame* frame) {
DCHECK(frame->is_standard());
@@ -686,11 +674,9 @@ class StandardFrame : public StackFrame {
class JavaScriptFrame : public StandardFrame {
public:
- Type type() const override { return JAVA_SCRIPT; }
+ Type type() const override = 0;
- void Summarize(
- std::vector<FrameSummary>* frames,
- FrameSummary::Mode mode = FrameSummary::kExactSummary) const override;
+ void Summarize(std::vector<FrameSummary>* frames) const override;
// Accessors.
virtual JSFunction* function() const;
@@ -705,11 +691,6 @@ class JavaScriptFrame : public StandardFrame {
Object* GetParameter(int index) const override;
int ComputeParametersCount() const override;
- // Access the operand stack.
- inline Address GetOperandSlot(int index) const;
- inline Object* GetOperand(int index) const;
- inline int ComputeOperandsCount() const;
-
// Debugger access.
void SetParameterValue(int index, Object* value) const;
@@ -829,9 +810,7 @@ class OptimizedFrame : public JavaScriptFrame {
// is the top-most activation)
void GetFunctions(std::vector<SharedFunctionInfo*>* functions) const override;
- void Summarize(
- std::vector<FrameSummary>* frames,
- FrameSummary::Mode mode = FrameSummary::kExactSummary) const override;
+ void Summarize(std::vector<FrameSummary>* frames) const override;
// Lookup exception handler for current {pc}, returns -1 if none found.
int LookupExceptionHandlerInTable(
@@ -883,9 +862,7 @@ class InterpretedFrame : public JavaScriptFrame {
void WriteInterpreterRegister(int register_index, Object* value);
// Build a list with summaries for this frame including all inlined frames.
- void Summarize(
- std::vector<FrameSummary>* frames,
- FrameSummary::Mode mode = FrameSummary::kExactSummary) const override;
+ void Summarize(std::vector<FrameSummary>* frames) const override;
static int GetBytecodeOffset(Address fp);
@@ -975,8 +952,7 @@ class WasmCompiledFrame final : public StandardFrame {
int position() const override;
bool at_to_number_conversion() const;
- void Summarize(std::vector<FrameSummary>* frames,
- FrameSummary::Mode mode) const override;
+ void Summarize(std::vector<FrameSummary>* frames) const override;
static WasmCompiledFrame* cast(StackFrame* frame) {
DCHECK(frame->is_wasm_compiled());
@@ -1003,9 +979,7 @@ class WasmInterpreterEntryFrame final : public StandardFrame {
void Print(StringStream* accumulator, PrintMode mode,
int index) const override;
- void Summarize(
- std::vector<FrameSummary>* frames,
- FrameSummary::Mode mode = FrameSummary::kExactSummary) const override;
+ void Summarize(std::vector<FrameSummary>* frames) const override;
// Determine the code for the frame.
Code* unchecked_code() const override;
@@ -1205,14 +1179,7 @@ class JavaScriptFrameIterator BASE_EMBEDDED {
bool done() const { return iterator_.done(); }
void Advance();
-
- // Advance to the frame holding the arguments for the current
- // frame. This only affects the current frame if it has adapted
- // arguments.
- void AdvanceToArgumentsFrame();
-
- // Skips the frames that point to the debug context.
- void AdvanceWhileDebugContext(Debug* debug);
+ void AdvanceOneFrame() { iterator_.Advance(); }
private:
StackFrameIterator iterator_;
@@ -1228,6 +1195,7 @@ class StackTraceFrameIterator BASE_EMBEDDED {
StackTraceFrameIterator(Isolate* isolate, StackFrame::Id id);
bool done() const { return iterator_.done(); }
void Advance();
+ void AdvanceOneFrame() { iterator_.Advance(); }
inline StandardFrame* frame() const;
@@ -1235,11 +1203,6 @@ class StackTraceFrameIterator BASE_EMBEDDED {
inline bool is_wasm() const;
inline JavaScriptFrame* javascript_frame() const;
- // Advance to the frame holding the arguments for the current
- // frame. This only affects the current frame if it is a javascript frame and
- // has adapted arguments.
- void AdvanceToArgumentsFrame();
-
private:
StackFrameIterator iterator_;
bool IsValidFrame(StackFrame* frame) const;
@@ -1274,20 +1237,6 @@ class SafeStackFrameIterator: public StackFrameIteratorBase {
ExternalCallbackScope* external_callback_scope_;
};
-
-class StackFrameLocator BASE_EMBEDDED {
- public:
- explicit StackFrameLocator(Isolate* isolate) : iterator_(isolate) {}
-
- // Find the nth JavaScript frame on the stack. The caller must
- // guarantee that such a frame exists.
- JavaScriptFrame* FindJavaScriptFrame(int n);
-
- private:
- StackFrameIterator iterator_;
-};
-
-
// Reads all frames on the current stack and copies them into the current
// zone memory.
Vector<StackFrame*> CreateStackMap(Isolate* isolate, Zone* zone);
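frames.h now pares the iterator surface down to Advance()/AdvanceOneFrame() and drops both StackFrameLocator and AdvanceToArgumentsFrame. The removed locator is expressible directly in terms of JavaScriptFrameIterator; a hedged sketch of the equivalent helper (hypothetical, not part of this patch, and assuming Advance() steps to the next JavaScript frame as the class's remaining interface suggests):

    // Hypothetical replacement for the removed StackFrameLocator.
    JavaScriptFrame* FindNthJavaScriptFrame(Isolate* isolate, int n) {
      DCHECK_GE(n, 0);
      JavaScriptFrameIterator it(isolate);  // Positioned at the top JS frame.
      for (int i = 0; i < n && !it.done(); i++) it.Advance();
      return it.done() ? nullptr : it.frame();
    }

Unlike the old locator, which required the caller to guarantee that the n-th frame exists (and hit UNREACHABLE() otherwise), this version degrades to nullptr.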
diff --git a/deps/v8/src/futex-emulation.cc b/deps/v8/src/futex-emulation.cc
index af9cbeb12a..b5144e72db 100644
--- a/deps/v8/src/futex-emulation.cc
+++ b/deps/v8/src/futex-emulation.cc
@@ -11,7 +11,6 @@
#include "src/conversions.h"
#include "src/handles-inl.h"
#include "src/isolate.h"
-#include "src/list-inl.h"
#include "src/objects-inl.h"
namespace v8 {
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index d0395330c2..57c8100325 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -5,7 +5,9 @@
#include "src/gdb-jit.h"
#include <memory>
+#include <vector>
+#include "src/api.h"
#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
@@ -923,8 +925,6 @@ class ELFSymbolTable : public ELFSection {
class LineInfo : public Malloced {
public:
- LineInfo() : pc_info_(10) {}
-
void SetPosition(intptr_t pc, int pos, bool is_statement) {
AddPCInfo(PCInfo(pc, pos, is_statement));
}
@@ -938,12 +938,12 @@ class LineInfo : public Malloced {
bool is_statement_;
};
- List<PCInfo>* pc_info() { return &pc_info_; }
+ std::vector<PCInfo>* pc_info() { return &pc_info_; }
private:
- void AddPCInfo(const PCInfo& pc_info) { pc_info_.Add(pc_info); }
+ void AddPCInfo(const PCInfo& pc_info) { pc_info_.push_back(pc_info); }
- List<PCInfo> pc_info_;
+ std::vector<PCInfo> pc_info_;
};
@@ -970,7 +970,7 @@ class CodeDescription BASE_EMBEDDED {
bool is_function() const {
Code::Kind kind = code_->kind();
- return kind == Code::FUNCTION || kind == Code::OPTIMIZED_FUNCTION;
+ return kind == Code::OPTIMIZED_FUNCTION;
}
bool has_scope_info() const { return shared_info_ != NULL; }
@@ -1512,11 +1512,10 @@ class DebugLineSection : public DebugSection {
intptr_t line = 1;
bool is_statement = true;
- List<LineInfo::PCInfo>* pc_info = desc_->lineinfo()->pc_info();
- pc_info->Sort(&ComparePCInfo);
+ std::vector<LineInfo::PCInfo>* pc_info = desc_->lineinfo()->pc_info();
+ std::sort(pc_info->begin(), pc_info->end(), &ComparePCInfo);
- int pc_info_length = pc_info->length();
- for (int i = 0; i < pc_info_length; i++) {
+ for (size_t i = 0; i < pc_info->size(); i++) {
LineInfo::PCInfo* info = &pc_info->at(i);
DCHECK(info->pc_ >= pc);
@@ -1531,7 +1530,7 @@ class DebugLineSection : public DebugSection {
// the last pc address in the function as a statement (e.g. "}"), so that
// a user can see the result of the last line executed in the function,
// should control reach the end.
- if ((i+1) == pc_info_length) {
+ if ((i + 1) == pc_info->size()) {
if (!is_statement) {
w->Write<uint8_t>(DW_LNS_NEGATE_STMT);
}
@@ -1588,18 +1587,15 @@ class DebugLineSection : public DebugSection {
w->Write<uint8_t>(op);
}
- static int ComparePCInfo(const LineInfo::PCInfo* a,
- const LineInfo::PCInfo* b) {
- if (a->pc_ == b->pc_) {
- if (a->is_statement_ != b->is_statement_) {
- return b->is_statement_ ? +1 : -1;
+ static bool ComparePCInfo(const LineInfo::PCInfo& a,
+ const LineInfo::PCInfo& b) {
+ if (a.pc_ == b.pc_) {
+ if (a.is_statement_ != b.is_statement_) {
+ return !b.is_statement_;
}
- return 0;
- } else if (a->pc_ > b->pc_) {
- return +1;
- } else {
- return -1;
+ return false;
}
+ return a.pc_ < b.pc_;
}
CodeDescription* desc_;
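List<T>::Sort took a qsort-style three-way comparator returning -1/0/+1; std::sort instead wants a strict weak ordering that returns true exactly when `a` precedes `b`. The rewrite above preserves the original order: ascending `pc_`, and for equal `pc_` the statement entry first, returning false for equivalent elements as std::sort requires. A self-contained check of that behavior (hypothetical stand-in type):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct PCInfo {             // Stand-in for LineInfo::PCInfo.
      intptr_t pc_;
      bool is_statement_;
    };

    static bool ComparePCInfo(const PCInfo& a, const PCInfo& b) {
      if (a.pc_ == b.pc_) {
        if (a.is_statement_ != b.is_statement_) return !b.is_statement_;
        return false;           // Equivalent: neither strictly precedes.
      }
      return a.pc_ < b.pc_;
    }

    int main() {
      std::vector<PCInfo> v = {{20, false}, {10, false}, {10, true}};
      std::sort(v.begin(), v.end(), &ComparePCInfo);
      assert(v[0].pc_ == 10 && v[0].is_statement_);  // Statement first.
      assert(v[1].pc_ == 10 && !v[1].is_statement_);
      assert(v[2].pc_ == 20);
    }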
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index 3d5348a537..9ae13d59f4 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -160,14 +160,21 @@ class GlobalHandles::Node {
bool IsInUse() const { return state() != FREE; }
+ bool IsPhantomCallback() const {
+ return weakness_type() == PHANTOM_WEAK ||
+ weakness_type() == PHANTOM_WEAK_2_EMBEDDER_FIELDS;
+ }
+
+ bool IsPhantomResetHandle() const {
+ return weakness_type() == PHANTOM_WEAK_RESET_HANDLE;
+ }
+
bool IsPendingPhantomCallback() const {
- return state() == PENDING &&
- (weakness_type() == PHANTOM_WEAK ||
- weakness_type() == PHANTOM_WEAK_2_EMBEDDER_FIELDS);
+ return state() == PENDING && IsPhantomCallback();
}
bool IsPendingPhantomResetHandle() const {
- return state() == PENDING && weakness_type() == PHANTOM_WEAK_RESET_HANDLE;
+ return state() == PENDING && IsPhantomResetHandle();
}
bool IsRetainer() const {
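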
@@ -254,7 +261,7 @@ class GlobalHandles::Node {
void CollectPhantomCallbackData(
Isolate* isolate,
- List<PendingPhantomCallback>* pending_phantom_callbacks) {
+ std::vector<PendingPhantomCallback>* pending_phantom_callbacks) {
DCHECK(weakness_type() == PHANTOM_WEAK ||
weakness_type() == PHANTOM_WEAK_2_EMBEDDER_FIELDS);
DCHECK(state() == PENDING);
@@ -277,7 +284,7 @@ class GlobalHandles::Node {
typedef v8::WeakCallbackInfo<void> Data;
auto callback = reinterpret_cast<Data::Callback>(weak_callback_);
- pending_phantom_callbacks->Add(
+ pending_phantom_callbacks->push_back(
PendingPhantomCallback(this, callback, parameter(), embedder_fields));
DCHECK(IsInUse());
set_state(NEAR_DEATH);
@@ -502,9 +509,10 @@ class GlobalHandles::PendingPhantomCallbacksSecondPassTask
// Takes ownership of the contents of pending_phantom_callbacks, leaving it in
// the same state it would be after a call to Clear().
PendingPhantomCallbacksSecondPassTask(
- List<PendingPhantomCallback>* pending_phantom_callbacks, Isolate* isolate)
+ std::vector<PendingPhantomCallback>* pending_phantom_callbacks,
+ Isolate* isolate)
: CancelableTask(isolate), isolate_(isolate) {
- pending_phantom_callbacks_.Swap(pending_phantom_callbacks);
+ pending_phantom_callbacks_.swap(*pending_phantom_callbacks);
}
void RunInternal() override {
@@ -520,7 +528,7 @@ class GlobalHandles::PendingPhantomCallbacksSecondPassTask
private:
Isolate* isolate_;
- List<PendingPhantomCallback> pending_phantom_callbacks_;
+ std::vector<PendingPhantomCallback> pending_phantom_callbacks_;
DISALLOW_COPY_AND_ASSIGN(PendingPhantomCallbacksSecondPassTask);
};
@@ -557,7 +565,7 @@ Handle<Object> GlobalHandles::Create(Object* value) {
result->Acquire(value);
if (isolate_->heap()->InNewSpace(value) &&
!result->is_in_new_space_list()) {
- new_space_nodes_.Add(result);
+ new_space_nodes_.push_back(result);
result->set_in_new_space_list(true);
}
return result->handle();
@@ -612,36 +620,50 @@ bool GlobalHandles::IsWeak(Object** location) {
}
DISABLE_CFI_PERF
-void GlobalHandles::IterateWeakRoots(RootVisitor* v) {
+void GlobalHandles::IterateWeakRootsForFinalizers(RootVisitor* v) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
Node* node = it.node();
- if (node->IsWeakRetainer()) {
- // Pending weak phantom handles die immediately. Everything else survives.
- if (node->IsPendingPhantomResetHandle()) {
+ if (node->IsWeakRetainer() && node->state() == Node::PENDING) {
+ DCHECK(!node->IsPhantomCallback());
+ DCHECK(!node->IsPhantomResetHandle());
+ // Finalizers need to survive.
+ v->VisitRootPointer(Root::kGlobalHandles, node->location());
+ }
+ }
+}
+
+DISABLE_CFI_PERF
+void GlobalHandles::IterateWeakRootsForPhantomHandles(
+ WeakSlotCallback should_reset_handle) {
+ for (NodeIterator it(this); !it.done(); it.Advance()) {
+ Node* node = it.node();
+ if (node->IsWeakRetainer() && should_reset_handle(node->location())) {
+ if (node->IsPhantomResetHandle()) {
+ node->MarkPending();
node->ResetPhantomHandle();
++number_of_phantom_handle_resets_;
- } else if (node->IsPendingPhantomCallback()) {
+ } else if (node->IsPhantomCallback()) {
+ node->MarkPending();
node->CollectPhantomCallbackData(isolate(),
&pending_phantom_callbacks_);
- } else {
- v->VisitRootPointer(Root::kGlobalHandles, node->location());
}
}
}
}
-
-void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback f) {
+void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback should_reset_handle) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
- if (it.node()->IsWeak() && f(it.node()->location())) {
- it.node()->MarkPending();
+ Node* node = it.node();
+ if (node->IsWeak() && should_reset_handle(node->location())) {
+ if (!node->IsPhantomCallback() && !node->IsPhantomResetHandle()) {
+ node->MarkPending();
+ }
}
}
}
void GlobalHandles::IterateNewSpaceStrongAndDependentRoots(RootVisitor* v) {
- for (int i = 0; i < new_space_nodes_.length(); ++i) {
- Node* node = new_space_nodes_[i];
+ for (Node* node : new_space_nodes_) {
if (node->IsStrongRetainer() ||
(node->IsWeakRetainer() && !node->is_independent() &&
node->is_active())) {
@@ -653,7 +675,7 @@ void GlobalHandles::IterateNewSpaceStrongAndDependentRoots(RootVisitor* v) {
void GlobalHandles::IterateNewSpaceStrongAndDependentRootsAndIdentifyUnmodified(
RootVisitor* v, size_t start, size_t end) {
for (size_t i = start; i < end; ++i) {
- Node* node = new_space_nodes_[static_cast<int>(i)];
+ Node* node = new_space_nodes_[i];
if (node->IsWeak() && !JSObject::IsUnmodifiedApiObject(node->location())) {
node->set_active(true);
}
@@ -667,8 +689,7 @@ void GlobalHandles::IterateNewSpaceStrongAndDependentRootsAndIdentifyUnmodified(
void GlobalHandles::IdentifyWeakUnmodifiedObjects(
WeakSlotCallback is_unmodified) {
- for (int i = 0; i < new_space_nodes_.length(); ++i) {
- Node* node = new_space_nodes_[i];
+ for (Node* node : new_space_nodes_) {
if (node->IsWeak() && !is_unmodified(node->location())) {
node->set_active(true);
}
@@ -678,8 +699,7 @@ void GlobalHandles::IdentifyWeakUnmodifiedObjects(
void GlobalHandles::MarkNewSpaceWeakUnmodifiedObjectsPending(
WeakSlotCallbackWithHeap is_unscavenged) {
- for (int i = 0; i < new_space_nodes_.length(); ++i) {
- Node* node = new_space_nodes_[i];
+ for (Node* node : new_space_nodes_) {
DCHECK(node->is_in_new_space_list());
if ((node->is_independent() || !node->is_active()) && node->IsWeak() &&
is_unscavenged(isolate_->heap(), node->location())) {
@@ -689,8 +709,7 @@ void GlobalHandles::MarkNewSpaceWeakUnmodifiedObjectsPending(
}
void GlobalHandles::IterateNewSpaceWeakUnmodifiedRoots(RootVisitor* v) {
- for (int i = 0; i < new_space_nodes_.length(); ++i) {
- Node* node = new_space_nodes_[i];
+ for (Node* node : new_space_nodes_) {
DCHECK(node->is_in_new_space_list());
if ((node->is_independent() || !node->is_active()) &&
node->IsWeakRetainer()) {
@@ -709,9 +728,10 @@ void GlobalHandles::IterateNewSpaceWeakUnmodifiedRoots(RootVisitor* v) {
}
void GlobalHandles::InvokeSecondPassPhantomCallbacks(
- List<PendingPhantomCallback>* callbacks, Isolate* isolate) {
- while (callbacks->length() != 0) {
- auto callback = callbacks->RemoveLast();
+ std::vector<PendingPhantomCallback>* callbacks, Isolate* isolate) {
+ while (!callbacks->empty()) {
+ auto callback = callbacks->back();
+ callbacks->pop_back();
DCHECK(callback.node() == nullptr);
// Fire second pass callback
callback.Invoke(isolate);
@@ -722,8 +742,7 @@ void GlobalHandles::InvokeSecondPassPhantomCallbacks(
int GlobalHandles::PostScavengeProcessing(
const int initial_post_gc_processing_count) {
int freed_nodes = 0;
- for (int i = 0; i < new_space_nodes_.length(); ++i) {
- Node* node = new_space_nodes_[i];
+ for (Node* node : new_space_nodes_) {
DCHECK(node->is_in_new_space_list());
if (!node->IsRetainer()) {
// Free nodes do not have weak callbacks. Do not use them to compute
@@ -782,9 +801,8 @@ int GlobalHandles::PostMarkSweepProcessing(
void GlobalHandles::UpdateListOfNewSpaceNodes() {
- int last = 0;
- for (int i = 0; i < new_space_nodes_.length(); ++i) {
- Node* node = new_space_nodes_[i];
+ size_t last = 0;
+ for (Node* node : new_space_nodes_) {
DCHECK(node->is_in_new_space_list());
if (node->IsRetainer()) {
if (isolate_->heap()->InNewSpace(node->object())) {
@@ -799,29 +817,28 @@ void GlobalHandles::UpdateListOfNewSpaceNodes() {
isolate_->heap()->IncrementNodesDiedInNewSpace();
}
}
- new_space_nodes_.Rewind(last);
- new_space_nodes_.Trim();
+ DCHECK_LE(last, new_space_nodes_.size());
+ new_space_nodes_.resize(last);
+ new_space_nodes_.shrink_to_fit();
}
int GlobalHandles::DispatchPendingPhantomCallbacks(
bool synchronous_second_pass) {
int freed_nodes = 0;
- List<PendingPhantomCallback> second_pass_callbacks;
+ std::vector<PendingPhantomCallback> second_pass_callbacks;
{
// The initial pass callbacks must simply clear the nodes.
- for (auto i = pending_phantom_callbacks_.begin();
- i != pending_phantom_callbacks_.end(); ++i) {
- auto callback = i;
+ for (auto callback : pending_phantom_callbacks_) {
// Skip callbacks that have already been processed once.
- if (callback->node() == nullptr) continue;
- callback->Invoke(isolate());
- if (callback->callback()) second_pass_callbacks.Add(*callback);
+ if (callback.node() == nullptr) continue;
+ callback.Invoke(isolate());
+ if (callback.callback()) second_pass_callbacks.push_back(callback);
freed_nodes++;
}
}
- pending_phantom_callbacks_.Clear();
- if (second_pass_callbacks.length() > 0) {
+ pending_phantom_callbacks_.clear();
+ if (!second_pass_callbacks.empty()) {
if (FLAG_optimize_for_size || FLAG_predictable || synchronous_second_pass) {
isolate()->heap()->CallGCPrologueCallbacks(
GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags);
@@ -913,8 +930,7 @@ void GlobalHandles::IterateAllRoots(RootVisitor* v) {
DISABLE_CFI_PERF
void GlobalHandles::IterateAllNewSpaceRoots(RootVisitor* v) {
- for (int i = 0; i < new_space_nodes_.length(); ++i) {
- Node* node = new_space_nodes_[i];
+ for (Node* node : new_space_nodes_) {
if (node->IsRetainer()) {
v->VisitRootPointer(Root::kGlobalHandles, node->location());
}
@@ -925,7 +941,7 @@ DISABLE_CFI_PERF
void GlobalHandles::IterateNewSpaceRoots(RootVisitor* v, size_t start,
size_t end) {
for (size_t i = start; i < end; ++i) {
- Node* node = new_space_nodes_[static_cast<int>(i)];
+ Node* node = new_space_nodes_[i];
if (node->IsRetainer()) {
v->VisitRootPointer(Root::kGlobalHandles, node->location());
}
@@ -955,8 +971,7 @@ void GlobalHandles::IterateAllRootsWithClassIds(
DISABLE_CFI_PERF
void GlobalHandles::IterateAllRootsInNewSpaceWithClassIds(
v8::PersistentHandleVisitor* visitor) {
- for (int i = 0; i < new_space_nodes_.length(); ++i) {
- Node* node = new_space_nodes_[i];
+ for (Node* node : new_space_nodes_) {
if (node->IsRetainer() && node->has_wrapper_class_id()) {
ApplyPersistentHandleVisitor(visitor, node);
}
@@ -967,8 +982,7 @@ void GlobalHandles::IterateAllRootsInNewSpaceWithClassIds(
DISABLE_CFI_PERF
void GlobalHandles::IterateWeakRootsInNewSpaceWithClassIds(
v8::PersistentHandleVisitor* visitor) {
- for (int i = 0; i < new_space_nodes_.length(); ++i) {
- Node* node = new_space_nodes_[i];
+ for (Node* node : new_space_nodes_) {
if (node->has_wrapper_class_id() && node->IsWeak()) {
ApplyPersistentHandleVisitor(visitor, node);
}
@@ -1044,14 +1058,13 @@ EternalHandles::EternalHandles() : size_(0) {
EternalHandles::~EternalHandles() {
- for (int i = 0; i < blocks_.length(); i++) delete[] blocks_[i];
+ for (Object** block : blocks_) delete[] block;
}
void EternalHandles::IterateAllRoots(RootVisitor* visitor) {
int limit = size_;
- for (int i = 0; i < blocks_.length(); i++) {
+ for (Object** block : blocks_) {
DCHECK(limit > 0);
- Object** block = blocks_[i];
visitor->VisitRootPointers(Root::kEternalHandles, block,
block + Min(limit, kSize));
limit -= kSize;
@@ -1059,22 +1072,21 @@ void EternalHandles::IterateAllRoots(RootVisitor* visitor) {
}
void EternalHandles::IterateNewSpaceRoots(RootVisitor* visitor) {
- for (int i = 0; i < new_space_indices_.length(); i++) {
- visitor->VisitRootPointer(Root::kEternalHandles,
- GetLocation(new_space_indices_[i]));
+ for (int index : new_space_indices_) {
+ visitor->VisitRootPointer(Root::kEternalHandles, GetLocation(index));
}
}
void EternalHandles::PostGarbageCollectionProcessing(Heap* heap) {
- int last = 0;
- for (int i = 0; i < new_space_indices_.length(); i++) {
- int index = new_space_indices_[i];
+ size_t last = 0;
+ for (int index : new_space_indices_) {
if (heap->InNewSpace(*GetLocation(index))) {
new_space_indices_[last++] = index;
}
}
- new_space_indices_.Rewind(last);
+ DCHECK_LE(last, new_space_indices_.size());
+ new_space_indices_.resize(last);
}
@@ -1089,12 +1101,12 @@ void EternalHandles::Create(Isolate* isolate, Object* object, int* index) {
Object** next_block = new Object*[kSize];
Object* the_hole = isolate->heap()->the_hole_value();
MemsetPointer(next_block, the_hole, kSize);
- blocks_.Add(next_block);
+ blocks_.push_back(next_block);
}
DCHECK_EQ(isolate->heap()->the_hole_value(), blocks_[block][offset]);
blocks_[block][offset] = object;
if (isolate->heap()->InNewSpace(object)) {
- new_space_indices_.Add(size_);
+ new_space_indices_.push_back(size_);
}
*index = size_++;
}
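One idiom worth calling out from the conversion above: PendingPhantomCallbacksSecondPassTask takes ownership of the pending callbacks by swapping them into a member vector, which is O(1) and leaves the source empty, and InvokeSecondPassPhantomCallbacks drains with back()/pop_back() where List<T>::RemoveLast() used to be. A compact sketch of the same shape (hypothetical element type):

    #include <vector>

    class SecondPassTask {
     public:
      // Steal the contents; the source is left as if clear() had been called.
      explicit SecondPassTask(std::vector<int>* pending) {
        callbacks_.swap(*pending);
      }

      void Run() {
        while (!callbacks_.empty()) {
          int cb = callbacks_.back();  // List<T>::RemoveLast() equivalent.
          callbacks_.pop_back();
          (void)cb;                    // callback.Invoke(isolate) in V8.
        }
      }

     private:
      std::vector<int> callbacks_;
    };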
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index 6a25134698..b5c3b2191d 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -6,12 +6,12 @@
#define V8_GLOBAL_HANDLES_H_
#include <type_traits>
+#include <vector>
#include "include/v8.h"
#include "include/v8-profiler.h"
#include "src/handles.h"
-#include "src/list.h"
#include "src/utils.h"
namespace v8 {
@@ -92,7 +92,7 @@ class GlobalHandles {
number_of_phantom_handle_resets_ = 0;
}
- size_t NumberOfNewSpaceNodes() { return new_space_nodes_.length(); }
+ size_t NumberOfNewSpaceNodes() { return new_space_nodes_.size(); }
// Clear the weakness of a global handle.
static void* ClearWeakness(Object** location);
@@ -133,12 +133,13 @@ class GlobalHandles {
// and have class IDs
void IterateWeakRootsInNewSpaceWithClassIds(v8::PersistentHandleVisitor* v);
- // Iterates over all weak roots in heap.
- void IterateWeakRoots(RootVisitor* v);
+ // Iterates over weak roots on the heap.
+ void IterateWeakRootsForFinalizers(RootVisitor* v);
+ void IterateWeakRootsForPhantomHandles(WeakSlotCallback should_reset_handle);
- // Find all weak handles satisfying the callback predicate, mark
- // them as pending.
- void IdentifyWeakHandles(WeakSlotCallback f);
+ // Marks all handles that should be finalized based on the predicate
+ // |should_reset_handle| as pending.
+ void IdentifyWeakHandles(WeakSlotCallback should_reset_handle);
// NOTE: Five ...NewSpace... functions below are used during
// scavenge collections and iterate over sets of handles that are
@@ -188,7 +189,7 @@ class GlobalHandles {
// Helpers for PostGarbageCollectionProcessing.
static void InvokeSecondPassPhantomCallbacks(
- List<PendingPhantomCallback>* callbacks, Isolate* isolate);
+ std::vector<PendingPhantomCallback>* callbacks, Isolate* isolate);
int PostScavengeProcessing(int initial_post_gc_processing_count);
int PostMarkSweepProcessing(int initial_post_gc_processing_count);
int DispatchPendingPhantomCallbacks(bool synchronous_second_pass);
@@ -212,13 +213,13 @@ class GlobalHandles {
// Contains all nodes holding new space objects. Note: when the list
// is accessed, some of the objects may have been promoted already.
- List<Node*> new_space_nodes_;
+ std::vector<Node*> new_space_nodes_;
int post_gc_processing_count_;
size_t number_of_phantom_handle_resets_;
- List<PendingPhantomCallback> pending_phantom_callbacks_;
+ std::vector<PendingPhantomCallback> pending_phantom_callbacks_;
friend class Isolate;
@@ -311,8 +312,8 @@ class EternalHandles {
}
int size_;
- List<Object**> blocks_;
- List<int> new_space_indices_;
+ std::vector<Object**> blocks_;
+ std::vector<int> new_space_indices_;
int singleton_handles_[NUMBER_OF_SINGLETON_HANDLES];
DISALLOW_COPY_AND_ASSIGN(EternalHandles);
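The same migration replaces List's Rewind()+Trim() compaction with resize()+shrink_to_fit(), as seen in UpdateListOfNewSpaceNodes and EternalHandles::PostGarbageCollectionProcessing: surviving elements are copied down to a `last` cursor, then the tail is cut. A generic sketch of the pattern (hypothetical helper; V8 writes it inline):

    #include <vector>

    template <typename T, typename Pred>
    void CompactInPlace(std::vector<T>* v, Pred keep) {
      size_t last = 0;
      for (const T& e : *v) {
        if (keep(e)) (*v)[last++] = e;  // Writes never pass the read cursor.
      }
      v->resize(last);                  // List<T>::Rewind(last)
      v->shrink_to_fit();               // List<T>::Trim()
    }

std::remove_if would express the same thing; the explicit cursor simply mirrors the patch and keeps the surviving order stable.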
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index e06257d35e..c98f55f02f 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -1256,6 +1256,7 @@ inline uint32_t ObjectHash(Address address) {
// to a more generic type when we combine feedback.
// kSignedSmall -> kSignedSmallInputs -> kNumber -> kNumberOrOddball -> kAny
// kString -> kAny
+// kBigInt -> kAny
// TODO(mythria): Remove kNumber type when crankshaft can handle Oddballs
// similar to Numbers. We don't need kNumber feedback for Turbofan. Extra
// information about Number might reduce few instructions but causes more
@@ -1270,7 +1271,8 @@ class BinaryOperationFeedback {
kNumber = 0x7,
kNumberOrOddball = 0xF,
kString = 0x10,
- kAny = 0x3F
+ kBigInt = 0x20,
+ kAny = 0x7F
};
};
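Feedback kinds are cumulative bitmasks: combining feedback is a bitwise OR, so kAny must strictly contain the union of every concrete kind. Adding kBigInt = 0x20 consumes the bit that previously belonged only to kAny (0x3F), which is why kAny widens to 0x7F and gains a fresh distinguishing bit (0x40). Two checks one could add, in the style of the STATIC_ASSERTs this patch introduces for ForInFeedback below (not part of the patch):

    static_assert((BinaryOperationFeedback::kBigInt |
                   BinaryOperationFeedback::kAny) ==
                      BinaryOperationFeedback::kAny,
                  "kBigInt folds into kAny");
    static_assert((BinaryOperationFeedback::kString |
                   BinaryOperationFeedback::kBigInt) !=
                      BinaryOperationFeedback::kAny,
                  "no union of narrower kinds aliases kAny");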
@@ -1297,6 +1299,27 @@ class CompareOperationFeedback {
};
};
+// Type feedback is encoded in such a way that we can combine the feedback
+// at different points by performing an 'OR' operation. Type feedback moves
+// to a more generic type when we combine feedback.
+// kNone -> kEnumCacheKeysAndIndices -> kEnumCacheKeys -> kAny
+class ForInFeedback {
+ public:
+ enum {
+ kNone = 0x0,
+ kEnumCacheKeysAndIndices = 0x1,
+ kEnumCacheKeys = 0x3,
+ kAny = 0x7
+ };
+};
+STATIC_ASSERT((ForInFeedback::kNone |
+ ForInFeedback::kEnumCacheKeysAndIndices) ==
+ ForInFeedback::kEnumCacheKeysAndIndices);
+STATIC_ASSERT((ForInFeedback::kEnumCacheKeysAndIndices |
+ ForInFeedback::kEnumCacheKeys) == ForInFeedback::kEnumCacheKeys);
+STATIC_ASSERT((ForInFeedback::kEnumCacheKeys | ForInFeedback::kAny) ==
+ ForInFeedback::kAny);
+
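Because the encodings are nested bit patterns, "combine" really is just OR, and the STATIC_ASSERTs above pin down that OR never moves feedback down the lattice. A hypothetical constexpr helper makes the worked example explicit:

    // Hypothetical join helper; V8 performs the OR inline at feedback sites.
    constexpr int CombineForInFeedback(int a, int b) { return a | b; }

    // 0x1 | 0x3 == 0x3: indices+keys feedback joined with keys-only
    // feedback degrades to keys-only, never the other way around.
    static_assert(CombineForInFeedback(ForInFeedback::kEnumCacheKeysAndIndices,
                                       ForInFeedback::kEnumCacheKeys) ==
                      ForInFeedback::kEnumCacheKeys,
                  "join moves up the lattice");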
enum class UnicodeEncoding : uint8_t {
// Different unicode encodings in a |word32|:
UTF16, // hi 16bits -> trailing surrogate or 0, low 16bits -> lead surrogate
@@ -1417,10 +1440,6 @@ enum IsolateAddressId {
} // namespace internal
} // namespace v8
-// Used by js-builtin-reducer to identify whether ReduceArrayIterator() is
-// reducing a JSArray method, or a JSTypedArray method.
-enum class ArrayIteratorKind { kArray, kTypedArray };
-
namespace i = v8::internal;
#endif // V8_GLOBALS_H_
diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h
index cce00a891d..1d3f922489 100644
--- a/deps/v8/src/handles-inl.h
+++ b/deps/v8/src/handles-inl.h
@@ -5,9 +5,9 @@
#ifndef V8_HANDLES_INL_H_
#define V8_HANDLES_INL_H_
-#include "src/api.h"
#include "src/handles.h"
#include "src/isolate.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -33,6 +33,9 @@ HandleScope::HandleScope(Isolate* isolate) {
}
template <typename T>
+Handle<T>::Handle(T* object) : Handle(object, object->GetIsolate()) {}
+
+template <typename T>
Handle<T>::Handle(T* object, Isolate* isolate) : HandleBase(object, isolate) {}
template <typename T>
@@ -122,7 +125,7 @@ Object** HandleScope::GetHandle(Isolate* isolate, Object* value) {
#ifdef DEBUG
inline SealHandleScope::SealHandleScope(Isolate* isolate) : isolate_(isolate) {
// Make sure the current thread is allowed to create handles to begin with.
- CHECK(AllowHandleAllocation::IsAllowed());
+ DCHECK(AllowHandleAllocation::IsAllowed());
HandleScopeData* current = isolate_->handle_scope_data();
// Shrink the current handle scope to make it impossible to do
// handle allocations without an explicit handle scope.
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 57f8c8bc58..d7b35673b6 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -5,6 +5,7 @@
#include "src/handles.h"
#include "src/address-map.h"
+#include "src/api.h"
#include "src/base/logging.h"
#include "src/identity-map.h"
#include "src/objects-inl.h"
@@ -52,10 +53,11 @@ bool HandleBase::IsDereferenceAllowed(DereferenceCheckMode mode) const {
int HandleScope::NumberOfHandles(Isolate* isolate) {
HandleScopeImplementer* impl = isolate->handle_scope_implementer();
- int n = impl->blocks()->length();
+ int n = static_cast<int>(impl->blocks()->size());
if (n == 0) return 0;
- return ((n - 1) * kHandleBlockSize) + static_cast<int>(
- (isolate->handle_scope_data()->next - impl->blocks()->last()));
+ return ((n - 1) * kHandleBlockSize) +
+ static_cast<int>(
+ (isolate->handle_scope_data()->next - impl->blocks()->back()));
}
@@ -75,8 +77,8 @@ Object** HandleScope::Extend(Isolate* isolate) {
HandleScopeImplementer* impl = isolate->handle_scope_implementer();
// If there's more room in the last block, we use that. This is used
// for fast creation of scopes after scope barriers.
- if (!impl->blocks()->is_empty()) {
- Object** limit = &impl->blocks()->last()[kHandleBlockSize];
+ if (!impl->blocks()->empty()) {
+ Object** limit = &impl->blocks()->back()[kHandleBlockSize];
if (current->limit != limit) {
current->limit = limit;
DCHECK(limit - current->next < kHandleBlockSize);
@@ -90,7 +92,7 @@ Object** HandleScope::Extend(Isolate* isolate) {
result = impl->GetSpareOrNewBlock();
// Add the extension to the global list of blocks, but count the
// extension as part of the current scope.
- impl->blocks()->Add(result);
+ impl->blocks()->push_back(result);
current->limit = &result[kHandleBlockSize];
}
@@ -178,10 +180,10 @@ DeferredHandleScope::DeferredHandleScope(Isolate* isolate)
Object** new_next = impl_->GetSpareOrNewBlock();
Object** new_limit = &new_next[kHandleBlockSize];
// Check that at least one HandleScope exists, see the class description.
- DCHECK(!impl_->blocks()->is_empty());
+ DCHECK(!impl_->blocks()->empty());
// Check that we are not in a SealedHandleScope.
- DCHECK(data->limit == &impl_->blocks()->last()[kHandleBlockSize]);
- impl_->blocks()->Add(new_next);
+ DCHECK(data->limit == &impl_->blocks()->back()[kHandleBlockSize]);
+ impl_->blocks()->push_back(new_next);
#ifdef DEBUG
prev_level_ = data->level;
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 21c2f1987b..e31a77be41 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -97,7 +97,7 @@ class Handle final : public HandleBase {
static_assert(std::is_base_of<Object, T>::value, "static type violation");
}
- V8_INLINE explicit Handle(T* object) : Handle(object, object->GetIsolate()) {}
+ V8_INLINE explicit Handle(T* object);
V8_INLINE Handle(T* object, Isolate* isolate);
// Allocate a new handle for the object, do not canonicalize.
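The handles-inl.h change earlier in this diff shows where the body went: handles.h now only declares the convenience constructor, and the definition sits in the -inl header next to the other out-of-line templates. The point of the split is that `object->GetIsolate()` needs complete types, and keeping the call out of handles.h lets that header shed heavyweight includes. A generic sketch of the pattern (hypothetical names, not V8 code):

    // ref.h -- declaration only; Ref<T> is usable with forward-declared T.
    template <typename T>
    class Ref {
     public:
      explicit inline Ref(T* object);  // Defined in ref-inl.h.
      Ref(T* object, int context) : object_(object), context_(context) {}
     private:
      T* object_;
      int context_;
    };

    // ref-inl.h -- definition; callers of this constructor include the -inl
    // header and therefore already see T's complete definition.
    template <typename T>
    Ref<T>::Ref(T* object) : Ref(object, object->context()) {}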
diff --git a/deps/v8/src/heap-symbols.h b/deps/v8/src/heap-symbols.h
index 68b44c6622..ac308ebad7 100644
--- a/deps/v8/src/heap-symbols.h
+++ b/deps/v8/src/heap-symbols.h
@@ -27,6 +27,7 @@
V(object_to_string, "[object Object]") \
V(regexp_to_string, "[object RegExp]") \
V(string_to_string, "[object String]") \
+ V(bigint_string, "bigint") \
V(bind_string, "bind") \
V(boolean_string, "boolean") \
V(Boolean_string, "Boolean") \
@@ -147,6 +148,7 @@
V(Proxy_string, "Proxy") \
V(query_colon_string, "(?:)") \
V(RangeError_string, "RangeError") \
+ V(raw_string, "raw") \
V(ReferenceError_string, "ReferenceError") \
V(RegExp_string, "RegExp") \
V(reject_string, "reject") \
@@ -201,8 +203,7 @@
V(will_handle_string, "willHandle") \
V(writable_string, "writable") \
V(year_string, "year") \
- V(zero_string, "0") \
- V(WasmExceptionTag_string, "WasmExceptionTag")
+ V(zero_string, "0")
#define PRIVATE_SYMBOL_LIST(V) \
V(array_iteration_kind_symbol) \
@@ -310,6 +311,7 @@
F(MC_FINISH) \
F(MC_MARK) \
F(MC_MARK_FINISH_INCREMENTAL) \
+ F(MC_MARK_MAIN) \
F(MC_MARK_ROOTS) \
F(MC_MARK_WEAK_CLOSURE) \
F(MC_MARK_WEAK_CLOSURE_EPHEMERAL) \
diff --git a/deps/v8/src/heap/barrier.h b/deps/v8/src/heap/barrier.h
new file mode 100644
index 0000000000..d945a83d90
--- /dev/null
+++ b/deps/v8/src/heap/barrier.h
@@ -0,0 +1,77 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_BARRIER_H_
+#define V8_HEAP_BARRIER_H_
+
+#include "src/base/platform/condition-variable.h"
+#include "src/base/platform/mutex.h"
+
+namespace v8 {
+namespace internal {
+
+// Barrier that can be used once to synchronize a dynamic number of tasks
+// working concurrently.
+//
+// Usage:
+// void RunConcurrently(OneshotBarrier* shared_barrier) {
+//   shared_barrier->Start();
+//   do {
+//     {
+//       /* process work and create new work */
+//       shared_barrier->NotifyAll();
+//       /* process work and create new work */
+//     }
+//   } while (!shared_barrier->Wait());
+// }
+//
+// Note: If Start() is not called in time, e.g., because the first concurrent
+// task is already done processing all work, then Wait() will return true
+// immediately.
+class OneshotBarrier {
+ public:
+ OneshotBarrier() : tasks_(0), waiting_(0), done_(false) {}
+
+ void Start() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ tasks_++;
+ }
+
+ void NotifyAll() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ if (waiting_ > 0) condition_.NotifyAll();
+ }
+
+ bool Wait() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ if (done_) return true;
+
+ DCHECK_LE(waiting_, tasks_);
+ waiting_++;
+ if (waiting_ == tasks_) {
+ done_ = true;
+ condition_.NotifyAll();
+ } else {
+ // Spurious wakeup is ok here.
+ condition_.Wait(&mutex_);
+ }
+ waiting_--;
+ return done_;
+ }
+
+ // Only valid to be called in a sequential setting.
+ bool DoneForTesting() const { return done_; }
+
+ private:
+ base::ConditionVariable condition_;
+ base::Mutex mutex_;
+ int tasks_;
+ int waiting_;
+ bool done_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_BARRIER_H_
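A runnable driver for the new barrier, mirroring the usage comment in the header (a minimal sketch: std::thread is used purely for illustration, and the "work" is empty):

    #include <thread>
    #include <vector>
    #include "src/heap/barrier.h"

    using v8::internal::OneshotBarrier;

    void RunConcurrently(OneshotBarrier* shared_barrier) {
      shared_barrier->Start();            // Announce this task.
      do {
        /* process work and create new work */
        shared_barrier->NotifyAll();      // Wake tasks parked in Wait().
      } while (!shared_barrier->Wait());  // true once every task is idle.
    }

    int main() {
      OneshotBarrier barrier;
      std::vector<std::thread> threads;
      for (int i = 0; i < 4; i++) threads.emplace_back(RunConcurrently, &barrier);
      for (std::thread& t : threads) t.join();
      return 0;
    }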
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index 95b3a230ab..60bcbe9bab 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -121,12 +121,6 @@ class ConcurrentMarkingVisitor final
int VisitJSApiObject(Map* map, JSObject* object) {
if (marking_state_.IsGrey(object)) {
- int size = JSObject::BodyDescriptor::SizeOf(map, object);
- VisitMapPointer(object, object->map_slot());
- // It is OK to iterate body of JS API object here because they do not have
- // unboxed double fields.
- DCHECK_IMPLIES(FLAG_unbox_double_fields, map->HasFastPointerLayout());
- JSObject::BodyDescriptor::IterateBody(object, size, this);
// The main thread will do wrapper tracing in Blink.
bailout_.Push(object);
}
@@ -134,6 +128,52 @@ class ConcurrentMarkingVisitor final
}
// ===========================================================================
+ // Strings with pointers =====================================================
+ // ===========================================================================
+
+ int VisitConsString(Map* map, ConsString* object) {
+ int size = ConsString::BodyDescriptor::SizeOf(map, object);
+ const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, size);
+ if (!ShouldVisit(object)) return 0;
+ VisitPointersInSnapshot(object, snapshot);
+ return size;
+ }
+
+ int VisitSlicedString(Map* map, SlicedString* object) {
+ int size = SlicedString::BodyDescriptor::SizeOf(map, object);
+ const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, size);
+ if (!ShouldVisit(object)) return 0;
+ VisitPointersInSnapshot(object, snapshot);
+ return size;
+ }
+
+ int VisitThinString(Map* map, ThinString* object) {
+ int size = ThinString::BodyDescriptor::SizeOf(map, object);
+ const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, size);
+ if (!ShouldVisit(object)) return 0;
+ VisitPointersInSnapshot(object, snapshot);
+ return size;
+ }
+
+ // ===========================================================================
+ // Strings without pointers ==================================================
+ // ===========================================================================
+
+ int VisitSeqOneByteString(Map* map, SeqOneByteString* object) {
+ int size = SeqOneByteString::SizeFor(object->synchronized_length());
+ if (!ShouldVisit(object)) return 0;
+ VisitMapPointer(object, object->map_slot());
+ return size;
+ }
+
+ int VisitSeqTwoByteString(Map* map, SeqTwoByteString* object) {
+ int size = SeqTwoByteString::SizeFor(object->synchronized_length());
+ if (!ShouldVisit(object)) return 0;
+ VisitMapPointer(object, object->map_slot());
+ return size;
+ }
+
+ // ===========================================================================
// Fixed array object ========================================================
// ===========================================================================
@@ -215,11 +255,12 @@ class ConcurrentMarkingVisitor final
}
int VisitTransitionArray(Map* map, TransitionArray* array) {
- if (marking_state_.IsGrey(array)) {
- // TODO(ulan): process transition arrays.
- bailout_.Push(array);
- }
- return 0;
+ if (!ShouldVisit(array)) return 0;
+ VisitMapPointer(array, array->map_slot());
+ int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
+ TransitionArray::BodyDescriptor::IterateBody(array, size, this);
+ weak_objects_->transition_arrays.Push(task_id_, array);
+ return size;
}
int VisitWeakCell(Map* map, WeakCell* object) {
@@ -283,13 +324,14 @@ class ConcurrentMarkingVisitor final
SlotSnapshot* slot_snapshot_;
};
- const SlotSnapshot& MakeSlotSnapshot(Map* map, HeapObject* object, int size) {
+ template <typename T>
+ const SlotSnapshot& MakeSlotSnapshot(Map* map, T* object, int size) {
// TODO(ulan): Iterate only the existing fields and skip slack at the end
// of the object.
SlotSnapshottingVisitor visitor(&slot_snapshot_);
visitor.VisitPointer(object,
reinterpret_cast<Object**>(object->map_slot()));
- JSObject::BodyDescriptor::IterateBody(object, size, &visitor);
+ T::BodyDescriptor::IterateBody(object, size, &visitor);
return slot_snapshot_;
}
ConcurrentMarking::MarkingWorklist::View shared_;
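The new string visitors all follow the same snapshot-then-visit shape: copy the slots first (the object may be racing with the mutator, e.g. a ConsString being flattened into a ThinString), then claim the object via ShouldVisit(), and finally mark only from the local copy so the live object is never re-read. A self-contained reduction of the pattern (hypothetical types, not V8 code):

    #include <atomic>
    #include <vector>

    struct Obj {
      std::vector<int> slots;           // Stand-in for tagged slots.
      std::atomic<bool> claimed{false};
    };

    int VisitViaSnapshot(Obj* object, std::vector<int>* marked) {
      std::vector<int> snapshot = object->slots;     // 1. Racy, tolerated.
      if (object->claimed.exchange(true)) return 0;  // 2. ShouldVisit().
      for (int slot : snapshot) marked->push_back(slot);  // 3. Copy only.
      return static_cast<int>(snapshot.size());
    }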
@@ -325,17 +367,20 @@ class ConcurrentMarking::Task : public CancelableTask {
ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
MarkingWorklist* bailout,
+ MarkingWorklist* on_hold,
WeakObjects* weak_objects)
: heap_(heap),
shared_(shared),
bailout_(bailout),
+ on_hold_(on_hold),
weak_objects_(weak_objects),
- pending_task_count_(0) {
+ pending_task_count_(0),
+ task_count_(0) {
// The runtime flag should be set only if the compile time flag was set.
#ifndef V8_CONCURRENT_MARKING
CHECK(!FLAG_concurrent_marking);
#endif
- for (int i = 0; i <= kTasks; i++) {
+ for (int i = 0; i <= kMaxTasks; i++) {
is_pending_[i] = false;
}
}
@@ -351,7 +396,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
ConcurrentMarkingVisitor visitor(shared_, bailout_, live_bytes, weak_objects_,
task_id);
double time_ms;
- size_t total_bytes_marked = 0;
+ size_t marked_bytes = 0;
if (FLAG_trace_concurrent_marking) {
heap_->isolate()->PrintWithTimestamp(
"Starting concurrent marking task %d\n", task_id);
@@ -361,9 +406,9 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
bool done = false;
while (!done) {
base::LockGuard<base::Mutex> guard(&task_state->lock);
- size_t bytes_marked = 0;
+ size_t current_marked_bytes = 0;
int objects_processed = 0;
- while (bytes_marked < kBytesUntilInterruptCheck &&
+ while (current_marked_bytes < kBytesUntilInterruptCheck &&
objects_processed < kObjectsUntilInterrupCheck) {
HeapObject* object;
if (!shared_->Pop(task_id, &object)) {
@@ -375,13 +420,15 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
Address new_space_limit = heap_->new_space()->original_limit();
Address addr = object->address();
if (new_space_top <= addr && addr < new_space_limit) {
- bailout_->Push(task_id, object);
+ on_hold_->Push(task_id, object);
} else {
Map* map = object->synchronized_map();
- bytes_marked += visitor.Visit(map, object);
+ current_marked_bytes += visitor.Visit(map, object);
}
}
- total_bytes_marked += bytes_marked;
+ marked_bytes += current_marked_bytes;
+ base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
+ marked_bytes);
if (task_state->interrupt_request.Value()) {
task_state->interrupt_condition.Wait(&task_state->lock);
}
@@ -391,9 +438,12 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
// young generation GC.
base::LockGuard<base::Mutex> guard(&task_state->lock);
bailout_->FlushToGlobal(task_id);
+ on_hold_->FlushToGlobal(task_id);
}
weak_objects_->weak_cells.FlushToGlobal(task_id);
weak_objects_->transition_arrays.FlushToGlobal(task_id);
+ base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
+ total_marked_bytes_.Increment(marked_bytes);
{
base::LockGuard<base::Mutex> guard(&pending_lock_);
is_pending_[task_id] = false;
@@ -404,28 +454,34 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
if (FLAG_trace_concurrent_marking) {
heap_->isolate()->PrintWithTimestamp(
"Task %d concurrently marked %dKB in %.2fms\n", task_id,
- static_cast<int>(total_bytes_marked / KB), time_ms);
+ static_cast<int>(marked_bytes / KB), time_ms);
}
}
void ConcurrentMarking::ScheduleTasks() {
if (!FLAG_concurrent_marking) return;
base::LockGuard<base::Mutex> guard(&pending_lock_);
- if (pending_task_count_ < kTasks) {
- // Task id 0 is for the main thread.
- for (int i = 1; i <= kTasks; i++) {
- if (!is_pending_[i]) {
- if (FLAG_trace_concurrent_marking) {
- heap_->isolate()->PrintWithTimestamp(
- "Scheduling concurrent marking task %d\n", i);
- }
- task_state_[i].interrupt_request.SetValue(false);
- is_pending_[i] = true;
- ++pending_task_count_;
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new Task(heap_->isolate(), this, &task_state_[i], i),
- v8::Platform::kShortRunningTask);
+ if (task_count_ == 0) {
+ // TODO(ulan): Increase the number of tasks for platforms that benefit
+ // from it.
+ task_count_ = static_cast<int>(
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads() / 2);
+ task_count_ = Max(Min(task_count_, kMaxTasks), 1);
+ }
+ // Task id 0 is for the main thread.
+ for (int i = 1; i <= task_count_ && pending_task_count_ < task_count_; i++) {
+ if (!is_pending_[i]) {
+ if (FLAG_trace_concurrent_marking) {
+ heap_->isolate()->PrintWithTimestamp(
+ "Scheduling concurrent marking task %d\n", i);
}
+ task_state_[i].interrupt_request.SetValue(false);
+ is_pending_[i] = true;
+ ++pending_task_count_;
+ Task* task = new Task(heap_->isolate(), this, &task_state_[i], i);
+ cancelable_id_[i] = task->id();
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ task, v8::Platform::kShortRunningTask);
}
}
}
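Scheduling no longer hardwires kTasks workers; the count is computed once as half of the platform's available background threads, clamped to [1, kMaxTasks]. The rule as a standalone function:

    #include <algorithm>
    #include <cstddef>

    // Equivalent of the sizing logic in ScheduleTasks() above.
    int ComputeMarkingTaskCount(size_t background_threads, int max_tasks) {
      int tasks = static_cast<int>(background_threads / 2);
      return std::max(std::min(tasks, max_tasks), 1);
    }
    // ComputeMarkingTaskCount(8, 4)  == 4
    // ComputeMarkingTaskCount(3, 4)  == 1
    // ComputeMarkingTaskCount(13, 4) == 4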
@@ -441,18 +497,40 @@ void ConcurrentMarking::RescheduleTasksIfNeeded() {
}
}
+void ConcurrentMarking::WaitForTasks() {
+ if (!FLAG_concurrent_marking) return;
+ base::LockGuard<base::Mutex> guard(&pending_lock_);
+ while (pending_task_count_ > 0) {
+ pending_condition_.Wait(&pending_lock_);
+ }
+}
+
void ConcurrentMarking::EnsureCompleted() {
if (!FLAG_concurrent_marking) return;
base::LockGuard<base::Mutex> guard(&pending_lock_);
+ CancelableTaskManager* task_manager =
+ heap_->isolate()->cancelable_task_manager();
+ for (int i = 1; i <= task_count_; i++) {
+ if (is_pending_[i]) {
+ if (task_manager->TryAbort(cancelable_id_[i]) ==
+ CancelableTaskManager::kTaskAborted) {
+ is_pending_[i] = false;
+ --pending_task_count_;
+ }
+ }
+ }
while (pending_task_count_ > 0) {
pending_condition_.Wait(&pending_lock_);
}
+ for (int i = 1; i <= task_count_; i++) {
+ DCHECK(!is_pending_[i]);
+ }
}
void ConcurrentMarking::FlushLiveBytes(
MajorNonAtomicMarkingState* marking_state) {
DCHECK_EQ(pending_task_count_, 0);
- for (int i = 1; i <= kTasks; i++) {
+ for (int i = 1; i <= task_count_; i++) {
LiveBytesMap& live_bytes = task_state_[i].live_bytes;
for (auto pair : live_bytes) {
// ClearLiveness sets the live bytes to zero.
@@ -463,32 +541,43 @@ void ConcurrentMarking::FlushLiveBytes(
}
live_bytes.clear();
}
+ total_marked_bytes_.SetValue(0);
}
void ConcurrentMarking::ClearLiveness(MemoryChunk* chunk) {
- for (int i = 1; i <= kTasks; i++) {
+ for (int i = 1; i <= task_count_; i++) {
if (task_state_[i].live_bytes.count(chunk)) {
task_state_[i].live_bytes[chunk] = 0;
}
}
}
+size_t ConcurrentMarking::TotalMarkedBytes() {
+ size_t result = 0;
+ for (int i = 1; i <= task_count_; i++) {
+ result +=
+ base::AsAtomicWord::Relaxed_Load<size_t>(&task_state_[i].marked_bytes);
+ }
+ result += total_marked_bytes_.Value();
+ return result;
+}
+
ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
: concurrent_marking_(concurrent_marking) {
if (!FLAG_concurrent_marking) return;
// Request task_state for all tasks.
- for (int i = 1; i <= kTasks; i++) {
+ for (int i = 1; i <= kMaxTasks; i++) {
concurrent_marking_->task_state_[i].interrupt_request.SetValue(true);
}
// Now take a lock to ensure that the tasks are waiting.
- for (int i = 1; i <= kTasks; i++) {
+ for (int i = 1; i <= kMaxTasks; i++) {
concurrent_marking_->task_state_[i].lock.Lock();
}
}
ConcurrentMarking::PauseScope::~PauseScope() {
if (!FLAG_concurrent_marking) return;
- for (int i = kTasks; i >= 1; i--) {
+ for (int i = kMaxTasks; i >= 1; i--) {
concurrent_marking_->task_state_[i].interrupt_request.SetValue(false);
concurrent_marking_->task_state_[i].interrupt_condition.NotifyAll();
concurrent_marking_->task_state_[i].lock.Unlock();
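Progress accounting is likewise split: while a task runs it publishes its running marked_bytes with relaxed atomic stores, and on completion it zeroes its slot and folds the count into the shared atomic total, so TotalMarkedBytes() gives the main thread an approximate snapshot without taking the task locks. A reduced model of that bookkeeping (hypothetical, fixed at four slots):

    #include <atomic>
    #include <cstddef>

    struct MarkingProgress {
      std::atomic<size_t> per_task[4] = {};
      std::atomic<size_t> flushed{0};

      void OnStep(int task, size_t bytes) {  // While the task is running.
        per_task[task].store(bytes, std::memory_order_relaxed);
      }
      void OnTaskDone(int task, size_t bytes) {
        per_task[task].store(0, std::memory_order_relaxed);
        flushed.fetch_add(bytes, std::memory_order_relaxed);
      }
      size_t Total() const {                 // Approximate by design.
        size_t sum = flushed.load(std::memory_order_relaxed);
        for (const std::atomic<size_t>& t : per_task)
          sum += t.load(std::memory_order_relaxed);
        return sum;
      }
    };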
diff --git a/deps/v8/src/heap/concurrent-marking.h b/deps/v8/src/heap/concurrent-marking.h
index aa73db3a6a..0f0c8bf992 100644
--- a/deps/v8/src/heap/concurrent-marking.h
+++ b/deps/v8/src/heap/concurrent-marking.h
@@ -36,13 +36,15 @@ class ConcurrentMarking {
ConcurrentMarking* concurrent_marking_;
};
- static const int kTasks = 4;
+ static const int kMaxTasks = 4;
using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;
ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
- MarkingWorklist* bailout, WeakObjects* weak_objects);
+ MarkingWorklist* bailout, MarkingWorklist* on_hold,
+ WeakObjects* weak_objects);
void ScheduleTasks();
+ void WaitForTasks();
void EnsureCompleted();
void RescheduleTasksIfNeeded();
// Flushes the local live bytes into the given marking state.
@@ -51,6 +53,10 @@ class ConcurrentMarking {
// scavenge and is going to be re-used.
void ClearLiveness(MemoryChunk* chunk);
+ int TaskCount() { return task_count_; }
+
+ size_t TotalMarkedBytes();
+
private:
struct TaskState {
// When the concurrent marking task has this lock, then objects in the
@@ -63,6 +69,7 @@ class ConcurrentMarking {
// flag is cleared by the main thread.
base::ConditionVariable interrupt_condition;
LiveBytesMap live_bytes;
+ size_t marked_bytes;
char cache_line_padding[64];
};
class Task;
@@ -70,12 +77,16 @@ class ConcurrentMarking {
Heap* heap_;
MarkingWorklist* shared_;
MarkingWorklist* bailout_;
+ MarkingWorklist* on_hold_;
WeakObjects* weak_objects_;
- TaskState task_state_[kTasks + 1];
+ TaskState task_state_[kMaxTasks + 1];
+ base::AtomicNumber<size_t> total_marked_bytes_;
base::Mutex pending_lock_;
base::ConditionVariable pending_condition_;
int pending_task_count_;
- bool is_pending_[kTasks + 1];
+ bool is_pending_[kMaxTasks + 1];
+ CancelableTaskManager::Id cancelable_id_[kMaxTasks + 1];
+ int task_count_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/gc-idle-time-handler.cc b/deps/v8/src/heap/gc-idle-time-handler.cc
index 62c3313924..1e10e81ddb 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.cc
+++ b/deps/v8/src/heap/gc-idle-time-handler.cc
@@ -47,7 +47,7 @@ void GCIdleTimeHeapState::Print() {
size_t GCIdleTimeHandler::EstimateMarkingStepSize(
double idle_time_in_ms, double marking_speed_in_bytes_per_ms) {
- DCHECK(idle_time_in_ms > 0);
+ DCHECK_LT(0, idle_time_in_ms);
if (marking_speed_in_bytes_per_ms == 0) {
marking_speed_in_bytes_per_ms = kInitialConservativeMarkingSpeed;
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index e14fbb4862..7bfe0adfa0 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -234,7 +234,7 @@ void GCTracer::Stop(GarbageCollector collector) {
return;
}
- DCHECK(start_counter_ >= 0);
+ DCHECK_LE(0, start_counter_);
DCHECK((collector == SCAVENGER && current_.type == Event::SCAVENGER) ||
(collector == MINOR_MARK_COMPACTOR &&
current_.type == Event::MINOR_MARK_COMPACTOR) ||
@@ -462,8 +462,8 @@ void GCTracer::PrintNVP() const {
"scavenge=%.2f "
"scavenge.roots=%.2f "
"scavenge.weak=%.2f "
- "scavenge.weak_global_handles.identify=%.2f"
- "scavenge.weak_global_handles.process=%.2f"
+ "scavenge.weak_global_handles.identify=%.2f "
+ "scavenge.weak_global_handles.process=%.2f "
"scavenge.parallel=%.2f "
"incremental.steps_count=%d "
"incremental.steps_took=%.1f "
@@ -608,6 +608,7 @@ void GCTracer::PrintNVP() const {
"mark=%.1f "
"mark.finish_incremental=%.1f "
"mark.roots=%.1f "
+ "mark.main=%.1f "
"mark.weak_closure=%.1f "
"mark.weak_closure.ephemeral=%.1f "
"mark.weak_closure.weak_handles=%.1f "
@@ -694,6 +695,7 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_FINISH], current_.scopes[Scope::MC_MARK],
current_.scopes[Scope::MC_MARK_FINISH_INCREMENTAL],
current_.scopes[Scope::MC_MARK_ROOTS],
+ current_.scopes[Scope::MC_MARK_MAIN],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES],
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index 49d69c80e2..a966fa03d8 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -18,7 +18,6 @@
#include "src/heap/spaces-inl.h"
#include "src/heap/store-buffer.h"
#include "src/isolate.h"
-#include "src/list-inl.h"
#include "src/log.h"
#include "src/msan.h"
#include "src/objects-inl.h"
@@ -26,6 +25,7 @@
#include "src/objects/script-inl.h"
#include "src/profiler/heap-profiler.h"
#include "src/string-hasher.h"
+#include "src/zone/zone-list-inl.h"
namespace v8 {
namespace internal {
@@ -381,10 +381,6 @@ void Heap::FinalizeExternalString(String* string) {
Address Heap::NewSpaceTop() { return new_space_->top(); }
-bool Heap::DeoptMaybeTenuredAllocationSites() {
- return new_space_->IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
-}
-
bool Heap::InNewSpace(Object* object) {
// Inlined check from NewSpace::Contains.
bool result =
@@ -542,25 +538,12 @@ void Heap::UpdateAllocationSite(Map* map, HeapObject* object,
(*pretenuring_feedback)[reinterpret_cast<AllocationSite*>(key)]++;
}
-
-void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite* site) {
- global_pretenuring_feedback_.erase(site);
-}
-
Isolate* Heap::isolate() {
return reinterpret_cast<Isolate*>(
reinterpret_cast<intptr_t>(this) -
reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(16)->heap()) + 16);
}
-void Heap::ExternalStringTable::PromoteAllNewSpaceStrings() {
- old_space_strings_.reserve(old_space_strings_.size() +
- new_space_strings_.size());
- std::move(std::begin(new_space_strings_), std::end(new_space_strings_),
- std::back_inserter(old_space_strings_));
- new_space_strings_.clear();
-}
-
void Heap::ExternalStringTable::AddString(String* string) {
DCHECK(string->IsExternalString());
if (heap_->InNewSpace(string)) {
@@ -570,46 +553,6 @@ void Heap::ExternalStringTable::AddString(String* string) {
}
}
-void Heap::ExternalStringTable::IterateNewSpaceStrings(RootVisitor* v) {
- if (!new_space_strings_.empty()) {
- v->VisitRootPointers(Root::kExternalStringsTable, new_space_strings_.data(),
- new_space_strings_.data() + new_space_strings_.size());
- }
-}
-
-void Heap::ExternalStringTable::IterateAll(RootVisitor* v) {
- IterateNewSpaceStrings(v);
- if (!old_space_strings_.empty()) {
- v->VisitRootPointers(Root::kExternalStringsTable, old_space_strings_.data(),
- old_space_strings_.data() + old_space_strings_.size());
- }
-}
-
-
-// Verify() is inline to avoid ifdef-s around its calls in release
-// mode.
-void Heap::ExternalStringTable::Verify() {
-#ifdef DEBUG
- for (size_t i = 0; i < new_space_strings_.size(); ++i) {
- Object* obj = Object::cast(new_space_strings_[i]);
- DCHECK(heap_->InNewSpace(obj));
- DCHECK(!obj->IsTheHole(heap_->isolate()));
- }
- for (size_t i = 0; i < old_space_strings_.size(); ++i) {
- Object* obj = Object::cast(old_space_strings_[i]);
- DCHECK(!heap_->InNewSpace(obj));
- DCHECK(!obj->IsTheHole(heap_->isolate()));
- }
-#endif
-}
-
-
-void Heap::ExternalStringTable::AddOldString(String* string) {
- DCHECK(string->IsExternalString());
- DCHECK(!heap_->InNewSpace(string));
- old_space_strings_.push_back(string);
-}
-
Oddball* Heap::ToBoolean(bool condition) {
return condition ? true_value() : false_value();
}
@@ -632,70 +575,12 @@ int Heap::NextScriptId() {
return last_id;
}
-void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
- DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::kZero);
- set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
-}
-
-void Heap::SetConstructStubCreateDeoptPCOffset(int pc_offset) {
- // TODO(tebbi): Remove second half of DCHECK once
- // FLAG_harmony_restrict_constructor_return is gone.
- DCHECK(construct_stub_create_deopt_pc_offset() == Smi::kZero ||
- construct_stub_create_deopt_pc_offset() == Smi::FromInt(pc_offset));
- set_construct_stub_create_deopt_pc_offset(Smi::FromInt(pc_offset));
-}
-
-void Heap::SetConstructStubInvokeDeoptPCOffset(int pc_offset) {
- // TODO(tebbi): Remove second half of DCHECK once
- // FLAG_harmony_restrict_constructor_return is gone.
- DCHECK(construct_stub_invoke_deopt_pc_offset() == Smi::kZero ||
- construct_stub_invoke_deopt_pc_offset() == Smi::FromInt(pc_offset));
- set_construct_stub_invoke_deopt_pc_offset(Smi::FromInt(pc_offset));
-}
-
-void Heap::SetGetterStubDeoptPCOffset(int pc_offset) {
- DCHECK(getter_stub_deopt_pc_offset() == Smi::kZero);
- set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
-}
-
-void Heap::SetSetterStubDeoptPCOffset(int pc_offset) {
- DCHECK(setter_stub_deopt_pc_offset() == Smi::kZero);
- set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
-}
-
-void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
- DCHECK(interpreter_entry_return_pc_offset() == Smi::kZero);
- set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
-}
-
int Heap::GetNextTemplateSerialNumber() {
int next_serial_number = next_template_serial_number()->value() + 1;
set_next_template_serial_number(Smi::FromInt(next_serial_number));
return next_serial_number;
}
-void Heap::SetSerializedTemplates(FixedArray* templates) {
- DCHECK_EQ(empty_fixed_array(), serialized_templates());
- DCHECK(isolate()->serializer_enabled());
- set_serialized_templates(templates);
-}
-
-void Heap::SetSerializedGlobalProxySizes(FixedArray* sizes) {
- DCHECK_EQ(empty_fixed_array(), serialized_global_proxy_sizes());
- DCHECK(isolate()->serializer_enabled());
- set_serialized_global_proxy_sizes(sizes);
-}
-
-void Heap::CreateObjectStats() {
- if (V8_LIKELY(FLAG_gc_stats == 0)) return;
- if (!live_object_stats_) {
- live_object_stats_ = new ObjectStats(this);
- }
- if (!dead_object_stats_) {
- dead_object_stats_ = new ObjectStats(this);
- }
-}
-
AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
: heap_(isolate->heap()) {
heap_->always_allocate_scope_count_.Increment(1);
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index b13ec784f5..458c6c7e09 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -24,6 +24,7 @@
#include "src/feedback-vector.h"
#include "src/global-handles.h"
#include "src/heap/array-buffer-tracker-inl.h"
+#include "src/heap/barrier.h"
#include "src/heap/code-stats.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/embedder-tracing.h"
@@ -50,6 +51,8 @@
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot.h"
#include "src/tracing/trace-event.h"
+#include "src/trap-handler/trap-handler.h"
+#include "src/unicode-inl.h"
#include "src/utils-inl.h"
#include "src/utils.h"
#include "src/v8.h"
@@ -58,6 +61,54 @@
namespace v8 {
namespace internal {
+void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
+ DCHECK_EQ(Smi::kZero, arguments_adaptor_deopt_pc_offset());
+ set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
+}
+
+void Heap::SetConstructStubCreateDeoptPCOffset(int pc_offset) {
+ // TODO(tebbi): Remove second half of DCHECK once
+ // FLAG_harmony_restrict_constructor_return is gone.
+ DCHECK(construct_stub_create_deopt_pc_offset() == Smi::kZero ||
+ construct_stub_create_deopt_pc_offset() == Smi::FromInt(pc_offset));
+ set_construct_stub_create_deopt_pc_offset(Smi::FromInt(pc_offset));
+}
+
+void Heap::SetConstructStubInvokeDeoptPCOffset(int pc_offset) {
+ // TODO(tebbi): Remove second half of DCHECK once
+ // FLAG_harmony_restrict_constructor_return is gone.
+ DCHECK(construct_stub_invoke_deopt_pc_offset() == Smi::kZero ||
+ construct_stub_invoke_deopt_pc_offset() == Smi::FromInt(pc_offset));
+ set_construct_stub_invoke_deopt_pc_offset(Smi::FromInt(pc_offset));
+}
+
+void Heap::SetGetterStubDeoptPCOffset(int pc_offset) {
+ DCHECK_EQ(Smi::kZero, getter_stub_deopt_pc_offset());
+ set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
+}
+
+void Heap::SetSetterStubDeoptPCOffset(int pc_offset) {
+ DCHECK_EQ(Smi::kZero, setter_stub_deopt_pc_offset());
+ set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
+}
+
+void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
+ DCHECK_EQ(Smi::kZero, interpreter_entry_return_pc_offset());
+ set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
+}
+
+void Heap::SetSerializedTemplates(FixedArray* templates) {
+ DCHECK_EQ(empty_fixed_array(), serialized_templates());
+ DCHECK(isolate()->serializer_enabled());
+ set_serialized_templates(templates);
+}
+
+void Heap::SetSerializedGlobalProxySizes(FixedArray* sizes) {
+ DCHECK_EQ(empty_fixed_array(), serialized_global_proxy_sizes());
+ DCHECK(isolate()->serializer_enabled());
+ set_serialized_global_proxy_sizes(sizes);
+}
+
bool Heap::GCCallbackTuple::operator==(
const Heap::GCCallbackTuple& other) const {
return other.callback == callback && other.data == data;
@@ -183,7 +234,7 @@ Heap::Heap()
delay_sweeper_tasks_for_testing_(false),
pending_layout_change_object_(nullptr) {
// Ensure old_generation_size_ is a multiple of kPageSize.
- DCHECK((max_old_generation_size_ & (Page::kPageSize - 1)) == 0);
+ DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));
memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
set_native_contexts_list(NULL);
@@ -703,6 +754,14 @@ inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite* site,
}
} // namespace
+void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite* site) {
+ global_pretenuring_feedback_.erase(site);
+}
+
+bool Heap::DeoptMaybeTenuredAllocationSites() {
+ return new_space_->IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
+}
+
void Heap::ProcessPretenuringFeedback() {
bool trigger_deoptimization = false;
if (FLAG_allocation_site_pretenuring) {
@@ -832,18 +891,6 @@ void Heap::GarbageCollectionEpilogue() {
isolate_->counters()->external_fragmentation_total()->AddSample(
static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
- isolate_->counters()->heap_fraction_new_space()->AddSample(static_cast<int>(
- (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
- isolate_->counters()->heap_fraction_old_space()->AddSample(static_cast<int>(
- (old_space()->CommittedMemory() * 100.0) / CommittedMemory()));
- isolate_->counters()->heap_fraction_code_space()->AddSample(
- static_cast<int>((code_space()->CommittedMemory() * 100.0) /
- CommittedMemory()));
- isolate_->counters()->heap_fraction_map_space()->AddSample(static_cast<int>(
- (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
- isolate_->counters()->heap_fraction_lo_space()->AddSample(static_cast<int>(
- (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
-
isolate_->counters()->heap_sample_total_committed()->AddSample(
static_cast<int>(CommittedMemory() / KB));
isolate_->counters()->heap_sample_total_used()->AddSample(
@@ -888,9 +935,6 @@ void Heap::GarbageCollectionEpilogue() {
ReportStatisticsAfterGC();
#endif // DEBUG
- // Remember the last top pointer so that we can later find out
- // whether we allocated in new space since the last GC.
- new_space_top_after_last_gc_ = new_space()->top();
last_gc_time_ = MonotonicallyIncreasingTimeInMs();
{
@@ -1335,8 +1379,11 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
if (space == MAP_SPACE) {
// We allocate each map individually to avoid fragmentation.
maps->clear();
- DCHECK_EQ(1, reservation->size());
- int num_maps = reservation->at(0).size / Map::kSize;
+ DCHECK_LE(reservation->size(), 2);
+ int reserved_size = 0;
+ for (const Chunk& c : *reservation) reserved_size += c.size;
+ DCHECK_EQ(0, reserved_size % Map::kSize);
+ int num_maps = reserved_size / Map::kSize;
for (int i = 0; i < num_maps; i++) {
// The deserializer will update the skip list.
AllocationResult allocation = map_space()->AllocateRawUnaligned(
@@ -1356,8 +1403,10 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
}
} else if (space == LO_SPACE) {
// Just check that we can allocate during deserialization.
- DCHECK_EQ(1, reservation->size());
- perform_gc = !CanExpandOldGeneration(reservation->at(0).size);
+ DCHECK_LE(reservation->size(), 2);
+ int reserved_size = 0;
+ for (const Chunk& c : *reservation) reserved_size += c.size;
+ perform_gc = !CanExpandOldGeneration(reserved_size);
} else {
for (auto& chunk : *reservation) {
AllocationResult allocation;
@@ -1379,7 +1428,8 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
Address free_space_address = free_space->address();
CreateFillerObjectAt(free_space_address, size,
ClearRecordedSlots::kNo);
- DCHECK(space < SerializerDeserializer::kNumberOfPreallocatedSpaces);
+ DCHECK_GT(SerializerDeserializer::kNumberOfPreallocatedSpaces,
+ space);
chunk.start = free_space_address;
chunk.end = free_space_address + size;
} else {
@@ -1667,8 +1717,6 @@ void Heap::MarkCompactEpilogue() {
PreprocessStackTraces();
DCHECK(incremental_marking()->IsStopped());
-
- mark_compact_collector()->marking_worklist()->StopUsing();
}
@@ -1787,7 +1835,7 @@ class ScavengingItem : public ItemParallelJob::Item {
class ScavengingTask final : public ItemParallelJob::Task {
public:
- ScavengingTask(Heap* heap, Scavenger* scavenger, Scavenger::Barrier* barrier)
+ ScavengingTask(Heap* heap, Scavenger* scavenger, OneshotBarrier* barrier)
: ItemParallelJob::Task(heap->isolate()),
heap_(heap),
scavenger_(scavenger),
@@ -1803,10 +1851,9 @@ class ScavengingTask final : public ItemParallelJob::Task {
item->Process(scavenger_);
item->MarkFinished();
}
- while (!barrier_->Done()) {
+ do {
scavenger_->Process(barrier_);
- barrier_->Wait();
- }
+ } while (!barrier_->Wait());
scavenger_->Process();
}
if (FLAG_trace_parallel_scavenge) {
@@ -1820,7 +1867,7 @@ class ScavengingTask final : public ItemParallelJob::Task {
private:
Heap* const heap_;
Scavenger* const scavenger_;
- Scavenger::Barrier* const barrier_;
+ OneshotBarrier* const barrier_;
};
class PageScavengingItem final : public ScavengingItem {
@@ -1909,9 +1956,9 @@ void Heap::Scavenge() {
Scavenger* scavengers[kMaxScavengerTasks];
const bool is_logging = IsLogging(isolate());
const int num_scavenge_tasks = NumberOfScavengeTasks();
- Scavenger::Barrier barrier;
- CopiedList copied_list(num_scavenge_tasks);
- PromotionList promotion_list(num_scavenge_tasks);
+ OneshotBarrier barrier;
+ Scavenger::CopiedList copied_list(num_scavenge_tasks);
+ Scavenger::PromotionList promotion_list(num_scavenge_tasks);
for (int i = 0; i < num_scavenge_tasks; i++) {
scavengers[i] =
new Scavenger(this, is_logging, &copied_list, &promotion_list, i);
@@ -2044,6 +2091,21 @@ String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
return string->IsExternalString() ? string : nullptr;
}
+void Heap::ExternalStringTable::Verify() {
+#ifdef DEBUG
+ for (size_t i = 0; i < new_space_strings_.size(); ++i) {
+ Object* obj = Object::cast(new_space_strings_[i]);
+ DCHECK(heap_->InNewSpace(obj));
+ DCHECK(!obj->IsTheHole(heap_->isolate()));
+ }
+ for (size_t i = 0; i < old_space_strings_.size(); ++i) {
+ Object* obj = Object::cast(old_space_strings_[i]);
+ DCHECK(!heap_->InNewSpace(obj));
+ DCHECK(!obj->IsTheHole(heap_->isolate()));
+ }
+#endif
+}
+
void Heap::ExternalStringTable::UpdateNewSpaceReferences(
Heap::ExternalStringTableUpdaterCallback updater_func) {
if (new_space_strings_.empty()) return;
@@ -2060,12 +2122,12 @@ void Heap::ExternalStringTable::UpdateNewSpaceReferences(
DCHECK(target->IsExternalString());
if (heap_->InNewSpace(target)) {
- // String is still in new space. Update the table entry.
+ // String is still in new space. Update the table entry.
*last = target;
++last;
} else {
- // String got promoted. Move it to the old string list.
- AddOldString(target);
+ // String got promoted. Move it to the old string list.
+ old_space_strings_.push_back(target);
}
}
@@ -2078,6 +2140,29 @@ void Heap::ExternalStringTable::UpdateNewSpaceReferences(
#endif
}
+void Heap::ExternalStringTable::PromoteAllNewSpaceStrings() {
+ old_space_strings_.reserve(old_space_strings_.size() +
+ new_space_strings_.size());
+ std::move(std::begin(new_space_strings_), std::end(new_space_strings_),
+ std::back_inserter(old_space_strings_));
+ new_space_strings_.clear();
+}
+
+void Heap::ExternalStringTable::IterateNewSpaceStrings(RootVisitor* v) {
+ if (!new_space_strings_.empty()) {
+ v->VisitRootPointers(Root::kExternalStringsTable, new_space_strings_.data(),
+ new_space_strings_.data() + new_space_strings_.size());
+ }
+}
+
+void Heap::ExternalStringTable::IterateAll(RootVisitor* v) {
+ IterateNewSpaceStrings(v);
+ if (!old_space_strings_.empty()) {
+ v->VisitRootPointers(Root::kExternalStringsTable, old_space_strings_.data(),
+ old_space_strings_.data() + old_space_strings_.size());
+ }
+}
+
void Heap::UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func) {
external_string_table_.UpdateNewSpaceReferences(updater_func);
@@ -2238,7 +2323,7 @@ HeapObject* Heap::AlignWithFiller(HeapObject* object, int object_size,
int allocation_size,
AllocationAlignment alignment) {
int filler_size = allocation_size - object_size;
- DCHECK(filler_size > 0);
+ DCHECK_LT(0, filler_size);
int pre_filler = GetFillToAlign(object->address(), alignment);
if (pre_filler) {
object = PrecedeWithFiller(object, pre_filler);
@@ -2296,6 +2381,7 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
Map::ConstructionCounter::encode(Map::kNoSlackTracking);
map->set_bit_field3(bit_field3);
map->set_weak_cell_cache(Smi::kZero);
+ map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
return map;
}
@@ -2303,6 +2389,11 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
AllocationResult Heap::AllocateMap(InstanceType instance_type,
int instance_size,
ElementsKind elements_kind) {
+ STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+ DCHECK_IMPLIES(instance_type >= FIRST_JS_OBJECT_TYPE &&
+ !Map::CanHaveFastTransitionableElementsKind(instance_type),
+ IsDictionaryElementsKind(elements_kind) ||
+ IsTerminalElementsKind(elements_kind));
HeapObject* result = nullptr;
AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
if (!allocation.To(&result)) return allocation;
@@ -2316,7 +2407,6 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
map->set_instance_size(instance_size);
map->clear_unused();
map->set_inobject_properties_or_constructor_function_index(0);
- map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
map->set_weak_cell_cache(Smi::kZero);
@@ -2359,307 +2449,6 @@ AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
}
-const Heap::StringTypeTable Heap::string_type_table[] = {
-#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
- { type, size, k##camel_name##MapRootIndex } \
- ,
- STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
-#undef STRING_TYPE_ELEMENT
-};
-
-
-const Heap::ConstantStringTable Heap::constant_string_table[] = {
- {"", kempty_stringRootIndex},
-#define CONSTANT_STRING_ELEMENT(name, contents) \
- { contents, k##name##RootIndex } \
- ,
- INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
-#undef CONSTANT_STRING_ELEMENT
-};
-
-
-const Heap::StructTable Heap::struct_table[] = {
-#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
- { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex } \
- ,
- STRUCT_LIST(STRUCT_TABLE_ELEMENT)
-#undef STRUCT_TABLE_ELEMENT
-};
-
-namespace {
-
-void FinalizePartialMap(Heap* heap, Map* map) {
- map->set_code_cache(heap->empty_fixed_array());
- map->set_dependent_code(DependentCode::cast(heap->empty_fixed_array()));
- map->set_raw_transitions(Smi::kZero);
- map->set_instance_descriptors(heap->empty_descriptor_array());
- if (FLAG_unbox_double_fields) {
- map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
- }
- map->set_prototype(heap->null_value());
- map->set_constructor_or_backpointer(heap->null_value());
-}
-
-} // namespace
-
-bool Heap::CreateInitialMaps() {
- HeapObject* obj = nullptr;
- {
- AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
- if (!allocation.To(&obj)) return false;
- }
- // Map::cast cannot be used due to uninitialized map field.
- Map* new_meta_map = reinterpret_cast<Map*>(obj);
- set_meta_map(new_meta_map);
- new_meta_map->set_map_after_allocation(new_meta_map);
-
- { // Partial map allocation
-#define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \
- { \
- Map* map; \
- if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
- set_##field_name##_map(map); \
- }
-
- ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
- fixed_array_map()->set_elements_kind(HOLEY_ELEMENTS);
- ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
- ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
- ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
-
-#undef ALLOCATE_PARTIAL_MAP
- }
-
- // Allocate the empty array.
- {
- AllocationResult allocation = AllocateEmptyFixedArray();
- if (!allocation.To(&obj)) return false;
- }
- set_empty_fixed_array(FixedArray::cast(obj));
-
- {
- AllocationResult allocation = Allocate(null_map(), OLD_SPACE);
- if (!allocation.To(&obj)) return false;
- }
- set_null_value(Oddball::cast(obj));
- Oddball::cast(obj)->set_kind(Oddball::kNull);
-
- {
- AllocationResult allocation = Allocate(undefined_map(), OLD_SPACE);
- if (!allocation.To(&obj)) return false;
- }
- set_undefined_value(Oddball::cast(obj));
- Oddball::cast(obj)->set_kind(Oddball::kUndefined);
- DCHECK(!InNewSpace(undefined_value()));
- {
- AllocationResult allocation = Allocate(the_hole_map(), OLD_SPACE);
- if (!allocation.To(&obj)) return false;
- }
- set_the_hole_value(Oddball::cast(obj));
- Oddball::cast(obj)->set_kind(Oddball::kTheHole);
-
- // Set preliminary exception sentinel value before actually initializing it.
- set_exception(null_value());
-
- // Allocate the empty descriptor array.
- {
- AllocationResult allocation = AllocateEmptyFixedArray();
- if (!allocation.To(&obj)) return false;
- }
- set_empty_descriptor_array(DescriptorArray::cast(obj));
-
- // Fix the instance_descriptors for the existing maps.
- FinalizePartialMap(this, meta_map());
- FinalizePartialMap(this, fixed_array_map());
- FinalizePartialMap(this, undefined_map());
- undefined_map()->set_is_undetectable();
- FinalizePartialMap(this, null_map());
- null_map()->set_is_undetectable();
- FinalizePartialMap(this, the_hole_map());
-
- { // Map allocation
-#define ALLOCATE_MAP(instance_type, size, field_name) \
- { \
- Map* map; \
- if (!AllocateMap((instance_type), size).To(&map)) return false; \
- set_##field_name##_map(map); \
- }
-
-#define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
- ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
-
-#define ALLOCATE_PRIMITIVE_MAP(instance_type, size, field_name, \
- constructor_function_index) \
- { \
- ALLOCATE_MAP((instance_type), (size), field_name); \
- field_name##_map()->SetConstructorFunctionIndex( \
- (constructor_function_index)); \
- }
-
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
- fixed_cow_array_map()->set_elements_kind(HOLEY_ELEMENTS);
- DCHECK_NE(fixed_array_map(), fixed_cow_array_map());
-
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info)
- ALLOCATE_VARSIZE_MAP(FEEDBACK_VECTOR_TYPE, feedback_vector)
- ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
- Context::NUMBER_FUNCTION_INDEX)
- ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
- mutable_heap_number)
- ALLOCATE_PRIMITIVE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol,
- Context::SYMBOL_FUNCTION_INDEX)
- ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
-
- ALLOCATE_PRIMITIVE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean,
- Context::BOOLEAN_FUNCTION_INDEX);
- ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
- ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
- ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
- ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
- ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, optimized_out);
- ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, stale_register);
-
- ALLOCATE_MAP(JS_PROMISE_CAPABILITY_TYPE, JSPromiseCapability::kSize,
- js_promise_capability);
-
- for (unsigned i = 0; i < arraysize(string_type_table); i++) {
- const StringTypeTable& entry = string_type_table[i];
- {
- AllocationResult allocation = AllocateMap(entry.type, entry.size);
- if (!allocation.To(&obj)) return false;
- }
- Map* map = Map::cast(obj);
- map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
- // Mark cons string maps as unstable, because their objects can change
- // maps during GC.
- if (StringShape(entry.type).IsCons()) map->mark_unstable();
- roots_[entry.index] = map;
- }
-
- { // Create a separate external one byte string map for native sources.
- AllocationResult allocation =
- AllocateMap(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE,
- ExternalOneByteString::kShortSize);
- if (!allocation.To(&obj)) return false;
- Map* map = Map::cast(obj);
- map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
- set_native_source_string_map(map);
- }
-
- ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
- fixed_double_array_map()->set_elements_kind(HOLEY_DOUBLE_ELEMENTS);
- ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
- ALLOCATE_VARSIZE_MAP(BYTECODE_ARRAY_TYPE, bytecode_array)
- ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
- ALLOCATE_VARSIZE_MAP(PROPERTY_ARRAY_TYPE, property_array)
- ALLOCATE_VARSIZE_MAP(SMALL_ORDERED_HASH_MAP_TYPE, small_ordered_hash_map)
- ALLOCATE_VARSIZE_MAP(SMALL_ORDERED_HASH_SET_TYPE, small_ordered_hash_set)
-
-#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
- ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
-
- TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
-#undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
-
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
-
- ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
-
- ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
- ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
- ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
- ALLOCATE_MAP(CELL_TYPE, Cell::kSize, no_closures_cell)
- ALLOCATE_MAP(CELL_TYPE, Cell::kSize, one_closure_cell)
- ALLOCATE_MAP(CELL_TYPE, Cell::kSize, many_closures_cell)
- ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
- ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
-
- ALLOCATE_VARSIZE_MAP(TRANSITION_ARRAY_TYPE, transition_array)
-
- for (unsigned i = 0; i < arraysize(struct_table); i++) {
- const StructTable& entry = struct_table[i];
- Map* map;
- if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
- roots_[entry.index] = map;
- }
-
- ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, hash_table)
- ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, ordered_hash_table)
- ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, unseeded_number_dictionary)
-
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, debug_evaluate_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, eval_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context_table)
-
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
- native_context_map()->set_visitor_id(kVisitNativeContext);
-
- ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
- shared_function_info)
-
- ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
- ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
- external_map()->set_is_extensible(false);
-#undef ALLOCATE_PRIMITIVE_MAP
-#undef ALLOCATE_VARSIZE_MAP
-#undef ALLOCATE_MAP
- }
-
- {
- AllocationResult allocation = AllocateEmptyScopeInfo();
- if (!allocation.To(&obj)) return false;
- }
-
- set_empty_scope_info(ScopeInfo::cast(obj));
- {
- AllocationResult allocation = Allocate(boolean_map(), OLD_SPACE);
- if (!allocation.To(&obj)) return false;
- }
- set_true_value(Oddball::cast(obj));
- Oddball::cast(obj)->set_kind(Oddball::kTrue);
-
- {
- AllocationResult allocation = Allocate(boolean_map(), OLD_SPACE);
- if (!allocation.To(&obj)) return false;
- }
- set_false_value(Oddball::cast(obj));
- Oddball::cast(obj)->set_kind(Oddball::kFalse);
-
- { // Empty arrays
- {
- ByteArray* byte_array;
- if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
- set_empty_byte_array(byte_array);
- }
-
- {
- PropertyArray* property_array;
- if (!AllocatePropertyArray(0, TENURED).To(&property_array)) return false;
- set_empty_property_array(property_array);
- }
-
-#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
- { \
- FixedTypedArrayBase* obj; \
- if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
- return false; \
- set_empty_fixed_##type##_array(obj); \
- }
-
- TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
-#undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
- }
- DCHECK(!InNewSpace(empty_fixed_array()));
- return true;
-}
-
AllocationResult Heap::AllocateHeapNumber(MutableMode mode,
PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate heap numbers in paged
@@ -2680,6 +2469,23 @@ AllocationResult Heap::AllocateHeapNumber(MutableMode mode,
return result;
}
+AllocationResult Heap::AllocateBigInt(int length, bool zero_initialize,
+ PretenureFlag pretenure) {
+ if (length < 0 || length > BigInt::kMaxLength) {
+ v8::internal::Heap::FatalProcessOutOfMemory("invalid BigInt length", true);
+ }
+ int size = BigInt::SizeFor(length);
+ AllocationSpace space = SelectSpace(pretenure);
+ HeapObject* result = nullptr;
+ {
+ AllocationResult allocation = AllocateRaw(size, space);
+ if (!allocation.To(&result)) return allocation;
+ }
+ result->set_map_after_allocation(bigint_map(), SKIP_WRITE_BARRIER);
+ BigInt::cast(result)->Initialize(length, zero_initialize);
+ return result;
+}
+
AllocationResult Heap::AllocateCell(Object* value) {
int size = Cell::kSize;
STATIC_ASSERT(Cell::kSize <= kMaxRegularHeapObjectSize);
@@ -2730,7 +2536,7 @@ AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
AllocationResult Heap::AllocateTransitionArray(int capacity) {
- DCHECK(capacity > 0);
+ DCHECK_LT(0, capacity);
HeapObject* raw_array = nullptr;
{
AllocationResult allocation = AllocateRawFixedArray(capacity, TENURED);
@@ -2749,21 +2555,6 @@ AllocationResult Heap::AllocateTransitionArray(int capacity) {
return array;
}
-bool Heap::CreateApiObjects() {
- HandleScope scope(isolate());
- set_message_listeners(*TemplateList::New(isolate(), 2));
- HeapObject* obj = nullptr;
- {
- AllocationResult allocation = AllocateStruct(INTERCEPTOR_INFO_TYPE);
- if (!allocation.To(&obj)) return false;
- }
- InterceptorInfo* info = InterceptorInfo::cast(obj);
- info->set_flags(0);
- set_noop_interceptor_info(info);
- return true;
-}
-
-
void Heap::CreateJSEntryStub() {
JSEntryStub stub(isolate(), StackFrame::ENTRY);
set_js_entry_code(*stub.GetCode());
@@ -2808,294 +2599,6 @@ void Heap::CreateFixedStubs() {
Heap::CreateJSConstructEntryStub();
}
-
-void Heap::CreateInitialObjects() {
- HandleScope scope(isolate());
- Factory* factory = isolate()->factory();
-
- // The -0 value must be set before NewNumber works.
- set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
- DCHECK(std::signbit(minus_zero_value()->Number()) != 0);
-
- set_nan_value(*factory->NewHeapNumber(
- std::numeric_limits<double>::quiet_NaN(), IMMUTABLE, TENURED));
- set_hole_nan_value(
- *factory->NewHeapNumberFromBits(kHoleNanInt64, IMMUTABLE, TENURED));
- set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
- set_minus_infinity_value(
- *factory->NewHeapNumber(-V8_INFINITY, IMMUTABLE, TENURED));
-
- // Allocate initial string table.
- set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
-
- // Allocate
-
- // Finish initializing oddballs after creating the string table.
- Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
- factory->nan_value(), "undefined", Oddball::kUndefined);
-
- // Initialize the null_value.
- Oddball::Initialize(isolate(), factory->null_value(), "null",
- handle(Smi::kZero, isolate()), "object", Oddball::kNull);
-
- // Initialize the_hole_value.
- Oddball::Initialize(isolate(), factory->the_hole_value(), "hole",
- factory->hole_nan_value(), "undefined",
- Oddball::kTheHole);
-
- // Initialize the true_value.
- Oddball::Initialize(isolate(), factory->true_value(), "true",
- handle(Smi::FromInt(1), isolate()), "boolean",
- Oddball::kTrue);
-
- // Initialize the false_value.
- Oddball::Initialize(isolate(), factory->false_value(), "false",
- handle(Smi::kZero, isolate()), "boolean",
- Oddball::kFalse);
-
- set_uninitialized_value(
- *factory->NewOddball(factory->uninitialized_map(), "uninitialized",
- handle(Smi::FromInt(-1), isolate()), "undefined",
- Oddball::kUninitialized));
-
- set_arguments_marker(
- *factory->NewOddball(factory->arguments_marker_map(), "arguments_marker",
- handle(Smi::FromInt(-4), isolate()), "undefined",
- Oddball::kArgumentsMarker));
-
- set_termination_exception(*factory->NewOddball(
- factory->termination_exception_map(), "termination_exception",
- handle(Smi::FromInt(-3), isolate()), "undefined", Oddball::kOther));
-
- set_exception(*factory->NewOddball(factory->exception_map(), "exception",
- handle(Smi::FromInt(-5), isolate()),
- "undefined", Oddball::kException));
-
- set_optimized_out(*factory->NewOddball(factory->optimized_out_map(),
- "optimized_out",
- handle(Smi::FromInt(-6), isolate()),
- "undefined", Oddball::kOptimizedOut));
-
- set_stale_register(
- *factory->NewOddball(factory->stale_register_map(), "stale_register",
- handle(Smi::FromInt(-7), isolate()), "undefined",
- Oddball::kStaleRegister));
-
- for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
- Handle<String> str =
- factory->InternalizeUtf8String(constant_string_table[i].contents);
- roots_[constant_string_table[i].index] = *str;
- }
-
- // Create the code_stubs dictionary. The initial size is set to avoid
- // expanding the dictionary during bootstrapping.
- set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
-
- {
- HandleScope scope(isolate());
-#define SYMBOL_INIT(name) \
- { \
- Handle<String> name##d = factory->NewStringFromStaticChars(#name); \
- Handle<Symbol> symbol(isolate()->factory()->NewPrivateSymbol()); \
- symbol->set_name(*name##d); \
- roots_[k##name##RootIndex] = *symbol; \
- }
- PRIVATE_SYMBOL_LIST(SYMBOL_INIT)
-#undef SYMBOL_INIT
- }
-
- {
- HandleScope scope(isolate());
-#define SYMBOL_INIT(name, description) \
- Handle<Symbol> name = factory->NewSymbol(); \
- Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
- name->set_name(*name##d); \
- roots_[k##name##RootIndex] = *name;
- PUBLIC_SYMBOL_LIST(SYMBOL_INIT)
-#undef SYMBOL_INIT
-
-#define SYMBOL_INIT(name, description) \
- Handle<Symbol> name = factory->NewSymbol(); \
- Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
- name->set_is_well_known_symbol(true); \
- name->set_name(*name##d); \
- roots_[k##name##RootIndex] = *name;
- WELL_KNOWN_SYMBOL_LIST(SYMBOL_INIT)
-#undef SYMBOL_INIT
-
- // Mark "Interesting Symbols" appropriately.
- to_string_tag_symbol->set_is_interesting_symbol(true);
- }
-
- Handle<NameDictionary> empty_property_dictionary =
- NameDictionary::New(isolate(), 1, TENURED, USE_CUSTOM_MINIMUM_CAPACITY);
- DCHECK(!empty_property_dictionary->HasSufficientCapacityToAdd(1));
- set_empty_property_dictionary(*empty_property_dictionary);
-
- set_public_symbol_table(*empty_property_dictionary);
- set_api_symbol_table(*empty_property_dictionary);
- set_api_private_symbol_table(*empty_property_dictionary);
-
- set_number_string_cache(
- *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
-
- // Allocate cache for single character one byte strings.
- set_single_character_string_cache(
- *factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));
-
- // Allocate cache for string split and regexp-multiple.
- set_string_split_cache(*factory->NewFixedArray(
- RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
- set_regexp_multiple_cache(*factory->NewFixedArray(
- RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
-
- set_undefined_cell(*factory->NewCell(factory->undefined_value()));
-
- // Microtask queue uses the empty fixed array as a sentinel for "empty".
- // Number of queued microtasks stored in Isolate::pending_microtask_count().
- set_microtask_queue(empty_fixed_array());
-
- {
- Handle<FixedArray> empty_sloppy_arguments_elements =
- factory->NewFixedArray(2, TENURED);
- empty_sloppy_arguments_elements->set_map_after_allocation(
- sloppy_arguments_elements_map(), SKIP_WRITE_BARRIER);
- set_empty_sloppy_arguments_elements(*empty_sloppy_arguments_elements);
- }
-
- {
- Handle<WeakCell> cell = factory->NewWeakCell(factory->undefined_value());
- set_empty_weak_cell(*cell);
- cell->clear();
- }
-
- set_detached_contexts(empty_fixed_array());
- set_retained_maps(ArrayList::cast(empty_fixed_array()));
- set_retaining_path_targets(undefined_value());
-
- set_weak_object_to_code_table(*WeakHashTable::New(isolate(), 16, TENURED));
-
- set_weak_new_space_object_to_code_list(
- ArrayList::cast(*(factory->NewFixedArray(16, TENURED))));
- weak_new_space_object_to_code_list()->SetLength(0);
-
- set_code_coverage_list(undefined_value());
-
- set_script_list(Smi::kZero);
-
- Handle<SeededNumberDictionary> slow_element_dictionary =
- SeededNumberDictionary::New(isolate(), 1, TENURED,
- USE_CUSTOM_MINIMUM_CAPACITY);
- DCHECK(!slow_element_dictionary->HasSufficientCapacityToAdd(1));
- slow_element_dictionary->set_requires_slow_elements();
- set_empty_slow_element_dictionary(*slow_element_dictionary);
-
- set_materialized_objects(*factory->NewFixedArray(0, TENURED));
-
- // Handling of script id generation is in Heap::NextScriptId().
- set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
- set_next_template_serial_number(Smi::kZero);
-
- // Allocate the empty OrderedHashTable.
- Handle<FixedArray> empty_ordered_hash_table =
- factory->NewFixedArray(OrderedHashMap::kHashTableStartIndex, TENURED);
- empty_ordered_hash_table->set_map_no_write_barrier(
- *factory->ordered_hash_table_map());
- for (int i = 0; i < empty_ordered_hash_table->length(); ++i) {
- empty_ordered_hash_table->set(i, Smi::kZero);
- }
- set_empty_ordered_hash_table(*empty_ordered_hash_table);
-
- // Allocate the empty script.
- Handle<Script> script = factory->NewScript(factory->empty_string());
- script->set_type(Script::TYPE_NATIVE);
- set_empty_script(*script);
-
- Handle<PropertyCell> cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_array_protector(*cell);
-
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(the_hole_value());
- set_empty_property_cell(*cell);
-
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_array_iterator_protector(*cell);
-
- Handle<Cell> is_concat_spreadable_cell = factory->NewCell(
- handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
- set_is_concat_spreadable_protector(*is_concat_spreadable_cell);
-
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_species_protector(*cell);
-
- Handle<Cell> string_length_overflow_cell = factory->NewCell(
- handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
- set_string_length_protector(*string_length_overflow_cell);
-
- Handle<Cell> fast_array_iteration_cell = factory->NewCell(
- handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
- set_fast_array_iteration_protector(*fast_array_iteration_cell);
-
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_array_buffer_neutering_protector(*cell);
-
- set_serialized_templates(empty_fixed_array());
- set_serialized_global_proxy_sizes(empty_fixed_array());
-
- set_weak_stack_trace_list(Smi::kZero);
-
- set_noscript_shared_function_infos(Smi::kZero);
-
- // Initialize context slot cache.
- isolate_->context_slot_cache()->Clear();
-
- // Initialize descriptor cache.
- isolate_->descriptor_lookup_cache()->Clear();
-
- // Initialize compilation cache.
- isolate_->compilation_cache()->Clear();
-
- // Finish creating JSPromiseCapabilityMap
- {
- // TODO(caitp): This initialization can be removed once PromiseCapability
- // object is no longer used by builtins implemented in javascript.
- Handle<Map> map = factory->js_promise_capability_map();
- map->set_inobject_properties_or_constructor_function_index(3);
-
- Map::EnsureDescriptorSlack(map, 3);
-
- PropertyAttributes attrs =
- static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
- { // promise
- Descriptor d = Descriptor::DataField(factory->promise_string(),
- JSPromiseCapability::kPromiseIndex,
- attrs, Representation::Tagged());
- map->AppendDescriptor(&d);
- }
-
- { // resolve
- Descriptor d = Descriptor::DataField(factory->resolve_string(),
- JSPromiseCapability::kResolveIndex,
- attrs, Representation::Tagged());
- map->AppendDescriptor(&d);
- }
-
- { // reject
- Descriptor d = Descriptor::DataField(factory->reject_string(),
- JSPromiseCapability::kRejectIndex,
- attrs, Representation::Tagged());
- map->AppendDescriptor(&d);
- }
-
- map->set_is_extensible(false);
- set_js_promise_capability_map(*map);
- }
-}
-
bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
switch (root_index) {
case kNumberStringCacheRootIndex:
@@ -3297,7 +2800,7 @@ AllocationResult Heap::AllocateBytecodeArray(int length,
instance->set_parameter_count(parameter_count);
instance->set_incoming_new_target_or_generator_register(
interpreter::Register::invalid_value());
- instance->set_interrupt_budget(interpreter::Interpreter::InterruptBudget());
+ instance->set_interrupt_budget(interpreter::Interpreter::kInterruptBudget);
instance->set_osr_loop_nesting_level(0);
instance->set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge);
instance->set_constant_pool(constant_pool);
@@ -3617,6 +3120,12 @@ AllocationResult Heap::CopyCode(Code* code) {
DCHECK(!memory_allocator()->code_range()->valid() ||
memory_allocator()->code_range()->contains(code->address()) ||
obj_size <= code_space()->AreaSize());
+
+ // Clear the trap handler index since it can't be shared between code
+ // objects. We have to do this before calling Relocate because Relocate
+ // would adjust the base pointer for the old code.
+ new_code->set_trap_handler_index(Smi::FromInt(trap_handler::kInvalidIndex));
+
new_code->Relocate(new_addr - old_addr);
// We have to iterate over the object and process its pointers when black
// allocation is on.
@@ -3872,7 +3381,7 @@ static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
while (stream_length != 0) {
size_t consumed = 0;
uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
- DCHECK(c != unibrow::Utf8::kBadChar);
+ DCHECK_NE(unibrow::Utf8::kBadChar, c);
DCHECK(consumed <= stream_length);
stream_length -= consumed;
stream += consumed;
@@ -3887,8 +3396,8 @@ static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
*chars++ = c;
}
}
- DCHECK(stream_length == 0);
- DCHECK(len == 0);
+ DCHECK_EQ(0, stream_length);
+ DCHECK_EQ(0, len);
}
@@ -3907,7 +3416,7 @@ static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
template <bool is_one_byte, typename T>
AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
uint32_t hash_field) {
- DCHECK(chars >= 0);
+ DCHECK_LE(0, chars);
// Compute map and object size.
int size;
Map* map;
@@ -3962,7 +3471,7 @@ AllocationResult Heap::AllocateRawOneByteString(int length,
DCHECK_LE(0, length);
DCHECK_GE(String::kMaxLength, length);
int size = SeqOneByteString::SizeFor(length);
- DCHECK(size <= SeqOneByteString::kMaxSize);
+ DCHECK_GE(SeqOneByteString::kMaxSize, size);
AllocationSpace space = SelectSpace(pretenure);
HeapObject* result = nullptr;
@@ -3986,7 +3495,7 @@ AllocationResult Heap::AllocateRawTwoByteString(int length,
DCHECK_LE(0, length);
DCHECK_GE(String::kMaxLength, length);
int size = SeqTwoByteString::SizeFor(length);
- DCHECK(size <= SeqTwoByteString::kMaxSize);
+ DCHECK_GE(SeqTwoByteString::kMaxSize, size);
AllocationSpace space = SelectSpace(pretenure);
HeapObject* result = nullptr;
@@ -4237,7 +3746,7 @@ AllocationResult Heap::AllocateRawFixedArray(int length,
AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
PretenureFlag pretenure,
Object* filler) {
- DCHECK(length >= 0);
+ DCHECK_LE(0, length);
DCHECK(empty_fixed_array()->IsFixedArray());
if (length == 0) return empty_fixed_array();
@@ -4257,7 +3766,10 @@ AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
AllocationResult Heap::AllocatePropertyArray(int length,
PretenureFlag pretenure) {
- DCHECK(length >= 0);
+ // Allow length = 0 for the empty_property_array singleton.
+ DCHECK_LE(0, length);
+ DCHECK_IMPLIES(length == 0, pretenure == TENURED);
+
DCHECK(!InNewSpace(undefined_value()));
HeapObject* result = nullptr;
{
@@ -4272,12 +3784,13 @@ AllocationResult Heap::AllocatePropertyArray(int length,
return result;
}
-AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
+AllocationResult Heap::AllocateUninitializedFixedArray(
+ int length, PretenureFlag pretenure) {
if (length == 0) return empty_fixed_array();
HeapObject* obj = nullptr;
{
- AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
+ AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
if (!allocation.To(&obj)) return allocation;
}
@@ -4321,7 +3834,7 @@ AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
AllocationResult Heap::AllocateRawFeedbackVector(int length,
PretenureFlag pretenure) {
- DCHECK(length >= 0);
+ DCHECK_LE(0, length);
int size = FeedbackVector::SizeFor(length);
AllocationSpace space = SelectSpace(pretenure);
@@ -4381,8 +3894,8 @@ AllocationResult Heap::AllocateSymbol() {
return result;
}
-
-AllocationResult Heap::AllocateStruct(InstanceType type) {
+AllocationResult Heap::AllocateStruct(InstanceType type,
+ PretenureFlag pretenure) {
Map* map;
switch (type) {
#define MAKE_CASE(NAME, Name, name) \
@@ -4397,7 +3910,8 @@ AllocationResult Heap::AllocateStruct(InstanceType type) {
int size = map->instance_size();
Struct* result = nullptr;
{
- AllocationResult allocation = Allocate(map, OLD_SPACE);
+ AllocationSpace space = SelectSpace(pretenure);
+ AllocationResult allocation = Allocate(map, space);
if (!allocation.To(&result)) return allocation;
}
result->InitializeBody(size);
@@ -4555,7 +4069,7 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
// object space for side effects.
IncrementalMarking::MarkingState* marking_state =
incremental_marking()->marking_state();
- for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
+ for (int i = OLD_SPACE; i < Serializer<>::kNumberOfSpaces; i++) {
const Heap::Reservation& res = reservations[i];
for (auto& chunk : res) {
Address addr = chunk.start;
@@ -4600,8 +4114,10 @@ void Heap::NotifyObjectLayoutChange(HeapObject* object, int size,
}
}
#ifdef VERIFY_HEAP
- DCHECK(pending_layout_change_object_ == nullptr);
- pending_layout_change_object_ = object;
+ if (FLAG_verify_heap) {
+ DCHECK_NULL(pending_layout_change_object_);
+ pending_layout_change_object_ = object;
+ }
#endif
}
@@ -4624,6 +4140,8 @@ class SlotCollectingVisitor final : public ObjectVisitor {
};
void Heap::VerifyObjectLayoutChange(HeapObject* object, Map* new_map) {
+ if (!FLAG_verify_heap) return;
+
// Check that Heap::NotifyObjectLayoutChange was called for object transitions
// that are not safe for concurrent marking.
// If you see this check triggering for a freshly allocated object,
@@ -4686,7 +4204,7 @@ bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
break;
}
case DO_FULL_GC: {
- DCHECK(contexts_disposed_ > 0);
+ DCHECK_LT(0, contexts_disposed_);
HistogramTimerScope scope(isolate_->counters()->gc_context());
TRACE_EVENT0("v8", "V8.GCContext");
CollectAllGarbage(kNoGCFlags, GarbageCollectionReason::kContextDisposal);
@@ -5302,8 +4820,8 @@ class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
private:
inline void FixHandle(Object** p) {
+ if (!(*p)->IsHeapObject()) return;
HeapObject* current = reinterpret_cast<HeapObject*>(*p);
- if (!current->IsHeapObject()) return;
const MapWord map_word = current->map_word();
if (!map_word.IsForwardingAddress() && current->IsFiller()) {
#ifdef DEBUG
@@ -5649,8 +5167,8 @@ const double Heap::kTargetMutatorUtilization = 0.97;
// F = R * (1 - MU) / (R * (1 - MU) - MU)
double Heap::HeapGrowingFactor(double gc_speed, double mutator_speed,
double max_factor) {
- DCHECK(max_factor >= kMinHeapGrowingFactor);
- DCHECK(max_factor <= kMaxHeapGrowingFactor);
+ DCHECK_LE(kMinHeapGrowingFactor, max_factor);
+ DCHECK_GE(kMaxHeapGrowingFactor, max_factor);
if (gc_speed == 0 || mutator_speed == 0) return max_factor;
const double speed_ratio = gc_speed / mutator_speed;
@@ -5695,8 +5213,8 @@ double Heap::MaxHeapGrowingFactor(size_t max_old_generation_size) {
size_t Heap::CalculateOldGenerationAllocationLimit(double factor,
size_t old_gen_size) {
- CHECK(factor > 1.0);
- CHECK(old_gen_size > 0);
+ CHECK_LT(1.0, factor);
+ CHECK_LT(0, old_gen_size);
uint64_t limit = static_cast<uint64_t>(old_gen_size * factor);
limit = Max(limit, static_cast<uint64_t>(old_gen_size) +
MinimumAllocationLimitGrowingStep());
@@ -5886,7 +5404,7 @@ bool Heap::SetUp() {
}
mmap_region_base_ =
- reinterpret_cast<uintptr_t>(base::OS::GetRandomMmapAddr()) &
+ reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
~kMmapRegionMask;
// Set up memory allocator.
@@ -5905,10 +5423,10 @@ bool Heap::SetUp() {
mark_compact_collector_->marking_worklist();
concurrent_marking_ = new ConcurrentMarking(
this, marking_worklist->shared(), marking_worklist->bailout(),
- mark_compact_collector_->weak_objects());
+ marking_worklist->on_hold(), mark_compact_collector_->weak_objects());
} else {
concurrent_marking_ =
- new ConcurrentMarking(this, nullptr, nullptr, nullptr);
+ new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr);
}
for (int i = 0; i <= LAST_SPACE; i++) {
@@ -5919,7 +5437,6 @@ bool Heap::SetUp() {
if (!new_space_->SetUp(initial_semispace_size_, max_semi_space_size_)) {
return false;
}
- new_space_top_after_last_gc_ = new_space()->top();
space_[OLD_SPACE] = old_space_ =
new OldSpace(this, OLD_SPACE, NOT_EXECUTABLE);
@@ -5938,7 +5455,7 @@ bool Heap::SetUp() {
if (!lo_space_->SetUp()) return false;
// Set up the seed that is used to randomize the string hash function.
- DCHECK(hash_seed() == 0);
+ DCHECK_EQ(Smi::kZero, hash_seed());
if (FLAG_randomize_hashes) InitializeHashSeed();
for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
@@ -5986,22 +5503,6 @@ void Heap::InitializeHashSeed() {
}
}
-bool Heap::CreateHeapObjects() {
- // Create initial maps.
- if (!CreateInitialMaps()) return false;
- if (!CreateApiObjects()) return false;
-
- // Create initial objects
- CreateInitialObjects();
- CHECK_EQ(0u, gc_count_);
-
- set_native_contexts_list(undefined_value());
- set_allocation_sites_list(undefined_value());
-
- return true;
-}
-
-
void Heap::SetStackLimits() {
DCHECK(isolate_ != NULL);
DCHECK(isolate_ == isolate());
@@ -6034,7 +5535,7 @@ void Heap::NotifyDeserializationComplete() {
#ifdef DEBUG
// All pages right after bootstrapping must be marked as never-evacuate.
for (Page* p : *s) {
- CHECK(p->NeverEvacuate());
+ DCHECK(p->NeverEvacuate());
}
#endif // DEBUG
}
@@ -6053,7 +5554,8 @@ void Heap::TracePossibleWrapper(JSObject* js_object) {
js_object->GetEmbedderField(0) &&
js_object->GetEmbedderField(0) != undefined_value() &&
js_object->GetEmbedderField(1) != undefined_value()) {
- DCHECK(reinterpret_cast<intptr_t>(js_object->GetEmbedderField(0)) % 2 == 0);
+ DCHECK_EQ(0,
+ reinterpret_cast<intptr_t>(js_object->GetEmbedderField(0)) % 2);
local_embedder_heap_tracer()->AddWrapperToTrace(std::pair<void*, void*>(
reinterpret_cast<void*>(js_object->GetEmbedderField(0)),
reinterpret_cast<void*>(js_object->GetEmbedderField(1))));
@@ -6359,7 +5861,7 @@ class CheckHandleCountVisitor : public RootVisitor {
public:
CheckHandleCountVisitor() : handle_count_(0) {}
~CheckHandleCountVisitor() override {
- CHECK(handle_count_ < HandleScope::kCheckHandleThreshold);
+ CHECK_GT(HandleScope::kCheckHandleThreshold, handle_count_);
}
void VisitRootPointers(Root root, Object** start, Object** end) override {
handle_count_ += end - start;
@@ -6532,7 +6034,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
class MarkingVisitor : public ObjectVisitor, public RootVisitor {
public:
explicit MarkingVisitor(UnreachableObjectsFilter* filter)
- : filter_(filter), marking_stack_(10) {}
+ : filter_(filter) {}
void VisitPointers(HeapObject* host, Object** start,
Object** end) override {
@@ -6544,8 +6046,9 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
}
void TransitiveClosure() {
- while (!marking_stack_.is_empty()) {
- HeapObject* obj = marking_stack_.RemoveLast();
+ while (!marking_stack_.empty()) {
+ HeapObject* obj = marking_stack_.back();
+ marking_stack_.pop_back();
obj->Iterate(this);
}
}
@@ -6556,12 +6059,12 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
if (!(*p)->IsHeapObject()) continue;
HeapObject* obj = HeapObject::cast(*p);
if (filter_->MarkAsReachable(obj)) {
- marking_stack_.Add(obj);
+ marking_stack_.push_back(obj);
}
}
}
UnreachableObjectsFilter* filter_;
- List<HeapObject*> marking_stack_;
+ std::vector<HeapObject*> marking_stack_;
};
friend class MarkingVisitor;
@@ -6606,7 +6109,7 @@ HeapIterator::~HeapIterator() {
// Assert that in filtering mode we have iterated through all
// objects. Otherwise, heap will be left in an inconsistent state.
if (filtering_ != kNoFiltering) {
- DCHECK(object_iterator_ == nullptr);
+ DCHECK_NULL(object_iterator_);
}
#endif
delete space_iterator_;
@@ -6891,5 +6394,15 @@ bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
UNREACHABLE();
}
+void Heap::CreateObjectStats() {
+ if (V8_LIKELY(FLAG_gc_stats == 0)) return;
+ if (!live_object_stats_) {
+ live_object_stats_ = new ObjectStats(this);
+ }
+ if (!dead_object_stats_) {
+ dead_object_stats_ = new ObjectStats(this);
+ }
+}
+
} // namespace internal
} // namespace v8
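The scavenger hunks above replace the old Scavenger::Barrier, with its separate Done() check and Wait() call, by the OneshotBarrier pulled in through the new src/heap/barrier.h include; completion detection is folded into Wait() itself, so each ScavengingTask drains work in a do/while loop until Wait() returns true. The barrier header is not part of this excerpt, so the following is only a minimal sketch, under the assumption that Wait() blocks until new work is published or all participants are idle, and returns true exactly when the latter happens; OneshotBarrierSketch, NotifyAll() and RunTask are illustrative names, not V8's API.

#include <condition_variable>
#include <mutex>

class OneshotBarrierSketch {
 public:
  explicit OneshotBarrierSketch(int participants)
      : participants_(participants) {}

  // Called by a worker that has published work other participants can steal.
  void NotifyAll() {
    std::lock_guard<std::mutex> guard(mutex_);
    ++epoch_;
    cv_.notify_all();
  }

  // Blocks until new work appears or everyone is waiting; returns true only
  // in the terminal "all participants idle" state, so it can serve directly
  // as the loop condition.
  bool Wait() {
    std::unique_lock<std::mutex> lock(mutex_);
    if (++waiting_ == participants_) {
      done_ = true;
      cv_.notify_all();
    } else {
      const int my_epoch = epoch_;
      cv_.wait(lock, [&] { return done_ || epoch_ != my_epoch; });
    }
    --waiting_;
    return done_;
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  const int participants_;
  int waiting_ = 0;
  int epoch_ = 0;
  bool done_ = false;
};

// Task loop shaped like ScavengingTask::RunInParallel above: process local
// work at least once, then retry until the barrier reports completion.
void RunTask(OneshotBarrierSketch* barrier) {
  do {
    // ProcessLocalWork(barrier);  // hypothetical; may call NotifyAll()
  } while (!barrier->Wait());
}

Compared to the old while (!Done()) { Process(); Wait(); } shape, the do/while guarantees each task processes its items at least once and removes the window between observing Done() and going back to sleep.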
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 7b87770385..687be8a3db 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -111,6 +111,7 @@ using v8::MemoryPressureLevel;
V(Map, one_closure_cell_map, OneClosureCellMap) \
V(Map, many_closures_cell_map, ManyClosuresCellMap) \
V(Map, property_array_map, PropertyArrayMap) \
+ V(Map, bigint_map, BigIntMap) \
/* String maps */ \
V(Map, native_source_string_map, NativeSourceStringMap) \
V(Map, string_map, StringMap) \
@@ -162,6 +163,7 @@ using v8::MemoryPressureLevel;
V(Map, optimized_out_map, OptimizedOutMap) \
V(Map, stale_register_map, StaleRegisterMap) \
/* Canonical empty values */ \
+ V(EnumCache, empty_enum_cache, EmptyEnumCache) \
V(PropertyArray, empty_property_array, EmptyPropertyArray) \
V(ByteArray, empty_byte_array, EmptyByteArray) \
V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array) \
@@ -184,6 +186,7 @@ using v8::MemoryPressureLevel;
V(WeakCell, empty_weak_cell, EmptyWeakCell) \
V(InterceptorInfo, noop_interceptor_info, NoOpInterceptorInfo) \
/* Protectors */ \
+ V(Cell, array_constructor_protector, ArrayConstructorProtector) \
V(PropertyCell, array_protector, ArrayProtector) \
V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector) \
V(PropertyCell, species_protector, SpeciesProtector) \
@@ -228,9 +231,6 @@ using v8::MemoryPressureLevel;
V(FixedArray, serialized_templates, SerializedTemplates) \
V(FixedArray, serialized_global_proxy_sizes, SerializedGlobalProxySizes) \
V(TemplateList, message_listeners, MessageListeners) \
- /* per-Isolate map for JSPromiseCapability. */ \
- /* TODO(caitp): Make this a Struct */ \
- V(Map, js_promise_capability_map, JSPromiseCapabilityMap) \
/* JS Entries */ \
V(Code, js_entry_code, JsEntryCode) \
V(Code, js_construct_entry_code, JsConstructEntryCode)
@@ -267,6 +267,7 @@ using v8::MemoryPressureLevel;
V(ArrayBufferNeuteringProtector) \
V(ArrayIteratorProtector) \
V(ArrayProtector) \
+ V(BigIntMap) \
V(BlockContextMap) \
V(BooleanMap) \
V(ByteArrayMap) \
@@ -819,17 +820,17 @@ class Heap {
inline uint32_t HashSeed();
inline int NextScriptId();
-
- inline void SetArgumentsAdaptorDeoptPCOffset(int pc_offset);
- inline void SetConstructStubCreateDeoptPCOffset(int pc_offset);
- inline void SetConstructStubInvokeDeoptPCOffset(int pc_offset);
- inline void SetGetterStubDeoptPCOffset(int pc_offset);
- inline void SetSetterStubDeoptPCOffset(int pc_offset);
- inline void SetInterpreterEntryReturnPCOffset(int pc_offset);
inline int GetNextTemplateSerialNumber();
- inline void SetSerializedTemplates(FixedArray* templates);
- inline void SetSerializedGlobalProxySizes(FixedArray* sizes);
+ void SetArgumentsAdaptorDeoptPCOffset(int pc_offset);
+ void SetConstructStubCreateDeoptPCOffset(int pc_offset);
+ void SetConstructStubInvokeDeoptPCOffset(int pc_offset);
+ void SetGetterStubDeoptPCOffset(int pc_offset);
+ void SetSetterStubDeoptPCOffset(int pc_offset);
+ void SetInterpreterEntryReturnPCOffset(int pc_offset);
+
+ void SetSerializedTemplates(FixedArray* templates);
+ void SetSerializedGlobalProxySizes(FixedArray* sizes);
// For post mortem debugging.
void RememberUnmappedPage(Address page, bool compacted);
@@ -850,7 +851,7 @@ class Heap {
void DeoptMarkedAllocationSites();
- inline bool DeoptMaybeTenuredAllocationSites();
+ bool DeoptMaybeTenuredAllocationSites();
void AddWeakNewSpaceObjectToCodeDependency(Handle<HeapObject> obj,
Handle<WeakCell> code);
@@ -938,7 +939,7 @@ class Heap {
bool CreateHeapObjects();
// Create ObjectStats if live_object_stats_ or dead_object_stats_ are nullptr.
- V8_INLINE void CreateObjectStats();
+ void CreateObjectStats();
// Destroys all memory allocated by the heap.
void TearDown();
@@ -1485,9 +1486,6 @@ class Heap {
Map* map, HeapObject* object,
PretenuringFeedbackMap* pretenuring_feedback);
- // Removes an entry from the global pretenuring storage.
- inline void RemoveAllocationSitePretenuringFeedback(AllocationSite* site);
-
// Merges local pretenuring feedback into the global one. Note that this
// method needs to be called after evacuation, as allocation sites may be
// evacuated and this method resolves forward pointers accordingly.
@@ -1524,7 +1522,7 @@ class Heap {
void ReportCodeStatistics(const char* title);
#endif
void* GetRandomMmapAddr() {
- void* result = base::OS::GetRandomMmapAddr();
+ void* result = v8::internal::GetRandomMmapAddr();
#if V8_TARGET_ARCH_X64
#if V8_OS_MACOSX
// The Darwin kernel [as of macOS 10.12.5] does not clean up page
@@ -1535,7 +1533,7 @@ class Heap {
// killed. Confine the hint to a 32-bit section of the virtual address
// space. See crbug.com/700928.
uintptr_t offset =
- reinterpret_cast<uintptr_t>(base::OS::GetRandomMmapAddr()) &
+ reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
kMmapRegionMask;
result = reinterpret_cast<void*>(mmap_region_base_ + offset);
#endif // V8_OS_MACOSX
@@ -1563,16 +1561,16 @@ class Heap {
// Registers an external string.
inline void AddString(String* string);
- inline void IterateAll(RootVisitor* v);
- inline void IterateNewSpaceStrings(RootVisitor* v);
- inline void PromoteAllNewSpaceStrings();
+ void IterateAll(RootVisitor* v);
+ void IterateNewSpaceStrings(RootVisitor* v);
+ void PromoteAllNewSpaceStrings();
// Restores internal invariant and gets rid of collected strings. Must be
// called after each Iterate*() that modified the strings.
void CleanUpAll();
void CleanUpNewSpaceStrings();
- // Destroys all allocated memory.
+ // Finalize all registered external strings and clear tables.
void TearDown();
void UpdateNewSpaceReferences(
@@ -1581,9 +1579,7 @@ class Heap {
Heap::ExternalStringTableUpdaterCallback updater_func);
private:
- inline void Verify();
-
- inline void AddOldString(String* string);
+ void Verify();
Heap* const heap_;
@@ -1820,6 +1816,9 @@ class Heap {
// object in old space must not move.
void ProcessPretenuringFeedback();
+ // Removes an entry from the global pretenuring storage.
+ void RemoveAllocationSitePretenuringFeedback(AllocationSite* site);
+
// ===========================================================================
// Actual GC. ================================================================
// ===========================================================================
@@ -1984,6 +1983,10 @@ class Heap {
MUST_USE_RESULT AllocationResult AllocateHeapNumber(
MutableMode mode = IMMUTABLE, PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT AllocationResult AllocateBigInt(int length,
+ bool zero_initialize,
+ PretenureFlag pretenure);
+
// Allocates a byte array of the specified length
MUST_USE_RESULT AllocationResult
AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED);
@@ -2084,7 +2087,8 @@ class Heap {
T t, int chars, uint32_t hash_field);
// Allocates an uninitialized fixed array. It must be filled by the caller.
- MUST_USE_RESULT AllocationResult AllocateUninitializedFixedArray(int length);
+ MUST_USE_RESULT AllocationResult AllocateUninitializedFixedArray(
+ int length, PretenureFlag pretenure = NOT_TENURED);
// Make a copy of src and return it.
MUST_USE_RESULT inline AllocationResult CopyFixedArray(FixedArray* src);
@@ -2171,7 +2175,8 @@ class Heap {
MUST_USE_RESULT AllocationResult AllocateTransitionArray(int capacity);
// Allocates a new utility object in the old generation.
- MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type);
+ MUST_USE_RESULT AllocationResult
+ AllocateStruct(InstanceType type, PretenureFlag pretenure = NOT_TENURED);
// Allocates a new foreign object.
MUST_USE_RESULT AllocationResult
@@ -2253,7 +2258,6 @@ class Heap {
Space* space_[LAST_SPACE + 1];
HeapState gc_state_;
int gc_post_processing_depth_;
- Address new_space_top_after_last_gc_;
// Returns the amount of external memory registered since last global gc.
uint64_t PromotedExternalMemorySize();
@@ -2639,7 +2643,7 @@ class AllocationObserver {
public:
explicit AllocationObserver(intptr_t step_size)
: step_size_(step_size), bytes_to_next_step_(step_size) {
- DCHECK(step_size >= kPointerSize);
+ DCHECK_LE(kPointerSize, step_size);
}
virtual ~AllocationObserver() {}
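A large share of this commit is the mechanical conversion of boolean asserts such as DCHECK(a >= b) into the dedicated comparison macros (DCHECK_EQ, DCHECK_LT, DCHECK_LE, DCHECK_GE, DCHECK_NULL), as in the AllocationObserver hunk directly above. The payoff is diagnosability: the comparison forms know both operands and can report them when the check fails, which a plain DCHECK on an already-collapsed boolean cannot. V8's real macros live in src/base/logging.h and also print the evaluated values and compile away in release builds; the sketch below only captures the core idea, and SKETCH_DCHECK_EQ is an invented name.

#include <cstdio>
#include <cstdlib>

// Minimal stand-in for a comparison-style assert: evaluates each operand
// exactly once and aborts with the source expressions on failure. Unlike
// the real macro, it does not print the evaluated values.
#define SKETCH_DCHECK_EQ(expected, actual)                                   \
  do {                                                                       \
    const auto& e_ = (expected);                                             \
    const auto& a_ = (actual);                                               \
    if (!(e_ == a_)) {                                                       \
      std::fprintf(stderr, "Check failed: %s == %s.\n", #expected, #actual); \
      std::abort();                                                          \
    }                                                                        \
  } while (false)

// Usage mirroring a conversion from this diff:
//   SKETCH_DCHECK_EQ(0, reserved_size % Map::kSize);

The converted call sites also settle on an expected-then-actual argument order, as in DCHECK_EQ(Smi::kZero, hash_seed()) and DCHECK_LE(0, length) above, which keeps the known-good value on the left-hand side of the failure message.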
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 92d257a669..b286289254 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -23,12 +23,26 @@
namespace v8 {
namespace internal {
-void IncrementalMarking::Observer::Step(int bytes_allocated, Address, size_t) {
- VMState<GC> state(incremental_marking_.heap()->isolate());
+void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
+ size_t size) {
+ Heap* heap = incremental_marking_.heap();
+ VMState<GC> state(heap->isolate());
RuntimeCallTimerScope runtime_timer(
- incremental_marking_.heap()->isolate(),
- &RuntimeCallStats::GC_Custom_IncrementalMarkingObserver);
+ heap->isolate(), &RuntimeCallStats::GC_Custom_IncrementalMarkingObserver);
incremental_marking_.AdvanceIncrementalMarkingOnAllocation();
+ if (incremental_marking_.black_allocation() && addr != nullptr) {
+ // AdvanceIncrementalMarkingOnAllocation can start black allocation.
+ // Ensure that the new object is marked black.
+ HeapObject* object = HeapObject::FromAddress(addr);
+ if (incremental_marking_.marking_state()->IsWhite(object) &&
+ !heap->InNewSpace(object)) {
+ if (heap->lo_space()->Contains(object)) {
+ incremental_marking_.marking_state()->WhiteToBlack(object);
+ } else {
+ Page::FromAddress(addr)->CreateBlackArea(addr, addr + size);
+ }
+ }
+ }
}
IncrementalMarking::IncrementalMarking(Heap* heap)
@@ -46,8 +60,8 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
finalize_marking_completed_(false),
trace_wrappers_toggle_(false),
request_type_(NONE),
- new_generation_observer_(*this, kAllocatedThreshold),
- old_generation_observer_(*this, kAllocatedThreshold) {
+ new_generation_observer_(*this, kYoungGenerationAllocatedThreshold),
+ old_generation_observer_(*this, kOldGenerationAllocatedThreshold) {
SetState(STOPPED);
}
@@ -108,8 +122,8 @@ void IncrementalMarking::MarkBlackAndPush(HeapObject* obj) {
if (marking_state()->GreyToBlack(obj)) {
if (FLAG_concurrent_marking) {
marking_worklist()->PushBailout(obj);
- } else if (!marking_worklist()->Push(obj)) {
- non_atomic_marking_state()->BlackToGrey(obj);
+ } else {
+ marking_worklist()->Push(obj);
}
}
}
@@ -200,27 +214,24 @@ class IncrementalMarkingMarkingVisitor final
int start_offset =
Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
if (start_offset < object_size) {
+ // Ensure that the object is either grey or black before pushing it
+            // into the marking worklist.
+ incremental_marking_->marking_state()->WhiteToGrey(object);
if (FLAG_concurrent_marking) {
incremental_marking_->marking_worklist()->PushBailout(object);
} else {
- if (incremental_marking_->marking_state()->IsGrey(object)) {
- incremental_marking_->marking_worklist()->Push(object);
- } else {
- DCHECK(incremental_marking_->marking_state()->IsBlack(object));
- collector_->PushBlack(object);
- }
+ incremental_marking_->marking_worklist()->Push(object);
}
+ DCHECK(incremental_marking_->marking_state()->IsGrey(object) ||
+ incremental_marking_->marking_state()->IsBlack(object));
+
int end_offset =
Min(object_size, start_offset + kProgressBarScanningChunk);
int already_scanned_offset = start_offset;
- bool scan_until_end = false;
- do {
- VisitPointers(object, HeapObject::RawField(object, start_offset),
- HeapObject::RawField(object, end_offset));
- start_offset = end_offset;
- end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
- scan_until_end = incremental_marking_->marking_worklist()->IsFull();
- } while (scan_until_end && start_offset < object_size);
+ VisitPointers(object, HeapObject::RawField(object, start_offset),
+ HeapObject::RawField(object, end_offset));
+ start_offset = end_offset;
+ end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
chunk->set_progress_bar(start_offset);
if (start_offset < object_size) {
incremental_marking_->NotifyIncompleteScanOfObject(
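The rewritten loop scans exactly one chunk per step instead of looping on the (now removed) worklist-full condition, storing the resume offset in the page's progress bar. A standalone sketch of the chunked scan (assumed chunk size; not V8 code):

    #include <algorithm>
    #include <vector>

    constexpr int kChunk = 32 * 1024;  // stand-in for kProgressBarScanningChunk

    // Visits one chunk starting at progress_bar and returns the new offset;
    // the caller re-enqueues the array while the result is short of size().
    int ScanOneChunk(const std::vector<int>& slots, int progress_bar,
                     void (*visit)(int)) {
      const int size = static_cast<int>(slots.size());
      const int end = std::min(size, progress_bar + kChunk);
      for (int i = progress_bar; i < end; i++) visit(slots[i]);
      return end;
    }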
@@ -414,7 +425,6 @@ void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
}
}
-
static void PatchIncrementalMarkingRecordWriteStubs(
Heap* heap, RecordWriteStub::Mode mode) {
UnseededNumberDictionary* stubs = heap->code_stubs();
@@ -436,6 +446,12 @@ static void PatchIncrementalMarkingRecordWriteStubs(
}
}
+void IncrementalMarking::Deactivate() {
+ DeactivateIncrementalWriteBarrier();
+ PatchIncrementalMarkingRecordWriteStubs(heap_,
+ RecordWriteStub::STORE_BUFFER_ONLY);
+}
+
void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
if (FLAG_trace_incremental_marking) {
int old_generation_size_mb =
@@ -529,8 +545,6 @@ void IncrementalMarking::StartMarking() {
PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
- marking_worklist()->StartUsing();
-
ActivateIncrementalWriteBarrier();
// Marking bits are cleared by the sweeper.
@@ -924,11 +938,6 @@ void IncrementalMarking::Stop() {
}
IncrementalMarking::set_should_hurry(false);
- if (IsMarking()) {
- PatchIncrementalMarkingRecordWriteStubs(heap_,
- RecordWriteStub::STORE_BUFFER_ONLY);
- DeactivateIncrementalWriteBarrier();
- }
heap_->isolate()->stack_guard()->ClearGC();
SetState(STOPPED);
is_compacting_ = false;
@@ -1054,8 +1063,8 @@ size_t IncrementalMarking::StepSizeToMakeProgress() {
// leave marking work to standalone tasks. The ramp up duration and the
// target step count are chosen based on benchmarks.
const int kRampUpIntervalMs = 300;
- const size_t kTargetStepCount = 128;
- const size_t kTargetStepCountAtOOM = 16;
+ const size_t kTargetStepCount = 256;
+ const size_t kTargetStepCountAtOOM = 32;
size_t oom_slack = heap()->new_space()->Capacity() + 64 * MB;
if (heap()->IsCloseToOutOfMemory(oom_slack)) {
@@ -1063,7 +1072,7 @@ size_t IncrementalMarking::StepSizeToMakeProgress() {
}
size_t step_size = Max(initial_old_generation_size_ / kTargetStepCount,
- IncrementalMarking::kAllocatedThreshold);
+ IncrementalMarking::kMinStepSizeInBytes);
double time_passed_ms =
heap_->MonotonicallyIncreasingTimeInMs() - start_time_ms_;
double factor = Min(time_passed_ms / kRampUpIntervalMs, 1.0);
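Doubling both target step counts halves the work done per step. The schedule, reduced to a standalone function (OOM branch omitted):

    #include <algorithm>
    #include <cstddef>

    // E.g. a 512 MB old generation yields 512 MB / 256 = 2 MB per step,
    // ramped up linearly over the first 300 ms of the marking cycle.
    size_t StepSizeToMakeProgress(size_t initial_old_gen_size, double time_ms) {
      const size_t kTargetStepCount = 256;
      const size_t kMinStepSizeInBytes = 64 * 1024;
      const double kRampUpIntervalMs = 300;
      const size_t step = std::max(initial_old_gen_size / kTargetStepCount,
                                   kMinStepSizeInBytes);
      const double factor = std::min(time_ms / kRampUpIntervalMs, 1.0);
      return static_cast<size_t>(factor * step);
    }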
@@ -1081,7 +1090,7 @@ void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
size_t bytes_to_process =
StepSizeToKeepUpWithAllocations() + StepSizeToMakeProgress();
- if (bytes_to_process >= IncrementalMarking::kAllocatedThreshold) {
+ if (bytes_to_process >= IncrementalMarking::kMinStepSizeInBytes) {
// The first step after Scavenge will see many allocated bytes.
// Cap the step size to distribute the marking work more uniformly.
size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
@@ -1089,6 +1098,13 @@ void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
bytes_to_process = Min(bytes_to_process, max_step_size);
+ if (FLAG_concurrent_marking && marking_worklist()->IsBailoutEmpty()) {
+ // The number of background tasks + the main thread.
+ size_t tasks = heap()->concurrent_marking()->TaskCount() + 1;
+ bytes_to_process = Max(IncrementalMarking::kMinStepSizeInBytes,
+ bytes_to_process / tasks);
+ }
+
size_t bytes_processed = 0;
if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
// Steps performed in tasks have put us ahead of schedule.
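The new division spreads the marking budget across the concurrent markers; in isolation:

    #include <algorithm>
    #include <cstddef>

    // Main-thread share of the marking budget while background markers run:
    // divide by (tasks + 1), but never drop below the minimum step size.
    size_t MainThreadShare(size_t bytes_to_process, size_t background_tasks,
                           size_t min_step_size) {
      return std::max(min_step_size, bytes_to_process / (background_tasks + 1));
    }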
@@ -1121,6 +1137,14 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
size_t bytes_processed = 0;
if (state_ == MARKING) {
+ if (FLAG_concurrent_marking) {
+ heap_->new_space()->ResetOriginalTop();
+      // It is safe to merge all objects that were on hold back into the
+      // shared work list at this Step, because we are at a safepoint where
+      // all objects are properly initialized.
+ marking_worklist()->shared()->MergeGlobalPool(
+ marking_worklist()->on_hold());
+ }
if (FLAG_trace_incremental_marking && FLAG_trace_concurrent_marking &&
FLAG_trace_gc_verbose) {
marking_worklist()->Print();
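A sketch of the merge performed here (generic containers, not the actual Worklist API): parked entries simply rejoin the shared pool once the safepoint guarantees they are initialized.

    #include <vector>

    template <typename T>
    void MergeOnHoldIntoShared(std::vector<T>* shared, std::vector<T>* on_hold) {
      shared->insert(shared->end(), on_hold->begin(), on_hold->end());
      on_hold->clear();
    }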
@@ -1160,9 +1184,14 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
if (FLAG_trace_incremental_marking) {
heap_->isolate()->PrintWithTimestamp(
- "[IncrementalMarking] Step %s %zu bytes (%zu) in %.1f\n",
- step_origin == StepOrigin::kV8 ? "in v8" : "in task", bytes_processed,
- bytes_to_process, duration);
+ "[IncrementalMarking] Step %s %" PRIuS "KB (%" PRIuS "KB) in %.1f\n",
+ step_origin == StepOrigin::kV8 ? "in v8" : "in task",
+ bytes_processed / KB, bytes_to_process / KB, duration);
+ }
+ if (FLAG_trace_concurrent_marking) {
+ heap_->isolate()->PrintWithTimestamp(
+ "Concurrently marked %" PRIuS "KB\n",
+ heap_->concurrent_marking()->TotalMarkedBytes() / KB);
}
return bytes_processed;
}
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 0395ab3a48..0579c9c676 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -82,6 +82,35 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
bool paused_;
};
+ // It's hard to know how much work the incremental marker should do to make
+ // progress in the face of the mutator creating new work for it. We start
+  // off at a moderate rate of work and gradually increase the speed of the
+ // incremental marker until it completes.
+ // Do some marking every time this much memory has been allocated or that many
+ // heavy (color-checking) write barriers have been invoked.
+ static const size_t kYoungGenerationAllocatedThreshold = 64 * KB;
+ static const size_t kOldGenerationAllocatedThreshold = 256 * KB;
+ static const size_t kMinStepSizeInBytes = 64 * KB;
+
+ static const int kStepSizeInMs = 1;
+ static const int kMaxStepSizeInMs = 5;
+
+ // This is the upper bound for how many times we allow finalization of
+ // incremental marking to be postponed.
+ static const int kMaxIdleMarkingDelayCounter = 3;
+
+#ifndef DEBUG
+ static const intptr_t kActivationThreshold = 8 * MB;
+#else
+ static const intptr_t kActivationThreshold = 0;
+#endif
+
+#ifdef V8_CONCURRENT_MARKING
+ static const AccessMode kAtomicity = AccessMode::ATOMIC;
+#else
+ static const AccessMode kAtomicity = AccessMode::NON_ATOMIC;
+#endif
+
explicit IncrementalMarking(Heap* heap);
MarkingState* marking_state() { return &marking_state_; }
@@ -112,13 +141,12 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
}
}
-
- State state() {
+ State state() const {
DCHECK(state_ == STOPPED || FLAG_incremental_marking);
return state_;
}
- bool should_hurry() { return should_hurry_; }
+ bool should_hurry() const { return should_hurry_; }
void set_should_hurry(bool val) { should_hurry_ = val; }
bool finalize_marking_completed() const {
@@ -129,15 +157,15 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
finalize_marking_completed_ = val;
}
- inline bool IsStopped() { return state() == STOPPED; }
+ inline bool IsStopped() const { return state() == STOPPED; }
- inline bool IsSweeping() { return state() == SWEEPING; }
+ inline bool IsSweeping() const { return state() == SWEEPING; }
- INLINE(bool IsMarking()) { return state() >= MARKING; }
+ inline bool IsMarking() const { return state() >= MARKING; }
- inline bool IsMarkingIncomplete() { return state() == MARKING; }
+ inline bool IsMarkingIncomplete() const { return state() == MARKING; }
- inline bool IsComplete() { return state() == COMPLETE; }
+ inline bool IsComplete() const { return state() == COMPLETE; }
inline bool IsReadyToOverApproximateWeakClosure() const {
return request_type_ == FINALIZATION && !finalize_marking_completed_;
@@ -182,33 +210,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
ForceCompletionAction force_completion,
StepOrigin step_origin);
- // It's hard to know how much work the incremental marker should do to make
- // progress in the face of the mutator creating new work for it. We start
- // of at a moderate rate of work and gradually increase the speed of the
- // incremental marker until it completes.
- // Do some marking every time this much memory has been allocated or that many
- // heavy (color-checking) write barriers have been invoked.
- static const size_t kAllocatedThreshold = 64 * KB;
-
- static const int kStepSizeInMs = 1;
- static const int kMaxStepSizeInMs = 5;
-
- // This is the upper bound for how many times we allow finalization of
- // incremental marking to be postponed.
- static const int kMaxIdleMarkingDelayCounter = 3;
-
-#ifndef DEBUG
- static const intptr_t kActivationThreshold = 8 * MB;
-#else
- static const intptr_t kActivationThreshold = 0;
-#endif
-
-#ifdef V8_CONCURRENT_MARKING
- static const AccessMode kAtomicity = AccessMode::ATOMIC;
-#else
- static const AccessMode kAtomicity = AccessMode::NON_ATOMIC;
-#endif
-
void FinalizeSweeping();
size_t Step(size_t bytes_to_process, CompletionAction action,
@@ -225,10 +226,11 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
// No slots in white objects should be recorded, as some slots are typed and
// cannot be interpreted correctly if the underlying object does not survive
// the incremental cycle (stays white).
- INLINE(bool BaseRecordWrite(HeapObject* obj, Object* value));
- INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
- INLINE(void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* value));
- INLINE(void RecordWrites(HeapObject* obj));
+ V8_INLINE bool BaseRecordWrite(HeapObject* obj, Object* value);
+ V8_INLINE void RecordWrite(HeapObject* obj, Object** slot, Object* value);
+ V8_INLINE void RecordWriteIntoCode(Code* host, RelocInfo* rinfo,
+ Object* value);
+ V8_INLINE void RecordWrites(HeapObject* obj);
void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* value);
@@ -290,6 +292,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
marking_worklist_ = marking_worklist;
}
+ void Deactivate();
+
private:
class Observer : public AllocationObserver {
public:
@@ -303,7 +307,10 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
IncrementalMarking& incremental_marking_;
};
- int64_t SpaceLeftInOldSpace();
+ static void SetOldSpacePageFlags(MemoryChunk* chunk, bool is_marking,
+ bool is_compacting);
+
+ static void SetNewSpacePageFlags(MemoryChunk* chunk, bool is_marking);
void StartMarking();
@@ -317,25 +324,21 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void RetainMaps();
void ActivateIncrementalWriteBarrier(PagedSpace* space);
- static void ActivateIncrementalWriteBarrier(NewSpace* space);
+ void ActivateIncrementalWriteBarrier(NewSpace* space);
void ActivateIncrementalWriteBarrier();
- static void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space);
- static void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
+ void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space);
+ void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
void DeactivateIncrementalWriteBarrier();
- static void SetOldSpacePageFlags(MemoryChunk* chunk, bool is_marking,
- bool is_compacting);
-
- static void SetNewSpacePageFlags(MemoryChunk* chunk, bool is_marking);
-
- INLINE(intptr_t ProcessMarkingWorklist(
+ V8_INLINE intptr_t ProcessMarkingWorklist(
intptr_t bytes_to_process,
- ForceCompletionAction completion = DO_NOT_FORCE_COMPLETION));
+ ForceCompletionAction completion = DO_NOT_FORCE_COMPLETION);
+
+ V8_INLINE bool IsFixedArrayWithProgressBar(HeapObject* object);
- INLINE(bool IsFixedArrayWithProgressBar(HeapObject* object));
// Visits the object and returns its size.
- INLINE(int VisitObject(Map* map, HeapObject* obj));
+ V8_INLINE int VisitObject(Map* map, HeapObject* obj);
void RevisitObject(HeapObject* obj);
@@ -346,6 +349,11 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
size_t StepSizeToKeepUpWithAllocations();
size_t StepSizeToMakeProgress();
+ void SetState(State s) {
+ state_ = s;
+ heap_->SetIsMarkingFlag(s >= MARKING);
+ }
+
Heap* heap_;
MarkCompactCollector::MarkingWorklist* marking_worklist_;
@@ -356,11 +364,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
size_t bytes_marked_ahead_of_schedule_;
size_t unscanned_bytes_of_large_object_;
- void SetState(State s) {
- state_ = s;
- heap_->SetIsMarkingFlag(s >= MARKING);
- }
-
// Must use SetState() above to update state_
State state_;
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index 2023f1debb..e914ec1f6c 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -12,16 +12,9 @@
namespace v8 {
namespace internal {
-void MarkCompactCollector::PushBlack(HeapObject* obj) {
- DCHECK(non_atomic_marking_state()->IsBlack(obj));
- if (!marking_worklist()->Push(obj)) {
- non_atomic_marking_state()->BlackToGrey(obj);
- }
-}
-
void MarkCompactCollector::MarkObject(HeapObject* host, HeapObject* obj) {
- if (non_atomic_marking_state()->WhiteToBlack(obj)) {
- PushBlack(obj);
+ if (atomic_marking_state()->WhiteToGrey(obj)) {
+ marking_worklist()->Push(obj);
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->AddRetainer(host, obj);
}
@@ -29,8 +22,8 @@ void MarkCompactCollector::MarkObject(HeapObject* host, HeapObject* obj) {
}
void MarkCompactCollector::MarkRootObject(Root root, HeapObject* obj) {
- if (non_atomic_marking_state()->WhiteToBlack(obj)) {
- PushBlack(obj);
+ if (atomic_marking_state()->WhiteToGrey(obj)) {
+ marking_worklist()->Push(obj);
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->AddRetainingRoot(root, obj);
}
@@ -38,8 +31,8 @@ void MarkCompactCollector::MarkRootObject(Root root, HeapObject* obj) {
}
void MarkCompactCollector::MarkExternallyReferencedObject(HeapObject* obj) {
- if (non_atomic_marking_state()->WhiteToBlack(obj)) {
- PushBlack(obj);
+ if (atomic_marking_state()->WhiteToGrey(obj)) {
+ marking_worklist()->Push(obj);
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->AddRetainingRoot(Root::kWrapperTracing, obj);
}
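All three entry points now push objects grey instead of black, deferring GreyToBlack to the drain loop in ProcessMarkingWorklist. The tri-color protocol in miniature (plain C++ model, not V8's bitmaps):

    #include <deque>
    #include <vector>

    enum class Color { kWhite, kGrey, kBlack };
    struct Node { Color color = Color::kWhite; std::vector<Node*> fields; };

    // Entry point: color a white object grey and enqueue it.
    void MarkObject(std::deque<Node*>* worklist, Node* node) {
      if (node->color == Color::kWhite) {  // WhiteToGrey()
        node->color = Color::kGrey;
        worklist->push_back(node);         // Push()
      }
    }

    // Drain loop: blacken each popped object before visiting its fields.
    void Drain(std::deque<Node*>* worklist) {
      while (!worklist->empty()) {
        Node* node = worklist->front();
        worklist->pop_front();
        node->color = Color::kBlack;       // GreyToBlack()
        for (Node* field : node->fields) MarkObject(worklist, field);
      }
    }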
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 194415e949..3d28a18c7a 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -459,11 +459,10 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
}
void MarkCompactCollector::SetUp() {
- DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
- marking_worklist()->SetUp();
+ DCHECK_EQ(0, strcmp(Marking::kWhiteBitPattern, "00"));
+ DCHECK_EQ(0, strcmp(Marking::kBlackBitPattern, "11"));
+ DCHECK_EQ(0, strcmp(Marking::kGreyBitPattern, "10"));
+ DCHECK_EQ(0, strcmp(Marking::kImpossibleBitPattern, "01"));
}
void MinorMarkCompactCollector::SetUp() {}
@@ -471,7 +470,9 @@ void MinorMarkCompactCollector::SetUp() {}
void MarkCompactCollector::TearDown() {
AbortCompaction();
AbortWeakObjects();
- marking_worklist()->TearDown();
+ if (heap()->incremental_marking()->IsMarking()) {
+ marking_worklist()->Clear();
+ }
}
void MinorMarkCompactCollector::TearDown() {}
@@ -522,20 +523,11 @@ void MarkCompactCollector::CollectGarbage() {
heap()->minor_mark_compact_collector()->CleanupSweepToIteratePages();
MarkLiveObjects();
-
- DCHECK(heap_->incremental_marking()->IsStopped());
-
ClearNonLiveReferences();
+ VerifyMarking();
RecordObjectStats();
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- FullMarkingVerifier verifier(heap());
- verifier.Run();
- }
-#endif
-
StartSweepSpaces();
Evacuate();
@@ -834,7 +826,9 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
? nullptr
: Page::FromAllocationAreaAddress(space->top());
for (Page* p : *space) {
- if (p->NeverEvacuate() || p == owner_of_linear_allocation_area) continue;
+ if (p->NeverEvacuate() || (p == owner_of_linear_allocation_area) ||
+ !p->CanAllocate())
+ continue;
// Invariant: Evacuation candidates are just created when marking is
// started. This means that sweeping has finished. Furthermore, at the end
// of a GC all evacuation candidates are cleared and their slot buffers are
@@ -981,19 +975,12 @@ void MarkCompactCollector::Prepare() {
// them here.
heap()->memory_allocator()->unmapper()->WaitUntilCompleted();
- heap()->concurrent_marking()->EnsureCompleted();
- heap()->concurrent_marking()->FlushLiveBytes(non_atomic_marking_state());
-
-#ifdef VERIFY_HEAP
- heap()->old_space()->VerifyLiveBytes();
- heap()->map_space()->VerifyLiveBytes();
- heap()->code_space()->VerifyLiveBytes();
-#endif
-
// Clear marking bits if incremental marking is aborted.
if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
heap()->incremental_marking()->Stop();
heap()->incremental_marking()->AbortBlackAllocation();
+ FinishConcurrentMarking();
+ heap()->incremental_marking()->Deactivate();
ClearMarkbits();
AbortWeakCollections();
AbortWeakObjects();
@@ -1028,6 +1015,28 @@ void MarkCompactCollector::Prepare() {
#endif
}
+void MarkCompactCollector::FinishConcurrentMarking() {
+ if (FLAG_concurrent_marking) {
+ heap()->concurrent_marking()->EnsureCompleted();
+ heap()->concurrent_marking()->FlushLiveBytes(non_atomic_marking_state());
+ }
+}
+
+void MarkCompactCollector::VerifyMarking() {
+ CHECK(marking_worklist()->IsEmpty());
+ DCHECK(heap_->incremental_marking()->IsStopped());
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ FullMarkingVerifier verifier(heap());
+ verifier.Run();
+ }
+#endif
+#ifdef VERIFY_HEAP
+ heap()->old_space()->VerifyLiveBytes();
+ heap()->map_space()->VerifyLiveBytes();
+ heap()->code_space()->VerifyLiveBytes();
+#endif
+}
void MarkCompactCollector::Finish() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
@@ -1123,7 +1132,7 @@ class MarkCompactMarkingVisitor final
// Marks the object black without pushing it on the marking stack. Returns
// true if object needed marking and false otherwise.
V8_INLINE bool MarkObjectWithoutPush(HeapObject* host, HeapObject* object) {
- if (collector_->non_atomic_marking_state()->WhiteToBlack(object)) {
+ if (collector_->atomic_marking_state()->WhiteToBlack(object)) {
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->AddRetainer(host, object);
}
@@ -1168,7 +1177,6 @@ class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
if (!(*p)->IsHeapObject()) return;
collector_->MarkRootObject(root, HeapObject::cast(*p));
- collector_->EmptyMarkingWorklist();
}
MarkCompactCollector* const collector_;
@@ -1370,39 +1378,6 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
};
-
-// Fill the marking stack with overflowed objects returned by the given
-// iterator. Stop when the marking stack is filled or the end of the space
-// is reached, whichever comes first.
-template <class T>
-void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
- // The caller should ensure that the marking stack is initially not full,
- // so that we don't waste effort pointlessly scanning for objects.
- DCHECK(!marking_worklist()->IsFull());
-
- Map* filler_map = heap()->one_pointer_filler_map();
- for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
- if ((object->map() != filler_map) &&
- non_atomic_marking_state()->GreyToBlack(object)) {
- PushBlack(object);
- if (marking_worklist()->IsFull()) return;
- }
- }
-}
-
-void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
- DCHECK(!marking_worklist()->IsFull());
- for (auto object_and_size : LiveObjectRange<kGreyObjects>(
- p, non_atomic_marking_state()->bitmap(p))) {
- HeapObject* const object = object_and_size.first;
- bool success = non_atomic_marking_state()->GreyToBlack(object);
- DCHECK(success);
- USE(success);
- PushBlack(object);
- if (marking_worklist()->IsFull()) return;
- }
-}
-
class RecordMigratedSlotVisitor : public ObjectVisitor {
public:
explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
@@ -1811,23 +1786,6 @@ class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
Heap* heap_;
};
-void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
- for (Page* p : *space) {
- DiscoverGreyObjectsOnPage(p);
- if (marking_worklist()->IsFull()) return;
- }
-}
-
-
-void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
- NewSpace* space = heap()->new_space();
- for (Page* page : PageRange(space->bottom(), space->top())) {
- DiscoverGreyObjectsOnPage(page);
- if (marking_worklist()->IsFull()) return;
- }
-}
-
-
bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
Object* o = *p;
if (!o->IsHeapObject()) return false;
@@ -1842,10 +1800,9 @@ void MarkCompactCollector::MarkStringTable(
ObjectVisitor* custom_root_body_visitor) {
StringTable* string_table = heap()->string_table();
// Mark the string table itself.
- if (non_atomic_marking_state()->WhiteToBlack(string_table)) {
+ if (atomic_marking_state()->WhiteToBlack(string_table)) {
// Explicitly mark the prefix.
string_table->IteratePrefix(custom_root_body_visitor);
- ProcessMarkingWorklist();
}
}
@@ -1858,78 +1815,29 @@ void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
// Custom marking for string table and top optimized frame.
MarkStringTable(custom_root_body_visitor);
ProcessTopOptimizedFrame(custom_root_body_visitor);
-
- // There may be overflowed objects in the heap. Visit them now.
- while (marking_worklist()->overflowed()) {
- RefillMarkingWorklist();
- EmptyMarkingWorklist();
- }
}
-// Mark all objects reachable from the objects on the marking stack.
-// Before: the marking stack contains zero or more heap object pointers.
-// After: the marking stack is empty, and all objects reachable from the
-// marking stack have been marked, or are overflowed in the heap.
-void MarkCompactCollector::EmptyMarkingWorklist() {
+void MarkCompactCollector::ProcessMarkingWorklist() {
HeapObject* object;
MarkCompactMarkingVisitor visitor(this);
while ((object = marking_worklist()->Pop()) != nullptr) {
DCHECK(!object->IsFiller());
DCHECK(object->IsHeapObject());
DCHECK(heap()->Contains(object));
- DCHECK(!(non_atomic_marking_state()->IsWhite(object)));
-
+ DCHECK(!(atomic_marking_state()->IsWhite(object)));
+ atomic_marking_state()->GreyToBlack(object);
Map* map = object->map();
MarkObject(object, map);
visitor.Visit(map, object);
}
- DCHECK(marking_worklist()->IsEmpty());
-}
-
-
-// Sweep the heap for overflowed objects, clear their overflow bits, and
-// push them on the marking stack. Stop early if the marking stack fills
-// before sweeping completes. If sweeping completes, there are no remaining
-// overflowed objects in the heap so the overflow flag on the markings stack
-// is cleared.
-void MarkCompactCollector::RefillMarkingWorklist() {
- isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow);
- DCHECK(marking_worklist()->overflowed());
-
- DiscoverGreyObjectsInNewSpace();
- if (marking_worklist()->IsFull()) return;
-
- DiscoverGreyObjectsInSpace(heap()->old_space());
- if (marking_worklist()->IsFull()) return;
- DiscoverGreyObjectsInSpace(heap()->code_space());
- if (marking_worklist()->IsFull()) return;
- DiscoverGreyObjectsInSpace(heap()->map_space());
- if (marking_worklist()->IsFull()) return;
- LargeObjectIterator lo_it(heap()->lo_space());
- DiscoverGreyObjectsWithIterator(&lo_it);
- if (marking_worklist()->IsFull()) return;
-
- marking_worklist()->ClearOverflowed();
-}
-
-// Mark all objects reachable (transitively) from objects on the marking
-// stack. Before: the marking stack contains zero or more heap object
-// pointers. After: the marking stack is empty and there are no overflowed
-// objects in the heap.
-void MarkCompactCollector::ProcessMarkingWorklist() {
- EmptyMarkingWorklist();
- while (marking_worklist()->overflowed()) {
- RefillMarkingWorklist();
- EmptyMarkingWorklist();
- }
- DCHECK(marking_worklist()->IsEmpty());
+ DCHECK(marking_worklist()->IsBailoutEmpty());
}
// Mark all objects reachable (transitively) from objects on the marking
// stack including references only considered in the atomic marking pause.
void MarkCompactCollector::ProcessEphemeralMarking(
bool only_process_harmony_weak_collections) {
- DCHECK(marking_worklist()->IsEmpty() && !marking_worklist()->overflowed());
+ DCHECK(marking_worklist()->IsEmpty());
bool work_to_do = true;
while (work_to_do) {
if (!only_process_harmony_weak_collections) {
@@ -1959,7 +1867,7 @@ void MarkCompactCollector::ProcessEphemeralMarking(
void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
!it.done(); it.Advance()) {
- if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
+ if (it.frame()->type() == StackFrame::INTERPRETED) {
return;
}
if (it.frame()->type() == StackFrame::OPTIMIZED) {
@@ -1967,7 +1875,6 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
if (!code->CanDeoptAt(it.frame()->pc())) {
Code::BodyDescriptor::IterateBody(code, visitor);
}
- ProcessMarkingWorklist();
return;
}
}
@@ -2102,7 +2009,7 @@ class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
if (marking_state_->WhiteToGrey(object)) {
collector_->main_marking_visitor()->Visit(object);
- collector_->EmptyMarkingWorklist();
+ collector_->ProcessMarkingWorklist();
}
}
@@ -2293,12 +2200,12 @@ class GlobalHandlesMarkingItem : public MarkingItem {
: task_(task) {}
void VisitRootPointer(Root root, Object** p) override {
- DCHECK(Root::kGlobalHandles == root);
+ DCHECK_EQ(Root::kGlobalHandles, root);
task_->MarkObject(*p);
}
void VisitRootPointers(Root root, Object** start, Object** end) override {
- DCHECK(Root::kGlobalHandles == root);
+ DCHECK_EQ(Root::kGlobalHandles, root);
for (Object** p = start; p < end; p++) {
task_->MarkObject(*p);
}
@@ -2419,10 +2326,6 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
}
void MinorMarkCompactCollector::ProcessMarkingWorklist() {
- EmptyMarkingWorklist();
-}
-
-void MinorMarkCompactCollector::EmptyMarkingWorklist() {
MarkingWorklist::View marking_worklist(worklist(), kMainMarker);
HeapObject* object = nullptr;
while (marking_worklist.Pop(&object)) {
@@ -2488,7 +2391,7 @@ void MinorMarkCompactCollector::MakeIterable(
// remove here.
MarkCompactCollector* full_collector = heap()->mark_compact_collector();
Address free_start = p->area_start();
- DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
+ DCHECK_EQ(0, reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize));
for (auto object_and_size :
LiveObjectRange<kGreyObjects>(p, marking_state()->bitmap(p))) {
@@ -2563,6 +2466,8 @@ void MinorMarkCompactCollector::EvacuatePrologue() {
void MinorMarkCompactCollector::EvacuateEpilogue() {
heap()->new_space()->set_age_mark(heap()->new_space()->top());
+ // Give pages that are queued to be freed back to the OS.
+ heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
void MinorMarkCompactCollector::Evacuate() {
@@ -2588,9 +2493,6 @@ void MinorMarkCompactCollector::Evacuate() {
}
}
- // Give pages that are queued to be freed back to the OS.
- heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
-
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_CLEAN_UP);
for (Page* p : new_space_evacuation_pages_) {
@@ -2633,8 +2535,6 @@ void MarkCompactCollector::MarkLiveObjects() {
state_ = MARK_LIVE_OBJECTS;
#endif
- marking_worklist()->StartUsing();
-
heap_->local_embedder_heap_tracer()->EnterFinalPause();
RootMarkingVisitor root_visitor(this);
@@ -2646,8 +2546,21 @@ void MarkCompactCollector::MarkLiveObjects() {
}
{
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
+ if (FLAG_concurrent_marking) {
+ heap_->concurrent_marking()->RescheduleTasksIfNeeded();
+ }
+ ProcessMarkingWorklist();
+
+ FinishConcurrentMarking();
+ ProcessMarkingWorklist();
+ }
+
+ {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
+ DCHECK(marking_worklist()->IsEmpty());
+
// The objects reachable from the roots are marked, yet unreachable
// objects are unmarked. Mark objects reachable due to host
// application specific logic or through Harmony weak maps.
@@ -2655,6 +2568,7 @@ void MarkCompactCollector::MarkLiveObjects() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL);
ProcessEphemeralMarking(false);
+ DCHECK(marking_worklist()->IsEmpty());
}
// The objects reachable from the roots, weak maps or object groups
@@ -2671,12 +2585,12 @@ void MarkCompactCollector::MarkLiveObjects() {
&IsUnmarkedHeapObject);
ProcessMarkingWorklist();
}
- // Then we mark the objects.
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
- heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
+ heap()->isolate()->global_handles()->IterateWeakRootsForFinalizers(
+ &root_visitor);
ProcessMarkingWorklist();
}
@@ -2692,8 +2606,18 @@ void MarkCompactCollector::MarkLiveObjects() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_EPILOGUE);
heap()->local_embedder_heap_tracer()->TraceEpilogue();
}
+ DCHECK(marking_worklist()->IsEmpty());
+ }
+
+ {
+ heap()->isolate()->global_handles()->IterateWeakRootsForPhantomHandles(
+ &IsUnmarkedHeapObject);
}
}
+
+ if (was_marked_incrementally_) {
+ heap()->incremental_marking()->Deactivate();
+ }
}
@@ -2876,12 +2800,8 @@ bool MarkCompactCollector::CompactTransitionArray(
RecordSlot(transitions, key_slot, key);
Object* raw_target = transitions->GetRawTarget(i);
transitions->SetTarget(transition_index, raw_target);
- // Maps are not compacted, but for cached handlers the target slot
- // must be recorded.
- if (!raw_target->IsMap()) {
- Object** target_slot = transitions->GetTargetSlot(transition_index);
- RecordSlot(transitions, target_slot, raw_target);
- }
+ Object** target_slot = transitions->GetTargetSlot(transition_index);
+ RecordSlot(transitions, target_slot, raw_target);
}
transition_index++;
}
@@ -2920,7 +2840,7 @@ void MarkCompactCollector::TrimDescriptorArray(Map* map,
to_trim * DescriptorArray::kEntrySize);
descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
- if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
+ TrimEnumCache(map, descriptors);
descriptors->Sort();
if (FLAG_unbox_double_fields) {
@@ -2942,16 +2862,17 @@ void MarkCompactCollector::TrimEnumCache(Map* map,
live_enum = map->NumberOfEnumerableProperties();
}
if (live_enum == 0) return descriptors->ClearEnumCache();
+ EnumCache* enum_cache = descriptors->GetEnumCache();
- FixedArray* enum_cache = descriptors->GetEnumCache();
-
- int to_trim = enum_cache->length() - live_enum;
+ FixedArray* keys = enum_cache->keys();
+ int to_trim = keys->length() - live_enum;
if (to_trim <= 0) return;
- heap_->RightTrimFixedArray(descriptors->GetEnumCache(), to_trim);
+ heap_->RightTrimFixedArray(keys, to_trim);
- if (!descriptors->HasEnumIndicesCache()) return;
- FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
- heap_->RightTrimFixedArray(enum_indices_cache, to_trim);
+ FixedArray* indices = enum_cache->indices();
+ to_trim = indices->length() - live_enum;
+ if (to_trim <= 0) return;
+ heap_->RightTrimFixedArray(indices, to_trim);
}
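With keys and indices now held separately by the EnumCache, each array is right-trimmed on its own; roughly:

    #include <vector>

    void TrimEnumCacheArrays(std::vector<int>* keys, std::vector<int>* indices,
                             size_t live_enum) {
      if (keys->size() > live_enum) keys->resize(live_enum);        // trim keys
      if (indices->size() > live_enum) indices->resize(live_enum);  // trim indices
    }

(A sketch only; the real code right-trims FixedArrays in place via RightTrimFixedArray.)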
@@ -3194,8 +3115,12 @@ void MarkCompactCollector::EvacuatePrologue() {
void MarkCompactCollector::EvacuateEpilogue() {
// New space.
heap()->new_space()->set_age_mark(heap()->new_space()->top());
+ // Deallocate unmarked large objects.
+ heap()->lo_space()->FreeUnmarkedObjects();
// Old space. Deallocate evacuated candidate pages.
ReleaseEvacuationCandidates();
+ // Give pages that are queued to be freed back to the OS.
+ heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
#ifdef DEBUG
// Old-to-old slot sets must be empty after evacuation.
for (Page* p : *heap()->old_space()) {
@@ -3645,7 +3570,7 @@ int MarkCompactCollector::Sweeper::RawSweep(
ArrayBufferTracker::FreeDead(p, marking_state_);
Address free_start = p->area_start();
- DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
+ DCHECK_EQ(0, reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize));
// If we use the skip list for code space pages, we have to lock the skip
// list because it could be accessed concurrently by the runtime or the
@@ -3933,9 +3858,9 @@ class UpdatingItem : public ItemParallelJob::Item {
virtual void Process() = 0;
};
-class PointersUpatingTask : public ItemParallelJob::Task {
+class PointersUpdatingTask : public ItemParallelJob::Task {
public:
- explicit PointersUpatingTask(Isolate* isolate)
+ explicit PointersUpdatingTask(Isolate* isolate)
: ItemParallelJob::Task(isolate) {}
void RunInParallel() override {
@@ -4300,7 +4225,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
for (int i = 0; i < num_tasks; i++) {
- updating_job.AddTask(new PointersUpatingTask(isolate()));
+ updating_job.AddTask(new PointersUpdatingTask(isolate()));
}
updating_job.Run();
}
@@ -4322,7 +4247,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
remembered_set_pages, old_to_new_slots_);
if (num_tasks > 0) {
for (int i = 0; i < num_tasks; i++) {
- updating_job.AddTask(new PointersUpatingTask(isolate()));
+ updating_job.AddTask(new PointersUpdatingTask(isolate()));
}
updating_job.Run();
}
@@ -4370,7 +4295,7 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
remembered_set_pages, old_to_new_slots_);
const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
for (int i = 0; i < num_tasks; i++) {
- updating_job.AddTask(new PointersUpatingTask(isolate()));
+ updating_job.AddTask(new PointersUpdatingTask(isolate()));
}
{
@@ -4464,7 +4389,6 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
}
old_space_evacuation_pages_.clear();
compacting_ = false;
- heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity,
@@ -4633,9 +4557,6 @@ void MarkCompactCollector::StartSweepSpaces() {
}
sweeper().StartSweeping();
}
-
- // Deallocate unmarked large objects.
- heap_->lo_space()->FreeUnmarkedObjects();
}
} // namespace internal
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index a6b6ead8de..1784a32e16 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -9,7 +9,6 @@
#include <vector>
#include "src/heap/marking.h"
-#include "src/heap/sequential-marking-deque.h"
#include "src/heap/spaces.h"
#include "src/heap/worklist.h"
@@ -62,14 +61,6 @@ class MarkingStateBase {
return Marking::IsBlackOrGrey<access_mode>(MarkBitFrom(obj));
}
- V8_INLINE bool BlackToGrey(HeapObject* obj) {
- MemoryChunk* p = MemoryChunk::FromAddress(obj->address());
- MarkBit markbit = MarkBitFrom(p, obj->address());
- if (!Marking::BlackToGrey<access_mode>(markbit)) return false;
- static_cast<ConcreteState*>(this)->IncrementLiveBytes(p, -obj->Size());
- return true;
- }
-
V8_INLINE bool WhiteToGrey(HeapObject* obj) {
return Marking::WhiteToGrey<access_mode>(MarkBitFrom(obj));
}
@@ -274,8 +265,7 @@ class MarkCompactCollectorBase {
// Marking operations for objects reachable from roots.
virtual void MarkLiveObjects() = 0;
// Mark objects reachable (transitively) from objects in the marking
- // stack.
- virtual void EmptyMarkingWorklist() = 0;
+ // work list.
virtual void ProcessMarkingWorklist() = 0;
// Clear non-live references held in side data structures.
virtual void ClearNonLiveReferences() = 0;
@@ -401,7 +391,6 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
void MarkLiveObjects() override;
void MarkRootSetInParallel();
void ProcessMarkingWorklist() override;
- void EmptyMarkingWorklist() override;
void ClearNonLiveReferences() override;
void EvacuatePrologue() override;
@@ -487,6 +476,7 @@ struct WeakObjects {
// Collector for young and old generation.
class MarkCompactCollector final : public MarkCompactCollectorBase {
public:
+ using AtomicMarkingState = MajorAtomicMarkingState;
using NonAtomicMarkingState = MajorNonAtomicMarkingState;
static const int kMainThread = 0;
@@ -498,10 +488,16 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// The heap parameter is not used but needed to match the sequential case.
explicit MarkingWorklist(Heap* heap) {}
- bool Push(HeapObject* object) { return shared_.Push(kMainThread, object); }
+ void Push(HeapObject* object) {
+ bool success = shared_.Push(kMainThread, object);
+ USE(success);
+ DCHECK(success);
+ }
- bool PushBailout(HeapObject* object) {
- return bailout_.Push(kMainThread, object);
+ void PushBailout(HeapObject* object) {
+ bool success = bailout_.Push(kMainThread, object);
+ USE(success);
+ DCHECK(success);
}
HeapObject* Pop() {
@@ -510,25 +506,34 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
if (bailout_.Pop(kMainThread, &result)) return result;
#endif
if (shared_.Pop(kMainThread, &result)) return result;
+#ifdef V8_CONCURRENT_MARKING
+ // The expectation is that this work list is empty almost all the time
+ // and we can thus avoid the emptiness checks by putting it last.
+ if (on_hold_.Pop(kMainThread, &result)) return result;
+#endif
return nullptr;
}
void Clear() {
bailout_.Clear();
shared_.Clear();
+ on_hold_.Clear();
}
- bool IsFull() { return false; }
+ bool IsBailoutEmpty() { return bailout_.IsLocalEmpty(kMainThread); }
bool IsEmpty() {
return bailout_.IsLocalEmpty(kMainThread) &&
shared_.IsLocalEmpty(kMainThread) &&
- bailout_.IsGlobalPoolEmpty() && shared_.IsGlobalPoolEmpty();
+ on_hold_.IsLocalEmpty(kMainThread) &&
+ bailout_.IsGlobalPoolEmpty() && shared_.IsGlobalPoolEmpty() &&
+ on_hold_.IsGlobalPoolEmpty();
}
int Size() {
return static_cast<int>(bailout_.LocalSize(kMainThread) +
- shared_.LocalSize(kMainThread));
+ shared_.LocalSize(kMainThread) +
+ on_hold_.LocalSize(kMainThread));
}
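Pop() above establishes a fixed priority: bailout entries first, then the shared pool, then the rarely non-empty on-hold list. In outline (std::deque stand-ins for the segmented worklists):

    #include <deque>

    struct Obj;

    Obj* PopPrioritized(std::deque<Obj*>* bailout, std::deque<Obj*>* shared,
                        std::deque<Obj*>* on_hold) {
      for (std::deque<Obj*>* queue : {bailout, shared, on_hold}) {
        if (!queue->empty()) {
          Obj* result = queue->front();
          queue->pop_front();
          return result;
        }
      }
      return nullptr;  // all three lists are locally empty
    }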
// Calls the specified callback on each element of the deques and replaces
@@ -539,24 +544,17 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void Update(Callback callback) {
bailout_.Update(callback);
shared_.Update(callback);
+ on_hold_.Update(callback);
}
ConcurrentMarkingWorklist* shared() { return &shared_; }
ConcurrentMarkingWorklist* bailout() { return &bailout_; }
-
- // These empty functions are needed to match the interface
- // of the sequential marking deque.
- void SetUp() {}
- void TearDown() { Clear(); }
- void StartUsing() {}
- void StopUsing() {}
- void ClearOverflowed() {}
- void SetOverflowed() {}
- bool overflowed() const { return false; }
+ ConcurrentMarkingWorklist* on_hold() { return &on_hold_; }
void Print() {
PrintWorklist("shared", &shared_);
PrintWorklist("bailout", &bailout_);
+ PrintWorklist("on_hold", &on_hold_);
}
private:
@@ -586,6 +584,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
}
ConcurrentMarkingWorklist shared_;
ConcurrentMarkingWorklist bailout_;
+ ConcurrentMarkingWorklist on_hold_;
};
class RootMarkingVisitor;
@@ -672,6 +671,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
kClearMarkbits,
};
+ AtomicMarkingState* atomic_marking_state() { return &atomic_marking_state_; }
+
NonAtomicMarkingState* non_atomic_marking_state() {
return &non_atomic_marking_state_;
}
@@ -689,6 +690,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// choosing spaces to compact.
void Prepare();
+ void FinishConcurrentMarking();
+
bool StartCompaction();
void AbortCompaction();
@@ -748,6 +751,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif
+ void VerifyMarking();
#ifdef VERIFY_HEAP
void VerifyValidStoreAndSlotsBufferEntries();
void VerifyMarkbitsAreClean();
@@ -774,9 +778,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void MarkLiveObjects() override;
- // Pushes a black object onto the marking work list.
- V8_INLINE void PushBlack(HeapObject* obj);
-
// Marks the object black and adds it to the marking work list.
// This is for non-incremental marking only.
V8_INLINE void MarkObject(HeapObject* host, HeapObject* obj);
@@ -796,8 +797,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// the string table are weak.
void MarkStringTable(ObjectVisitor* visitor);
- void ProcessMarkingWorklist() override;
-
// Mark objects reachable (transitively) from objects in the marking stack
// or overflowed in the heap. This respects references only considered in
// the final atomic marking pause including the following:
@@ -814,22 +813,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Collects a list of dependent code from maps embedded in optimize code.
DependentCode* DependentCodeListFromNonLiveMaps();
- // This function empties the marking stack, but may leave overflowed objects
- // in the heap, in which case the marking stack's overflow flag will be set.
- void EmptyMarkingWorklist() override;
-
- // Refill the marking stack with overflowed objects from the heap. This
- // function either leaves the marking stack full or clears the overflow
- // flag on the marking stack.
- void RefillMarkingWorklist();
-
- // Helper methods for refilling the marking stack by discovering grey objects
- // on various pages of the heap. Used by {RefillMarkingWorklist} only.
- template <class T>
- void DiscoverGreyObjectsWithIterator(T* it);
- void DiscoverGreyObjectsOnPage(MemoryChunk* p);
- void DiscoverGreyObjectsInSpace(PagedSpace* space);
- void DiscoverGreyObjectsInNewSpace();
+ // Drains the main thread marking work list. Will mark all pending objects
+ // if no concurrent threads are running.
+ void ProcessMarkingWorklist() override;
// Callback function for telling whether the object *p is an unmarked
// heap object.
@@ -943,6 +929,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
Sweeper sweeper_;
+ AtomicMarkingState atomic_marking_state_;
NonAtomicMarkingState non_atomic_marking_state_;
friend class FullEvacuator;
diff --git a/deps/v8/src/heap/marking.h b/deps/v8/src/heap/marking.h
index c76302218f..9b1fe61236 100644
--- a/deps/v8/src/heap/marking.h
+++ b/deps/v8/src/heap/marking.h
@@ -267,13 +267,6 @@ class Marking : public AllStatic {
}
template <AccessMode mode = AccessMode::NON_ATOMIC>
- INLINE(static bool BlackToGrey(MarkBit markbit)) {
- STATIC_ASSERT(mode == AccessMode::NON_ATOMIC);
- DCHECK(IsBlack(markbit));
- return markbit.Next().Clear<mode>();
- }
-
- template <AccessMode mode = AccessMode::NON_ATOMIC>
INLINE(static bool WhiteToGrey(MarkBit markbit)) {
return markbit.Set<mode>();
}
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index 0e1449bb92..a269873024 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -201,7 +201,7 @@ MemoryReducer::State MemoryReducer::Step(const State& state,
void MemoryReducer::ScheduleTimer(double time_ms, double delay_ms) {
- DCHECK(delay_ms > 0);
+ DCHECK_LT(0, delay_ms);
// Leave some room for precision error in task scheduler.
const double kSlackMs = 100;
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap()->isolate());
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index 84d1f61859..0ffe75c84a 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -9,7 +9,6 @@
#include "src/counters.h"
#include "src/heap/heap-inl.h"
#include "src/isolate.h"
-#include "src/objects/code-cache-inl.h"
#include "src/objects/compilation-cache-inl.h"
#include "src/utils.h"
@@ -432,24 +431,10 @@ void ObjectStatsCollector::RecordMapDetails(Map* map_obj) {
if (map_obj->owns_descriptors() && array != heap_->empty_descriptor_array() &&
SameLiveness(map_obj, array)) {
RecordFixedArrayHelper(map_obj, array, DESCRIPTOR_ARRAY_SUB_TYPE, 0);
- if (array->HasEnumCache()) {
- RecordFixedArrayHelper(array, array->GetEnumCache(), ENUM_CACHE_SUB_TYPE,
- 0);
- }
- if (array->HasEnumIndicesCache()) {
- RecordFixedArrayHelper(array, array->GetEnumIndicesCache(),
- ENUM_INDICES_CACHE_SUB_TYPE, 0);
- }
- }
-
- FixedArray* code_cache = map_obj->code_cache();
- if (code_cache->length() > 0) {
- if (code_cache->IsCodeCacheHashTable()) {
- RecordHashTableHelper(map_obj, CodeCacheHashTable::cast(code_cache),
- MAP_CODE_CACHE_SUB_TYPE);
- } else {
- RecordFixedArrayHelper(map_obj, code_cache, MAP_CODE_CACHE_SUB_TYPE, 0);
- }
+ EnumCache* enum_cache = array->GetEnumCache();
+ RecordFixedArrayHelper(array, enum_cache->keys(), ENUM_CACHE_SUB_TYPE, 0);
+ RecordFixedArrayHelper(array, enum_cache->indices(),
+ ENUM_INDICES_CACHE_SUB_TYPE, 0);
}
for (DependentCode* cur_dependent_code = map_obj->dependent_code();
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index 64532e74bb..dbd1e3b370 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -194,23 +194,10 @@ template <typename ConcreteVisitor>
int MarkingVisitor<ConcreteVisitor>::VisitTransitionArray(
Map* map, TransitionArray* array) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- // Visit strong references.
- if (array->HasPrototypeTransitions()) {
- visitor->VisitPointer(array, array->GetPrototypeTransitionsSlot());
- }
- int num_transitions = array->number_of_entries();
- for (int i = 0; i < num_transitions; ++i) {
- visitor->VisitPointer(array, array->GetKeySlot(i));
- // A TransitionArray can hold maps or (transitioning StoreIC) handlers.
- // Maps have custom weak handling; handlers (which in turn weakly point
- // to maps) are marked strongly for now, and will be cleared during
- // compaction when the maps they refer to are dead.
- if (!array->GetRawTarget(i)->IsMap()) {
- visitor->VisitPointer(array, array->GetTargetSlot(i));
- }
- }
+ int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
+ TransitionArray::BodyDescriptor::IterateBody(array, size, visitor);
collector_->AddTransitionArray(array);
- return TransitionArray::BodyDescriptor::SizeOf(map, array);
+ return size;
}
template <typename ConcreteVisitor>
@@ -329,11 +316,6 @@ template <typename ConcreteVisitor>
int MarkingVisitor<ConcreteVisitor>::VisitMap(Map* map, Map* object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- // Clears the cache of ICs related to this map.
- if (FLAG_cleanup_code_caches_at_gc) {
- object->ClearCodeCache(heap_);
- }
-
// When map collection is enabled we have to mark through map's transitions
// and back pointers in a special way to make these links weak.
if (object->CanTransition()) {
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index c7fb313ff6..93bbd0f524 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -83,25 +83,6 @@ static void ClearWeakList(Heap* heap, Object* list) {
}
}
-
-template <>
-struct WeakListVisitor<JSFunction> {
- static void SetWeakNext(JSFunction* function, Object* next) {
- function->set_next_function_link(next, UPDATE_WEAK_WRITE_BARRIER);
- }
-
- static Object* WeakNext(JSFunction* function) {
- return function->next_function_link();
- }
-
- static int WeakNextOffset() { return JSFunction::kNextFunctionLinkOffset; }
-
- static void VisitLiveObject(Heap*, JSFunction*, WeakObjectRetainer*) {}
-
- static void VisitPhantomObject(Heap*, JSFunction*) {}
-};
-
-
template <>
struct WeakListVisitor<Code> {
static void SetWeakNext(Code* code, Object* next) {
@@ -134,10 +115,6 @@ struct WeakListVisitor<Context> {
static void VisitLiveObject(Heap* heap, Context* context,
WeakObjectRetainer* retainer) {
- // Process the three weak lists linked off the context.
- DoWeakList<JSFunction>(heap, context, retainer,
- Context::OPTIMIZED_FUNCTIONS_LIST);
-
if (heap->gc_state() == Heap::MARK_COMPACT) {
// Record the slots of the weak entries in the native context.
for (int idx = Context::FIRST_WEAK_SLOT;
@@ -146,8 +123,7 @@ struct WeakListVisitor<Context> {
MarkCompactCollector::RecordSlot(context, slot, *slot);
}
// Code objects are always allocated in Code space, we do not have to
- // visit
- // them during scavenges.
+ // visit them during scavenges.
DoWeakList<Code>(heap, context, retainer, Context::OPTIMIZED_CODE_LIST);
DoWeakList<Code>(heap, context, retainer, Context::DEOPTIMIZED_CODE_LIST);
}
@@ -171,8 +147,6 @@ struct WeakListVisitor<Context> {
}
static void VisitPhantomObject(Heap* heap, Context* context) {
- ClearWeakList<JSFunction>(heap,
- context->get(Context::OPTIMIZED_FUNCTIONS_LIST));
ClearWeakList<Code>(heap, context->get(Context::OPTIMIZED_CODE_LIST));
ClearWeakList<Code>(heap, context->get(Context::DEOPTIMIZED_CODE_LIST));
}
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index b0befeb2f0..01708e7655 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -15,8 +15,11 @@
namespace v8 {
namespace internal {
+class BigInt;
+
#define TYPED_VISITOR_ID_LIST(V) \
V(AllocationSite) \
+ V(BigInt) \
V(ByteArray) \
V(BytecodeArray) \
V(Cell) \
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index dd74b47945..1ea2f3493c 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -11,10 +11,8 @@
namespace v8 {
namespace internal {
-namespace {
-
// White list for objects that for sure only contain data.
-bool ContainsOnlyData(VisitorId visitor_id) {
+bool Scavenger::ContainsOnlyData(VisitorId visitor_id) {
switch (visitor_id) {
case kVisitSeqOneByteString:
return true;
@@ -32,8 +30,6 @@ bool ContainsOnlyData(VisitorId visitor_id) {
return false;
}
-} // namespace
-
void Scavenger::PageMemoryFence(Object* object) {
#ifdef THREAD_SANITIZER
// Perform a dummy acquire load to tell TSAN that there is no data race
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index ac55d011a0..fc70f60483 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -4,6 +4,7 @@
#include "src/heap/scavenger.h"
+#include "src/heap/barrier.h"
#include "src/heap/heap-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/objects-visiting-inl.h"
@@ -72,26 +73,20 @@ Scavenger::Scavenger(Heap* heap, bool is_logging, CopiedList* copied_list,
is_compacting_(heap->incremental_marking()->IsCompacting()) {}
void Scavenger::IterateAndScavengePromotedObject(HeapObject* target, int size) {
- // We are not collecting slots on new space objects during mutation
- // thus we have to scan for pointers to evacuation candidates when we
- // promote objects. But we should not record any slots in non-black
- // objects. Grey object's slots would be rescanned.
- // White object might not survive until the end of collection
- // it would be a violation of the invariant to record it's slots.
+  // We are not collecting slots on new space objects during mutation, thus
+  // we have to scan for pointers to evacuation candidates when we promote
+  // objects. But we should not record any slots in non-black objects. Grey
+  // objects' slots would be rescanned. A white object might not survive
+  // until the end of the collection, so it would be a violation of the
+  // invariant to record its slots.
const bool record_slots =
is_compacting_ &&
heap()->incremental_marking()->atomic_marking_state()->IsBlack(target);
IterateAndScavengePromotedObjectsVisitor visitor(heap(), this, record_slots);
- if (target->IsJSFunction()) {
- // JSFunctions reachable through kNextFunctionLinkOffset are weak. Slots for
- // this links are recorded during processing of weak lists.
- JSFunction::BodyDescriptorWeak::IterateBody(target, size, &visitor);
- } else {
- target->IterateBody(target->map()->instance_type(), size, &visitor);
- }
+ target->IterateBody(target->map()->instance_type(), size, &visitor);
}
-void Scavenger::Process(Barrier* barrier) {
+void Scavenger::Process(OneshotBarrier* barrier) {
// Threshold when to switch processing the promotion list to avoid
// allocating too much backing store in the worklist.
const int kProcessPromotionListThreshold = kPromotionListSegmentSize / 2;
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index 4f80a25357..1437092874 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -14,54 +14,16 @@
namespace v8 {
namespace internal {
-static const int kCopiedListSegmentSize = 256;
-static const int kPromotionListSegmentSize = 256;
-
-using AddressRange = std::pair<Address, Address>;
-using ObjectAndSize = std::pair<HeapObject*, int>;
-using CopiedList = Worklist<ObjectAndSize, kCopiedListSegmentSize>;
-using PromotionList = Worklist<ObjectAndSize, kPromotionListSegmentSize>;
+class OneshotBarrier;
class Scavenger {
public:
- class Barrier {
- public:
- Barrier() : tasks_(0), waiting_(0), done_(false) {}
-
- void Start() {
- base::LockGuard<base::Mutex> guard(&mutex_);
- tasks_++;
- }
-
- void NotifyAll() {
- base::LockGuard<base::Mutex> guard(&mutex_);
- if (waiting_ > 0) condition_.NotifyAll();
- }
-
- void Wait() {
- base::LockGuard<base::Mutex> guard(&mutex_);
- waiting_++;
- if (waiting_ == tasks_) {
- done_ = true;
- condition_.NotifyAll();
- } else {
- // Spurious wakeup is ok here.
- condition_.Wait(&mutex_);
- }
- waiting_--;
- }
-
- void Reset() { done_ = false; }
-
- bool Done() { return done_; }
-
- private:
- base::ConditionVariable condition_;
- base::Mutex mutex_;
- int tasks_;
- int waiting_;
- bool done_;
- };
+ static const int kCopiedListSegmentSize = 256;
+ static const int kPromotionListSegmentSize = 256;
+
+ using ObjectAndSize = std::pair<HeapObject*, int>;
+ using CopiedList = Worklist<ObjectAndSize, kCopiedListSegmentSize>;
+ using PromotionList = Worklist<ObjectAndSize, kPromotionListSegmentSize>;
Scavenger(Heap* heap, bool is_logging, CopiedList* copied_list,
PromotionList* promotion_list, int task_id);
@@ -77,7 +39,7 @@ class Scavenger {
// Processes remaining work (=objects) after single objects have been
// manually scavenged using ScavengeObject or CheckAndScavengeObject.
- void Process(Barrier* barrier = nullptr);
+ void Process(OneshotBarrier* barrier = nullptr);
// Finalize the Scavenger. Needs to be called from the main thread.
void Finalize();
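The deleted inner Barrier class moves to src/heap/barrier.h as OneshotBarrier. Its semantics, reconstructed as a compilable toy from the removed code: workers register with Start(), Wait() releases everyone once the last registered task arrives, and Done() then stays true until Reset().

    #include <condition_variable>
    #include <mutex>

    class ToyOneshotBarrier {
     public:
      void Start() { std::lock_guard<std::mutex> guard(mutex_); tasks_++; }
      void NotifyAll() { std::lock_guard<std::mutex> guard(mutex_); cv_.notify_all(); }
      void Wait() {
        std::unique_lock<std::mutex> lock(mutex_);
        if (++waiting_ == tasks_) {
          done_ = true;
          cv_.notify_all();
        } else {
          cv_.wait(lock);  // spurious wakeups were tolerated by the original too
        }
        waiting_--;
      }
      bool Done() { std::lock_guard<std::mutex> guard(mutex_); return done_; }
      void Reset() { std::lock_guard<std::mutex> guard(mutex_); done_ = false; }

     private:
      std::mutex mutex_;
      std::condition_variable cv_;
      int tasks_ = 0;
      int waiting_ = 0;
      bool done_ = false;
    };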
@@ -130,6 +92,8 @@ class Scavenger {
void RecordCopiedObject(HeapObject* obj);
+ static inline bool ContainsOnlyData(VisitorId visitor_id);
+
Heap* const heap_;
PromotionList::View promotion_list_;
CopiedList::View copied_list_;
diff --git a/deps/v8/src/heap/sequential-marking-deque.cc b/deps/v8/src/heap/sequential-marking-deque.cc
deleted file mode 100644
index 4f3edb0e69..0000000000
--- a/deps/v8/src/heap/sequential-marking-deque.cc
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/heap/sequential-marking-deque.h"
-
-#include "src/allocation.h"
-#include "src/base/bits.h"
-#include "src/heap/heap-inl.h"
-#include "src/heap/heap.h"
-
-namespace v8 {
-namespace internal {
-
-void SequentialMarkingDeque::SetUp() {
- base::VirtualMemory reservation;
- if (!AllocVirtualMemory(kMaxSize, heap_->GetRandomMmapAddr(), &reservation)) {
- V8::FatalProcessOutOfMemory("SequentialMarkingDeque::SetUp");
- }
- backing_store_committed_size_ = 0;
- backing_store_.TakeControl(&reservation);
-}
-
-void SequentialMarkingDeque::TearDown() {
- if (backing_store_.IsReserved()) backing_store_.Release();
-}
-
-void SequentialMarkingDeque::StartUsing() {
- base::LockGuard<base::Mutex> guard(&mutex_);
- if (in_use_) {
- // This can happen in mark-compact GC if the incremental marker already
- // started using the marking deque.
- return;
- }
- in_use_ = true;
- EnsureCommitted();
- array_ = reinterpret_cast<HeapObject**>(backing_store_.address());
- size_t size = FLAG_force_marking_deque_overflows
- ? 64 * kPointerSize
- : backing_store_committed_size_;
- DCHECK(base::bits::IsPowerOfTwo(static_cast<uint32_t>(size / kPointerSize)));
- mask_ = static_cast<int>((size / kPointerSize) - 1);
- top_ = bottom_ = 0;
- overflowed_ = false;
-}
-
-void SequentialMarkingDeque::StopUsing() {
- base::LockGuard<base::Mutex> guard(&mutex_);
- if (!in_use_) return;
- DCHECK(IsEmpty());
- DCHECK(!overflowed_);
- top_ = bottom_ = mask_ = 0;
- in_use_ = false;
- if (FLAG_concurrent_sweeping) {
- StartUncommitTask();
- } else {
- Uncommit();
- }
-}
-
-void SequentialMarkingDeque::Clear() {
- DCHECK(in_use_);
- top_ = bottom_ = 0;
- overflowed_ = false;
-}
-
-void SequentialMarkingDeque::Uncommit() {
- DCHECK(!in_use_);
- bool success = backing_store_.Uncommit(backing_store_.address(),
- backing_store_committed_size_);
- backing_store_committed_size_ = 0;
- CHECK(success);
-}
-
-void SequentialMarkingDeque::EnsureCommitted() {
- DCHECK(in_use_);
- if (backing_store_committed_size_ > 0) return;
-
- for (size_t size = kMaxSize; size >= kMinSize; size /= 2) {
- if (backing_store_.Commit(backing_store_.address(), size, false)) {
- backing_store_committed_size_ = size;
- break;
- }
- }
- if (backing_store_committed_size_ == 0) {
- V8::FatalProcessOutOfMemory("SequentialMarkingDeque::EnsureCommitted");
- }
-}
-
-void SequentialMarkingDeque::StartUncommitTask() {
- if (!uncommit_task_pending_) {
- uncommit_task_pending_ = true;
- UncommitTask* task = new UncommitTask(heap_->isolate(), this);
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/heap/sequential-marking-deque.h b/deps/v8/src/heap/sequential-marking-deque.h
deleted file mode 100644
index 670a12ca0e..0000000000
--- a/deps/v8/src/heap/sequential-marking-deque.h
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_SEQUENTIAL_MARKING_DEQUE_
-#define V8_HEAP_SEQUENTIAL_MARKING_DEQUE_
-
-#include <deque>
-
-#include "src/base/platform/mutex.h"
-#include "src/base/platform/platform.h"
-#include "src/cancelable-task.h"
-
-namespace v8 {
-namespace internal {
-
-class Heap;
-class Isolate;
-class HeapObject;
-
-// ----------------------------------------------------------------------------
-// Marking deque for tracing live objects.
-class SequentialMarkingDeque {
- public:
- explicit SequentialMarkingDeque(Heap* heap)
- : backing_store_committed_size_(0),
- array_(nullptr),
- top_(0),
- bottom_(0),
- mask_(0),
- overflowed_(false),
- in_use_(false),
- uncommit_task_pending_(false),
- heap_(heap) {}
-
- void SetUp();
- void TearDown();
-
- // Ensures that the marking deque is committed and will stay committed until
- // StopUsing() is called.
- void StartUsing();
- void StopUsing();
- void Clear();
-
- inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
-
- inline bool IsEmpty() { return top_ == bottom_; }
-
- int Size() {
- // Return (top - bottom + capacity) % capacity, where capacity = mask + 1.
- return (top_ - bottom_ + mask_ + 1) & mask_;
- }
-
- bool overflowed() const { return overflowed_; }
-
- void ClearOverflowed() { overflowed_ = false; }
-
- void SetOverflowed() { overflowed_ = true; }
-
- // Push the object on the marking stack if there is room, otherwise mark the
- // deque as overflowed and wait for a rescan of the heap.
- INLINE(bool Push(HeapObject* object)) {
- if (IsFull()) {
- SetOverflowed();
- return false;
- } else {
- array_[top_] = object;
- top_ = ((top_ + 1) & mask_);
- return true;
- }
- }
-
- INLINE(HeapObject* Pop()) {
- if (IsEmpty()) return nullptr;
- top_ = ((top_ - 1) & mask_);
- HeapObject* object = array_[top_];
- return object;
- }
-
- // Calls the specified callback on each element of the deque and replaces
- // the element with the result of the callback. If the callback returns
- // nullptr then the element is removed from the deque.
- // The callback must accept HeapObject* and return HeapObject*.
- template <typename Callback>
- void Update(Callback callback) {
- int i = bottom_;
- int new_top = bottom_;
- while (i != top_) {
- if (callback(array_[i], &array_[new_top])) {
- new_top = (new_top + 1) & mask_;
- }
- i = (i + 1) & mask_;
- }
- top_ = new_top;
- }
-
- private:
- // This task uncommits the marking_deque backing store if
- // marking_deque->in_use_ is false.
- class UncommitTask : public CancelableTask {
- public:
- explicit UncommitTask(Isolate* isolate,
- SequentialMarkingDeque* marking_deque)
- : CancelableTask(isolate), marking_deque_(marking_deque) {}
-
- private:
- // CancelableTask override.
- void RunInternal() override {
- base::LockGuard<base::Mutex> guard(&marking_deque_->mutex_);
- if (!marking_deque_->in_use_) {
- marking_deque_->Uncommit();
- }
- marking_deque_->uncommit_task_pending_ = false;
- }
-
- SequentialMarkingDeque* marking_deque_;
- DISALLOW_COPY_AND_ASSIGN(UncommitTask);
- };
-
- static const size_t kMaxSize = 4 * MB;
- static const size_t kMinSize = 256 * KB;
-
- // Must be called with mutex lock.
- void EnsureCommitted();
-
- // Must be called with mutex lock.
- void Uncommit();
-
- // Must be called with mutex lock.
- void StartUncommitTask();
-
- base::Mutex mutex_;
-
- base::VirtualMemory backing_store_;
- size_t backing_store_committed_size_;
- HeapObject** array_;
- // array_[(top_ - 1) & mask_] is the top element in the deque. The deque is
- // empty when top_ == bottom_. It is full when top_ + 1 == bottom_
- // (mod mask_ + 1).
- int top_;
- int bottom_;
- int mask_;
- bool overflowed_;
- // in_use_ == true after taking mutex lock implies that the marking deque is
- // committed and will stay committed at least until in_use_ == false.
- bool in_use_;
- bool uncommit_task_pending_;
- Heap* heap_;
-
- DISALLOW_COPY_AND_ASSIGN(SequentialMarkingDeque);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_HEAP_SEQUENTIAL_MARKING_DEQUE_
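The deleted deque is a power-of-two ring buffer; as a reference for the index arithmetic above, here is a self-contained sketch (illustrative, not V8 code) of the top_/bottom_/mask_ scheme:

// Minimal power-of-two ring buffer mirroring the deleted deque's index
// arithmetic: indices wrap via `& mask_`; empty when top_ == bottom_,
// full when ((top_ + 1) & mask_) == bottom_.
#include <cstddef>
#include <vector>

template <typename T>
class RingDequeSketch {
 public:
  explicit RingDequeSketch(size_t capacity)  // capacity: a power of two
      : array_(capacity), mask_(capacity - 1) {}

  bool IsEmpty() const { return top_ == bottom_; }
  bool IsFull() const { return ((top_ + 1) & mask_) == bottom_; }

  bool Push(T value) {  // analogous to the overflow-checking Push() above
    if (IsFull()) return false;
    array_[top_] = value;
    top_ = (top_ + 1) & mask_;
    return true;
  }

  bool Pop(T* out) {  // LIFO from the top, like the marking deque's Pop()
    if (IsEmpty()) return false;
    top_ = (top_ - 1) & mask_;  // unsigned wrap-around is masked off
    *out = array_[top_];
    return true;
  }

  size_t Size() const {  // (top - bottom + capacity) % capacity
    return (top_ - bottom_ + mask_ + 1) & mask_;
  }

 private:
  std::vector<T> array_;
  size_t mask_;
  size_t top_ = 0;
  size_t bottom_ = 0;
};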
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
new file mode 100644
index 0000000000..592fb53a7f
--- /dev/null
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -0,0 +1,629 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/setup-isolate.h"
+
+#include "src/ast/context-slot-cache.h"
+#include "src/compilation-cache.h"
+#include "src/contexts.h"
+#include "src/factory.h"
+#include "src/heap-symbols.h"
+#include "src/heap/heap.h"
+#include "src/isolate.h"
+#include "src/layout-descriptor.h"
+#include "src/lookup-cache.h"
+#include "src/objects-inl.h"
+#include "src/objects/arguments.h"
+#include "src/objects/debug-objects.h"
+#include "src/objects/descriptor-array.h"
+#include "src/objects/dictionary.h"
+#include "src/objects/map.h"
+#include "src/objects/module.h"
+#include "src/objects/script.h"
+#include "src/objects/shared-function-info.h"
+#include "src/objects/string.h"
+#include "src/regexp/jsregexp.h"
+
+namespace v8 {
+namespace internal {
+
+bool SetupIsolateDelegate::SetupHeapInternal(Heap* heap) {
+ return heap->CreateHeapObjects();
+}
+
+bool Heap::CreateHeapObjects() {
+ // Create initial maps.
+ if (!CreateInitialMaps()) return false;
+ if (!CreateApiObjects()) return false;
+
+ // Create initial objects
+ CreateInitialObjects();
+ CHECK_EQ(0u, gc_count_);
+
+ set_native_contexts_list(undefined_value());
+ set_allocation_sites_list(undefined_value());
+
+ return true;
+}
+
+const Heap::StringTypeTable Heap::string_type_table[] = {
+#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
+ {type, size, k##camel_name##MapRootIndex},
+ STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
+#undef STRING_TYPE_ELEMENT
+};
+
+const Heap::ConstantStringTable Heap::constant_string_table[] = {
+ {"", kempty_stringRootIndex},
+#define CONSTANT_STRING_ELEMENT(name, contents) {contents, k##name##RootIndex},
+ INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
+#undef CONSTANT_STRING_ELEMENT
+};
+
+const Heap::StructTable Heap::struct_table[] = {
+#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
+ {NAME##_TYPE, Name::kSize, k##Name##MapRootIndex},
+ STRUCT_LIST(STRUCT_TABLE_ELEMENT)
+#undef STRUCT_TABLE_ELEMENT
+};
+
+namespace {
+
+void FinalizePartialMap(Heap* heap, Map* map) {
+ map->set_dependent_code(DependentCode::cast(heap->empty_fixed_array()));
+ map->set_raw_transitions(Smi::kZero);
+ map->set_instance_descriptors(heap->empty_descriptor_array());
+ if (FLAG_unbox_double_fields) {
+ map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
+ }
+ map->set_prototype(heap->null_value());
+ map->set_constructor_or_backpointer(heap->null_value());
+}
+
+} // namespace
+
+bool Heap::CreateInitialMaps() {
+ HeapObject* obj = nullptr;
+ {
+ AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
+ if (!allocation.To(&obj)) return false;
+ }
+ // Map::cast cannot be used due to uninitialized map field.
+ Map* new_meta_map = reinterpret_cast<Map*>(obj);
+ set_meta_map(new_meta_map);
+ new_meta_map->set_map_after_allocation(new_meta_map);
+
+ { // Partial map allocation
+#define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \
+ { \
+ Map* map; \
+ if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
+ set_##field_name##_map(map); \
+ }
+
+ ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
+ ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel,
+ fixed_cow_array)
+ DCHECK_NE(fixed_array_map(), fixed_cow_array_map());
+
+ ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
+ ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
+ ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
+
+#undef ALLOCATE_PARTIAL_MAP
+ }
+
+ // Allocate the empty array.
+ {
+ AllocationResult allocation = AllocateEmptyFixedArray();
+ if (!allocation.To(&obj)) return false;
+ }
+ set_empty_fixed_array(FixedArray::cast(obj));
+
+ {
+ AllocationResult allocation = Allocate(null_map(), OLD_SPACE);
+ if (!allocation.To(&obj)) return false;
+ }
+ set_null_value(Oddball::cast(obj));
+ Oddball::cast(obj)->set_kind(Oddball::kNull);
+
+ {
+ AllocationResult allocation = Allocate(undefined_map(), OLD_SPACE);
+ if (!allocation.To(&obj)) return false;
+ }
+ set_undefined_value(Oddball::cast(obj));
+ Oddball::cast(obj)->set_kind(Oddball::kUndefined);
+ DCHECK(!InNewSpace(undefined_value()));
+ {
+ AllocationResult allocation = Allocate(the_hole_map(), OLD_SPACE);
+ if (!allocation.To(&obj)) return false;
+ }
+ set_the_hole_value(Oddball::cast(obj));
+ Oddball::cast(obj)->set_kind(Oddball::kTheHole);
+
+ // Set preliminary exception sentinel value before actually initializing it.
+ set_exception(null_value());
+
+ // Setup the struct maps first (needed for the EnumCache).
+ for (unsigned i = 0; i < arraysize(struct_table); i++) {
+ const StructTable& entry = struct_table[i];
+ Map* map;
+ if (!AllocatePartialMap(entry.type, entry.size).To(&map)) return false;
+ roots_[entry.index] = map;
+ }
+
+ // Allocate the empty enum cache.
+ {
+ AllocationResult allocation = Allocate(tuple2_map(), OLD_SPACE);
+ if (!allocation.To(&obj)) return false;
+ }
+ set_empty_enum_cache(EnumCache::cast(obj));
+ EnumCache::cast(obj)->set_keys(empty_fixed_array());
+ EnumCache::cast(obj)->set_indices(empty_fixed_array());
+
+ // Allocate the empty descriptor array.
+ {
+ AllocationResult allocation =
+ AllocateUninitializedFixedArray(DescriptorArray::kFirstIndex, TENURED);
+ if (!allocation.To(&obj)) return false;
+ }
+ set_empty_descriptor_array(DescriptorArray::cast(obj));
+ DescriptorArray::cast(obj)->set(DescriptorArray::kDescriptorLengthIndex,
+ Smi::kZero);
+ DescriptorArray::cast(obj)->set(DescriptorArray::kEnumCacheIndex,
+ empty_enum_cache());
+
+ // Fix the instance_descriptors for the existing maps.
+ FinalizePartialMap(this, meta_map());
+ FinalizePartialMap(this, fixed_array_map());
+ FinalizePartialMap(this, fixed_cow_array_map());
+ FinalizePartialMap(this, undefined_map());
+ undefined_map()->set_is_undetectable();
+ FinalizePartialMap(this, null_map());
+ null_map()->set_is_undetectable();
+ FinalizePartialMap(this, the_hole_map());
+ for (unsigned i = 0; i < arraysize(struct_table); ++i) {
+ const StructTable& entry = struct_table[i];
+ FinalizePartialMap(this, Map::cast(roots_[entry.index]));
+ }
+
+ { // Map allocation
+#define ALLOCATE_MAP(instance_type, size, field_name) \
+ { \
+ Map* map; \
+ if (!AllocateMap((instance_type), size).To(&map)) return false; \
+ set_##field_name##_map(map); \
+ }
+
+#define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
+ ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
+
+#define ALLOCATE_PRIMITIVE_MAP(instance_type, size, field_name, \
+ constructor_function_index) \
+ { \
+ ALLOCATE_MAP((instance_type), (size), field_name); \
+ field_name##_map()->SetConstructorFunctionIndex( \
+ (constructor_function_index)); \
+ }
+
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info)
+ ALLOCATE_VARSIZE_MAP(FEEDBACK_VECTOR_TYPE, feedback_vector)
+ ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
+ Context::NUMBER_FUNCTION_INDEX)
+ ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
+ mutable_heap_number)
+ ALLOCATE_PRIMITIVE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol,
+ Context::SYMBOL_FUNCTION_INDEX)
+ ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
+
+ ALLOCATE_PRIMITIVE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean,
+ Context::BOOLEAN_FUNCTION_INDEX);
+ ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
+ ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
+ ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
+ ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
+ ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, optimized_out);
+ ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, stale_register);
+ ALLOCATE_VARSIZE_MAP(BIGINT_TYPE, bigint);
+
+ for (unsigned i = 0; i < arraysize(string_type_table); i++) {
+ const StringTypeTable& entry = string_type_table[i];
+ {
+ AllocationResult allocation = AllocateMap(entry.type, entry.size);
+ if (!allocation.To(&obj)) return false;
+ }
+ Map* map = Map::cast(obj);
+ map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
+ // Mark cons string maps as unstable, because their objects can change
+ // maps during GC.
+ if (StringShape(entry.type).IsCons()) map->mark_unstable();
+ roots_[entry.index] = map;
+ }
+
+ { // Create a separate external one byte string map for native sources.
+ AllocationResult allocation =
+ AllocateMap(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE,
+ ExternalOneByteString::kShortSize);
+ if (!allocation.To(&obj)) return false;
+ Map* map = Map::cast(obj);
+ map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
+ set_native_source_string_map(map);
+ }
+
+ ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
+ fixed_double_array_map()->set_elements_kind(HOLEY_DOUBLE_ELEMENTS);
+ ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
+ ALLOCATE_VARSIZE_MAP(BYTECODE_ARRAY_TYPE, bytecode_array)
+ ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
+ ALLOCATE_VARSIZE_MAP(PROPERTY_ARRAY_TYPE, property_array)
+ ALLOCATE_VARSIZE_MAP(SMALL_ORDERED_HASH_MAP_TYPE, small_ordered_hash_map)
+ ALLOCATE_VARSIZE_MAP(SMALL_ORDERED_HASH_SET_TYPE, small_ordered_hash_set)
+
+#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
+ ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
+
+ TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
+#undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
+
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
+
+ ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
+
+ ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
+ ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
+ ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
+ ALLOCATE_MAP(CELL_TYPE, Cell::kSize, no_closures_cell)
+ ALLOCATE_MAP(CELL_TYPE, Cell::kSize, one_closure_cell)
+ ALLOCATE_MAP(CELL_TYPE, Cell::kSize, many_closures_cell)
+ ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
+ ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
+
+ ALLOCATE_VARSIZE_MAP(TRANSITION_ARRAY_TYPE, transition_array)
+
+ ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, hash_table)
+ ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, ordered_hash_table)
+ ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, unseeded_number_dictionary)
+
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, debug_evaluate_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, eval_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context_table)
+
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
+ native_context_map()->set_visitor_id(kVisitNativeContext);
+
+ ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
+ shared_function_info)
+
+ ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
+ ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
+ external_map()->set_is_extensible(false);
+#undef ALLOCATE_PRIMITIVE_MAP
+#undef ALLOCATE_VARSIZE_MAP
+#undef ALLOCATE_MAP
+ }
+
+ {
+ AllocationResult allocation = AllocateEmptyScopeInfo();
+ if (!allocation.To(&obj)) return false;
+ }
+
+ set_empty_scope_info(ScopeInfo::cast(obj));
+ {
+ AllocationResult allocation = Allocate(boolean_map(), OLD_SPACE);
+ if (!allocation.To(&obj)) return false;
+ }
+ set_true_value(Oddball::cast(obj));
+ Oddball::cast(obj)->set_kind(Oddball::kTrue);
+
+ {
+ AllocationResult allocation = Allocate(boolean_map(), OLD_SPACE);
+ if (!allocation.To(&obj)) return false;
+ }
+ set_false_value(Oddball::cast(obj));
+ Oddball::cast(obj)->set_kind(Oddball::kFalse);
+
+ { // Empty arrays
+ {
+ ByteArray* byte_array;
+ if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
+ set_empty_byte_array(byte_array);
+ }
+
+ {
+ PropertyArray* property_array;
+ if (!AllocatePropertyArray(0, TENURED).To(&property_array)) return false;
+ set_empty_property_array(property_array);
+ }
+
+#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
+ { \
+ FixedTypedArrayBase* obj; \
+ if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
+ return false; \
+ set_empty_fixed_##type##_array(obj); \
+ }
+
+ TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
+#undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
+ }
+ DCHECK(!InNewSpace(empty_fixed_array()));
+ return true;
+}
+
+bool Heap::CreateApiObjects() {
+ HandleScope scope(isolate());
+ set_message_listeners(*TemplateList::New(isolate(), 2));
+ HeapObject* obj = nullptr;
+ {
+ AllocationResult allocation = AllocateStruct(INTERCEPTOR_INFO_TYPE);
+ if (!allocation.To(&obj)) return false;
+ }
+ InterceptorInfo* info = InterceptorInfo::cast(obj);
+ info->set_flags(0);
+ set_noop_interceptor_info(info);
+ return true;
+}
+
+void Heap::CreateInitialObjects() {
+ HandleScope scope(isolate());
+ Factory* factory = isolate()->factory();
+
+ // The -0 value must be set before NewNumber works.
+ set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
+ DCHECK(std::signbit(minus_zero_value()->Number()));
+
+ set_nan_value(*factory->NewHeapNumber(
+ std::numeric_limits<double>::quiet_NaN(), IMMUTABLE, TENURED));
+ set_hole_nan_value(
+ *factory->NewHeapNumberFromBits(kHoleNanInt64, IMMUTABLE, TENURED));
+ set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
+ set_minus_infinity_value(
+ *factory->NewHeapNumber(-V8_INFINITY, IMMUTABLE, TENURED));
+
+ // Allocate initial string table.
+ set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
+
+
+ // Finish initializing oddballs after creating the string table.
+ Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
+ factory->nan_value(), "undefined", Oddball::kUndefined);
+
+ // Initialize the null_value.
+ Oddball::Initialize(isolate(), factory->null_value(), "null",
+ handle(Smi::kZero, isolate()), "object", Oddball::kNull);
+
+ // Initialize the_hole_value.
+ Oddball::Initialize(isolate(), factory->the_hole_value(), "hole",
+ factory->hole_nan_value(), "undefined",
+ Oddball::kTheHole);
+
+ // Initialize the true_value.
+ Oddball::Initialize(isolate(), factory->true_value(), "true",
+ handle(Smi::FromInt(1), isolate()), "boolean",
+ Oddball::kTrue);
+
+ // Initialize the false_value.
+ Oddball::Initialize(isolate(), factory->false_value(), "false",
+ handle(Smi::kZero, isolate()), "boolean",
+ Oddball::kFalse);
+
+ set_uninitialized_value(
+ *factory->NewOddball(factory->uninitialized_map(), "uninitialized",
+ handle(Smi::FromInt(-1), isolate()), "undefined",
+ Oddball::kUninitialized));
+
+ set_arguments_marker(
+ *factory->NewOddball(factory->arguments_marker_map(), "arguments_marker",
+ handle(Smi::FromInt(-4), isolate()), "undefined",
+ Oddball::kArgumentsMarker));
+
+ set_termination_exception(*factory->NewOddball(
+ factory->termination_exception_map(), "termination_exception",
+ handle(Smi::FromInt(-3), isolate()), "undefined", Oddball::kOther));
+
+ set_exception(*factory->NewOddball(factory->exception_map(), "exception",
+ handle(Smi::FromInt(-5), isolate()),
+ "undefined", Oddball::kException));
+
+ set_optimized_out(*factory->NewOddball(factory->optimized_out_map(),
+ "optimized_out",
+ handle(Smi::FromInt(-6), isolate()),
+ "undefined", Oddball::kOptimizedOut));
+
+ set_stale_register(
+ *factory->NewOddball(factory->stale_register_map(), "stale_register",
+ handle(Smi::FromInt(-7), isolate()), "undefined",
+ Oddball::kStaleRegister));
+
+ for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
+ Handle<String> str =
+ factory->InternalizeUtf8String(constant_string_table[i].contents);
+ roots_[constant_string_table[i].index] = *str;
+ }
+
+ // Create the code_stubs dictionary. The initial size is set to avoid
+ // expanding the dictionary during bootstrapping.
+ set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
+
+ {
+ HandleScope scope(isolate());
+#define SYMBOL_INIT(name) \
+ { \
+ Handle<Symbol> symbol(isolate()->factory()->NewPrivateSymbol()); \
+ roots_[k##name##RootIndex] = *symbol; \
+ }
+ PRIVATE_SYMBOL_LIST(SYMBOL_INIT)
+#undef SYMBOL_INIT
+ }
+
+ {
+ HandleScope scope(isolate());
+#define SYMBOL_INIT(name, description) \
+ Handle<Symbol> name = factory->NewSymbol(); \
+ Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
+ name->set_name(*name##d); \
+ roots_[k##name##RootIndex] = *name;
+ PUBLIC_SYMBOL_LIST(SYMBOL_INIT)
+#undef SYMBOL_INIT
+
+#define SYMBOL_INIT(name, description) \
+ Handle<Symbol> name = factory->NewSymbol(); \
+ Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
+ name->set_is_well_known_symbol(true); \
+ name->set_name(*name##d); \
+ roots_[k##name##RootIndex] = *name;
+ WELL_KNOWN_SYMBOL_LIST(SYMBOL_INIT)
+#undef SYMBOL_INIT
+
+ // Mark "Interesting Symbols" appropriately.
+ to_string_tag_symbol->set_is_interesting_symbol(true);
+ }
+
+ Handle<NameDictionary> empty_property_dictionary =
+ NameDictionary::New(isolate(), 1, TENURED, USE_CUSTOM_MINIMUM_CAPACITY);
+ DCHECK(!empty_property_dictionary->HasSufficientCapacityToAdd(1));
+ set_empty_property_dictionary(*empty_property_dictionary);
+
+ set_public_symbol_table(*empty_property_dictionary);
+ set_api_symbol_table(*empty_property_dictionary);
+ set_api_private_symbol_table(*empty_property_dictionary);
+
+ set_number_string_cache(
+ *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
+
+ // Allocate cache for single character one byte strings.
+ set_single_character_string_cache(
+ *factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));
+
+ // Allocate cache for string split and regexp-multiple.
+ set_string_split_cache(*factory->NewFixedArray(
+ RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
+ set_regexp_multiple_cache(*factory->NewFixedArray(
+ RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
+
+ set_undefined_cell(*factory->NewCell(factory->undefined_value()));
+
+ // Microtask queue uses the empty fixed array as a sentinel for "empty".
+ // The number of queued microtasks is stored in
+ // Isolate::pending_microtask_count().
+ set_microtask_queue(empty_fixed_array());
+
+ {
+ Handle<FixedArray> empty_sloppy_arguments_elements =
+ factory->NewFixedArray(2, TENURED);
+ empty_sloppy_arguments_elements->set_map_after_allocation(
+ sloppy_arguments_elements_map(), SKIP_WRITE_BARRIER);
+ set_empty_sloppy_arguments_elements(*empty_sloppy_arguments_elements);
+ }
+
+ {
+ Handle<WeakCell> cell = factory->NewWeakCell(factory->undefined_value());
+ set_empty_weak_cell(*cell);
+ cell->clear();
+ }
+
+ set_detached_contexts(empty_fixed_array());
+ set_retained_maps(ArrayList::cast(empty_fixed_array()));
+ set_retaining_path_targets(undefined_value());
+
+ set_weak_object_to_code_table(*WeakHashTable::New(isolate(), 16, TENURED));
+
+ set_weak_new_space_object_to_code_list(
+ ArrayList::cast(*(factory->NewFixedArray(16, TENURED))));
+ weak_new_space_object_to_code_list()->SetLength(0);
+
+ set_code_coverage_list(undefined_value());
+
+ set_script_list(Smi::kZero);
+
+ Handle<SeededNumberDictionary> slow_element_dictionary =
+ SeededNumberDictionary::New(isolate(), 1, TENURED,
+ USE_CUSTOM_MINIMUM_CAPACITY);
+ DCHECK(!slow_element_dictionary->HasSufficientCapacityToAdd(1));
+ slow_element_dictionary->set_requires_slow_elements();
+ set_empty_slow_element_dictionary(*slow_element_dictionary);
+
+ set_materialized_objects(*factory->NewFixedArray(0, TENURED));
+
+ // Handling of script id generation is in Heap::NextScriptId().
+ set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
+ set_next_template_serial_number(Smi::kZero);
+
+ // Allocate the empty OrderedHashTable.
+ Handle<FixedArray> empty_ordered_hash_table =
+ factory->NewFixedArray(OrderedHashMap::kHashTableStartIndex, TENURED);
+ empty_ordered_hash_table->set_map_no_write_barrier(
+ *factory->ordered_hash_table_map());
+ for (int i = 0; i < empty_ordered_hash_table->length(); ++i) {
+ empty_ordered_hash_table->set(i, Smi::kZero);
+ }
+ set_empty_ordered_hash_table(*empty_ordered_hash_table);
+
+ // Allocate the empty script.
+ Handle<Script> script = factory->NewScript(factory->empty_string());
+ script->set_type(Script::TYPE_NATIVE);
+ set_empty_script(*script);
+
+ Handle<Cell> array_constructor_cell = factory->NewCell(
+ handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
+ set_array_constructor_protector(*array_constructor_cell);
+
+ Handle<PropertyCell> cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_array_protector(*cell);
+
+ cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(the_hole_value());
+ set_empty_property_cell(*cell);
+
+ cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_array_iterator_protector(*cell);
+
+ Handle<Cell> is_concat_spreadable_cell = factory->NewCell(
+ handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
+ set_is_concat_spreadable_protector(*is_concat_spreadable_cell);
+
+ cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_species_protector(*cell);
+
+ Handle<Cell> string_length_overflow_cell = factory->NewCell(
+ handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
+ set_string_length_protector(*string_length_overflow_cell);
+
+ Handle<Cell> fast_array_iteration_cell = factory->NewCell(
+ handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
+ set_fast_array_iteration_protector(*fast_array_iteration_cell);
+
+ cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_array_buffer_neutering_protector(*cell);
+
+ set_serialized_templates(empty_fixed_array());
+ set_serialized_global_proxy_sizes(empty_fixed_array());
+
+ set_weak_stack_trace_list(Smi::kZero);
+
+ set_noscript_shared_function_infos(Smi::kZero);
+
+ // Initialize context slot cache.
+ isolate_->context_slot_cache()->Clear();
+
+ // Initialize descriptor cache.
+ isolate_->descriptor_lookup_cache()->Clear();
+
+ // Initialize compilation cache.
+ isolate_->compilation_cache()->Clear();
+}
+
+} // namespace internal
+} // namespace v8
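As an aside on the bootstrapping order above: CreateInitialMaps must break a cycle, since the meta map is its own map and descriptor arrays need maps to exist first. A toy sketch of that allocate-partial-then-finalize pattern follows; the names are invented and this is not V8's object model.

// Toy version of the two-phase bootstrap: fields that need not-yet-created
// objects stay unset in phase 1 and are patched in phase 2
// (cf. FinalizePartialMap above).
#include <cassert>

struct MiniMap {
  MiniMap* map = nullptr;       // every heap object points at a map
  int* descriptors = nullptr;   // filled in only during finalization
};

struct MiniHeap {
  MiniMap meta_map;             // the map of all maps, including itself
  MiniMap fixed_array_map;
  int empty_descriptor_array = 0;

  void CreateInitialMaps() {
    // Phase 1: partial allocation; self-reference breaks the cycle.
    meta_map.map = &meta_map;
    fixed_array_map.map = &meta_map;
    // Only now could a descriptor array be allocated (it needs a map).
    // Phase 2: finalize the partial maps now that dependencies exist.
    Finalize(&meta_map);
    Finalize(&fixed_array_map);
  }

  void Finalize(MiniMap* m) { m->descriptors = &empty_descriptor_array; }
};

int main() {
  MiniHeap heap;
  heap.CreateInitialMaps();
  assert(heap.meta_map.map == &heap.meta_map);  // self-referential meta map
  assert(heap.fixed_array_map.descriptors != nullptr);
  return 0;
}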
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index be5bdb37e2..a33d22f80c 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -201,6 +201,7 @@ void Page::MarkNeverAllocateForTesting() {
DCHECK(this->owner()->identity() != NEW_SPACE);
DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
SetFlag(NEVER_ALLOCATE_ON_PAGE);
+ SetFlag(NEVER_EVACUATE);
reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
}
@@ -279,20 +280,6 @@ bool FreeListCategory::is_linked() {
return prev_ != nullptr || next_ != nullptr || owner()->top(type_) == this;
}
-// Try linear allocation in the page of alloc_info's allocation top. Does
-// not contain slow case logic (e.g. move to the next page or try free list
-// allocation) so it can be used by all the allocation functions and for all
-// the paged spaces.
-HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
- Address current_top = allocation_info_.top();
- Address new_top = current_top + size_in_bytes;
- if (new_top > allocation_info_.limit()) return NULL;
-
- allocation_info_.set_top(new_top);
- return HeapObject::FromAddress(current_top);
-}
-
-
AllocationResult LocalAllocationBuffer::AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
@@ -310,14 +297,28 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
return AllocationResult(HeapObject::FromAddress(current_top));
}
+bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes) {
+ if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit())
+ return true;
+ if (free_list_.Allocate(size_in_bytes)) return true;
+ return SlowAllocateRaw(size_in_bytes);
+}
-HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
- AllocationAlignment alignment) {
+HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
+ Address current_top = allocation_info_.top();
+ Address new_top = current_top + size_in_bytes;
+ DCHECK_LE(new_top, allocation_info_.limit());
+ allocation_info_.set_top(new_top);
+ return HeapObject::FromAddress(current_top);
+}
+
+HeapObject* PagedSpace::TryAllocateLinearlyAligned(
+ int* size_in_bytes, AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(current_top, alignment);
Address new_top = current_top + filler_size + *size_in_bytes;
- if (new_top > allocation_info_.limit()) return NULL;
+ if (new_top > allocation_info_.limit()) return nullptr;
allocation_info_.set_top(new_top);
if (filler_size > 0) {
@@ -329,79 +330,55 @@ HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
return HeapObject::FromAddress(current_top);
}
-
-// Raw allocation.
AllocationResult PagedSpace::AllocateRawUnaligned(
int size_in_bytes, UpdateSkipList update_skip_list) {
- HeapObject* object = AllocateLinearly(size_in_bytes);
-
- if (object == NULL) {
- object = free_list_.Allocate(size_in_bytes);
- if (object == NULL) {
- object = SlowAllocateRaw(size_in_bytes);
- }
- if (object != NULL && heap()->incremental_marking()->black_allocation()) {
- Address start = object->address();
- Address end = object->address() + size_in_bytes;
- Page::FromAllocationAreaAddress(start)->CreateBlackArea(start, end);
- }
+ if (!EnsureLinearAllocationArea(size_in_bytes)) {
+ return AllocationResult::Retry(identity());
}
-
- if (object != NULL) {
- if (update_skip_list == UPDATE_SKIP_LIST && identity() == CODE_SPACE) {
- SkipList::Update(object->address(), size_in_bytes);
- }
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
- return object;
+ HeapObject* object = AllocateLinearly(size_in_bytes);
+ DCHECK_NOT_NULL(object);
+ if (update_skip_list == UPDATE_SKIP_LIST && identity() == CODE_SPACE) {
+ SkipList::Update(object->address(), size_in_bytes);
}
-
- return AllocationResult::Retry(identity());
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
+ return object;
}
-// Raw allocation.
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment) {
DCHECK(identity() == OLD_SPACE);
int allocation_size = size_in_bytes;
- HeapObject* object = AllocateLinearlyAligned(&allocation_size, alignment);
-
- if (object == NULL) {
+ HeapObject* object = TryAllocateLinearlyAligned(&allocation_size, alignment);
+ if (object == nullptr) {
// We don't know exactly how much filler we need to align until space is
// allocated, so assume the worst case.
int filler_size = Heap::GetMaximumFillToAlign(alignment);
allocation_size += filler_size;
- object = free_list_.Allocate(allocation_size);
- if (object == NULL) {
- object = SlowAllocateRaw(allocation_size);
- }
- if (object != NULL) {
- if (heap()->incremental_marking()->black_allocation()) {
- Address start = object->address();
- Address end = object->address() + allocation_size;
- Page::FromAllocationAreaAddress(start)->CreateBlackArea(start, end);
- }
- if (filler_size != 0) {
- object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
- alignment);
- // Filler objects are initialized, so mark only the aligned object
- // memory as uninitialized.
- allocation_size = size_in_bytes;
- }
+ if (!EnsureLinearAllocationArea(allocation_size)) {
+ return AllocationResult::Retry(identity());
}
+ allocation_size = size_in_bytes;
+ object = TryAllocateLinearlyAligned(&allocation_size, alignment);
+ DCHECK_NOT_NULL(object);
}
-
- if (object != NULL) {
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), allocation_size);
- return object;
- }
-
- return AllocationResult::Retry(identity());
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
+ return object;
}
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment) {
+ if (top() < top_on_previous_step_) {
+ // Generated code decreased the top() pointer to do folded allocations.
+ DCHECK_EQ(Page::FromAddress(top()),
+ Page::FromAddress(top_on_previous_step_));
+ top_on_previous_step_ = top();
+ }
+ size_t bytes_since_last =
+ top_on_previous_step_ ? top() - top_on_previous_step_ : 0;
+
+ DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
#ifdef V8_HOST_ARCH_32_BIT
AllocationResult result =
alignment == kDoubleAligned
@@ -411,8 +388,13 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
AllocationResult result = AllocateRawUnaligned(size_in_bytes);
#endif
HeapObject* heap_obj = nullptr;
- if (!result.IsRetry() && result.To(&heap_obj)) {
- AllocationStep(heap_obj->address(), size_in_bytes);
+ if (!result.IsRetry() && result.To(&heap_obj) && !is_local()) {
+ AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
+ heap_obj->address(), size_in_bytes);
+ DCHECK_IMPLIES(
+ heap()->incremental_marking()->black_allocation(),
+ heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
+ StartNextInlineAllocationStep();
}
return result;
}
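The reshuffled fast path above first guarantees linear space, then bumps the pointer unconditionally. A hedged standalone sketch of that split, with invented names; the refill callback stands in for the free-list/slow-path logic:

// Sketch of the two-step fast path: EnsureLinearAllocationArea() establishes
// the invariant top + size <= limit (or reports failure so the caller can
// retry after GC), after which AllocateLinearly() is an unconditional bump.
#include <cstddef>
#include <cstdint>

using Address = uintptr_t;

struct LinearAreaSketch {
  Address top = 0;
  Address limit = 0;

  bool EnsureLinearAllocationArea(size_t size_in_bytes,
                                  bool (*refill)(LinearAreaSketch*)) {
    if (top + size_in_bytes <= limit) return true;  // already fits
    return refill(this);  // must make top + size_in_bytes <= limit hold
  }

  Address AllocateLinearly(size_t size_in_bytes) {
    Address result = top;   // caller guaranteed the space exists,
    top += size_in_bytes;   // so the bump cannot fail (cf. DCHECK above)
    return result;
  }
};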
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 74fee75673..f654c6689e 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -8,7 +8,6 @@
#include "src/base/bits.h"
#include "src/base/macros.h"
-#include "src/base/platform/platform.h"
#include "src/base/platform/semaphore.h"
#include "src/counters.h"
#include "src/heap/array-buffer-tracker.h"
@@ -118,12 +117,12 @@ bool CodeRange::SetUp(size_t requested) {
DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
- base::VirtualMemory reservation;
+ VirtualMemory reservation;
if (!AlignedAllocVirtualMemory(
requested,
Max(kCodeRangeAreaAlignment,
static_cast<size_t>(base::OS::AllocateAlignment())),
- base::OS::GetRandomMmapAddr(), &reservation)) {
+ v8::internal::GetRandomMmapAddr(), &reservation)) {
return false;
}
@@ -140,7 +139,7 @@ bool CodeRange::SetUp(size_t requested) {
}
Address aligned_base = ::RoundUp(base, MemoryChunk::kAlignment);
size_t size = reservation.size() - (aligned_base - base) - reserved_area;
- allocation_list_.Add(FreeBlock(aligned_base, size));
+ allocation_list_.emplace_back(aligned_base, size);
current_allocation_block_index_ = 0;
LOG(isolate_, NewEvent("CodeRange", reservation.address(), requested));
@@ -148,19 +147,15 @@ bool CodeRange::SetUp(size_t requested) {
return true;
}
-
-int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
- const FreeBlock* right) {
- // The entire point of CodeRange is that the difference between two
- // addresses in the range can be represented as a signed 32-bit int,
- // so the cast is semantically correct.
- return static_cast<int>(left->start - right->start);
+bool CodeRange::CompareFreeBlockAddress(const FreeBlock& left,
+ const FreeBlock& right) {
+ return left.start < right.start;
}
bool CodeRange::GetNextAllocationBlock(size_t requested) {
for (current_allocation_block_index_++;
- current_allocation_block_index_ < allocation_list_.length();
+ current_allocation_block_index_ < allocation_list_.size();
current_allocation_block_index_++) {
if (requested <= allocation_list_[current_allocation_block_index_].size) {
return true; // Found a large enough allocation block.
@@ -168,26 +163,27 @@ bool CodeRange::GetNextAllocationBlock(size_t requested) {
}
// Sort and merge the free blocks on the free list and the allocation list.
- free_list_.AddAll(allocation_list_);
- allocation_list_.Clear();
- free_list_.Sort(&CompareFreeBlockAddress);
- for (int i = 0; i < free_list_.length();) {
+ free_list_.insert(free_list_.end(), allocation_list_.begin(),
+ allocation_list_.end());
+ allocation_list_.clear();
+ std::sort(free_list_.begin(), free_list_.end(), &CompareFreeBlockAddress);
+ for (size_t i = 0; i < free_list_.size();) {
FreeBlock merged = free_list_[i];
i++;
// Add adjacent free blocks to the current merged block.
- while (i < free_list_.length() &&
+ while (i < free_list_.size() &&
free_list_[i].start == merged.start + merged.size) {
merged.size += free_list_[i].size;
i++;
}
if (merged.size > 0) {
- allocation_list_.Add(merged);
+ allocation_list_.push_back(merged);
}
}
- free_list_.Clear();
+ free_list_.clear();
for (current_allocation_block_index_ = 0;
- current_allocation_block_index_ < allocation_list_.length();
+ current_allocation_block_index_ < allocation_list_.size();
current_allocation_block_index_++) {
if (requested <= allocation_list_[current_allocation_block_index_].size) {
return true; // Found a large enough allocation block.
@@ -238,24 +234,15 @@ bool CodeRange::UncommitRawMemory(Address start, size_t length) {
void CodeRange::FreeRawMemory(Address address, size_t length) {
DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
base::LockGuard<base::Mutex> guard(&code_range_mutex_);
- free_list_.Add(FreeBlock(address, length));
+ free_list_.emplace_back(address, length);
virtual_memory_.Uncommit(address, length);
}
-
-void CodeRange::TearDown() {
- if (virtual_memory_.IsReserved()) virtual_memory_.Release();
- base::LockGuard<base::Mutex> guard(&code_range_mutex_);
- free_list_.Free();
- allocation_list_.Free();
-}
-
-
bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
base::LockGuard<base::Mutex> guard(&code_range_mutex_);
- DCHECK(allocation_list_.length() == 0 ||
- current_allocation_block_index_ < allocation_list_.length());
- if (allocation_list_.length() == 0 ||
+ DCHECK(allocation_list_.empty() ||
+ current_allocation_block_index_ < allocation_list_.size());
+ if (allocation_list_.empty() ||
requested_size > allocation_list_[current_allocation_block_index_].size) {
// Find an allocation block large enough.
if (!GetNextAllocationBlock(requested_size)) return false;
@@ -276,7 +263,7 @@ bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
void CodeRange::ReleaseBlock(const FreeBlock* block) {
base::LockGuard<base::Mutex> guard(&code_range_mutex_);
- free_list_.Add(*block);
+ free_list_.push_back(*block);
}
@@ -313,7 +300,7 @@ void MemoryAllocator::TearDown() {
// Check that spaces were torn down before MemoryAllocator.
DCHECK_EQ(size_.Value(), 0u);
// TODO(gc) this will be true again when we fix FreeMemory.
- // DCHECK(size_executable_ == 0);
+ // DCHECK_EQ(0, size_executable_);
capacity_ = 0;
if (last_chunk_.IsReserved()) {
@@ -429,16 +416,14 @@ bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
bool MemoryAllocator::CommitMemory(Address base, size_t size,
Executability executable) {
- if (!base::VirtualMemory::CommitRegion(base, size,
- executable == EXECUTABLE)) {
+ if (!base::OS::CommitRegion(base, size, executable == EXECUTABLE)) {
return false;
}
UpdateAllocatedSpaceLimits(base, base + size);
return true;
}
-
-void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
+void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
Executability executable) {
// TODO(gc) make code_range part of memory allocator?
// Code which is part of the code-range does not have its own VirtualMemory.
@@ -460,7 +445,7 @@ void MemoryAllocator::FreeMemory(Address base, size_t size,
code_range()->FreeRawMemory(base, size);
} else {
DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
- bool result = base::VirtualMemory::ReleaseRegion(base, size);
+ bool result = base::OS::ReleaseRegion(base, size);
USE(result);
DCHECK(result);
}
@@ -468,8 +453,8 @@ void MemoryAllocator::FreeMemory(Address base, size_t size,
Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
void* hint,
- base::VirtualMemory* controller) {
- base::VirtualMemory reservation;
+ VirtualMemory* controller) {
+ VirtualMemory reservation;
if (!AlignedAllocVirtualMemory(size, alignment, hint, &reservation))
return nullptr;
@@ -486,9 +471,9 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
Address MemoryAllocator::AllocateAlignedMemory(
size_t reserve_size, size_t commit_size, size_t alignment,
- Executability executable, void* hint, base::VirtualMemory* controller) {
+ Executability executable, void* hint, VirtualMemory* controller) {
DCHECK(commit_size <= reserve_size);
- base::VirtualMemory reservation;
+ VirtualMemory reservation;
Address base =
ReserveAlignedMemory(reserve_size, alignment, hint, &reservation);
if (base == NULL) return NULL;
@@ -546,7 +531,7 @@ void MemoryChunk::InitializationMemoryFence() {
MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
Executability executable, Space* owner,
- base::VirtualMemory* reservation) {
+ VirtualMemory* reservation) {
MemoryChunk* chunk = FromAddress(base);
DCHECK(base == chunk->address());
@@ -579,7 +564,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
heap->incremental_marking()->non_atomic_marking_state()->ClearLiveness(chunk);
- DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
+ DCHECK_EQ(kFlagsOffset, OFFSET_OF(MemoryChunk, flags_));
if (executable == EXECUTABLE) {
chunk->SetFlag(IS_EXECUTABLE);
@@ -593,7 +578,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
Page* page = static_cast<Page*>(chunk);
- DCHECK(page->area_size() <= Page::kAllocatableMemory);
+ DCHECK_GE(Page::kAllocatableMemory, page->area_size());
// Make sure that categories are initialized before freeing the area.
page->InitializeFreeListCategories();
page->ResetAllocatedBytes();
@@ -689,7 +674,7 @@ bool MemoryChunk::CommitArea(size_t requested) {
heap_->memory_allocator()->ZapBlock(start, length);
}
} else if (commit_size < committed_size) {
- DCHECK(commit_size > 0);
+ DCHECK_LT(0, commit_size);
// Shrink the committed area.
size_t length = committed_size - commit_size;
Address start = address() + committed_size + guard_size - length;
@@ -707,7 +692,7 @@ bool MemoryChunk::CommitArea(size_t requested) {
}
size_t MemoryChunk::CommittedPhysicalMemory() {
- if (!base::VirtualMemory::HasLazyCommits() || owner()->identity() == LO_SPACE)
+ if (!base::OS::HasLazyCommits() || owner()->identity() == LO_SPACE)
return size();
return high_water_mark_.Value();
}
@@ -740,7 +725,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
size_t chunk_size;
Heap* heap = isolate_->heap();
Address base = nullptr;
- base::VirtualMemory reservation;
+ VirtualMemory reservation;
Address area_start = nullptr;
Address area_end = nullptr;
void* address_hint = heap->GetRandomMmapAddr();
@@ -881,7 +866,7 @@ size_t Page::AvailableInFreeList() {
size_t Page::ShrinkToHighWaterMark() {
// Shrinking only makes sense outside of the CodeRange, where we don't care
// about address space fragmentation.
- base::VirtualMemory* reservation = reserved_memory();
+ VirtualMemory* reservation = reserved_memory();
if (!reservation->IsReserved()) return 0;
// Shrink pages to high water mark. The water mark points either to a filler
@@ -959,7 +944,7 @@ void Page::DestroyBlackArea(Address start, Address end) {
void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
size_t bytes_to_free,
Address new_area_end) {
- base::VirtualMemory* reservation = chunk->reserved_memory();
+ VirtualMemory* reservation = chunk->reserved_memory();
DCHECK(reservation->IsReserved());
chunk->size_ -= bytes_to_free;
chunk->area_end_ = new_area_end;
@@ -987,7 +972,7 @@ void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
chunk->IsEvacuationCandidate());
- base::VirtualMemory* reservation = chunk->reserved_memory();
+ VirtualMemory* reservation = chunk->reserved_memory();
const size_t size =
reservation->IsReserved() ? reservation->size() : chunk->size();
DCHECK_GE(size_.Value(), static_cast<size_t>(size));
@@ -1006,7 +991,7 @@ void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
chunk->ReleaseAllocatedMemory();
- base::VirtualMemory* reservation = chunk->reserved_memory();
+ VirtualMemory* reservation = chunk->reserved_memory();
if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
} else {
@@ -1099,7 +1084,7 @@ MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
if (!CommitBlock(reinterpret_cast<Address>(chunk), size, NOT_EXECUTABLE)) {
return nullptr;
}
- base::VirtualMemory reservation(start, size);
+ VirtualMemory reservation(start, size);
MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
NOT_EXECUTABLE, owner, &reservation);
size_.Increment(size);
@@ -1120,7 +1105,7 @@ bool MemoryAllocator::CommitBlock(Address start, size_t size,
bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
- if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
+ if (!base::OS::UncommitRegion(start, size)) return false;
isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
return true;
}
@@ -1172,9 +1157,8 @@ intptr_t MemoryAllocator::GetCommitPageSize() {
}
}
-
-bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
- Address start, size_t commit_size,
+bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
+ size_t commit_size,
size_t reserved_size) {
// Commit page header (not executable).
Address header = start;
@@ -1229,7 +1213,7 @@ void MemoryChunk::ReleaseAllocatedMemory() {
static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
- DCHECK(pages > 0);
+ DCHECK_LT(0, pages);
SlotSet* slot_set = new SlotSet[pages];
for (size_t i = 0; i < pages; i++) {
slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
@@ -1353,6 +1337,7 @@ STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
void Space::AddAllocationObserver(AllocationObserver* observer) {
allocation_observers_.push_back(observer);
+ StartNextInlineAllocationStep();
}
void Space::RemoveAllocationObserver(AllocationObserver* observer) {
@@ -1360,6 +1345,7 @@ void Space::RemoveAllocationObserver(AllocationObserver* observer) {
allocation_observers_.end(), observer);
DCHECK(allocation_observers_.end() != it);
allocation_observers_.erase(it);
+ StartNextInlineAllocationStep();
}
void Space::PauseAllocationObservers() { allocation_observers_paused_ = true; }
@@ -1368,11 +1354,12 @@ void Space::ResumeAllocationObservers() {
allocation_observers_paused_ = false;
}
-void Space::AllocationStep(Address soon_object, int size) {
+void Space::AllocationStep(int bytes_since_last, Address soon_object,
+ int size) {
if (!allocation_observers_paused_) {
heap()->CreateFillerObjectAt(soon_object, size, ClearRecordedSlots::kNo);
for (AllocationObserver* observer : allocation_observers_) {
- observer->AllocationStep(size, soon_object, size);
+ observer->AllocationStep(bytes_since_last, soon_object, size);
}
}
}
@@ -1392,7 +1379,8 @@ PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
: Space(heap, space, executable),
anchor_(this),
free_list_(this),
- locked_page_(nullptr) {
+ locked_page_(nullptr),
+ top_on_previous_step_(0) {
area_size_ = MemoryAllocator::PageAreaSize(space);
accounting_stats_.Clear();
@@ -1462,8 +1450,8 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
other->EmptyAllocationInfo();
// The linear allocation area of {other} should be destroyed now.
- DCHECK(other->top() == nullptr);
- DCHECK(other->limit() == nullptr);
+ DCHECK_NULL(other->top());
+ DCHECK_NULL(other->limit());
// Move over pages.
for (auto it = other->begin(); it != other->end();) {
@@ -1480,7 +1468,7 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
size_t PagedSpace::CommittedPhysicalMemory() {
- if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
+ if (!base::OS::HasLazyCommits()) return CommittedMemory();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
size_t size = 0;
for (Page* page : *this) {
@@ -1621,6 +1609,48 @@ void PagedSpace::SetAllocationInfo(Address top, Address limit) {
}
}
+void PagedSpace::DecreaseLimit(Address new_limit) {
+ Address old_limit = limit();
+ DCHECK_LE(top(), new_limit);
+ DCHECK_GE(old_limit, new_limit);
+ if (new_limit != old_limit) {
+ SetTopAndLimit(top(), new_limit);
+ Free(new_limit, old_limit - new_limit);
+ if (heap()->incremental_marking()->black_allocation()) {
+ Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
+ old_limit);
+ }
+ }
+}
+
+Address PagedSpace::ComputeLimit(Address start, Address end,
+ size_t size_in_bytes) {
+ DCHECK_GE(end - start, size_in_bytes);
+
+ if (heap()->inline_allocation_disabled()) {
+ // Keep the linear allocation area exactly the requested size.
+ return start + size_in_bytes;
+ } else if (!allocation_observers_paused_ && !allocation_observers_.empty() &&
+ identity() == OLD_SPACE && !is_local()) {
+ // Generated code may allocate inline from the linear allocation area for
+ // Old Space. To make sure we can observe these allocations, we use a lower
+ // limit.
+ size_t step = RoundSizeDownToObjectAlignment(
+ static_cast<int>(GetNextInlineAllocationStepSize()));
+ return Max(start + size_in_bytes, Min(start + step, end));
+ } else {
+ // The entire node can be used as the linear allocation area.
+ return end;
+ }
+}
+
+void PagedSpace::StartNextInlineAllocationStep() {
+ if (!allocation_observers_paused_ && SupportsInlineAllocation()) {
+ top_on_previous_step_ = allocation_observers_.empty() ? 0 : top();
+ DecreaseLimit(ComputeLimit(top(), limit(), 0));
+ }
+}
+
void PagedSpace::MarkAllocationInfoBlack() {
DCHECK(heap()->incremental_marking()->black_allocation());
Address current_top = top();
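The new ComputeLimit above can be restated compactly as standalone code; this is a sketch under the same three cases, with parameter names invented for illustration:

// (1) inline allocation disabled -> exactly the requested size;
// (2) observers active -> clamp one observer step past `start`, but never
//     below the request and never past `end`;
// (3) otherwise -> the whole linear area.
#include <algorithm>
#include <cstddef>
#include <cstdint>

using Address = uintptr_t;

Address ComputeLimitSketch(Address start, Address end, size_t size_in_bytes,
                           size_t step, bool inline_allocation_disabled,
                           bool observers_active) {
  if (inline_allocation_disabled) {
    return start + size_in_bytes;
  }
  if (observers_active) {
    return std::max(start + size_in_bytes, std::min(start + step, end));
  }
  return end;
}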
@@ -1647,7 +1677,7 @@ void PagedSpace::EmptyAllocationInfo() {
Address current_top = top();
Address current_limit = limit();
if (current_top == nullptr) {
- DCHECK(current_limit == nullptr);
+ DCHECK_NULL(current_limit);
return;
}
@@ -1666,6 +1696,12 @@ void PagedSpace::EmptyAllocationInfo() {
}
}
+ if (top_on_previous_step_) {
+ DCHECK(current_top >= top_on_previous_step_);
+ AllocationStep(static_cast<int>(current_top - top_on_previous_step_),
+ nullptr, 0);
+ top_on_previous_step_ = 0;
+ }
SetTopAndLimit(NULL, NULL);
DCHECK_GE(current_limit, current_top);
Free(current_top, current_limit - current_top);
@@ -1681,6 +1717,7 @@ void PagedSpace::ReleasePage(Page* page) {
DCHECK(!free_list_.ContainsPageFreeListItems(page));
if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
+ DCHECK(!top_on_previous_step_);
allocation_info_.Reset(nullptr, nullptr);
}
@@ -1770,7 +1807,7 @@ void PagedSpace::VerifyCountersAfterSweeping() {
size_t total_capacity = 0;
size_t total_allocated = 0;
for (Page* page : *this) {
- CHECK(page->SweepingDone());
+ DCHECK(page->SweepingDone());
total_capacity += page->area_size();
HeapObjectIterator it(page);
size_t real_allocated = 0;
@@ -1790,6 +1827,11 @@ void PagedSpace::VerifyCountersAfterSweeping() {
}
void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
+ // We need to refine the counters on pages that are already swept and have
+ // not been moved over to the actual space. Otherwise, the AccountingStats
+ // are just an over-approximation.
+ RefillFreeList();
+
size_t total_capacity = 0;
size_t total_allocated = 0;
auto marking_state =
@@ -2108,16 +2150,6 @@ void NewSpace::StartNextInlineAllocationStep() {
}
}
-void NewSpace::AddAllocationObserver(AllocationObserver* observer) {
- Space::AddAllocationObserver(observer);
- StartNextInlineAllocationStep();
-}
-
-void NewSpace::RemoveAllocationObserver(AllocationObserver* observer) {
- Space::RemoveAllocationObserver(observer);
- StartNextInlineAllocationStep();
-}
-
void NewSpace::PauseAllocationObservers() {
// Do a step to account for memory allocated so far.
InlineAllocationStep(top(), top(), nullptr, 0);
@@ -2126,12 +2158,28 @@ void NewSpace::PauseAllocationObservers() {
UpdateInlineAllocationLimit(0);
}
+void PagedSpace::PauseAllocationObservers() {
+ // Do a step to account for memory allocated so far.
+ if (top_on_previous_step_) {
+ int bytes_allocated = static_cast<int>(top() - top_on_previous_step_);
+ AllocationStep(bytes_allocated, nullptr, 0);
+ }
+ Space::PauseAllocationObservers();
+ top_on_previous_step_ = 0;
+}
+
void NewSpace::ResumeAllocationObservers() {
- DCHECK(top_on_previous_step_ == 0);
+ DCHECK_NULL(top_on_previous_step_);
Space::ResumeAllocationObservers();
StartNextInlineAllocationStep();
}
+// TODO(ofrobots): refactor into SpaceWithLinearArea
+void PagedSpace::ResumeAllocationObservers() {
+ DCHECK(top_on_previous_step_ == 0);
+ Space::ResumeAllocationObservers();
+ StartNextInlineAllocationStep();
+}
void NewSpace::InlineAllocationStep(Address top, Address new_top,
Address soon_object, size_t size) {
@@ -2467,16 +2515,16 @@ void SemiSpace::AssertValidRange(Address start, Address end) {
Page* page = Page::FromAllocationAreaAddress(start);
Page* end_page = Page::FromAllocationAreaAddress(end);
SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
- CHECK_EQ(space, end_page->owner());
+ DCHECK_EQ(space, end_page->owner());
// Start address is before end address, either on same page,
// or end address is on a later page in the linked list of
// semi-space pages.
if (page == end_page) {
- CHECK_LE(start, end);
+ DCHECK_LE(start, end);
} else {
while (page != end_page) {
page = page->next_page();
- CHECK_NE(page, space->anchor());
+ DCHECK_NE(page, space->anchor());
}
}
}
@@ -2651,7 +2699,7 @@ void NewSpace::RecordPromotion(HeapObject* obj) {
size_t NewSpace::CommittedPhysicalMemory() {
- if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
+ if (!base::OS::HasLazyCommits()) return CommittedMemory();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
size_t size = to_space_.CommittedPhysicalMemory();
if (from_space_.is_committed()) {
@@ -2877,12 +2925,8 @@ FreeSpace* FreeList::FindNodeFor(size_t size_in_bytes, size_t* node_size) {
return node;
}
-// Allocation on the old space free list. If it succeeds then a new linear
-// allocation space has been set up with the top and limit of the space. If
-// the allocation fails then NULL is returned, and the caller can perform a GC
-// or allocate a new page before retrying.
-HeapObject* FreeList::Allocate(size_t size_in_bytes) {
- DCHECK(size_in_bytes <= kMaxBlockSize);
+bool FreeList::Allocate(size_t size_in_bytes) {
+ DCHECK_GE(kMaxBlockSize, size_in_bytes);
DCHECK(IsAligned(size_in_bytes, kPointerSize));
DCHECK_LE(owner_->top(), owner_->limit());
#ifdef DEBUG
@@ -2907,10 +2951,9 @@ HeapObject* FreeList::Allocate(size_t size_in_bytes) {
size_t new_node_size = 0;
FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
- if (new_node == nullptr) return nullptr;
+ if (new_node == nullptr) return false;
DCHECK_GE(new_node_size, size_in_bytes);
- size_t bytes_left = new_node_size - size_in_bytes;
#ifdef DEBUG
for (size_t i = 0; i < size_in_bytes / kPointerSize; i++) {
@@ -2924,41 +2967,22 @@ HeapObject* FreeList::Allocate(size_t size_in_bytes) {
// candidate.
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
- const size_t kThreshold = IncrementalMarking::kAllocatedThreshold;
-
// Memory in the linear allocation area is counted as allocated. We may free
// a little of this again immediately - see below.
owner_->IncreaseAllocatedBytes(new_node_size,
Page::FromAddress(new_node->address()));
- if (owner_->heap()->inline_allocation_disabled()) {
- // Keep the linear allocation area empty if requested to do so, just
- // return area back to the free list instead.
- owner_->Free(new_node->address() + size_in_bytes, bytes_left);
- owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
- new_node->address() + size_in_bytes);
- } else if (bytes_left > kThreshold &&
- owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
- FLAG_incremental_marking &&
- !owner_->is_local()) { // Not needed on CompactionSpaces.
- size_t linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
- // We don't want to give too large linear areas to the allocator while
- // incremental marking is going on, because we won't check again whether
- // we want to do another increment until the linear area is used up.
- DCHECK_GE(new_node_size, size_in_bytes + linear_size);
- owner_->Free(new_node->address() + size_in_bytes + linear_size,
- new_node_size - size_in_bytes - linear_size);
- owner_->SetAllocationInfo(
- new_node->address() + size_in_bytes,
- new_node->address() + size_in_bytes + linear_size);
- } else {
- // Normally we give the rest of the node to the allocator as its new
- // linear allocation area.
- owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
- new_node->address() + new_node_size);
+ Address start = new_node->address();
+ Address end = new_node->address() + new_node_size;
+ Address limit = owner_->ComputeLimit(start, end, size_in_bytes);
+ DCHECK_LE(limit, end);
+ DCHECK_LE(size_in_bytes, limit - start);
+ if (limit != end) {
+ owner_->Free(limit, end - limit);
}
+ owner_->SetAllocationInfo(start, limit);
- return new_node;
+ return true;
}
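
The rewritten Allocate() no longer hands back an object pointer; it carves a linear allocation area [start, limit) out of the found node, returns the tail past the limit to the free list, and signals success with a bool. A simplified sketch of that contract, with a max_linear cap standing in for whatever ComputeLimit() decides (toy types, not V8's):

    // Simplified sketch of the new FreeList::Allocate() contract above.
    #include <cstddef>
    #include <cstdint>
    #include <list>

    using Address = uintptr_t;

    struct FreeNode { Address address; size_t size; };

    class ToyFreeList {
     public:
      void Free(Address start, size_t size) { nodes_.push_back({start, size}); }

      // Caps the linear area at |max_linear| bytes past the requested size,
      // mimicking ComputeLimit() keeping areas small during incremental marking.
      bool Allocate(size_t size_in_bytes, size_t max_linear) {
        for (auto it = nodes_.begin(); it != nodes_.end(); ++it) {
          if (it->size < size_in_bytes) continue;
          Address start = it->address;
          Address end = start + it->size;
          Address limit = start + size_in_bytes + max_linear;
          if (limit > end) limit = end;
          nodes_.erase(it);
          if (limit != end) Free(limit, end - limit);  // return unused tail
          top_ = start;
          limit_ = limit;
          return true;
        }
        return false;  // caller must GC or add a page, then retry
      }

      Address top() const { return top_; }
      Address limit() const { return limit_; }

     private:
      std::list<FreeNode> nodes_;
      Address top_ = 0, limit_ = 0;
    };

    int main() {
      ToyFreeList fl;
      fl.Free(0x1000, 256);
      bool ok = fl.Allocate(/*size_in_bytes=*/32, /*max_linear=*/64);
      // ok == true; linear area is [0x1000, 0x1060); [0x1060, 0x1100) re-freed.
      return ok ? 0 : 1;
    }
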
size_t FreeList::EvictFreeListItems(Page* page) {
@@ -3124,7 +3148,7 @@ void PagedSpace::RepairFreeListsAfterDeserialization() {
}
}
-HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
+bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
// Wait for the sweeper threads here and complete the sweeping phase.
@@ -3134,30 +3158,30 @@ HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
// entries.
return free_list_.Allocate(size_in_bytes);
}
- return nullptr;
+ return false;
}
-HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
+bool CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
collector->SweepAndRefill(this);
return free_list_.Allocate(size_in_bytes);
}
- return nullptr;
+ return false;
}
-HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
+bool PagedSpace::SlowAllocateRaw(int size_in_bytes) {
VMState<GC> state(heap()->isolate());
RuntimeCallTimerScope runtime_timer(
heap()->isolate(), &RuntimeCallStats::GC_Custom_SlowAllocateRaw);
return RawSlowAllocateRaw(size_in_bytes);
}
-HeapObject* CompactionSpace::SlowAllocateRaw(int size_in_bytes) {
+bool CompactionSpace::SlowAllocateRaw(int size_in_bytes) {
return RawSlowAllocateRaw(size_in_bytes);
}
-HeapObject* PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
+bool PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
// Allocation in this space has failed.
DCHECK_GE(size_in_bytes, 0);
const int kMaxPagesToSweep = 1;
@@ -3175,17 +3199,13 @@ HeapObject* PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
RefillFreeList();
// Retry the free list allocation.
- HeapObject* object =
- free_list_.Allocate(static_cast<size_t>(size_in_bytes));
- if (object != NULL) return object;
+ if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
if (locked_page_ != nullptr) {
DCHECK_EQ(locked_page_->owner()->identity(), identity());
collector->sweeper().ParallelSweepPage(locked_page_, identity());
locked_page_ = nullptr;
- HeapObject* object =
- free_list_.Allocate(static_cast<size_t>(size_in_bytes));
- if (object != nullptr) return object;
+ if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
}
// If sweeping is still in progress try to sweep pages.
@@ -3193,8 +3213,7 @@ HeapObject* PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
identity(), size_in_bytes, kMaxPagesToSweep);
RefillFreeList();
if (max_freed >= size_in_bytes) {
- object = free_list_.Allocate(static_cast<size_t>(size_in_bytes));
- if (object != nullptr) return object;
+ if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
}
} else if (is_local()) {
// Sweeping not in progress and we are on a {CompactionSpace}. This can
@@ -3203,9 +3222,7 @@ HeapObject* PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
Page* page = main_space->RemovePageSafe(size_in_bytes);
if (page != nullptr) {
AddPage(page);
- HeapObject* object =
- free_list_.Allocate(static_cast<size_t>(size_in_bytes));
- if (object != nullptr) return object;
+ if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
}
}
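
The slow path above is a retry ladder: each recovery action (refilling the free list, sweeping a page, stealing a page from the main space) is followed by another free-list attempt, and only the final failure propagates as false so the caller can collect garbage. A minimal sketch of that shape, with the concrete steps abstracted behind callbacks (an illustration, not V8's interfaces):

    // Illustration of the retry-ladder shape in RawSlowAllocateRaw() above.
    #include <functional>
    #include <vector>

    bool SlowAllocate(const std::function<bool()>& try_allocate,
                      const std::vector<std::function<void()>>& recovery_steps) {
      if (try_allocate()) return true;
      for (const auto& recover : recovery_steps) {
        recover();  // e.g. refill the free list, sweep one page
        if (try_allocate()) return true;
      }
      return false;  // caller must collect garbage and retry
    }

    int main() {
      int budget = 0;
      auto try_allocate = [&] { return budget >= 16; };
      std::vector<std::function<void()>> steps = {[&] { budget += 8; },
                                                  [&] { budget += 8; }};
      return SlowAllocate(try_allocate, steps) ? 0 : 1;  // succeeds: exit 0
    }
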
@@ -3347,14 +3364,15 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
Heap::kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection);
- AllocationStep(object->address(), object_size);
-
heap()->CreateFillerObjectAt(object->address(), object_size,
ClearRecordedSlots::kNo);
-
if (heap()->incremental_marking()->black_allocation()) {
heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
}
+ AllocationStep(object_size, object->address(), object_size);
+ DCHECK_IMPLIES(
+ heap()->incremental_marking()->black_allocation(),
+ heap()->incremental_marking()->marking_state()->IsBlack(object));
return object;
}
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 4f4de139e4..d386d11425 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -22,7 +22,6 @@
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/marking.h"
-#include "src/list.h"
#include "src/objects.h"
#include "src/objects/map.h"
#include "src/utils.h"
@@ -355,7 +354,7 @@ class MemoryChunk {
+ kUIntptrSize // uintptr_t flags_
+ kPointerSize // Address area_start_
+ kPointerSize // Address area_end_
- + 2 * kPointerSize // base::VirtualMemory reservation_
+ + 2 * kPointerSize // VirtualMemory reservation_
+ kPointerSize // Address owner_
+ kPointerSize // Heap* heap_
+ kIntptrSize // intptr_t progress_bar_
@@ -632,12 +631,12 @@ class MemoryChunk {
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
Executability executable, Space* owner,
- base::VirtualMemory* reservation);
+ VirtualMemory* reservation);
// Should be called when memory chunk is about to be freed.
void ReleaseAllocatedMemory();
- base::VirtualMemory* reserved_memory() { return &reservation_; }
+ VirtualMemory* reserved_memory() { return &reservation_; }
size_t size_;
uintptr_t flags_;
@@ -647,7 +646,7 @@ class MemoryChunk {
Address area_end_;
// If the chunk needs to remember its memory reservation, it is stored here.
- base::VirtualMemory reservation_;
+ VirtualMemory reservation_;
// The identity of the owning space. This is tagged as a failure pointer, but
// no failure can be in an object, so this can be distinguished from any entry
@@ -904,17 +903,17 @@ class Space : public Malloced {
// Identity used in error reporting.
AllocationSpace identity() { return id_; }
- V8_EXPORT_PRIVATE virtual void AddAllocationObserver(
- AllocationObserver* observer);
+ void AddAllocationObserver(AllocationObserver* observer);
- V8_EXPORT_PRIVATE virtual void RemoveAllocationObserver(
- AllocationObserver* observer);
+ void RemoveAllocationObserver(AllocationObserver* observer);
V8_EXPORT_PRIVATE virtual void PauseAllocationObservers();
V8_EXPORT_PRIVATE virtual void ResumeAllocationObservers();
- void AllocationStep(Address soon_object, int size);
+ V8_EXPORT_PRIVATE virtual void StartNextInlineAllocationStep() {}
+
+ void AllocationStep(int bytes_since_last, Address soon_object, int size);
// Return the total amount committed memory for this space, i.e., allocatable
// memory and page headers.
@@ -1004,7 +1003,9 @@ class MemoryChunkValidator {
class CodeRange {
public:
explicit CodeRange(Isolate* isolate);
- ~CodeRange() { TearDown(); }
+ ~CodeRange() {
+ if (virtual_memory_.IsReserved()) virtual_memory_.Release();
+ }
// Reserves a range of virtual memory, but does not commit any of it.
// Can only be called once, at heap initialization time.
@@ -1055,25 +1056,21 @@ class CodeRange {
size_t size;
};
- // Frees the range of virtual memory, and frees the data structures used to
- // manage it.
- void TearDown();
-
// Finds a block on the allocation list that contains at least the
// requested amount of memory. If none is found, sorts and merges
// the existing free memory blocks, and searches again.
// If none can be found, returns false.
bool GetNextAllocationBlock(size_t requested);
// Compares the start addresses of two free blocks.
- static int CompareFreeBlockAddress(const FreeBlock* left,
- const FreeBlock* right);
+ static bool CompareFreeBlockAddress(const FreeBlock& left,
+ const FreeBlock& right);
bool ReserveBlock(const size_t requested_size, FreeBlock* block);
void ReleaseBlock(const FreeBlock* block);
Isolate* isolate_;
// The reserved range of virtual memory that all code objects are put in.
- base::VirtualMemory virtual_memory_;
+ VirtualMemory virtual_memory_;
// The global mutex guards free_list_ and allocation_list_ as GC threads may
// access both lists concurrently to the main thread.
@@ -1082,12 +1079,12 @@ class CodeRange {
// Freed blocks of memory are added to the free list. When the allocation
// list is exhausted, the free list is sorted and merged to make the new
// allocation list.
- List<FreeBlock> free_list_;
+ std::vector<FreeBlock> free_list_;
// Memory is allocated from the free blocks on the allocation list.
// The block at current_allocation_block_index_ is the current block.
- List<FreeBlock> allocation_list_;
- int current_allocation_block_index_;
+ std::vector<FreeBlock> allocation_list_;
+ size_t current_allocation_block_index_;
DISALLOW_COPY_AND_ASSIGN(CodeRange);
};
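
Replacing List<FreeBlock> with std::vector<FreeBlock> forces the comparator change above: std::sort expects a bool predicate implementing a strict weak ordering over references, not the C-style three-way int comparison over pointers that the old List sort used. A compilable illustration:

    // Why CompareFreeBlockAddress now returns bool and takes references.
    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    using Address = uintptr_t;
    struct FreeBlock { Address start; size_t size; };

    static bool CompareFreeBlockAddress(const FreeBlock& left,
                                        const FreeBlock& right) {
      return left.start < right.start;  // strict weak ordering for std::sort
    }

    int main() {
      std::vector<FreeBlock> free_list = {{0x3000, 64}, {0x1000, 32}, {0x2000, 16}};
      std::sort(free_list.begin(), free_list.end(), CompareFreeBlockAddress);
      // free_list is now ordered by start address: 0x1000, 0x2000, 0x3000.
    }
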
@@ -1348,14 +1345,14 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
Executability executable, Space* space);
Address ReserveAlignedMemory(size_t requested, size_t alignment, void* hint,
- base::VirtualMemory* controller);
+ VirtualMemory* controller);
Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
size_t alignment, Executability executable,
- void* hint, base::VirtualMemory* controller);
+ void* hint, VirtualMemory* controller);
bool CommitMemory(Address addr, size_t size, Executability executable);
- void FreeMemory(base::VirtualMemory* reservation, Executability executable);
+ void FreeMemory(VirtualMemory* reservation, Executability executable);
void FreeMemory(Address addr, size_t size, Executability executable);
// Partially release |bytes_to_free| bytes starting at |start_free|. Note that
@@ -1381,8 +1378,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
// filling it up with a recognizable non-NULL bit pattern.
void ZapBlock(Address start, size_t size);
- MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
- Address start, size_t commit_size,
+ MUST_USE_RESULT bool CommitExecutableMemory(VirtualMemory* vm, Address start,
+ size_t commit_size,
size_t reserved_size);
CodeRange* code_range() { return code_range_; }
@@ -1445,7 +1442,7 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
base::AtomicValue<void*> lowest_ever_allocated_;
base::AtomicValue<void*> highest_ever_allocated_;
- base::VirtualMemory last_chunk_;
+ VirtualMemory last_chunk_;
Unmapper unmapper_;
friend class heap::TestCodeRangeScope;
@@ -1758,10 +1755,10 @@ class V8_EXPORT_PRIVATE FreeList {
// and the size should be a non-zero multiple of the word size.
size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
- // Allocate a block of size {size_in_bytes} from the free list. The block is
- // unitialized. A failure is returned if no block is available. The size
- // should be a non-zero multiple of the word size.
- MUST_USE_RESULT HeapObject* Allocate(size_t size_in_bytes);
+ // Finds a node of size at least size_in_bytes and sets up a linear allocation
+ // area using this node. Returns false if there is no such node and the caller
+ // has to retry allocation after collecting garbage.
+ MUST_USE_RESULT bool Allocate(size_t size_in_bytes);
// Clear the free list.
void Reset();
@@ -2081,15 +2078,8 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
void ResetFreeList() { free_list_.Reset(); }
- // Set space allocation info.
- void SetTopAndLimit(Address top, Address limit) {
- DCHECK(top == limit ||
- Page::FromAddress(top) == Page::FromAddress(limit - 1));
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- allocation_info_.Reset(top, limit);
- }
-
- void SetAllocationInfo(Address top, Address limit);
+ void PauseAllocationObservers() override;
+ void ResumeAllocationObservers() override;
// Empty space allocation info, returning unused area to free list.
void EmptyAllocationInfo();
@@ -2194,6 +2184,21 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
// multiple tasks hold locks on pages while trying to sweep each other's pages.

void AnnounceLockedPage(Page* page) { locked_page_ = page; }
+ Address ComputeLimit(Address start, Address end, size_t size_in_bytes);
+ void SetAllocationInfo(Address top, Address limit);
+
+ private:
+ // Set space allocation info.
+ void SetTopAndLimit(Address top, Address limit) {
+ DCHECK(top == limit ||
+ Page::FromAddress(top) == Page::FromAddress(limit - 1));
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ allocation_info_.Reset(top, limit);
+ }
+ void DecreaseLimit(Address new_limit);
+ void StartNextInlineAllocationStep() override;
+ bool SupportsInlineAllocation() { return identity() == OLD_SPACE; }
+
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
@@ -2210,26 +2215,33 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
// size limit has been hit.
bool Expand();
- // Generic fast case allocation function that tries linear allocation at the
- // address denoted by top in allocation_info_.
+ // Sets up a linear allocation area that fits the given number of bytes.
+ // Returns false if there is not enough space and the caller has to retry
+ // after collecting garbage.
+ inline bool EnsureLinearAllocationArea(int size_in_bytes);
+ // Allocates an object from the linear allocation area. Assumes that the
+ // linear allocation area is large enough to fit the object.
inline HeapObject* AllocateLinearly(int size_in_bytes);
-
- // Generic fast case allocation function that tries aligned linear allocation
- // at the address denoted by top in allocation_info_. Writes the aligned
- // allocation size, which includes the filler size, to size_in_bytes.
- inline HeapObject* AllocateLinearlyAligned(int* size_in_bytes,
- AllocationAlignment alignment);
-
+ // Tries to allocate an aligned object from the linear allocation area.
+ // Returns nullptr if the linear allocation area does not fit the object.
+ // Otherwise, returns the object pointer and writes the allocation size
+ // (object size + alignment filler size) to size_in_bytes.
+ inline HeapObject* TryAllocateLinearlyAligned(int* size_in_bytes,
+ AllocationAlignment alignment);
// If sweeping is still in progress try to sweep unswept pages. If that is
- // not successful, wait for the sweeper threads and re-try free-list
- // allocation.
- MUST_USE_RESULT virtual HeapObject* SweepAndRetryAllocation(
- int size_in_bytes);
+ // not successful, wait for the sweeper threads and retry free-list
+ // allocation. Returns false if there is not enough space and the caller
+ // has to retry after collecting garbage.
+ MUST_USE_RESULT virtual bool SweepAndRetryAllocation(int size_in_bytes);
- // Slow path of AllocateRaw. This function is space-dependent.
- MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
+ // Slow path of AllocateRaw. This function is space-dependent. Returns false
+ // if there is not enough space and the caller has to retry after
+ // collecting garbage.
+ MUST_USE_RESULT virtual bool SlowAllocateRaw(int size_in_bytes);
- MUST_USE_RESULT HeapObject* RawSlowAllocateRaw(int size_in_bytes);
+ // Implementation of SlowAllocateRaw. Returns false if there is not enough
+ // space and the caller has to retry after collecting garbage.
+ MUST_USE_RESULT bool RawSlowAllocateRaw(int size_in_bytes);
size_t area_size_;
@@ -2249,6 +2261,7 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
base::Mutex space_mutex_;
Page* locked_page_;
+ Address top_on_previous_step_;
friend class IncrementalMarking;
friend class MarkCompactCollector;
@@ -2603,8 +2616,13 @@ class NewSpace : public Space {
return allocation_info_.limit();
}
- Address original_top() { return original_top_.Value(); }
+ void ResetOriginalTop() {
+ DCHECK_GE(top(), original_top());
+ DCHECK_LE(top(), original_limit());
+ original_top_.SetValue(top());
+ }
+ Address original_top() { return original_top_.Value(); }
Address original_limit() { return original_limit_.Value(); }
// Return the address of the first object in the active semispace.
@@ -2650,14 +2668,6 @@ class NewSpace : public Space {
UpdateInlineAllocationLimit(0);
}
- // Allows observation of inline allocation. The observer->Step() method gets
- // called after every step_size bytes have been allocated (approximately).
- // This works by adjusting the allocation limit to a lower value and adjusting
- // it after each step.
- void AddAllocationObserver(AllocationObserver* observer) override;
-
- void RemoveAllocationObserver(AllocationObserver* observer) override;
-
// Get the extent of the inactive semispace (for use as a marking stack,
// or to zap it). Notice: space-addresses are not necessarily on the
// same page, so FromSpaceStart() might be above FromSpaceEnd().
@@ -2749,8 +2759,7 @@ class NewSpace : public Space {
// The semispaces.
SemiSpace to_space_;
SemiSpace from_space_;
- base::VirtualMemory reservation_;
-
+ VirtualMemory reservation_;
HistogramInfo* allocated_histogram_;
HistogramInfo* promoted_histogram_;
@@ -2765,7 +2774,7 @@ class NewSpace : public Space {
// different when we cross a page boundary or reset the space.
void InlineAllocationStep(Address top, Address new_top, Address soon_object,
size_t size);
- void StartNextInlineAllocationStep();
+ void StartNextInlineAllocationStep() override;
friend class SemiSpaceIterator;
};
@@ -2794,10 +2803,9 @@ class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
// The space is temporary and not included in any snapshots.
bool snapshotable() override { return false; }
- MUST_USE_RESULT HeapObject* SweepAndRetryAllocation(
- int size_in_bytes) override;
+ MUST_USE_RESULT bool SweepAndRetryAllocation(int size_in_bytes) override;
- MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes) override;
+ MUST_USE_RESULT bool SlowAllocateRaw(int size_in_bytes) override;
};
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index 981aa76649..ccefd1a058 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -32,7 +32,7 @@ void StoreBuffer::SetUp() {
// Allocate 3x the buffer size, so that we can start the new store buffer
// aligned to 2x the size. This lets us use a bit test to detect the end of
// the area.
- base::VirtualMemory reservation;
+ VirtualMemory reservation;
if (!AllocVirtualMemory(kStoreBufferSize * 3, heap_->GetRandomMmapAddr(),
&reservation)) {
V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
@@ -53,7 +53,7 @@ void StoreBuffer::SetUp() {
DCHECK(reinterpret_cast<Address>(limit_[i]) >= reservation.address());
DCHECK(start_[i] <= vm_limit);
DCHECK(limit_[i] <= vm_limit);
- DCHECK((reinterpret_cast<uintptr_t>(limit_[i]) & kStoreBufferMask) == 0);
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(limit_[i]) & kStoreBufferMask);
}
if (!reservation.Commit(reinterpret_cast<Address>(start_[0]),
diff --git a/deps/v8/src/heap/store-buffer.h b/deps/v8/src/heap/store-buffer.h
index 2c6142792a..75da76490e 100644
--- a/deps/v8/src/heap/store-buffer.h
+++ b/deps/v8/src/heap/store-buffer.h
@@ -208,7 +208,7 @@ class StoreBuffer {
// IN_GC mode.
StoreBufferMode mode_;
- base::VirtualMemory virtual_memory_;
+ VirtualMemory virtual_memory_;
// Callbacks are more efficient than reading out the gc state for every
// store buffer operation.
diff --git a/deps/v8/src/heap/worklist.h b/deps/v8/src/heap/worklist.h
index f6530ec183..3421e16611 100644
--- a/deps/v8/src/heap/worklist.h
+++ b/deps/v8/src/heap/worklist.h
@@ -6,6 +6,7 @@
#define V8_HEAP_WORKLIST_
#include <cstddef>
+#include <utility>
#include "src/base/atomic-utils.h"
#include "src/base/logging.h"
@@ -168,6 +169,11 @@ class Worklist {
PublishPopSegmentToGlobal(task_id);
}
+ void MergeGlobalPool(Worklist* other) {
+ auto pair = other->global_pool_.Extract();
+ global_pool_.MergeList(pair.first, pair.second);
+ }
+
private:
FRIEND_TEST(WorkListTest, SegmentCreate);
FRIEND_TEST(WorkListTest, SegmentPush);
@@ -305,6 +311,28 @@ class Worklist {
}
}
+ std::pair<Segment*, Segment*> Extract() {
+ Segment* top = nullptr;
+ {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ if (top_ == nullptr) return std::make_pair(nullptr, nullptr);
+ top = top_;
+ set_top(nullptr);
+ }
+ Segment* end = top;
+ while (end->next() != nullptr) end = end->next();
+ return std::make_pair(top, end);
+ }
+
+ void MergeList(Segment* start, Segment* end) {
+ if (start == nullptr) return;
+ {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ end->set_next(top_);
+ set_top(start);
+ }
+ }
+
private:
void set_top(Segment* segment) {
base::AsAtomicPointer::Relaxed_Store(&top_, segment);
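
The new Extract()/MergeList() pair splices whole segment chains between worklists: Extract() detaches the list under the lock and walks to its tail outside it, and MergeList() re-attaches a chain in front with a single pointer swap under the lock. A self-contained sketch of the pattern, using std::mutex in place of V8's base::Mutex:

    // Sketch of the segment-splicing pattern added to Worklist's global pool.
    #include <mutex>
    #include <utility>

    struct Segment {
      Segment* next = nullptr;
      // ... payload omitted ...
    };

    class GlobalPool {
     public:
      std::pair<Segment*, Segment*> Extract() {
        Segment* top;
        {
          std::lock_guard<std::mutex> guard(lock_);
          if (top_ == nullptr) return {nullptr, nullptr};
          top = top_;
          top_ = nullptr;
        }
        // No lock needed: once detached, the chain is private to this thread.
        Segment* end = top;
        while (end->next != nullptr) end = end->next;
        return {top, end};
      }

      void MergeList(Segment* start, Segment* end) {
        if (start == nullptr) return;
        std::lock_guard<std::mutex> guard(lock_);
        end->next = top_;
        top_ = start;
      }

     private:
      std::mutex lock_;
      Segment* top_ = nullptr;
    };

    int main() {
      GlobalPool a, b;
      Segment s1, s2;
      s1.next = &s2;
      a.MergeList(&s1, &s2);                   // a: s1 -> s2
      auto chain = a.Extract();                // detach everything from a
      b.MergeList(chain.first, chain.second);  // splice into b
    }

Doing the tail walk outside the lock keeps both critical sections O(1), which matters when GC tasks merge pools concurrently.
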
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index 5cfa5cafd8..6ef0c25905 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -177,45 +177,6 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
}
}
-Immediate::Immediate(int x) {
- value_.immediate = x;
- rmode_ = RelocInfo::NONE32;
-}
-
-Immediate::Immediate(Address x, RelocInfo::Mode rmode) {
- value_.immediate = reinterpret_cast<int32_t>(x);
- rmode_ = rmode;
-}
-
-Immediate::Immediate(const ExternalReference& ext) {
- value_.immediate = reinterpret_cast<int32_t>(ext.address());
- rmode_ = RelocInfo::EXTERNAL_REFERENCE;
-}
-
-
-Immediate::Immediate(Label* internal_offset) {
- value_.immediate = reinterpret_cast<int32_t>(internal_offset);
- rmode_ = RelocInfo::INTERNAL_REFERENCE;
-}
-
-Immediate::Immediate(Handle<HeapObject> handle) {
- value_.immediate = reinterpret_cast<intptr_t>(handle.address());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
-}
-
-
-Immediate::Immediate(Smi* value) {
- value_.immediate = reinterpret_cast<intptr_t>(value);
- rmode_ = RelocInfo::NONE32;
-}
-
-
-Immediate::Immediate(Address addr) {
- value_.immediate = reinterpret_cast<int32_t>(addr);
- rmode_ = RelocInfo::NONE32;
-}
-
-
void Assembler::emit(uint32_t x) {
*reinterpret_cast<uint32_t*>(pc_) = x;
pc_ += sizeof(uint32_t);
@@ -316,6 +277,10 @@ Address Assembler::target_address_from_return_address(Address pc) {
return pc - kCallTargetAddressOffset;
}
+void Assembler::deserialization_set_special_target_at(
+ Isolate* isolate, Address instruction_payload, Code* code, Address target) {
+ set_target_address_at(isolate, instruction_payload, code, target);
+}
Displacement Assembler::disp_at(Label* L) {
return Displacement(long_at(L->pos()));
@@ -352,18 +317,11 @@ void Assembler::deserialization_set_target_internal_reference_at(
}
-void Operand::set_modrm(int mod, Register rm) {
- DCHECK((mod & -4) == 0);
- buf_[0] = mod << 6 | rm.code();
- len_ = 1;
-}
-
-
void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
DCHECK(len_ == 1);
DCHECK((scale & -4) == 0);
// Use SIB with no index register only for base esp.
- DCHECK(!index.is(esp) || base.is(esp));
+ DCHECK(index != esp || base == esp);
buf_[1] = scale << 6 | index.code() << 3 | base.code();
len_ = 2;
}
@@ -375,33 +333,6 @@ void Operand::set_disp8(int8_t disp) {
}
-void Operand::set_dispr(int32_t disp, RelocInfo::Mode rmode) {
- DCHECK(len_ == 1 || len_ == 2);
- int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
- *p = disp;
- len_ += sizeof(int32_t);
- rmode_ = rmode;
-}
-
-Operand::Operand(Register reg) {
- // reg
- set_modrm(3, reg);
-}
-
-
-Operand::Operand(XMMRegister xmm_reg) {
- Register reg = { xmm_reg.code() };
- set_modrm(3, reg);
-}
-
-
-Operand::Operand(int32_t disp, RelocInfo::Mode rmode) {
- // [disp/r]
- set_modrm(0, ebp);
- set_dispr(disp, rmode);
-}
-
-
Operand::Operand(Immediate imm) {
// [disp/r]
set_modrm(0, ebp);
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 7f65b8a1fe..d7fbce907a 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -47,9 +47,11 @@
#include <sys/sysctl.h>
#endif
+#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/code-stubs.h"
+#include "src/conversions-inl.h"
#include "src/disassembler.h"
#include "src/macro-assembler.h"
#include "src/v8.h"
@@ -226,19 +228,19 @@ void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
Operand::Operand(Register base, int32_t disp, RelocInfo::Mode rmode) {
// [base + disp/r]
- if (disp == 0 && RelocInfo::IsNone(rmode) && !base.is(ebp)) {
+ if (disp == 0 && RelocInfo::IsNone(rmode) && base != ebp) {
// [base]
set_modrm(0, base);
- if (base.is(esp)) set_sib(times_1, esp, base);
+ if (base == esp) set_sib(times_1, esp, base);
} else if (is_int8(disp) && RelocInfo::IsNone(rmode)) {
// [base + disp8]
set_modrm(1, base);
- if (base.is(esp)) set_sib(times_1, esp, base);
+ if (base == esp) set_sib(times_1, esp, base);
set_disp8(disp);
} else {
// [base + disp/r]
set_modrm(2, base);
- if (base.is(esp)) set_sib(times_1, esp, base);
+ if (base == esp) set_sib(times_1, esp, base);
set_dispr(disp, rmode);
}
}
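
The repeated "if (base == esp) set_sib(times_1, esp, base)" above reflects an ia32 encoding rule: in the ModRM byte, rm == 100b (esp's register code) does not name a base register but announces a trailing SIB byte, so [esp]-based operands always need one. A small sketch of the rule (standard x86 register codes, esp = 4):

    // Why [esp + disp] always needs a SIB byte while [ebx + disp] does not.
    #include <cstdint>
    #include <cstdio>

    // Register codes: eax=0, ecx=1, edx=2, ebx=3, esp=4, ebp=5, esi=6, edi=7.
    const int kEsp = 4;

    struct Encoding { uint8_t bytes[2]; int len; };

    Encoding EncodeBaseOnly(int mod, int base) {
      Encoding e;
      e.bytes[0] = static_cast<uint8_t>(mod << 6 | base);  // ModRM
      e.len = 1;
      if (base == kEsp) {
        // scale=00 (times_1), index=100 (no index), base=100 (esp)
        e.bytes[1] = static_cast<uint8_t>(0 << 6 | kEsp << 3 | kEsp);  // SIB
        e.len = 2;
      }
      return e;
    }

    int main() {
      Encoding ebx_mem = EncodeBaseOnly(0, 3);  // [ebx] -> ModRM only
      Encoding esp_mem = EncodeBaseOnly(0, 4);  // [esp] -> ModRM + SIB
      std::printf("[ebx]: %d byte(s), [esp]: %d byte(s)\n", ebx_mem.len,
                  esp_mem.len);
    }
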
@@ -249,9 +251,9 @@ Operand::Operand(Register base,
ScaleFactor scale,
int32_t disp,
RelocInfo::Mode rmode) {
- DCHECK(!index.is(esp)); // illegal addressing mode
+ DCHECK(index != esp); // illegal addressing mode
// [base + index*scale + disp/r]
- if (disp == 0 && RelocInfo::IsNone(rmode) && !base.is(ebp)) {
+ if (disp == 0 && RelocInfo::IsNone(rmode) && base != ebp) {
// [base + index*scale]
set_modrm(0, esp);
set_sib(scale, index, base);
@@ -273,7 +275,7 @@ Operand::Operand(Register index,
ScaleFactor scale,
int32_t disp,
RelocInfo::Mode rmode) {
- DCHECK(!index.is(esp)); // illegal addressing mode
+ DCHECK(index != esp); // illegal addressing mode
// [index*scale + disp/r]
set_modrm(0, esp);
set_sib(scale, index, ebp);
@@ -731,8 +733,8 @@ void Assembler::stos() {
void Assembler::xchg(Register dst, Register src) {
EnsureSpace ensure_space(this);
- if (src.is(eax) || dst.is(eax)) { // Single-byte encoding.
- EMIT(0x90 | (src.is(eax) ? dst.code() : src.code()));
+ if (src == eax || dst == eax) { // Single-byte encoding.
+ EMIT(0x90 | (src == eax ? dst.code() : src.code()));
} else {
EMIT(0x87);
EMIT(0xC0 | src.code() << 3 | dst.code());
@@ -1293,7 +1295,7 @@ void Assembler::test(Register reg, const Immediate& imm) {
EnsureSpace ensure_space(this);
// This is not using emit_arith because test doesn't support
// sign-extension of 8-bit operands.
- if (reg.is(eax)) {
+ if (reg == eax) {
EMIT(0xA9);
} else {
EMIT(0xF7);
@@ -1337,7 +1339,7 @@ void Assembler::test_b(Register reg, Immediate imm8) {
EnsureSpace ensure_space(this);
// Only use test against byte for registers that have a byte
// variant: eax, ebx, ecx, and edx.
- if (reg.is(eax)) {
+ if (reg == eax) {
EMIT(0xA8);
emit_b(imm8);
} else if (reg.is_byte_register()) {
@@ -1364,7 +1366,7 @@ void Assembler::test_b(const Operand& op, Immediate imm8) {
void Assembler::test_w(Register reg, Immediate imm16) {
DCHECK(imm16.is_int16() || imm16.is_uint16());
EnsureSpace ensure_space(this);
- if (reg.is(eax)) {
+ if (reg == eax) {
EMIT(0xA9);
emit_w(imm16);
} else {
@@ -2523,7 +2525,7 @@ void Assembler::movaps(XMMRegister dst, XMMRegister src) {
void Assembler::movups(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
- EMIT(0x11);
+ EMIT(0x10);
emit_sse_operand(dst, src);
}
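
The one-byte change above (0x11 to 0x10) fixes the register-to-register form of movups: opcode 0F 10 is the load form (the ModRM reg field names the destination) while 0F 11 is the store form (reg names the source), so emitting 0x11 with emit_sse_operand(dst, src) assembled the move with its operands swapped. A sketch of how the same ModRM byte means opposite things under the two opcodes:

    // The same ModRM byte encodes "movups xmm1, xmm2" under 0F 10 but
    // "movups xmm2, xmm1" under 0F 11 -- hence the direction bug fixed above.
    #include <cassert>
    #include <cstdint>

    uint8_t ModRM(int reg, int rm) {
      return static_cast<uint8_t>(0xC0 | reg << 3 | rm);
    }

    int main() {
      const int xmm1 = 1, xmm2 = 2;
      // 0F 10 /r: movups xmm(reg), xmm(rm) -- reg is the destination.
      uint8_t load_form[3] = {0x0F, 0x10, ModRM(xmm1, xmm2)};
      // 0F 11 /r: movups xmm(rm), xmm(reg) -- reg is the source.
      uint8_t store_form[3] = {0x0F, 0x11, ModRM(xmm1, xmm2)};
      assert(load_form[2] == store_form[2]);  // same bytes, opposite meaning
    }
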
@@ -2971,37 +2973,37 @@ void Assembler::vcmpps(XMMRegister dst, XMMRegister src1, const Operand& src2,
}
void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int8_t imm8) {
- XMMRegister iop = {6};
+ XMMRegister iop = XMMRegister::from_code(6);
vinstr(0x71, iop, dst, Operand(src), k66, k0F, kWIG);
EMIT(imm8);
}
void Assembler::vpslld(XMMRegister dst, XMMRegister src, int8_t imm8) {
- XMMRegister iop = {6};
+ XMMRegister iop = XMMRegister::from_code(6);
vinstr(0x72, iop, dst, Operand(src), k66, k0F, kWIG);
EMIT(imm8);
}
void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, int8_t imm8) {
- XMMRegister iop = {2};
+ XMMRegister iop = XMMRegister::from_code(2);
vinstr(0x71, iop, dst, Operand(src), k66, k0F, kWIG);
EMIT(imm8);
}
void Assembler::vpsrld(XMMRegister dst, XMMRegister src, int8_t imm8) {
- XMMRegister iop = {2};
+ XMMRegister iop = XMMRegister::from_code(2);
vinstr(0x72, iop, dst, Operand(src), k66, k0F, kWIG);
EMIT(imm8);
}
void Assembler::vpsraw(XMMRegister dst, XMMRegister src, int8_t imm8) {
- XMMRegister iop = {4};
+ XMMRegister iop = XMMRegister::from_code(4);
vinstr(0x71, iop, dst, Operand(src), k66, k0F, kWIG);
EMIT(imm8);
}
void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int8_t imm8) {
- XMMRegister iop = {4};
+ XMMRegister iop = XMMRegister::from_code(4);
vinstr(0x72, iop, dst, Operand(src), k66, k0F, kWIG);
EMIT(imm8);
}
@@ -3101,7 +3103,7 @@ void Assembler::bmi2(SIMDPrefix pp, byte op, Register reg, Register vreg,
void Assembler::rorx(Register dst, const Operand& src, byte imm8) {
DCHECK(IsEnabled(BMI2));
DCHECK(is_uint8(imm8));
- Register vreg = {0}; // VEX.vvvv unused
+ Register vreg = Register::from_code<0>(); // VEX.vvvv unused
EnsureSpace ensure_space(this);
emit_vex_prefix(vreg, kLZ, kF2, k0F3A, kW0);
EMIT(0xF0);
@@ -3151,7 +3153,7 @@ void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
}
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
- Register ireg = { reg.code() };
+ Register ireg = Register::from_code(reg.code());
emit_operand(ireg, adr);
}
@@ -3187,7 +3189,7 @@ void Assembler::emit_vex_prefix(XMMRegister vreg, VectorLength l, SIMDPrefix pp,
void Assembler::emit_vex_prefix(Register vreg, VectorLength l, SIMDPrefix pp,
LeadingOpcode mm, VexW w) {
- XMMRegister ivreg = {vreg.code()};
+ XMMRegister ivreg = XMMRegister::from_code(vreg.code());
emit_vex_prefix(ivreg, l, pp, mm, w);
}
@@ -3255,7 +3257,7 @@ void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
void Assembler::emit_arith(int sel, Operand dst, const Immediate& x) {
DCHECK((0 <= sel) && (sel <= 7));
- Register ireg = { sel };
+ Register ireg = Register::from_code(sel);
if (x.is_int8()) {
EMIT(0x83); // using a sign-extended 8-bit immediate.
emit_operand(ireg, dst);
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 44482c5300..e2d88dc851 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -103,95 +103,45 @@ const int kNumJSCallerSaved = 5;
// Number of registers for which space is reserved in safepoints.
const int kNumSafepointRegisters = 8;
-// CPU Registers.
-//
-// 1) We would prefer to use an enum, but enum values are assignment-
-// compatible with int, which has caused code-generation bugs.
-//
-// 2) We would prefer to use a class instead of a struct but we don't like
-// the register initialization to depend on the particular initialization
-// order (which appears to be different on OS X, Linux, and Windows for the
-// installed versions of C++ we tried). Using a struct permits C-style
-// "initialization". Also, the Register objects cannot be const as this
-// forces initialization stubs in MSVC, making us dependent on initialization
-// order.
-//
-// 3) By not using an enum, we are possibly preventing the compiler from
-// doing certain constant folds, which may significantly reduce the
-// code generated for some assembly instructions (because they boil down
-// to a few constants). If this is a problem, we could change the code
-// such that we use an enum in optimized mode, and the struct in debug
-// mode. This way we get the compile-time error checking in debug mode
-// and best performance in optimized code.
-//
-struct Register {
- enum Code {
-#define REGISTER_CODE(R) kCode_##R,
- GENERAL_REGISTERS(REGISTER_CODE)
+enum RegisterCode {
+#define REGISTER_CODE(R) kRegCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
- kAfterLast,
- kCode_no_reg = -1
- };
-
- static constexpr int kNumRegisters = Code::kAfterLast;
-
- static Register from_code(int code) {
- DCHECK(code >= 0);
- DCHECK(code < kNumRegisters);
- Register r = {code};
- return r;
- }
- bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
- bool is(Register reg) const { return reg_code == reg.reg_code; }
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
- int bit() const {
- DCHECK(is_valid());
- return 1 << reg_code;
- }
+ kRegAfterLast
+};
- bool is_byte_register() const { return reg_code <= 3; }
+class Register : public RegisterBase<Register, kRegAfterLast> {
+ public:
+ bool is_byte_register() const { return reg_code_ <= 3; }
- // Unfortunately we can't make this private in a struct.
- int reg_code;
+ private:
+ friend class RegisterBase<Register, kRegAfterLast>;
+ explicit constexpr Register(int code) : RegisterBase(code) {}
};
-#define DEFINE_REGISTER(R) constexpr Register R = {Register::kCode_##R};
+static_assert(IS_TRIVIALLY_COPYABLE(Register) &&
+ sizeof(Register) == sizeof(int),
+ "Register can efficiently be passed by value");
+
+#define DEFINE_REGISTER(R) \
+ constexpr Register R = Register::from_code<kRegCode_##R>();
GENERAL_REGISTERS(DEFINE_REGISTER)
#undef DEFINE_REGISTER
-constexpr Register no_reg = {Register::kCode_no_reg};
+constexpr Register no_reg = Register::no_reg();
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
-struct XMMRegister {
- enum Code {
-#define REGISTER_CODE(R) kCode_##R,
- DOUBLE_REGISTERS(REGISTER_CODE)
+enum DoubleCode {
+#define REGISTER_CODE(R) kDoubleCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
- kAfterLast,
- kCode_no_reg = -1
- };
-
- static constexpr int kMaxNumRegisters = Code::kAfterLast;
-
- static XMMRegister from_code(int code) {
- XMMRegister result = {code};
- return result;
- }
-
- bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
-
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
-
- bool is(XMMRegister reg) const { return reg_code == reg.reg_code; }
+ kDoubleAfterLast
+};
- int reg_code;
+class XMMRegister : public RegisterBase<XMMRegister, kDoubleAfterLast> {
+ friend class RegisterBase<XMMRegister, kDoubleAfterLast>;
+ explicit constexpr XMMRegister(int code) : RegisterBase(code) {}
};
typedef XMMRegister FloatRegister;
@@ -201,10 +151,10 @@ typedef XMMRegister DoubleRegister;
typedef XMMRegister Simd128Register;
#define DEFINE_REGISTER(R) \
- constexpr DoubleRegister R = {DoubleRegister::kCode_##R};
+ constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
DOUBLE_REGISTERS(DEFINE_REGISTER)
#undef DEFINE_REGISTER
-constexpr DoubleRegister no_double_reg = {DoubleRegister::kCode_no_reg};
+constexpr DoubleRegister no_double_reg = DoubleRegister::no_reg();
enum Condition {
// any value < 0 is considered no_condition
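
The Register and XMMRegister structs above now derive from a shared RegisterBase template, making them trivially copyable classes with constexpr construction, from_code() factories, and ==/!= in place of the old is() member. A minimal sketch of such a CRTP base (an approximation for illustration, not V8's actual RegisterBase):

    // Approximate sketch of the CRTP register-base pattern adopted above.
    #include <cassert>

    template <typename SubType, int kAfterLastRegister>
    class RegisterBase {
     public:
      static constexpr int kNumRegisters = kAfterLastRegister;

      static constexpr SubType no_reg() { return SubType{-1}; }

      template <int code>
      static constexpr SubType from_code() {
        static_assert(code >= 0 && code < kNumRegisters, "invalid code");
        return SubType{code};
      }

      static SubType from_code(int code) {
        assert(code >= 0 && code < kNumRegisters);
        return SubType{code};
      }

      bool is_valid() const { return reg_code_ >= 0; }
      int code() const {
        assert(is_valid());
        return reg_code_;
      }
      int bit() const { return 1 << code(); }

      bool operator==(SubType other) const { return reg_code_ == other.reg_code_; }
      bool operator!=(SubType other) const { return reg_code_ != other.reg_code_; }

     protected:
      explicit constexpr RegisterBase(int code) : reg_code_(code) {}
      int reg_code_;
    };

    enum RegisterCode { kRegCode_eax, kRegCode_ecx, kRegCode_edx, kRegAfterLast };

    class Register : public RegisterBase<Register, kRegAfterLast> {
      friend class RegisterBase<Register, kRegAfterLast>;
      explicit constexpr Register(int code) : RegisterBase(code) {}
    };

    constexpr Register eax = Register::from_code<kRegCode_eax>();
    constexpr Register ecx = Register::from_code<kRegCode_ecx>();

    int main() {
      assert(eax != ecx);
      assert(eax.code() == 0);
    }

Keeping the constructor private and friending the base forces all construction through the checked from_code() factories, while the static_assert in the header above guarantees pass-by-value stays as cheap as passing an int.
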
@@ -283,12 +233,30 @@ enum RoundingMode {
class Immediate BASE_EMBEDDED {
public:
- inline explicit Immediate(int x);
- inline explicit Immediate(const ExternalReference& ext);
- inline explicit Immediate(Handle<HeapObject> handle);
- inline explicit Immediate(Smi* value);
- inline explicit Immediate(Address addr);
- inline explicit Immediate(Address x, RelocInfo::Mode rmode);
+ inline explicit Immediate(int x) {
+ value_.immediate = x;
+ rmode_ = RelocInfo::NONE32;
+ }
+ inline explicit Immediate(const ExternalReference& ext) {
+ value_.immediate = reinterpret_cast<int32_t>(ext.address());
+ rmode_ = RelocInfo::EXTERNAL_REFERENCE;
+ }
+ inline explicit Immediate(Handle<HeapObject> handle) {
+ value_.immediate = reinterpret_cast<intptr_t>(handle.address());
+ rmode_ = RelocInfo::EMBEDDED_OBJECT;
+ }
+ inline explicit Immediate(Smi* value) {
+ value_.immediate = reinterpret_cast<intptr_t>(value);
+ rmode_ = RelocInfo::NONE32;
+ }
+ inline explicit Immediate(Address addr) {
+ value_.immediate = reinterpret_cast<int32_t>(addr);
+ rmode_ = RelocInfo::NONE32;
+ }
+ inline explicit Immediate(Address x, RelocInfo::Mode rmode) {
+ value_.immediate = reinterpret_cast<int32_t>(x);
+ rmode_ = rmode;
+ }
static Immediate EmbeddedNumber(double number); // Smi or HeapNumber.
static Immediate EmbeddedCode(CodeStub* code);
@@ -332,7 +300,10 @@ class Immediate BASE_EMBEDDED {
RelocInfo::Mode rmode() const { return rmode_; }
private:
- inline explicit Immediate(Label* value);
+ inline explicit Immediate(Label* value) {
+ value_.immediate = reinterpret_cast<int32_t>(value);
+ rmode_ = RelocInfo::INTERNAL_REFERENCE;
+ }
union Value {
Value() {}
@@ -366,13 +337,19 @@ enum ScaleFactor {
class Operand BASE_EMBEDDED {
public:
// reg
- INLINE(explicit Operand(Register reg));
+ INLINE(explicit Operand(Register reg)) { set_modrm(3, reg); }
// XMM reg
- INLINE(explicit Operand(XMMRegister xmm_reg));
+ INLINE(explicit Operand(XMMRegister xmm_reg)) {
+ Register reg = Register::from_code(xmm_reg.code());
+ set_modrm(3, reg);
+ }
// [disp/r]
- INLINE(explicit Operand(int32_t disp, RelocInfo::Mode rmode));
+ INLINE(explicit Operand(int32_t disp, RelocInfo::Mode rmode)) {
+ set_modrm(0, ebp);
+ set_dispr(disp, rmode);
+ }
// [disp/r]
INLINE(explicit Operand(Immediate imm));
@@ -428,11 +405,21 @@ class Operand BASE_EMBEDDED {
private:
// Set the ModRM byte without an encoded 'reg' register. The
// register is encoded later as part of the emit_operand operation.
- inline void set_modrm(int mod, Register rm);
+ inline void set_modrm(int mod, Register rm) {
+ DCHECK((mod & -4) == 0);
+ buf_[0] = mod << 6 | rm.code();
+ len_ = 1;
+ }
inline void set_sib(ScaleFactor scale, Register index, Register base);
inline void set_disp8(int8_t disp);
- inline void set_dispr(int32_t disp, RelocInfo::Mode rmode);
+ inline void set_dispr(int32_t disp, RelocInfo::Mode rmode) {
+ DCHECK(len_ == 1 || len_ == 2);
+ int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
+ *p = disp;
+ len_ += sizeof(int32_t);
+ rmode_ = rmode;
+ }
byte buf_[6];
// The number of bytes in buf_.
@@ -550,9 +537,7 @@ class Assembler : public AssemblerBase {
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
Isolate* isolate, Address instruction_payload, Code* code,
- Address target) {
- set_target_address_at(isolate, instruction_payload, code, target);
- }
+ Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
@@ -1490,20 +1475,11 @@ class Assembler : public AssemblerBase {
bmi1(0xf7, dst, src2, src1);
}
void blsi(Register dst, Register src) { blsi(dst, Operand(src)); }
- void blsi(Register dst, const Operand& src) {
- Register ireg = {3};
- bmi1(0xf3, ireg, dst, src);
- }
+ void blsi(Register dst, const Operand& src) { bmi1(0xf3, ebx, dst, src); }
void blsmsk(Register dst, Register src) { blsmsk(dst, Operand(src)); }
- void blsmsk(Register dst, const Operand& src) {
- Register ireg = {2};
- bmi1(0xf3, ireg, dst, src);
- }
+ void blsmsk(Register dst, const Operand& src) { bmi1(0xf3, edx, dst, src); }
void blsr(Register dst, Register src) { blsr(dst, Operand(src)); }
- void blsr(Register dst, const Operand& src) {
- Register ireg = {1};
- bmi1(0xf3, ireg, dst, src);
- }
+ void blsr(Register dst, const Operand& src) { bmi1(0xf3, ecx, dst, src); }
void tzcnt(Register dst, Register src) { tzcnt(dst, Operand(src)); }
void tzcnt(Register dst, const Operand& src);
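
The blsi/blsmsk/blsr cleanup above works because BMI1's /3, /2 and /1 opcode extensions occupy the ModRM reg field, and the named registers ebx, edx and ecx carry exactly the codes 3, 2 and 1, so passing them yields the same encoding as the old ad-hoc "Register ireg = {N}". A tiny check of that correspondence:

    // The opcode extension and the named register produce identical ModRM bits.
    #include <cassert>
    #include <cstdint>

    enum RegCode { kEax = 0, kEcx = 1, kEdx = 2, kEbx = 3 };

    // ModRM byte for a register-direct operand (mod = 11b): the reg field
    // holds either a register code or an opcode extension such as BMI1's /1../3.
    uint8_t ModRM(int reg_or_extension, int rm_code) {
      return static_cast<uint8_t>(0xC0 | reg_or_extension << 3 | rm_code);
    }

    int main() {
      // blsi encodes extension /3 -- bit-identical to naming ebx (code 3).
      assert(ModRM(3, kEax) == ModRM(kEbx, kEax));
      // blsmsk is /2 (edx), blsr is /1 (ecx).
      assert(ModRM(2, kEax) == ModRM(kEdx, kEax));
      assert(ModRM(1, kEax) == ModRM(kEcx, kEax));
    }
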
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index a257a057df..46c386b149 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -5,12 +5,14 @@
#if V8_TARGET_ARCH_IA32
#include "src/api-arguments.h"
+#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/frame-constants.h"
#include "src/frames.h"
+#include "src/heap/heap-inl.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
@@ -43,8 +45,8 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// restore them.
__ pushad();
if (save_doubles()) {
- __ sub(esp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
- for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
+ __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+ for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
__ movsd(Operand(esp, i * kDoubleSize), reg);
}
@@ -59,11 +61,11 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
ExternalReference::store_buffer_overflow_function(isolate()),
argument_count);
if (save_doubles()) {
- for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
+ for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
__ movsd(reg, Operand(esp, i * kDoubleSize));
}
- __ add(esp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
+ __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
}
__ popad();
__ ret(0);
@@ -108,31 +110,31 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
int double_offset = offset();
// Account for return address and saved regs if input is esp.
- if (input_reg.is(esp)) double_offset += 3 * kPointerSize;
+ if (input_reg == esp) double_offset += 3 * kPointerSize;
MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
MemOperand exponent_operand(MemOperand(input_reg,
double_offset + kDoubleSize / 2));
- Register scratch1;
+ Register scratch1 = no_reg;
{
Register scratch_candidates[3] = { ebx, edx, edi };
for (int i = 0; i < 3; i++) {
scratch1 = scratch_candidates[i];
- if (!final_result_reg.is(scratch1) && !input_reg.is(scratch1)) break;
+ if (final_result_reg != scratch1 && input_reg != scratch1) break;
}
}
// Since we must use ecx for shifts below, use some other register (eax)
// to calculate the result if ecx is the requested return register.
- Register result_reg = final_result_reg.is(ecx) ? eax : final_result_reg;
+ Register result_reg = final_result_reg == ecx ? eax : final_result_reg;
// Save ecx if it isn't the return register and therefore volatile, or if it
// is the return register, then save the temp register we use in its stead for
// the result.
- Register save_reg = final_result_reg.is(ecx) ? eax : ecx;
+ Register save_reg = final_result_reg == ecx ? eax : ecx;
__ push(scratch1);
__ push(save_reg);
- bool stash_exponent_copy = !input_reg.is(esp);
+ bool stash_exponent_copy = input_reg != esp;
__ mov(scratch1, mantissa_operand);
if (CpuFeatures::IsSupported(SSE3)) {
CpuFeatureScope scope(masm, SSE3);
@@ -212,8 +214,8 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ add(esp, Immediate(kDoubleSize / 2));
}
__ bind(&done_no_stash);
- if (!final_result_reg.is(result_reg)) {
- DCHECK(final_result_reg.is(ecx));
+ if (final_result_reg != result_reg) {
+ DCHECK(final_result_reg == ecx);
__ mov(final_result_reg, result_reg);
}
__ pop(save_reg);
@@ -296,7 +298,7 @@ void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
void MathPowStub::Generate(MacroAssembler* masm) {
const Register exponent = MathPowTaggedDescriptor::exponent();
- DCHECK(exponent.is(eax));
+ DCHECK(exponent == eax);
const Register scratch = ecx;
const XMMRegister double_result = xmm3;
const XMMRegister double_base = xmm2;
@@ -486,7 +488,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// result size is greater than can be returned in registers, also reserve
// space for the hidden argument for the result location, and space for the
// result itself.
- int arg_stack_space = result_size() < 3 ? 3 : 4 + result_size();
+ int arg_stack_space = 3;
// Enter the exit frame that transitions from JavaScript to C++.
if (argv_in_register()) {
@@ -516,35 +518,13 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ CheckStackAlignment();
}
// Call C function.
- if (result_size() <= 2) {
- __ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
- __ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
- __ mov(Operand(esp, 2 * kPointerSize),
- Immediate(ExternalReference::isolate_address(isolate())));
- } else {
- DCHECK_EQ(3, result_size());
- // Pass a pointer to the result location as the first argument.
- __ lea(eax, Operand(esp, 4 * kPointerSize));
- __ mov(Operand(esp, 0 * kPointerSize), eax);
- __ mov(Operand(esp, 1 * kPointerSize), edi); // argc.
- __ mov(Operand(esp, 2 * kPointerSize), esi); // argv.
- __ mov(Operand(esp, 3 * kPointerSize),
- Immediate(ExternalReference::isolate_address(isolate())));
- }
+ __ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
+ __ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
+ __ mov(Operand(esp, 2 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(isolate())));
__ call(ebx);
- if (result_size() > 2) {
- DCHECK_EQ(3, result_size());
-#ifndef _WIN32
- // Restore the "hidden" argument on the stack which was popped by caller.
- __ sub(esp, Immediate(kPointerSize));
-#endif
- // Read result values stored on stack. Result is stored above the arguments.
- __ mov(kReturnRegister0, Operand(esp, 4 * kPointerSize));
- __ mov(kReturnRegister1, Operand(esp, 5 * kPointerSize));
- __ mov(kReturnRegister2, Operand(esp, 6 * kPointerSize));
- }
- // Result is in eax, edx:eax or edi:edx:eax - do not destroy these registers!
+ // Result is in eax or edx:eax - do not destroy these registers!
// Check result for exception sentinel.
Label exception_returned;
@@ -672,9 +652,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ bind(&invoke);
__ PushStackHandler();
- // Fake a receiver (NULL).
- __ push(Immediate(0)); // receiver
-
// Invoke the function by calling through JS entry trampoline builtin and
// pop the faked function when we return. Notice that we cannot store a
// reference to the trampoline code directly in this stub, because the
@@ -997,6 +974,46 @@ void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
stub2.GetCode();
}
+RecordWriteStub::Mode RecordWriteStub::GetMode(Code* stub) {
+ byte first_instruction = stub->instruction_start()[0];
+ byte second_instruction = stub->instruction_start()[2];
+
+ if (first_instruction == kTwoByteJumpInstruction) {
+ return INCREMENTAL;
+ }
+
+ DCHECK(first_instruction == kTwoByteNopInstruction);
+
+ if (second_instruction == kFiveByteJumpInstruction) {
+ return INCREMENTAL_COMPACTION;
+ }
+
+ DCHECK(second_instruction == kFiveByteNopInstruction);
+
+ return STORE_BUFFER_ONLY;
+}
+
+void RecordWriteStub::Patch(Code* stub, Mode mode) {
+ switch (mode) {
+ case STORE_BUFFER_ONLY:
+ DCHECK(GetMode(stub) == INCREMENTAL ||
+ GetMode(stub) == INCREMENTAL_COMPACTION);
+ stub->instruction_start()[0] = kTwoByteNopInstruction;
+ stub->instruction_start()[2] = kFiveByteNopInstruction;
+ break;
+ case INCREMENTAL:
+ DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
+ stub->instruction_start()[0] = kTwoByteJumpInstruction;
+ break;
+ case INCREMENTAL_COMPACTION:
+ DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
+ stub->instruction_start()[0] = kTwoByteNopInstruction;
+ stub->instruction_start()[2] = kFiveByteJumpInstruction;
+ break;
+ }
+ DCHECK(GetMode(stub) == mode);
+ Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(), 7);
+}
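
GetMode()/Patch() above implement stub self-modification: the stub begins with byte sequences that are either cheap compares (acting as nops) or jumps, and flipping individual opcode bytes moves the stub between its three modes. The five-byte values 0x3d/0xe9 appear in the header change below; the two-byte values are assumed here to be V8's 0x3c (cmpb) and 0xeb (short jmp). A toy model over a plain byte buffer (no real code objects, no I-cache flush):

    // Toy model of the RecordWriteStub mode-patching scheme.
    #include <cassert>
    #include <cstdint>

    enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

    const uint8_t kTwoByteNop = 0x3c;    // cmpb al, #imm8
    const uint8_t kTwoByteJump = 0xeb;   // jmp rel8
    const uint8_t kFiveByteNop = 0x3d;   // cmpl eax, #imm32
    const uint8_t kFiveByteJump = 0xe9;  // jmp rel32

    Mode GetMode(const uint8_t* stub) {
      if (stub[0] == kTwoByteJump) return INCREMENTAL;
      assert(stub[0] == kTwoByteNop);
      if (stub[2] == kFiveByteJump) return INCREMENTAL_COMPACTION;
      assert(stub[2] == kFiveByteNop);
      return STORE_BUFFER_ONLY;
    }

    void Patch(uint8_t* stub, Mode mode) {
      switch (mode) {
        case STORE_BUFFER_ONLY:
          stub[0] = kTwoByteNop;
          stub[2] = kFiveByteNop;
          break;
        case INCREMENTAL:
          stub[0] = kTwoByteJump;
          break;
        case INCREMENTAL_COMPACTION:
          stub[0] = kTwoByteNop;
          stub[2] = kFiveByteJump;
          break;
      }
      // A real implementation must also flush the instruction cache here.
      assert(GetMode(stub) == mode);
    }

    int main() {
      uint8_t stub[7] = {kTwoByteNop, 0x00, kFiveByteNop, 0, 0, 0, 0};
      assert(GetMode(stub) == STORE_BUFFER_ONLY);
      Patch(stub, INCREMENTAL_COMPACTION);
      assert(GetMode(stub) == INCREMENTAL_COMPACTION);
    }
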
// Takes the input in 3 registers: address_, value_, and object_. A pointer to
// the value has just been written into the object; now this stub makes sure
@@ -1014,8 +1031,7 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
__ jmp(&skip_to_incremental_compacting, Label::kFar);
if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
} else {
__ ret(0);
}
@@ -1055,8 +1071,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
mode);
InformIncrementalMarker(masm);
regs_.Restore(masm);
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
__ bind(&dont_need_remembered_set);
}
@@ -1088,6 +1103,9 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}
+void RecordWriteStub::Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+}
void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
@@ -1107,8 +1125,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
} else {
__ ret(0);
}
@@ -1152,8 +1169,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
} else {
__ ret(0);
}
@@ -1547,7 +1563,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
ExternalReference level_address =
ExternalReference::handle_scope_level_address(isolate);
- DCHECK(edx.is(function_address));
+ DCHECK(edx == function_address);
// Allocate HandleScope in callee-save registers.
__ mov(ebx, Operand::StaticVariable(next_address));
__ mov(edi, Operand::StaticVariable(limit_address));
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h
index 83b783f2d6..15e40600af 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.h
+++ b/deps/v8/src/ia32/code-stubs-ia32.h
@@ -127,46 +127,9 @@ class RecordWriteStub: public PlatformCodeStub {
static const byte kFiveByteNopInstruction = 0x3d; // Cmpl eax, #imm32.
static const byte kFiveByteJumpInstruction = 0xe9; // Jmp #imm32.
- static Mode GetMode(Code* stub) {
- byte first_instruction = stub->instruction_start()[0];
- byte second_instruction = stub->instruction_start()[2];
+ static Mode GetMode(Code* stub);
- if (first_instruction == kTwoByteJumpInstruction) {
- return INCREMENTAL;
- }
-
- DCHECK(first_instruction == kTwoByteNopInstruction);
-
- if (second_instruction == kFiveByteJumpInstruction) {
- return INCREMENTAL_COMPACTION;
- }
-
- DCHECK(second_instruction == kFiveByteNopInstruction);
-
- return STORE_BUFFER_ONLY;
- }
-
- static void Patch(Code* stub, Mode mode) {
- switch (mode) {
- case STORE_BUFFER_ONLY:
- DCHECK(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
- stub->instruction_start()[0] = kTwoByteNopInstruction;
- stub->instruction_start()[2] = kFiveByteNopInstruction;
- break;
- case INCREMENTAL:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- stub->instruction_start()[0] = kTwoByteJumpInstruction;
- break;
- case INCREMENTAL_COMPACTION:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- stub->instruction_start()[0] = kTwoByteNopInstruction;
- stub->instruction_start()[2] = kFiveByteJumpInstruction;
- break;
- }
- DCHECK(GetMode(stub) == mode);
- Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(), 7);
- }
+ static void Patch(Code* stub, Mode mode);
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
@@ -176,50 +139,48 @@ class RecordWriteStub: public PlatformCodeStub {
// that must be preserved and one scratch register provided by the caller.
class RegisterAllocation {
public:
- RegisterAllocation(Register object,
- Register address,
- Register scratch0)
+ RegisterAllocation(Register object, Register address, Register scratch0)
: object_orig_(object),
address_orig_(address),
scratch0_orig_(scratch0),
object_(object),
address_(address),
- scratch0_(scratch0) {
+ scratch0_(scratch0),
+ scratch1_(no_reg) {
DCHECK(!AreAliased(scratch0, object, address, no_reg));
scratch1_ = GetRegThatIsNotEcxOr(object_, address_, scratch0_);
- if (scratch0.is(ecx)) {
+ if (scratch0 == ecx) {
scratch0_ = GetRegThatIsNotEcxOr(object_, address_, scratch1_);
}
- if (object.is(ecx)) {
+ if (object == ecx) {
object_ = GetRegThatIsNotEcxOr(address_, scratch0_, scratch1_);
}
- if (address.is(ecx)) {
+ if (address == ecx) {
address_ = GetRegThatIsNotEcxOr(object_, scratch0_, scratch1_);
}
DCHECK(!AreAliased(scratch0_, object_, address_, ecx));
}
void Save(MacroAssembler* masm) {
- DCHECK(!address_orig_.is(object_));
- DCHECK(object_.is(object_orig_) || address_.is(address_orig_));
+ DCHECK(address_orig_ != object_);
+ DCHECK(object_ == object_orig_ || address_ == address_orig_);
DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
DCHECK(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
DCHECK(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
// We don't have to save scratch0_orig_ because it was given to us as
// a scratch register. But if we had to switch to a different reg then
// we should save the new scratch0_.
- if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
- if (!ecx.is(scratch0_orig_) &&
- !ecx.is(object_orig_) &&
- !ecx.is(address_orig_)) {
+ if (scratch0_ != scratch0_orig_) masm->push(scratch0_);
+ if (ecx != scratch0_orig_ && ecx != object_orig_ &&
+ ecx != address_orig_) {
masm->push(ecx);
}
masm->push(scratch1_);
- if (!address_.is(address_orig_)) {
+ if (address_ != address_orig_) {
masm->push(address_);
masm->mov(address_, address_orig_);
}
- if (!object_.is(object_orig_)) {
+ if (object_ != object_orig_) {
masm->push(object_);
masm->mov(object_, object_orig_);
}
@@ -229,21 +190,20 @@ class RecordWriteStub: public PlatformCodeStub {
// These will have been preserved the entire time, so we just need to move
// them back. Only in one case is the orig_ reg different from the plain
// one, since only one of them can alias with ecx.
- if (!object_.is(object_orig_)) {
+ if (object_ != object_orig_) {
masm->mov(object_orig_, object_);
masm->pop(object_);
}
- if (!address_.is(address_orig_)) {
+ if (address_ != address_orig_) {
masm->mov(address_orig_, address_);
masm->pop(address_);
}
masm->pop(scratch1_);
- if (!ecx.is(scratch0_orig_) &&
- !ecx.is(object_orig_) &&
- !ecx.is(address_orig_)) {
+ if (ecx != scratch0_orig_ && ecx != object_orig_ &&
+ ecx != address_orig_) {
masm->pop(ecx);
}
- if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
+ if (scratch0_ != scratch0_orig_) masm->pop(scratch0_);
}
// If we have to call into C then we need to save and restore all caller-
@@ -280,11 +240,10 @@ class RecordWriteStub: public PlatformCodeStub {
for (int i = 0; i < Register::kNumRegisters; i++) {
if (RegisterConfiguration::Default()->IsAllocatableGeneralCode(i)) {
Register candidate = Register::from_code(i);
- if (candidate.is(ecx)) continue;
- if (candidate.is(r1)) continue;
- if (candidate.is(r2)) continue;
- if (candidate.is(r3)) continue;
- return candidate;
+ if (candidate != ecx && candidate != r1 && candidate != r2 &&
+ candidate != r3) {
+ return candidate;
+ }
}
}
UNREACHABLE();
@@ -307,9 +266,7 @@ class RecordWriteStub: public PlatformCodeStub {
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
- void Activate(Code* code) override {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
- }
+ void Activate(Code* code) override;
Register object() const {
return Register::from_code(ObjectBits::decode(minor_key_));
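The deleted inline bodies above encode the stub's mode in its first two instructions: a two-byte nop or jump at offset 0 and a five-byte nop or jump at offset 2, with Patch() flushing 7 bytes of ICache after rewriting them. A minimal sketch of the decoding GetMode() performs, reconstructed from those deleted lines (the bodies presumably move to the .cc file):

  RecordWriteStub::Mode GetModeSketch(Code* stub) {
    byte first = stub->instruction_start()[0];   // two-byte nop or jmp
    byte second = stub->instruction_start()[2];  // five-byte nop or jmp
    if (first == kTwoByteJumpInstruction) return RecordWriteStub::INCREMENTAL;
    DCHECK_EQ(kTwoByteNopInstruction, first);
    if (second == kFiveByteJumpInstruction)
      return RecordWriteStub::INCREMENTAL_COMPACTION;
    DCHECK_EQ(kFiveByteNopInstruction, second);
    return RecordWriteStub::STORE_BUFFER_ONLY;
  }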
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 2ef216730d..66da29ebb0 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -7,6 +7,7 @@
#if V8_TARGET_ARCH_IA32
#include "src/codegen.h"
+#include "src/factory-inl.h"
#include "src/heap/heap.h"
#include "src/macro-assembler.h"
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 5a2481c77f..f2588a8e16 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -4,6 +4,7 @@
#if V8_TARGET_ARCH_IA32
+#include "src/assembler-inl.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
@@ -23,7 +24,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
- const int kDoubleRegsSize = kDoubleSize * XMMRegister::kMaxNumRegisters;
+ const int kDoubleRegsSize = kDoubleSize * XMMRegister::kNumRegisters;
__ sub(esp, Immediate(kDoubleRegsSize));
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
@@ -34,7 +35,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
STATIC_ASSERT(kFloatSize == kPointerSize);
- const int kFloatRegsSize = kFloatSize * XMMRegister::kMaxNumRegisters;
+ const int kFloatRegsSize = kFloatSize * XMMRegister::kNumRegisters;
__ sub(esp, Immediate(kFloatRegsSize));
for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
int code = config->GetAllocatableFloatCode(i);
@@ -95,7 +96,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
int float_regs_offset = FrameDescription::float_registers_offset();
// Fill in the float input registers.
- for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
+ for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
int dst_offset = i * kFloatSize + float_regs_offset;
__ pop(Operand(ebx, dst_offset));
}
@@ -183,12 +184,10 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ movsd(xmm_reg, Operand(ebx, src_offset));
}
- // Push state, pc, and continuation from the last output frame.
- __ push(Operand(ebx, FrameDescription::state_offset()));
+ // Push pc and continuation from the last output frame.
__ push(Operand(ebx, FrameDescription::pc_offset()));
__ push(Operand(ebx, FrameDescription::continuation_offset()));
-
// Push the registers from the last output frame.
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
@@ -216,6 +215,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ bind(&done);
}
+bool Deoptimizer::PadTopOfStackRegister() { return false; }
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
diff --git a/deps/v8/src/ia32/frame-constants-ia32.cc b/deps/v8/src/ia32/frame-constants-ia32.cc
index 090efcc532..9cf76604df 100644
--- a/deps/v8/src/ia32/frame-constants-ia32.cc
+++ b/deps/v8/src/ia32/frame-constants-ia32.cc
@@ -18,6 +18,10 @@ Register JavaScriptFrame::fp_register() { return ebp; }
Register JavaScriptFrame::context_register() { return esi; }
Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); }
+int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
+ return register_count;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
index d06a37b8ca..95c1dc4a5e 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -22,9 +22,15 @@ void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
void RecordWriteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // TODO(albertnetymk): Use default for now; should call
- // RestrictAllocatableRegisters like src/x64/interface-descriptors-x64.cc
- DefaultInitializePlatformSpecific(data, kParameterCount);
+ static const Register default_stub_registers[] = {ebx, ecx, edx, edi,
+ kReturnRegister0};
+
+ data->RestrictAllocatableRegisters(default_stub_registers,
+ arraysize(default_stub_registers));
+
+ CHECK_LE(static_cast<size_t>(kParameterCount),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
}
const Register FastNewFunctionContextDescriptor::FunctionRegister() {
@@ -84,27 +90,6 @@ void TypeofDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-
-void FastCloneRegExpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edi, eax, ecx, edx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {eax, ebx, ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {eax, ebx, ecx, edx};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edi};
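The RecordWriteDescriptor hunk above stops using the default calling convention and pins the builtin to a fixed register set. A hedged sketch of the invariant this establishes, assuming V8's RegList bitmask and Register::bit() conventions: the set of registers the RecordWrite builtin may clobber is now statically known, so a caller can save exactly that set and nothing more.

  // Illustration only: the restricted set expressed as a RegList bitmask.
  RegList allocatable = ebx.bit() | ecx.bit() | edx.bit() | edi.bit() |
                        kReturnRegister0.bit();
  // CallRecordWriteStub in the macro-assembler-ia32.cc hunk below saves
  // callable.descriptor().allocatable_registers(), i.e. exactly this mask.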
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index def0296916..fe2fcffdd7 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -8,6 +8,7 @@
#include "src/base/division-by-constant.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
+#include "src/callable.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
@@ -15,6 +16,7 @@
#include "src/frames-inl.h"
#include "src/runtime/runtime.h"
+#include "src/ia32/assembler-ia32-inl.h"
#include "src/ia32/macro-assembler-ia32.h"
namespace v8 {
@@ -27,6 +29,15 @@ MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: TurboAssembler(isolate, buffer, size, create_code_object) {}
+TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
+ CodeObjectRequired create_code_object)
+ : Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
+ if (create_code_object == CodeObjectRequired::kYes) {
+ code_object_ =
+ Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
+ }
+}
+
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
Handle<Object> object = isolate()->heap()->root_handle(index);
@@ -90,54 +101,81 @@ void MacroAssembler::PushRoot(Heap::RootListIndex index) {
}
}
-#define REG(Name) \
- { Register::kCode_##Name }
+static constexpr Register saved_regs[] = {eax, ecx, edx};
+
+static constexpr int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
-static const Register saved_regs[] = {REG(eax), REG(ecx), REG(edx)};
+int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1,
+ Register exclusion2,
+ Register exclusion3) const {
+ int bytes = 0;
+ for (int i = 0; i < kNumberOfSavedRegs; i++) {
+ Register reg = saved_regs[i];
+ if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
+ bytes += kPointerSize;
+ }
+ }
-#undef REG
+ if (fp_mode == kSaveFPRegs) {
+ // Count all XMM registers except XMM0.
+ bytes += kDoubleSize * (XMMRegister::kNumRegisters - 1);
+ }
-static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
+ return bytes;
+}
-void TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
- Register exclusion1, Register exclusion2,
- Register exclusion3) {
+int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+ Register exclusion2, Register exclusion3) {
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
// restore them.
+ int bytes = 0;
for (int i = 0; i < kNumberOfSavedRegs; i++) {
Register reg = saved_regs[i];
- if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+ if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
push(reg);
+ bytes += kPointerSize;
}
}
+
if (fp_mode == kSaveFPRegs) {
- sub(esp, Immediate(kDoubleSize * (XMMRegister::kMaxNumRegisters - 1)));
// Save all XMM registers except XMM0.
- for (int i = XMMRegister::kMaxNumRegisters - 1; i > 0; i--) {
+ int delta = kDoubleSize * (XMMRegister::kNumRegisters - 1);
+ sub(esp, Immediate(delta));
+ for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(Operand(esp, (i - 1) * kDoubleSize), reg);
}
+ bytes += delta;
}
+
+ return bytes;
}
-void TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
- Register exclusion2, Register exclusion3) {
+int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+ Register exclusion2, Register exclusion3) {
+ int bytes = 0;
if (fp_mode == kSaveFPRegs) {
// Restore all XMM registers except XMM0.
- for (int i = XMMRegister::kMaxNumRegisters - 1; i > 0; i--) {
+ int delta = kDoubleSize * (XMMRegister::kNumRegisters - 1);
+ for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(reg, Operand(esp, (i - 1) * kDoubleSize));
}
- add(esp, Immediate(kDoubleSize * (XMMRegister::kMaxNumRegisters - 1)));
+ add(esp, Immediate(delta));
+ bytes += delta;
}
for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
Register reg = saved_regs[i];
- if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+ if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
pop(reg);
+ bytes += kPointerSize;
}
}
+
+ return bytes;
}
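A hedged usage sketch for the byte-returning variants above, assuming a TurboAssembler instance tasm and a result kept live in eax across a C call; the returned byte count lets the caller keep esp-relative operands honest while the registers are spilled:

  int bytes = tasm.PushCallerSaved(kSaveFPRegs, eax);  // exclude eax: holds result
  DCHECK_EQ(bytes, tasm.RequiredStackSizeForCallerSaved(kSaveFPRegs, eax));
  // ... emit the call; any Operand(esp, offset) built here must add |bytes| ...
  tasm.PopCallerSaved(kSaveFPRegs, eax);  // returns the same byte count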
void MacroAssembler::InNewSpace(Register object, Register scratch, Condition cc,
@@ -147,13 +185,9 @@ void MacroAssembler::InNewSpace(Register object, Register scratch, Condition cc,
condition_met, distance);
}
-
void MacroAssembler::RememberedSetHelper(
Register object, // Only used for debug checks.
- Register addr,
- Register scratch,
- SaveFPRegsMode save_fp,
- MacroAssembler::RememberedSetFinalAction and_then) {
+ Register addr, Register scratch, SaveFPRegsMode save_fp) {
Label done;
if (emit_debug_code()) {
Label ok;
@@ -174,23 +208,13 @@ void MacroAssembler::RememberedSetHelper(
// Call stub on end of buffer.
// Check for end of buffer.
test(scratch, Immediate(StoreBuffer::kStoreBufferMask));
- if (and_then == kReturnAtEnd) {
- Label buffer_overflowed;
- j(equal, &buffer_overflowed, Label::kNear);
- ret(0);
- bind(&buffer_overflowed);
- } else {
- DCHECK(and_then == kFallThroughAtEnd);
- j(not_equal, &done, Label::kNear);
- }
+ Label buffer_overflowed;
+ j(equal, &buffer_overflowed, Label::kNear);
+ ret(0);
+ bind(&buffer_overflowed);
StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
CallStub(&store_buffer_overflow);
- if (and_then == kReturnAtEnd) {
- ret(0);
- } else {
- DCHECK(and_then == kFallThroughAtEnd);
- bind(&done);
- }
+ ret(0);
}
void TurboAssembler::SlowTruncateToIDelayed(Zone* zone, Register result_reg,
@@ -204,7 +228,7 @@ void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
MinusZeroMode minus_zero_mode,
Label* lost_precision, Label* is_nan,
Label* minus_zero, Label::Distance dst) {
- DCHECK(!input_reg.is(scratch));
+ DCHECK(input_reg != scratch);
cvttsd2si(result_reg, Operand(input_reg));
Cvtsi2sd(scratch, Operand(result_reg));
ucomisd(scratch, input_reg);
@@ -236,16 +260,11 @@ void TurboAssembler::LoadUint32(XMMRegister dst, const Operand& src) {
bind(&done);
}
-
-void MacroAssembler::RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register dst,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check,
- PointersToHereCheck pointers_to_here_check_for_value) {
+void MacroAssembler::RecordWriteField(Register object, int offset,
+ Register value, Register dst,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -269,7 +288,7 @@ void MacroAssembler::RecordWriteField(
}
RecordWrite(object, dst, value, save_fp, remembered_set_action,
- OMIT_SMI_CHECK, pointers_to_here_check_for_value);
+ OMIT_SMI_CHECK);
bind(&done);
@@ -281,80 +300,71 @@ void MacroAssembler::RecordWriteField(
}
}
+void TurboAssembler::SaveRegisters(RegList registers) {
+ DCHECK(NumRegs(registers) > 0);
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ push(Register::from_code(i));
+ }
+ }
+}
-void MacroAssembler::RecordWriteForMap(
- Register object,
- Handle<Map> map,
- Register scratch1,
- Register scratch2,
- SaveFPRegsMode save_fp) {
- Label done;
-
- Register address = scratch1;
- Register value = scratch2;
- if (emit_debug_code()) {
- Label ok;
- lea(address, FieldOperand(object, HeapObject::kMapOffset));
- test_b(address, Immediate((1 << kPointerSizeLog2) - 1));
- j(zero, &ok, Label::kNear);
- int3();
- bind(&ok);
+void TurboAssembler::RestoreRegisters(RegList registers) {
+ DCHECK(NumRegs(registers) > 0);
+ for (int i = Register::kNumRegisters - 1; i >= 0; --i) {
+ if ((registers >> i) & 1u) {
+ pop(Register::from_code(i));
+ }
}
+}
- DCHECK(!object.is(value));
- DCHECK(!object.is(address));
- DCHECK(!value.is(address));
- AssertNotSmi(object);
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
+  // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
+  // i.e. we always emit the remembered set and save FP registers in
+  // RecordWriteStub. If a large performance regression is observed, we should
+  // use these values to avoid unnecessary work.
- if (!FLAG_incremental_marking) {
- return;
- }
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
+ RegList registers = callable.descriptor().allocatable_registers();
- // Compute the address.
- lea(address, FieldOperand(object, HeapObject::kMapOffset));
+ SaveRegisters(registers);
- // A single check of the map's pages interesting flag suffices, since it is
- // only set during incremental collection, and then it's also guaranteed that
- // the from object's page's interesting flag is also set. This optimization
- // relies on the fact that maps can never be in new space.
- DCHECK(!isolate()->heap()->InNewSpace(*map));
- CheckPageFlagForMap(map,
- MemoryChunk::kPointersToHereAreInterestingMask,
- zero,
- &done,
- Label::kNear);
+ Register object_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kObject));
+ Register slot_parameter(
+ callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
+ Register isolate_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kIsolate));
+ Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kRememberedSet));
+ Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kFPMode));
- RecordWriteStub stub(isolate(), object, value, address, OMIT_REMEMBERED_SET,
- save_fp);
- CallStub(&stub);
+ push(object);
+ push(address);
- bind(&done);
+ pop(slot_parameter);
+ pop(object_parameter);
- // Count number of write barriers in generated code.
- isolate()->counters()->write_barriers_static()->Increment();
- IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+ mov(isolate_parameter,
+ Immediate(ExternalReference::isolate_address(isolate())));
+ Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
+ Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
+ Call(callable.code(), RelocInfo::CODE_TARGET);
- // Clobber clobbered input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
- mov(scratch1, Immediate(bit_cast<int32_t>(kZapValue)));
- mov(scratch2, Immediate(bit_cast<int32_t>(kZapValue)));
- }
+ RestoreRegisters(registers);
}
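The push/pop pair in CallRecordWriteStub above is a two-register parallel move routed through the stack: it stays correct even if object or address already aliases one of the descriptor's parameter registers. A standalone C++ analogue of the same technique (not V8 code):

  #include <stack>

  // (dst_object, dst_slot) = (object, address), alias-safe because both
  // sources are captured on the stack before either destination is written.
  void ParallelMove2(int& dst_object, int& dst_slot, int object, int address) {
    std::stack<int> s;
    s.push(object);                 // push(object)
    s.push(address);                // push(address)
    dst_slot = s.top(); s.pop();    // pop(slot_parameter)   -> address
    dst_object = s.top(); s.pop();  // pop(object_parameter) -> object
  }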
-
-void MacroAssembler::RecordWrite(
- Register object,
- Register address,
- Register value,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check,
- PointersToHereCheck pointers_to_here_check_for_value) {
- DCHECK(!object.is(value));
- DCHECK(!object.is(address));
- DCHECK(!value.is(address));
+void MacroAssembler::RecordWrite(Register object, Register address,
+ Register value, SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ DCHECK(object != value);
+ DCHECK(object != address);
+ DCHECK(value != address);
AssertNotSmi(object);
if (remembered_set_action == OMIT_REMEMBERED_SET &&
@@ -379,14 +389,10 @@ void MacroAssembler::RecordWrite(
JumpIfSmi(value, &done, Label::kNear);
}
- if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- zero,
- &done,
- Label::kNear);
- }
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
+ Label::kNear);
CheckPageFlag(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask,
@@ -394,9 +400,13 @@ void MacroAssembler::RecordWrite(
&done,
Label::kNear);
+#ifdef V8_CSA_WRITE_BARRIER
+ CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
+#else
RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
fp_mode);
CallStub(&stub);
+#endif
bind(&done);
@@ -523,23 +533,6 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
cmpb(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
}
-void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
- cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
-
- CompareMap(obj, map);
- j(not_equal, fail);
-}
-
void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
@@ -716,11 +709,10 @@ void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type) {
void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
// Optionally save all XMM registers.
if (save_doubles) {
- int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
- argc * kPointerSize;
+ int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
sub(esp, Immediate(space));
const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
- for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
+ for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
}
@@ -763,7 +755,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
// Optionally restore all XMM registers.
if (save_doubles) {
const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
- for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
+ for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
}
@@ -838,143 +830,6 @@ void MacroAssembler::PopStackHandler() {
}
-void MacroAssembler::LoadAllocationTopHelper(Register result,
- Register scratch,
- AllocationFlags flags) {
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
-
- // Just return if allocation top is already known.
- if ((flags & RESULT_CONTAINS_TOP) != 0) {
- // No use of scratch if allocation top is provided.
- DCHECK(scratch.is(no_reg));
-#ifdef DEBUG
- // Assert that result actually contains top on entry.
- cmp(result, Operand::StaticVariable(allocation_top));
- Check(equal, kUnexpectedAllocationTop);
-#endif
- return;
- }
-
- // Move address of new object to result. Use scratch register if available.
- if (scratch.is(no_reg)) {
- mov(result, Operand::StaticVariable(allocation_top));
- } else {
- mov(scratch, Immediate(allocation_top));
- mov(result, Operand(scratch, 0));
- }
-}
-
-
-void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
- Register scratch,
- AllocationFlags flags) {
- if (emit_debug_code()) {
- test(result_end, Immediate(kObjectAlignmentMask));
- Check(zero, kUnalignedAllocationInNewSpace);
- }
-
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
-
- // Update new top. Use scratch if available.
- if (scratch.is(no_reg)) {
- mov(Operand::StaticVariable(allocation_top), result_end);
- } else {
- mov(Operand(scratch, 0), result_end);
- }
-}
-
-
-void MacroAssembler::Allocate(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
- DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- DCHECK(object_size <= kMaxRegularHeapObjectSize);
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- mov(result, Immediate(0x7091));
- if (result_end.is_valid()) {
- mov(result_end, Immediate(0x7191));
- }
- if (scratch.is_valid()) {
- mov(scratch, Immediate(0x7291));
- }
- }
- jmp(gc_required);
- return;
- }
- DCHECK(!result.is(result_end));
-
- // Load address of new object into result.
- LoadAllocationTopHelper(result, scratch, flags);
-
- ExternalReference allocation_limit =
- AllocationUtils::GetAllocationLimitReference(isolate(), flags);
-
- // Align the next allocation. Storing the filler map without checking top is
- // safe in new-space because the limit of the heap is aligned there.
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
- Label aligned;
- test(result, Immediate(kDoubleAlignmentMask));
- j(zero, &aligned, Label::kNear);
- if ((flags & PRETENURE) != 0) {
- cmp(result, Operand::StaticVariable(allocation_limit));
- j(above_equal, gc_required);
- }
- mov(Operand(result, 0),
- Immediate(isolate()->factory()->one_pointer_filler_map()));
- add(result, Immediate(kDoubleSize / 2));
- bind(&aligned);
- }
-
- // Calculate new top and bail out if space is exhausted.
- Register top_reg = result_end.is_valid() ? result_end : result;
-
- if (!top_reg.is(result)) {
- mov(top_reg, result);
- }
- add(top_reg, Immediate(object_size));
- cmp(top_reg, Operand::StaticVariable(allocation_limit));
- j(above, gc_required);
-
- UpdateAllocationTopHelper(top_reg, scratch, flags);
-
- if (top_reg.is(result)) {
- sub(result, Immediate(object_size - kHeapObjectTag));
- } else {
- // Tag the result.
- DCHECK(kHeapObjectTag == 1);
- inc(result);
- }
-}
-
-void MacroAssembler::AllocateJSValue(Register result, Register constructor,
- Register value, Register scratch,
- Label* gc_required) {
- DCHECK(!result.is(constructor));
- DCHECK(!result.is(scratch));
- DCHECK(!result.is(value));
-
- // Allocate JSValue in new space.
- Allocate(JSValue::kSize, result, scratch, no_reg, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Initialize the JSValue.
- LoadGlobalFunctionInitialMap(constructor, scratch);
- mov(FieldOperand(result, HeapObject::kMapOffset), scratch);
- LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
- mov(FieldOperand(result, JSObject::kPropertiesOrHashOffset), scratch);
- mov(FieldOperand(result, JSObject::kElementsOffset), scratch);
- mov(FieldOperand(result, JSValue::kValueOffset), value);
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
-}
-
void MacroAssembler::GetMapConstructor(Register result, Register map,
Register temp) {
Label done, loop;
@@ -1184,14 +1039,14 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
mov(eax, actual.immediate());
cmp(expected.reg(), actual.immediate());
j(equal, &invoke);
- DCHECK(expected.reg().is(ebx));
- } else if (!expected.reg().is(actual.reg())) {
+ DCHECK(expected.reg() == ebx);
+ } else if (expected.reg() != actual.reg()) {
// Both expected and actual are in (different) registers. This
// is the case when we invoke functions using call and apply.
cmp(expected.reg(), actual.reg());
j(equal, &invoke);
- DCHECK(actual.reg().is(eax));
- DCHECK(expected.reg().is(ebx));
+ DCHECK(actual.reg() == eax);
+ DCHECK(expected.reg() == ebx);
} else {
definitely_matches = true;
Move(eax, actual.reg());
@@ -1259,8 +1114,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
InvokeFlag flag) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- DCHECK(function.is(edi));
- DCHECK_IMPLIES(new_target.is_valid(), new_target.is(edx));
+ DCHECK(function == edi);
+ DCHECK_IMPLIES(new_target.is_valid(), new_target == edx);
// On function call, call into the debugger if necessary.
CheckDebugHook(function, new_target, expected, actual);
@@ -1296,7 +1151,7 @@ void MacroAssembler::InvokeFunction(Register fun, Register new_target,
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- DCHECK(fun.is(edi));
+ DCHECK(fun == edi);
mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -1312,7 +1167,7 @@ void MacroAssembler::InvokeFunction(Register fun,
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- DCHECK(fun.is(edi));
+ DCHECK(fun == edi);
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
InvokeFunctionCode(edi, no_reg, expected, actual, flag);
@@ -1338,20 +1193,6 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
mov(function, ContextOperand(function, index));
}
-void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
- Register map) {
- // Load the initial map. The global functions all have initial maps.
- mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (emit_debug_code()) {
- Label ok, fail;
- CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
- jmp(&ok);
- bind(&fail);
- Abort(kGlobalFunctionsMustHaveInitialMap);
- bind(&ok);
- }
-}
-
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// The registers are pushed starting with the lowest encoding,
// which means that lowest encodings are furthest away from
@@ -1393,7 +1234,7 @@ void MacroAssembler::Drop(int stack_elements) {
}
void TurboAssembler::Move(Register dst, Register src) {
- if (!dst.is(src)) {
+ if (dst != src) {
mov(dst, src);
}
}
@@ -1637,13 +1478,6 @@ void TurboAssembler::Popcnt(Register dst, const Operand& src) {
}
-void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
- if (FLAG_native_code_counters && counter->Enabled()) {
- mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
- }
-}
-
-
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
DCHECK(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
@@ -1670,35 +1504,6 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
}
-void MacroAssembler::IncrementCounter(Condition cc,
- StatsCounter* counter,
- int value) {
- DCHECK(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- Label skip;
- j(NegateCondition(cc), &skip);
- pushfd();
- IncrementCounter(counter, value);
- popfd();
- bind(&skip);
- }
-}
-
-
-void MacroAssembler::DecrementCounter(Condition cc,
- StatsCounter* counter,
- int value) {
- DCHECK(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- Label skip;
- j(NegateCondition(cc), &skip);
- pushfd();
- DecrementCounter(counter, value);
- popfd();
- bind(&skip);
- }
-}
-
void TurboAssembler::Assert(Condition cc, BailoutReason reason) {
if (emit_debug_code()) Check(cc, reason);
}
@@ -1775,39 +1580,6 @@ void MacroAssembler::LoadAccessor(Register dst, Register holder,
mov(dst, FieldOperand(dst, offset));
}
-void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register object1,
- Register object2,
- Register scratch1,
- Register scratch2,
- Label* failure) {
- // Check that both objects are not smis.
- STATIC_ASSERT(kSmiTag == 0);
- mov(scratch1, object1);
- and_(scratch1, object2);
- JumpIfSmi(scratch1, failure);
-
- // Load instance type for both strings.
- mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset));
- mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset));
- movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
- movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
-
- // Check that both are flat one-byte strings.
- const int kFlatOneByteStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatOneByteStringTag =
- kStringTag | kOneByteStringTag | kSeqStringTag;
- // Interleave bits from both instance types and compare them in one check.
- const int kShift = 8;
- DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << kShift));
- and_(scratch1, kFlatOneByteStringMask);
- and_(scratch2, kFlatOneByteStringMask);
- shl(scratch2, kShift);
- or_(scratch1, scratch2);
- cmp(scratch1, kFlatOneByteStringTag | (kFlatOneByteStringTag << kShift));
- j(not_equal, failure);
-}
-
void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
Label* not_unique_name,
Label::Distance distance) {
@@ -1913,7 +1685,7 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met,
Label::Distance condition_met_distance) {
DCHECK(cc == zero || cc == not_zero);
- if (scratch.is(object)) {
+ if (scratch == object) {
and_(scratch, Immediate(~Page::kPageAlignmentMask));
} else {
mov(scratch, Immediate(~Page::kPageAlignmentMask));
@@ -1927,30 +1699,6 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
j(cc, condition_met, condition_met_distance);
}
-
-void MacroAssembler::CheckPageFlagForMap(
- Handle<Map> map,
- int mask,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance) {
- DCHECK(cc == zero || cc == not_zero);
- Page* page = Page::FromAddress(map->address());
- DCHECK(!serializer_enabled()); // Serializer cannot match page_flags.
- ExternalReference reference(ExternalReference::page_flags(page));
- // The inlined static address check of the page's flags relies
- // on maps never being compacted.
- DCHECK(!isolate()->heap()->mark_compact_collector()->
- IsOnEvacuationCandidate(*map));
- if (mask < (1 << kBitsPerByte)) {
- test_b(Operand::StaticVariable(reference), Immediate(mask));
- } else {
- test(Operand::StaticVariable(reference), Immediate(mask));
- }
- j(cc, condition_met, condition_met_distance);
-}
-
-
void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
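CheckPageFlag above leans on V8 pages being power-of-two aligned: masking any in-page address with ~Page::kPageAlignmentMask lands on the page (MemoryChunk) header, whose flags word is then tested. A hedged sketch of that address arithmetic; kFlagsOffset here is a stand-in, not V8's actual header layout:

  #include <cstddef>
  #include <cstdint>

  bool PageFlagSetSketch(uintptr_t address_in_page, uintptr_t flag_mask,
                         uintptr_t page_alignment_mask, size_t kFlagsOffset) {
    uintptr_t chunk = address_in_page & ~page_alignment_mask;  // page header
    uintptr_t flags = *reinterpret_cast<uintptr_t*>(chunk + kFlagsOffset);
    return (flags & flag_mask) != 0;
  }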
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 639bd5d3a0..745055ecda 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -8,25 +8,26 @@
#include "src/assembler.h"
#include "src/bailout-reason.h"
#include "src/globals.h"
+#include "src/ia32/assembler-ia32.h"
namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
-const Register kReturnRegister0 = {Register::kCode_eax};
-const Register kReturnRegister1 = {Register::kCode_edx};
-const Register kReturnRegister2 = {Register::kCode_edi};
-const Register kJSFunctionRegister = {Register::kCode_edi};
-const Register kContextRegister = {Register::kCode_esi};
-const Register kAllocateSizeRegister = {Register::kCode_edx};
-const Register kInterpreterAccumulatorRegister = {Register::kCode_eax};
-const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_ecx};
-const Register kInterpreterBytecodeArrayRegister = {Register::kCode_edi};
-const Register kInterpreterDispatchTableRegister = {Register::kCode_esi};
-const Register kJavaScriptCallArgCountRegister = {Register::kCode_eax};
-const Register kJavaScriptCallNewTargetRegister = {Register::kCode_edx};
-const Register kRuntimeCallFunctionRegister = {Register::kCode_ebx};
-const Register kRuntimeCallArgCountRegister = {Register::kCode_eax};
+constexpr Register kReturnRegister0 = eax;
+constexpr Register kReturnRegister1 = edx;
+constexpr Register kReturnRegister2 = edi;
+constexpr Register kJSFunctionRegister = edi;
+constexpr Register kContextRegister = esi;
+constexpr Register kAllocateSizeRegister = edx;
+constexpr Register kInterpreterAccumulatorRegister = eax;
+constexpr Register kInterpreterBytecodeOffsetRegister = ecx;
+constexpr Register kInterpreterBytecodeArrayRegister = edi;
+constexpr Register kInterpreterDispatchTableRegister = esi;
+constexpr Register kJavaScriptCallArgCountRegister = eax;
+constexpr Register kJavaScriptCallNewTargetRegister = edx;
+constexpr Register kRuntimeCallFunctionRegister = ebx;
+constexpr Register kRuntimeCallArgCountRegister = eax;
// Convenience for platform-independent signatures. We do not normally
// distinguish memory operands from other operands on ia32.
@@ -34,10 +35,6 @@ typedef Operand MemOperand;
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-enum PointersToHereCheck {
- kPointersToHereMaybeInteresting,
- kPointersToHereAreAlwaysInteresting
-};
enum RegisterValueType { REGISTER_VALUE_IS_SMI, REGISTER_VALUE_IS_INT32 };
@@ -53,13 +50,7 @@ bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
class TurboAssembler : public Assembler {
public:
TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
- CodeObjectRequired create_code_object)
- : Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
- if (create_code_object == CodeObjectRequired::kYes) {
- code_object_ =
- Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
- }
- }
+ CodeObjectRequired create_code_object);
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() const { return has_frame_; }
@@ -296,15 +287,35 @@ class TurboAssembler : public Assembler {
void Push(Handle<HeapObject> handle) { push(Immediate(handle)); }
void Push(Smi* smi) { Push(Immediate(smi)); }
- // These functions do not arrange the registers in any particular order so
- // they are not useful for calls that can cause a GC. The caller can
- // exclude up to 3 registers that do not need to be saved and restored.
- void PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
- Register exclusion2 = no_reg,
- Register exclusion3 = no_reg);
- void PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ void SaveRegisters(RegList registers);
+ void RestoreRegisters(RegList registers);
+
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode);
+
+  // Calculate how much stack space (in bytes) is required to store caller
+  // registers, excluding those specified in the arguments.
+ int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg) const;
+
+  // PushCallerSaved and PopCallerSaved do not arrange the registers in any
+  // particular order, so they are not useful for calls that can cause a GC.
+  // The caller can exclude up to 3 registers that do not need to be saved and
+  // restored.
+
+  // Push caller-saved registers on the stack and return the number of bytes
+  // by which the stack pointer is adjusted.
+ int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
+  // Restore caller-saved registers from the stack and return the number of
+  // bytes by which the stack pointer is adjusted.
+ int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
private:
bool has_frame_ = false;
@@ -367,19 +378,12 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// GC Support
- enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
-
// Record in the remembered set the fact that we have a pointer to new space
// at the address pointed to by the addr register. Only works if addr is not
// in new space.
void RememberedSetHelper(Register object, // Used for debug code.
Register addr, Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetFinalAction and_then);
-
- void CheckPageFlagForMap(
- Handle<Map> map, int mask, Condition cc, Label* condition_met,
- Label::Distance condition_met_distance = Label::kFar);
+ SaveFPRegsMode save_fp);
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be the object itself, but it will be clobbered.
@@ -418,23 +422,7 @@ class MacroAssembler : public TurboAssembler {
Register object, int offset, Register value, Register scratch,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting);
-
- // As above, but the offset has the tag presubtracted. For use with
- // Operand(reg, off).
- void RecordWriteContextSlot(
- Register context, int offset, Register value, Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting) {
- RecordWriteField(context, offset + kHeapObjectTag, value, scratch, save_fp,
- remembered_set_action, smi_check,
- pointers_to_here_check_for_value);
- }
+ SmiCheck smi_check = INLINE_SMI_CHECK);
// Notify the garbage collector that we wrote a pointer into a fixed array.
// |array| is the array being stored into, |value| is the
@@ -445,9 +433,7 @@ class MacroAssembler : public TurboAssembler {
void RecordWriteArray(
Register array, Register value, Register index, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting);
+ SmiCheck smi_check = INLINE_SMI_CHECK);
// For page containing |object| mark region covering |address|
// dirty. |object| is the object being stored into, |value| is the
@@ -457,15 +443,7 @@ class MacroAssembler : public TurboAssembler {
void RecordWrite(
Register object, Register address, Register value, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting);
-
- // For page containing |object| mark the region covering the object's map
- // dirty. |object| is the object being stored into, |map| is the Map object
- // that was stored.
- void RecordWriteForMap(Register object, Handle<Map> map, Register scratch1,
- Register scratch2, SaveFPRegsMode save_fp);
+ SmiCheck smi_check = INLINE_SMI_CHECK);
// Frame restart support
void MaybeDropFrames();
@@ -493,23 +471,10 @@ class MacroAssembler : public TurboAssembler {
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
- // Load the initial map from the global function. The registers
- // function and map can be the same.
- void LoadGlobalFunctionInitialMap(Register function, Register map);
-
// Push and pop the registers that can hold pointers.
void PushSafepointRegisters() { pushad(); }
void PopSafepointRegisters() { popad(); }
- void CmpObject(Register reg, Handle<Object> object) {
- AllowDeferredHandleDereference heap_object_check;
- if (object->IsHeapObject()) {
- cmp(reg, Handle<HeapObject>::cast(object));
- } else {
- cmp(reg, Immediate(Smi::cast(*object)));
- }
- }
-
void GetWeakValue(Register value, Handle<WeakCell> cell);
// Load the value of the weak cell in the value register. Branch to the given
@@ -550,16 +515,6 @@ class MacroAssembler : public TurboAssembler {
// Compare instance type for map.
void CmpInstanceType(Register map, InstanceType type);
- // Compare an object's map with the specified map.
- void CompareMap(Register obj, Handle<Map> map);
-
- // Check if the map of an object is equal to a specified map and branch to
- // label if not. Skip the smi check if not required (object is known to be a
- // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
- // against maps that are ElementsKind transition maps of the specified map.
- void CheckMap(Register obj, Handle<Map> map, Label* fail,
- SmiCheckType smi_check_type);
-
void DoubleToI(Register result_reg, XMMRegister input_reg,
XMMRegister scratch, MinusZeroMode minus_zero_mode,
Label* lost_precision, Label* is_nan, Label* minus_zero,
@@ -641,27 +596,6 @@ class MacroAssembler : public TurboAssembler {
void PopStackHandler();
// ---------------------------------------------------------------------------
- // Allocation support
-
- // Allocate an object in new space or old space. If the given space
- // is exhausted control continues at the gc_required label. The allocated
- // object is returned in result and end of the new object is returned in
- // result_end. The register scratch can be passed as no_reg in which case
- // an additional object reference will be added to the reloc info. The
- // returned pointers in result and result_end have not yet been tagged as
- // heap objects. If result_contains_top_on_entry is true the content of
- // result is known to be the allocation top on entry (could be result_end
- // from a previous call). If result_contains_top_on_entry is true scratch
- // should be no_reg as it is never used.
- void Allocate(int object_size, Register result, Register result_end,
- Register scratch, Label* gc_required, AllocationFlags flags);
-
- // Allocate and initialize a JSValue wrapper with the specified {constructor}
- // and {value}.
- void AllocateJSValue(Register result, Register constructor, Register value,
- Register scratch, Label* gc_required);
-
- // ---------------------------------------------------------------------------
// Support functions.
// Machine code version of Map::GetConstructor().
@@ -735,21 +669,12 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// StatsCounter support
- void SetCounter(StatsCounter* counter, int value);
void IncrementCounter(StatsCounter* counter, int value);
void DecrementCounter(StatsCounter* counter, int value);
- void IncrementCounter(Condition cc, StatsCounter* counter, int value);
- void DecrementCounter(Condition cc, StatsCounter* counter, int value);
// ---------------------------------------------------------------------------
// String utilities.
- // Checks if both objects are sequential one-byte strings, and jumps to label
- // if either is not.
- void JumpIfNotBothSequentialOneByteStrings(
- Register object1, Register object2, Register scratch1, Register scratch2,
- Label* on_not_flat_one_byte_strings);
-
// Checks if the given register or operand is a unique name
void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name,
Label::Distance distance = Label::kFar) {
@@ -778,13 +703,6 @@ class MacroAssembler : public TurboAssembler {
void LeaveExitFrameEpilogue(bool restore_context);
- // Allocation support helpers.
- void LoadAllocationTopHelper(Register result, Register scratch,
- AllocationFlags flags);
-
- void UpdateAllocationTopHelper(Register result_end, Register scratch,
- AllocationFlags flags);
-
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object, Register scratch, Condition cc,
Label* condition_met,
diff --git a/deps/v8/src/ic/access-compiler-data.h b/deps/v8/src/ic/access-compiler-data.h
index dffcac7d05..28bdfd378b 100644
--- a/deps/v8/src/ic/access-compiler-data.h
+++ b/deps/v8/src/ic/access-compiler-data.h
@@ -20,11 +20,13 @@ class AccessCompilerData {
bool IsInitialized() const { return load_calling_convention_ != nullptr; }
void Initialize(int load_register_count, const Register* load_registers,
int store_register_count, const Register* store_registers) {
- load_calling_convention_.reset(NewArray<Register>(load_register_count));
+ load_calling_convention_.reset(
+ NewArray<Register>(load_register_count, no_reg));
for (int i = 0; i < load_register_count; ++i) {
load_calling_convention_[i] = load_registers[i];
}
- store_calling_convention_.reset(NewArray<Register>(store_register_count));
+ store_calling_convention_.reset(
+ NewArray<Register>(store_register_count, no_reg));
for (int i = 0; i < store_register_count; ++i) {
store_calling_convention_[i] = store_registers[i];
}
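The two-argument NewArray calls above pass an explicit fill element. A plausible, hedged reading: with Register turned into a constexpr value type in this patch (see the macro-assembler-ia32.h hunk earlier), it may no longer be default-constructible, so the allocator has to copy-construct every slot. A minimal sketch of such a helper; V8's real NewArray may differ:

  #include <cstddef>
  #include <new>

  // Hypothetical fill-constructing array helper, illustration only.
  template <typename T>
  T* NewArraySketch(std::size_t n, const T& fill) {
    T* p = static_cast<T*>(::operator new(n * sizeof(T)));
    for (std::size_t i = 0; i < n; ++i) new (p + i) T(fill);  // copy each slot
    return p;
  }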
diff --git a/deps/v8/src/ic/access-compiler.cc b/deps/v8/src/ic/access-compiler.cc
index d06028030c..f338619d5e 100644
--- a/deps/v8/src/ic/access-compiler.cc
+++ b/deps/v8/src/ic/access-compiler.cc
@@ -16,33 +16,41 @@ void PropertyAccessCompiler::TailCallBuiltin(MacroAssembler* masm,
}
Register* PropertyAccessCompiler::GetCallingConvention(Isolate* isolate,
- Code::Kind kind) {
+ Type type) {
AccessCompilerData* data = isolate->access_compiler_data();
if (!data->IsInitialized()) {
InitializePlatformSpecific(data);
}
- if (kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC) {
- return data->load_calling_convention();
+ switch (type) {
+ case LOAD:
+ return data->load_calling_convention();
+ case STORE:
+ return data->store_calling_convention();
}
- DCHECK(kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC);
+ UNREACHABLE();
return data->store_calling_convention();
}
Register PropertyAccessCompiler::slot() const {
- if (kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC) {
- return LoadDescriptor::SlotRegister();
+ switch (type_) {
+ case LOAD:
+ return LoadDescriptor::SlotRegister();
+ case STORE:
+ return StoreWithVectorDescriptor::SlotRegister();
}
- DCHECK(kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC);
+ UNREACHABLE();
return StoreWithVectorDescriptor::SlotRegister();
}
-
Register PropertyAccessCompiler::vector() const {
- if (kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC) {
- return LoadWithVectorDescriptor::VectorRegister();
+ switch (type_) {
+ case LOAD:
+ return LoadWithVectorDescriptor::VectorRegister();
+ case STORE:
+ return StoreWithVectorDescriptor::VectorRegister();
}
- DCHECK(kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC);
+ UNREACHABLE();
return StoreWithVectorDescriptor::VectorRegister();
}
} // namespace internal
diff --git a/deps/v8/src/ic/access-compiler.h b/deps/v8/src/ic/access-compiler.h
index b91aa43aea..d6ddd54a7f 100644
--- a/deps/v8/src/ic/access-compiler.h
+++ b/deps/v8/src/ic/access-compiler.h
@@ -15,35 +15,22 @@ namespace internal {
class PropertyAccessCompiler BASE_EMBEDDED {
public:
- static Builtins::Name MissBuiltin(Code::Kind kind) {
- switch (kind) {
- case Code::LOAD_IC:
- return Builtins::kLoadIC_Miss;
- case Code::STORE_IC:
- return Builtins::kStoreIC_Miss;
- case Code::KEYED_LOAD_IC:
- return Builtins::kKeyedLoadIC_Miss;
- case Code::KEYED_STORE_IC:
- return Builtins::kKeyedStoreIC_Miss;
- default:
- UNREACHABLE();
- }
- return Builtins::kLoadIC_Miss;
- }
+ enum Type { LOAD, STORE };
static void TailCallBuiltin(MacroAssembler* masm, Builtins::Name name);
protected:
- PropertyAccessCompiler(Isolate* isolate, Code::Kind kind)
- : registers_(GetCallingConvention(isolate, kind)),
- kind_(kind),
+ PropertyAccessCompiler(Isolate* isolate, Type type)
+ : registers_(GetCallingConvention(isolate, type)),
+ type_(type),
isolate_(isolate),
masm_(isolate, NULL, 256, CodeObjectRequired::kYes) {
// TODO(yangguo): remove this once we can serialize IC stubs.
masm_.enable_serializer();
}
- Code::Kind kind() const { return kind_; }
+ Type type() const { return type_; }
+
MacroAssembler* masm() { return &masm_; }
Isolate* isolate() const { return isolate_; }
Factory* factory() const { return isolate()->factory(); }
@@ -60,10 +47,10 @@ class PropertyAccessCompiler BASE_EMBEDDED {
static void GenerateTailCall(MacroAssembler* masm, Handle<Code> code);
private:
- static Register* GetCallingConvention(Isolate* isolate, Code::Kind kind);
+ static Register* GetCallingConvention(Isolate* isolate, Type type);
static void InitializePlatformSpecific(AccessCompilerData* data);
- Code::Kind kind_;
+ Type type_;
Isolate* isolate_;
MacroAssembler masm_;
// Ensure that MacroAssembler has a reasonable size.
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index 1dee130336..2472febd03 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -246,12 +246,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
{
Comment("convert hole");
GotoIfNot(IsSetWord<LoadHandler::ConvertHoleBits>(handler_word), miss);
- Node* protector_cell = LoadRoot(Heap::kArrayProtectorRootIndex);
- DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
- GotoIfNot(
- WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
- SmiConstant(Isolate::kProtectorValid)),
- miss);
+ GotoIf(IsArrayProtectorCellInvalid(), miss);
exit_point->Return(UndefinedConstant());
}
@@ -261,8 +256,8 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
Label constant(this), field(this), normal(this, Label::kDeferred),
interceptor(this, Label::kDeferred), nonexistent(this),
- accessor(this, Label::kDeferred), proxy(this, Label::kDeferred),
- global(this, Label::kDeferred), module_export(this, Label::kDeferred);
+ accessor(this, Label::kDeferred), global(this, Label::kDeferred),
+ module_export(this, Label::kDeferred), proxy(this, Label::kDeferred);
GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kField)), &field);
GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kConstant)),
@@ -370,9 +365,35 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
BIND(&proxy);
{
- exit_point->ReturnCallStub(
- Builtins::CallableFor(isolate(), Builtins::kProxyGetProperty),
- p->context, holder, p->name, p->receiver);
+ VARIABLE(var_index, MachineType::PointerRepresentation());
+ VARIABLE(var_unique, MachineRepresentation::kTagged);
+
+ Label if_index(this), if_unique_name(this),
+ to_name_failed(this, Label::kDeferred);
+
+ if (support_elements == kSupportElements) {
+ TryToName(p->name, &if_index, &var_index, &if_unique_name, &var_unique,
+ &to_name_failed);
+
+ BIND(&if_unique_name);
+ exit_point->ReturnCallStub(
+ Builtins::CallableFor(isolate(), Builtins::kProxyGetProperty),
+ p->context, holder, var_unique.value(), p->receiver);
+
+ BIND(&if_index);
+      // TODO(mslekova): introduce a TryToName variant that doesn't try to
+      // compute the intptr index value.
+ Goto(&to_name_failed);
+
+ BIND(&to_name_failed);
+ exit_point->ReturnCallRuntime(Runtime::kGetPropertyWithReceiver,
+ p->context, holder, p->name, p->receiver);
+
+ } else {
+ exit_point->ReturnCallStub(
+ Builtins::CallableFor(isolate(), Builtins::kProxyGetProperty),
+ p->context, holder, p->name, p->receiver);
+ }
}
BIND(&global);
@@ -516,43 +537,51 @@ void AccessorAssembler::HandleLoadICProtoHandlerCase(
}
}
+void AccessorAssembler::EmitAccessCheck(Node* expected_native_context,
+ Node* context, Node* receiver,
+ Label* can_access, Label* miss) {
+ CSA_ASSERT(this, IsNativeContext(expected_native_context));
+
+ Node* native_context = LoadNativeContext(context);
+ GotoIf(WordEqual(expected_native_context, native_context), can_access);
+ // If the receiver is not a JSGlobalProxy then we miss.
+ GotoIfNot(IsJSGlobalProxy(receiver), miss);
+ // For JSGlobalProxy receiver try to compare security tokens of current
+ // and expected native contexts.
+ Node* expected_token = LoadContextElement(expected_native_context,
+ Context::SECURITY_TOKEN_INDEX);
+ Node* current_token =
+ LoadContextElement(native_context, Context::SECURITY_TOKEN_INDEX);
+ Branch(WordEqual(expected_token, current_token), can_access, miss);
+}
+
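EmitAccessCheck above factors out the cross-context check reused just below in EmitLoadICProtoArrayCheck. Its logic, summarized as a hedged plain-C++ sketch with stand-in types: the same native context grants access; otherwise only a JSGlobalProxy receiver may proceed, and only when both native contexts share a security token.

  struct NativeCtxSketch { const void* security_token; };  // stand-in type

  bool CanAccessSketch(const NativeCtxSketch* expected,
                       const NativeCtxSketch* current,
                       bool receiver_is_global_proxy) {
    if (expected == current) return true;         // same native context
    if (!receiver_is_global_proxy) return false;  // miss
    return expected->security_token == current->security_token;
  }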
Node* AccessorAssembler::EmitLoadICProtoArrayCheck(const LoadICParameters* p,
Node* handler,
Node* handler_length,
Node* handler_flags,
Label* miss) {
- VARIABLE(start_index, MachineType::PointerRepresentation());
- start_index.Bind(IntPtrConstant(LoadHandler::kFirstPrototypeIndex));
+ VARIABLE(var_start_index, MachineType::PointerRepresentation(),
+ IntPtrConstant(LoadHandler::kFirstPrototypeIndex));
Label can_access(this);
GotoIfNot(IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_flags),
&can_access);
{
// Skip this entry of a handler.
- start_index.Bind(IntPtrConstant(LoadHandler::kFirstPrototypeIndex + 1));
+ var_start_index.Bind(IntPtrConstant(LoadHandler::kFirstPrototypeIndex + 1));
int offset =
FixedArray::OffsetOfElementAt(LoadHandler::kFirstPrototypeIndex);
Node* expected_native_context =
LoadWeakCellValue(LoadObjectField(handler, offset), miss);
- CSA_ASSERT(this, IsNativeContext(expected_native_context));
-
- Node* native_context = LoadNativeContext(p->context);
- GotoIf(WordEqual(expected_native_context, native_context), &can_access);
- // If the receiver is not a JSGlobalProxy then we miss.
- GotoIfNot(IsJSGlobalProxy(p->receiver), miss);
- // For JSGlobalProxy receiver try to compare security tokens of current
- // and expected native contexts.
- Node* expected_token = LoadContextElement(expected_native_context,
- Context::SECURITY_TOKEN_INDEX);
- Node* current_token =
- LoadContextElement(native_context, Context::SECURITY_TOKEN_INDEX);
- Branch(WordEqual(expected_token, current_token), &can_access, miss);
+
+ EmitAccessCheck(expected_native_context, p->context, p->receiver,
+ &can_access, miss);
}
BIND(&can_access);
- BuildFastLoop(start_index.value(), handler_length,
- [this, p, handler, miss](Node* current) {
+ BuildFastLoop(var_start_index.value(), handler_length,
+ [=](Node* current) {
Node* prototype_cell =
LoadFixedArrayElement(handler, current);
CheckPrototype(prototype_cell, p->name, miss);
@@ -626,11 +655,21 @@ void AccessorAssembler::HandleStoreICHandlerCase(
Node* holder = p->receiver;
Node* handler_word = SmiUntag(handler);
- Label if_fast_smi(this), slow(this);
- GotoIfNot(
- WordEqual(handler_word, IntPtrConstant(StoreHandler::kStoreNormal)),
- &if_fast_smi);
+ Label if_fast_smi(this), if_proxy(this);
+
+ STATIC_ASSERT(StoreHandler::kStoreGlobalProxy + 1 ==
+ StoreHandler::kStoreNormal);
+ STATIC_ASSERT(StoreHandler::kStoreNormal + 1 == StoreHandler::kProxy);
+ STATIC_ASSERT(StoreHandler::kProxy + 1 == StoreHandler::kKindsNumber);
+ Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
+ GotoIf(IntPtrLessThan(handler_kind,
+ IntPtrConstant(StoreHandler::kStoreGlobalProxy)),
+ &if_fast_smi);
+ GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kProxy)),
+ &if_proxy);
+ CSA_ASSERT(this, WordEqual(handler_kind,
+ IntPtrConstant(StoreHandler::kStoreNormal)));
Node* properties = LoadSlowProperties(holder);
VARIABLE(var_name_index, MachineType::PointerRepresentation());
@@ -655,6 +694,9 @@ void AccessorAssembler::HandleStoreICHandlerCase(
BIND(&if_fast_smi);
// Handle non-transitioning field stores.
HandleStoreICSmiHandlerCase(handler_word, holder, p->value, nullptr, miss);
+
+ BIND(&if_proxy);
+ HandleStoreToProxy(p, holder, miss, support_elements);
}
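
The dispatch above depends on the three STATIC_ASSERTs: because kStoreGlobalProxy, kStoreNormal and kProxy are pinned to consecutive values with kProxy last, a single IntPtrLessThan classifies every field-store kind at once, leaving only two named kinds to test. A self-contained illustration of the idiom (the field kinds here are hypothetical placeholders):

    #include <cassert>

    // Hypothetical field-store kinds below the three named ones.
    enum Kind { kFieldA, kFieldB, kStoreGlobalProxy, kStoreNormal, kProxy, kKindsNumber };
    static_assert(kStoreGlobalProxy + 1 == kStoreNormal, "kinds must be adjacent");
    static_assert(kStoreNormal + 1 == kProxy, "kinds must be adjacent");
    static_assert(kProxy + 1 == kKindsNumber, "kProxy must be the last kind");

    const char* Classify(Kind kind) {
      // One range test covers every field-store kind at once.
      if (kind < kStoreGlobalProxy) return "fast smi field store";
      if (kind == kProxy) return "proxy store";
      assert(kind == kStoreNormal);  // mirrors the CSA_ASSERT above
      return "dictionary-mode store";
    }
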
BIND(&if_nonsmi_handler);
@@ -669,11 +711,11 @@ void AccessorAssembler::HandleStoreICHandlerCase(
if (support_elements == kSupportElements) {
BIND(&if_element_handler);
- { HandleStoreICElementHandlerCase(p, handler, miss); }
+ HandleStoreICElementHandlerCase(p, handler, miss);
}
BIND(&if_proto_handler);
- { HandleStoreICProtoHandler(p, handler, miss, support_elements); }
+ HandleStoreICProtoHandler(p, handler, miss, support_elements);
// |handler| is a heap object. Must be code, call it.
BIND(&call_handler);
@@ -685,62 +727,11 @@ void AccessorAssembler::HandleStoreICHandlerCase(
BIND(&store_global);
{
+ // Load the property cell, or miss if the {handler} weak cell is cleared.
Node* cell = LoadWeakCellValue(handler, miss);
- CSA_ASSERT(this, IsPropertyCell(cell));
- // Load the payload of the global parameter cell. A hole indicates that
- // the cell has been invalidated and that the store must be handled by the
- // runtime.
- Node* cell_contents = LoadObjectField(cell, PropertyCell::kValueOffset);
- Node* details =
- LoadAndUntagToWord32ObjectField(cell, PropertyCell::kDetailsOffset);
- Node* type = DecodeWord32<PropertyDetails::PropertyCellTypeField>(details);
-
- Label constant(this), store(this), not_smi(this);
-
- GotoIf(
- Word32Equal(
- type, Int32Constant(static_cast<int>(PropertyCellType::kConstant))),
- &constant);
-
- GotoIf(IsTheHole(cell_contents), miss);
-
- GotoIf(
- Word32Equal(
- type, Int32Constant(static_cast<int>(PropertyCellType::kMutable))),
- &store);
- CSA_ASSERT(this,
- Word32Or(Word32Equal(type,
- Int32Constant(static_cast<int>(
- PropertyCellType::kConstantType))),
- Word32Equal(type,
- Int32Constant(static_cast<int>(
- PropertyCellType::kUndefined)))));
-
- GotoIfNot(TaggedIsSmi(cell_contents), &not_smi);
- GotoIfNot(TaggedIsSmi(p->value), miss);
- Goto(&store);
-
- BIND(&not_smi);
- {
- GotoIf(TaggedIsSmi(p->value), miss);
- Node* expected_map = LoadMap(cell_contents);
- Node* map = LoadMap(p->value);
- GotoIfNot(WordEqual(expected_map, map), miss);
- Goto(&store);
- }
-
- BIND(&store);
- {
- StoreObjectField(cell, PropertyCell::kValueOffset, p->value);
- Return(p->value);
- }
-
- BIND(&constant);
- {
- GotoIfNot(WordEqual(cell_contents, p->value), miss);
- Return(p->value);
- }
+ ExitPoint direct_exit(this);
+ StoreGlobalIC_PropertyCellCase(cell, p->value, &direct_exit, miss);
}
}
@@ -763,9 +754,11 @@ void AccessorAssembler::HandleStoreICElementHandlerCase(
void AccessorAssembler::HandleStoreICProtoHandler(
const StoreICParameters* p, Node* handler, Label* miss,
ElementSupport support_elements) {
+ Comment("HandleStoreICProtoHandler");
+
// IC dispatchers rely on these assumptions to be held.
STATIC_ASSERT(FixedArray::kLengthOffset ==
- StoreHandler::kTransitionCellOffset);
+ StoreHandler::kTransitionOrHolderCellOffset);
DCHECK_EQ(FixedArray::OffsetOfElementAt(StoreHandler::kSmiHandlerIndex),
StoreHandler::kSmiHandlerOffset);
DCHECK_EQ(FixedArray::OffsetOfElementAt(StoreHandler::kValidityCellIndex),
@@ -786,44 +779,77 @@ void AccessorAssembler::HandleStoreICProtoHandler(
Node* smi_or_code = LoadObjectField(handler, StoreHandler::kSmiHandlerOffset);
Node* maybe_transition_cell =
- LoadObjectField(handler, StoreHandler::kTransitionCellOffset);
+ LoadObjectField(handler, StoreHandler::kTransitionOrHolderCellOffset);
Label array_handler(this), tuple_handler(this);
Branch(TaggedIsSmi(maybe_transition_cell), &array_handler, &tuple_handler);
- VARIABLE(var_transition, MachineRepresentation::kTagged);
- Label if_transition(this), if_transition_to_constant(this),
- if_store_normal(this);
+ VARIABLE(var_transition_map_or_holder, MachineRepresentation::kTagged);
+ Label do_store(this), if_transition_map(this), if_holder_object(this);
BIND(&tuple_handler);
{
Node* transition = LoadWeakCellValue(maybe_transition_cell, miss);
- var_transition.Bind(transition);
- Goto(&if_transition);
+ var_transition_map_or_holder.Bind(transition);
+ Goto(&do_store);
}
BIND(&array_handler);
{
+ VARIABLE(var_start_index, MachineType::PointerRepresentation(),
+ IntPtrConstant(StoreHandler::kFirstPrototypeIndex));
+
+ Comment("array_handler");
+ Label can_access(this);
+ // Only Tuple3 handlers are allowed to have code handlers.
+ CSA_ASSERT(this, TaggedIsSmi(smi_or_code));
+ GotoIfNot(
+ IsSetSmi(smi_or_code, StoreHandler::DoAccessCheckOnReceiverBits::kMask),
+ &can_access);
+
+ {
+ // Skip the first entry of the handler; it holds the expected native context.
+ var_start_index.Bind(
+ IntPtrConstant(StoreHandler::kFirstPrototypeIndex + 1));
+
+ int offset =
+ FixedArray::OffsetOfElementAt(StoreHandler::kFirstPrototypeIndex);
+ Node* expected_native_context =
+ LoadWeakCellValue(LoadObjectField(handler, offset), miss);
+
+ EmitAccessCheck(expected_native_context, p->context, p->receiver,
+ &can_access, miss);
+ }
+ BIND(&can_access);
+
Node* length = SmiUntag(maybe_transition_cell);
- BuildFastLoop(IntPtrConstant(StoreHandler::kFirstPrototypeIndex), length,
- [this, p, handler, miss](Node* current) {
+ BuildFastLoop(var_start_index.value(), length,
+ [=](Node* current) {
Node* prototype_cell =
LoadFixedArrayElement(handler, current);
CheckPrototype(prototype_cell, p->name, miss);
},
1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
- Node* maybe_transition_cell =
- LoadFixedArrayElement(handler, StoreHandler::kTransitionCellIndex);
+ Node* maybe_transition_cell = LoadFixedArrayElement(
+ handler, StoreHandler::kTransitionMapOrHolderCellIndex);
Node* transition = LoadWeakCellValue(maybe_transition_cell, miss);
- var_transition.Bind(transition);
- Goto(&if_transition);
+ var_transition_map_or_holder.Bind(transition);
+ Goto(&do_store);
+ }
+
+ BIND(&do_store);
+ {
+ Node* transition = var_transition_map_or_holder.value();
+ Branch(IsMap(transition), &if_transition_map, &if_holder_object);
}
- BIND(&if_transition);
+ BIND(&if_transition_map);
{
+ Label if_transition_to_constant(this), if_store_normal(this);
+
Node* holder = p->receiver;
- Node* transition = var_transition.value();
+ Node* transition_map = var_transition_map_or_holder.value();
- GotoIf(IsDeprecatedMap(transition), miss);
+ GotoIf(IsDeprecatedMap(transition_map), miss);
if (support_elements == kSupportElements) {
Label if_smi_handler(this);
@@ -834,7 +860,7 @@ void AccessorAssembler::HandleStoreICProtoHandler(
StoreTransitionDescriptor descriptor(isolate());
TailCallStub(descriptor, code_handler, p->context, p->receiver, p->name,
- transition, p->value, p->slot, p->vector);
+ transition_map, p->value, p->slot, p->vector);
BIND(&if_smi_handler);
}
@@ -849,9 +875,12 @@ void AccessorAssembler::HandleStoreICProtoHandler(
GotoIf(WordEqual(handler_kind,
IntPtrConstant(StoreHandler::kTransitionToConstant)),
&if_transition_to_constant);
+ CSA_ASSERT(this,
+ WordEqual(handler_kind,
+ IntPtrConstant(StoreHandler::kTransitionToField)));
// Handle transitioning field stores.
- HandleStoreICSmiHandlerCase(handler_word, holder, p->value, transition,
+ HandleStoreICSmiHandlerCase(handler_word, holder, p->value, transition_map,
miss);
BIND(&if_transition_to_constant);
@@ -864,16 +893,15 @@ void AccessorAssembler::HandleStoreICProtoHandler(
IntPtrAdd(scaled_descriptor,
IntPtrConstant(DescriptorArray::kFirstIndex +
DescriptorArray::kEntryValueIndex));
- Node* descriptors = LoadMapDescriptors(transition);
+ Node* descriptors = LoadMapDescriptors(transition_map);
CSA_ASSERT(
- this,
- UintPtrLessThan(descriptor,
- LoadAndUntagFixedArrayBaseLength(descriptors)));
+ this, UintPtrLessThan(descriptor,
+ LoadAndUntagFixedArrayBaseLength(descriptors)));
Node* constant = LoadFixedArrayElement(descriptors, value_index);
GotoIf(WordNotEqual(p->value, constant), miss);
- StoreMap(p->receiver, transition);
+ StoreMap(p->receiver, transition_map);
Return(p->value);
}
@@ -913,6 +941,70 @@ void AccessorAssembler::HandleStoreICProtoHandler(
}
}
}
+ BIND(&if_holder_object);
+ {
+ Label if_store_global_proxy(this);
+ Node* holder = var_transition_map_or_holder.value();
+
+ Node* smi_handler = smi_or_code;
+ CSA_ASSERT(this, TaggedIsSmi(smi_handler));
+ Node* handler_word = SmiUntag(smi_handler);
+
+ Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
+ GotoIf(WordEqual(handler_kind,
+ IntPtrConstant(StoreHandler::kStoreGlobalProxy)),
+ &if_store_global_proxy);
+ CSA_ASSERT(this,
+ WordEqual(handler_kind, IntPtrConstant(StoreHandler::kProxy)));
+ HandleStoreToProxy(p, holder, miss, support_elements);
+
+ BIND(&if_store_global_proxy);
+ {
+ ExitPoint direct_exit(this);
+ StoreGlobalIC_PropertyCellCase(holder, p->value, &direct_exit, miss);
+ }
+ }
+}
+
+void AccessorAssembler::HandleStoreToProxy(const StoreICParameters* p,
+ Node* proxy, Label* miss,
+ ElementSupport support_elements) {
+ VARIABLE(var_index, MachineType::PointerRepresentation());
+ VARIABLE(var_unique, MachineRepresentation::kTagged);
+ VARIABLE(var_language_mode, MachineRepresentation::kTaggedSigned,
+ SmiConstant(STRICT));
+
+ Label if_index(this), if_unique_name(this), language_mode_determined(this),
+ to_name_failed(this, Label::kDeferred);
+ BranchIfStrictMode(p->vector, p->slot, &language_mode_determined);
+ var_language_mode.Bind(SmiConstant(SLOPPY));
+ Goto(&language_mode_determined);
+ BIND(&language_mode_determined);
+
+ if (support_elements == kSupportElements) {
+ TryToName(p->name, &if_index, &var_index, &if_unique_name, &var_unique,
+ &to_name_failed);
+
+ BIND(&if_unique_name);
+ CallBuiltin(Builtins::kProxySetProperty, p->context, proxy,
+ var_unique.value(), p->value, p->receiver,
+ var_language_mode.value());
+ Return(p->value);
+
+ // The index case is handled earlier by the runtime.
+ BIND(&if_index);
+ // TODO(mslekova): introduce TryToName that doesn't try to compute
+ // the intptr index value
+ Goto(&to_name_failed);
+
+ BIND(&to_name_failed);
+ TailCallRuntime(Runtime::kSetPropertyWithReceiver, p->context, proxy,
+ p->name, p->value, p->receiver, var_language_mode.value());
+ } else {
+ Node* name = ToName(p->context, p->name);
+ TailCallBuiltin(Builtins::kProxySetProperty, p->context, proxy, name,
+ p->value, p->receiver, var_language_mode.value());
+ }
}
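
HandleStoreToProxy starts from strict mode and downgrades to sloppy only when BranchIfStrictMode falls through, then routes unique names to the ProxySetProperty builtin and everything else (including integer indices, per the TODO) to the runtime. A rough control-flow model, with booleans standing in for the CSA branches:

    enum class LanguageMode { kSloppy, kStrict };

    // Rough model only; "key_is_unique_name" stands in for TryToName.
    const char* RouteProxyStore(bool slot_is_strict, bool key_is_unique_name) {
      // Default to strict; downgrade only when the feedback slot kind is sloppy.
      LanguageMode mode =
          slot_is_strict ? LanguageMode::kStrict : LanguageMode::kSloppy;
      (void)mode;  // passed along to the builtin / runtime call
      if (key_is_unique_name) return "Builtins::kProxySetProperty";
      // Integer indices (and keys that fail TryToName) go to the runtime.
      return "Runtime::kSetPropertyWithReceiver";
    }
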
void AccessorAssembler::HandleStoreICSmiHandlerCase(Node* handler_word,
@@ -1091,7 +1183,7 @@ void AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
// TODO(gsathya): Clean up the type conversions by creating smarter
// helpers that do the correct op based on the mode.
VARIABLE(var_properties, MachineRepresentation::kTaggedPointer);
- VARIABLE(var_hash, MachineRepresentation::kWord32);
+ VARIABLE(var_encoded_hash, MachineRepresentation::kWord32);
VARIABLE(var_length, ParameterRepresentation(mode));
Node* properties = LoadObjectField(object, JSObject::kPropertiesOrHashOffset);
@@ -1102,7 +1194,10 @@ void AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
BIND(&if_smi_hash);
{
- var_hash.Bind(SmiToWord32(properties));
+ Node* hash = SmiToWord32(properties);
+ Node* encoded_hash =
+ Word32Shl(hash, Int32Constant(PropertyArray::HashField::kShift));
+ var_encoded_hash.Bind(encoded_hash);
var_length.Bind(IntPtrOrSmiConstant(0, mode));
var_properties.Bind(EmptyFixedArrayConstant());
Goto(&extend_store);
@@ -1112,10 +1207,11 @@ void AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
{
Node* length_and_hash_int32 = LoadAndUntagToWord32ObjectField(
var_properties.value(), PropertyArray::kLengthAndHashOffset);
- var_hash.Bind(Word32And(length_and_hash_int32,
- Int32Constant(PropertyArray::kHashMask)));
- Node* length_intptr = ChangeInt32ToIntPtr(Word32And(
- length_and_hash_int32, Int32Constant(PropertyArray::kLengthMask)));
+ var_encoded_hash.Bind(Word32And(
+ length_and_hash_int32, Int32Constant(PropertyArray::HashField::kMask)));
+ Node* length_intptr = ChangeInt32ToIntPtr(
+ Word32And(length_and_hash_int32,
+ Int32Constant(PropertyArray::LengthField::kMask)));
Node* length = WordToParameter(length_intptr, mode);
var_length.Bind(length);
Goto(&extend_store);
@@ -1161,7 +1257,7 @@ void AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
Node* new_capacity_int32 =
TruncateWordToWord32(ParameterToWord(new_capacity, mode));
Node* new_length_and_hash_int32 =
- Word32Or(var_hash.value(), new_capacity_int32);
+ Word32Or(var_encoded_hash.value(), new_capacity_int32);
StoreObjectField(new_properties, PropertyArray::kLengthAndHashOffset,
SmiFromWord32(new_length_and_hash_int32));
StoreObjectField(object, JSObject::kPropertiesOrHashOffset, new_properties);
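
The var_hash to var_encoded_hash rename reflects a real representation change: the variable now holds the hash already shifted into PropertyArray::HashField's position, so rebuilding the length-and-hash word is a single OR with the new capacity. The packing can be checked with ordinary bit-field helpers (the widths below are illustrative, not V8's actual HashField/LengthField layout):

    #include <cstdint>

    // Illustrative layout: low bits hold the length, high bits the hash.
    constexpr uint32_t kLengthBits = 10;
    constexpr uint32_t kLengthMask = (1u << kLengthBits) - 1;
    constexpr uint32_t kHashShift  = kLengthBits;
    constexpr uint32_t kHashMask   = ~kLengthMask;

    constexpr uint32_t Encode(uint32_t hash, uint32_t length) {
      return (hash << kHashShift) | (length & kLengthMask);
    }
    constexpr uint32_t DecodeLength(uint32_t word) { return word & kLengthMask; }
    constexpr uint32_t DecodeEncodedHash(uint32_t word) { return word & kHashMask; }

    // Growing the store keeps the encoded hash and ORs in the new capacity,
    // mirroring Word32Or(var_encoded_hash.value(), new_capacity_int32) above.
    static_assert(DecodeLength(Encode(0x3, 7)) == 7, "length survives a round trip");
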
@@ -1489,6 +1585,38 @@ void AccessorAssembler::NameDictionaryNegativeLookup(Node* object, Node* name,
BIND(&done);
}
+void AccessorAssembler::BranchIfStrictMode(Node* vector, Node* slot,
+ Label* if_strict) {
+ Node* sfi =
+ LoadObjectField(vector, FeedbackVector::kSharedFunctionInfoOffset);
+ Node* metadata =
+ LoadObjectField(sfi, SharedFunctionInfo::kFeedbackMetadataOffset);
+ Node* slot_int = SmiToWord32(slot);
+
+ // See VectorICComputer::index().
+ const int kItemsPerWord = FeedbackMetadata::VectorICComputer::kItemsPerWord;
+ Node* word_index = Int32Div(slot_int, Int32Constant(kItemsPerWord));
+ Node* word_offset = Int32Mod(slot_int, Int32Constant(kItemsPerWord));
+ Node* data = SmiToWord32(LoadFixedArrayElement(
+ metadata, ChangeInt32ToIntPtr(word_index),
+ FeedbackMetadata::kReservedIndexCount * kPointerSize, INTPTR_PARAMETERS));
+ // See VectorICComputer::decode().
+ const int kBitsPerItem = FeedbackMetadata::kFeedbackSlotKindBits;
+ Node* shift = Int32Mul(word_offset, Int32Constant(kBitsPerItem));
+ const int kMask = FeedbackMetadata::VectorICComputer::kMask;
+ Node* kind = Word32And(Word32Shr(data, shift), Int32Constant(kMask));
+
+ STATIC_ASSERT(FeedbackSlotKind::kStoreGlobalSloppy <=
+ FeedbackSlotKind::kLastSloppyKind);
+ STATIC_ASSERT(FeedbackSlotKind::kStoreKeyedSloppy <=
+ FeedbackSlotKind::kLastSloppyKind);
+ STATIC_ASSERT(FeedbackSlotKind::kStoreNamedSloppy <=
+ FeedbackSlotKind::kLastSloppyKind);
+ GotoIfNot(Int32LessThanOrEqual(kind, Int32Constant(static_cast<int>(
+ FeedbackSlotKind::kLastSloppyKind))),
+ if_strict);
+}
+
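
BranchIfStrictMode recovers the feedback slot kind from FeedbackMetadata's packed words, mirroring VectorICComputer::index() and decode(): divide the slot by the items-per-word to find the word, use the remainder to compute the bit shift, then mask. A standalone rendering of that arithmetic (constants are illustrative):

    #include <cstdint>
    #include <vector>

    // Illustrative packing: 32-bit words, 5 bits per slot kind.
    constexpr int kBitsPerItem = 5;
    constexpr int kItemsPerWord = 32 / kBitsPerItem;  // 6 kinds per word
    constexpr uint32_t kMask = (1u << kBitsPerItem) - 1;

    uint32_t SlotKind(const std::vector<uint32_t>& metadata, int slot) {
      int word_index = slot / kItemsPerWord;   // which packed word
      int word_offset = slot % kItemsPerWord;  // which item inside the word
      int shift = word_offset * kBitsPerItem;
      return (metadata[word_index] >> shift) & kMask;
    }
    // A store IC is sloppy iff SlotKind(...) <= kLastSloppyKind, which is why
    // the assembler needs only one Int32LessThanOrEqual after the STATIC_ASSERTs.
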
void AccessorAssembler::GenericElementLoad(Node* receiver, Node* receiver_map,
Node* instance_type, Node* index,
Label* slow) {
@@ -2220,8 +2348,7 @@ void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
}
}
-void AccessorAssembler::StoreIC(const StoreICParameters* p,
- LanguageMode language_mode) {
+void AccessorAssembler::StoreIC(const StoreICParameters* p) {
VARIABLE(var_handler, MachineRepresentation::kTagged);
Label if_handler(this, &var_handler), try_polymorphic(this, Label::kDeferred),
try_megamorphic(this, Label::kDeferred),
@@ -2266,8 +2393,9 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p,
GotoIfNot(
WordEqual(feedback, LoadRoot(Heap::kuninitialized_symbolRootIndex)),
&miss);
- TailCallStub(CodeFactory::StoreIC_Uninitialized(isolate(), language_mode),
- p->context, p->receiver, p->name, p->value, p->slot,
+ Callable stub =
+ Builtins::CallableFor(isolate(), Builtins::kStoreIC_Uninitialized);
+ TailCallStub(stub, p->context, p->receiver, p->name, p->value, p->slot,
p->vector);
}
BIND(&miss);
@@ -2277,8 +2405,66 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p,
}
}
-void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p,
- LanguageMode language_mode) {
+void AccessorAssembler::StoreGlobalIC_PropertyCellCase(Node* property_cell,
+ Node* value,
+ ExitPoint* exit_point,
+ Label* miss) {
+ Comment("StoreGlobalIC_PropertyCellCase");
+ CSA_ASSERT(this, IsPropertyCell(property_cell));
+
+ // Load the payload of the global parameter cell. A hole indicates that
+ // the cell has been invalidated and that the store must be handled by the
+ // runtime.
+ Node* cell_contents =
+ LoadObjectField(property_cell, PropertyCell::kValueOffset);
+ Node* details = LoadAndUntagToWord32ObjectField(property_cell,
+ PropertyCell::kDetailsOffset);
+ Node* type = DecodeWord32<PropertyDetails::PropertyCellTypeField>(details);
+
+ Label constant(this), store(this), not_smi(this);
+
+ GotoIf(Word32Equal(type, Int32Constant(
+ static_cast<int>(PropertyCellType::kConstant))),
+ &constant);
+
+ GotoIf(IsTheHole(cell_contents), miss);
+
+ GotoIf(Word32Equal(
+ type, Int32Constant(static_cast<int>(PropertyCellType::kMutable))),
+ &store);
+ CSA_ASSERT(this,
+ Word32Or(Word32Equal(type, Int32Constant(static_cast<int>(
+ PropertyCellType::kConstantType))),
+ Word32Equal(type, Int32Constant(static_cast<int>(
+ PropertyCellType::kUndefined)))));
+
+ GotoIfNot(TaggedIsSmi(cell_contents), &not_smi);
+ GotoIfNot(TaggedIsSmi(value), miss);
+ Goto(&store);
+
+ BIND(&not_smi);
+ {
+ GotoIf(TaggedIsSmi(value), miss);
+ Node* expected_map = LoadMap(cell_contents);
+ Node* map = LoadMap(value);
+ GotoIfNot(WordEqual(expected_map, map), miss);
+ Goto(&store);
+ }
+
+ BIND(&store);
+ {
+ StoreObjectField(property_cell, PropertyCell::kValueOffset, value);
+ exit_point->Return(value);
+ }
+
+ BIND(&constant);
+ {
+ GotoIfNot(WordEqual(cell_contents, value), miss);
+ exit_point->Return(value);
+ }
+}
+
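
StoreGlobalIC_PropertyCellCase, now shared by the global-store and proto-handler paths, distinguishes the storable property-cell states: kConstant stores only an identical value, a hole means the cell was invalidated and must miss to the runtime, kMutable always stores, and kConstantType/kUndefined store only a value of the same shape (a Smi stays a Smi, a heap object keeps its map). The same decision, condensed (enum names are stand-ins for PropertyCellType):

    enum class CellType { kConstant, kConstantType, kMutable, kUndefined };
    enum class Outcome { kStore, kStoreIfIdentical, kStoreIfSameShape, kMiss };

    // Summary of the checks above; "hole" means the cell was invalidated
    // and the store must be handled by the runtime.
    Outcome Classify(CellType type, bool contents_is_hole) {
      if (type == CellType::kConstant) return Outcome::kStoreIfIdentical;
      if (contents_is_hole) return Outcome::kMiss;
      if (type == CellType::kMutable) return Outcome::kStore;
      // kConstantType / kUndefined: value must stay a Smi, or keep its map.
      return Outcome::kStoreIfSameShape;
    }
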
+void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
Label miss(this, Label::kDeferred);
{
VARIABLE(var_handler, MachineRepresentation::kTagged);
@@ -2320,7 +2506,7 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p,
WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
&try_polymorphic_name);
TailCallStub(
- CodeFactory::KeyedStoreIC_Megamorphic(isolate(), language_mode),
+ Builtins::CallableFor(isolate(), Builtins::kKeyedStoreIC_Megamorphic),
p->context, p->receiver, p->name, p->value, p->slot, p->vector);
}
@@ -2516,7 +2702,7 @@ void AccessorAssembler::GenerateKeyedLoadIC_Megamorphic() {
KeyedLoadICGeneric(&p);
}
-void AccessorAssembler::GenerateStoreIC(LanguageMode language_mode) {
+void AccessorAssembler::GenerateStoreIC() {
typedef StoreWithVectorDescriptor Descriptor;
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -2527,10 +2713,10 @@ void AccessorAssembler::GenerateStoreIC(LanguageMode language_mode) {
Node* context = Parameter(Descriptor::kContext);
StoreICParameters p(context, receiver, name, value, slot, vector);
- StoreIC(&p, language_mode);
+ StoreIC(&p);
}
-void AccessorAssembler::GenerateStoreICTrampoline(LanguageMode language_mode) {
+void AccessorAssembler::GenerateStoreICTrampoline() {
typedef StoreDescriptor Descriptor;
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -2540,12 +2726,11 @@ void AccessorAssembler::GenerateStoreICTrampoline(LanguageMode language_mode) {
Node* context = Parameter(Descriptor::kContext);
Node* vector = LoadFeedbackVectorForStub();
- Callable callable =
- CodeFactory::StoreICInOptimizedCode(isolate(), language_mode);
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kStoreIC);
TailCallStub(callable, context, receiver, name, value, slot, vector);
}
-void AccessorAssembler::GenerateKeyedStoreIC(LanguageMode language_mode) {
+void AccessorAssembler::GenerateKeyedStoreIC() {
typedef StoreWithVectorDescriptor Descriptor;
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -2556,11 +2741,10 @@ void AccessorAssembler::GenerateKeyedStoreIC(LanguageMode language_mode) {
Node* context = Parameter(Descriptor::kContext);
StoreICParameters p(context, receiver, name, value, slot, vector);
- KeyedStoreIC(&p, language_mode);
+ KeyedStoreIC(&p);
}
-void AccessorAssembler::GenerateKeyedStoreICTrampoline(
- LanguageMode language_mode) {
+void AccessorAssembler::GenerateKeyedStoreICTrampoline() {
typedef StoreDescriptor Descriptor;
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -2570,8 +2754,7 @@ void AccessorAssembler::GenerateKeyedStoreICTrampoline(
Node* context = Parameter(Descriptor::kContext);
Node* vector = LoadFeedbackVectorForStub();
- Callable callable =
- CodeFactory::KeyedStoreICInOptimizedCode(isolate(), language_mode);
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kKeyedStoreIC);
TailCallStub(callable, context, receiver, name, value, slot, vector);
}
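
All the *Trampoline generators follow one pattern: they receive everything except the feedback vector, materialize it with LoadFeedbackVectorForStub(), and tail-call the full IC builtin. A toy version of the shape (names and types are stand-ins):

    struct Vector {};
    static Vector g_vector;

    // Stand-in for the CSA helper of the same name.
    Vector* LoadFeedbackVectorForStub() { return &g_vector; }

    int StoreICImpl(int receiver, int name, int value, int slot, Vector*) {
      return value;  // stand-in for the real IC body
    }

    // A trampoline supplies the one argument its caller didn't pass,
    // then tail-calls the shared IC implementation.
    int StoreICTrampoline(int receiver, int name, int value, int slot) {
      return StoreICImpl(receiver, name, value, slot, LoadFeedbackVectorForStub());
    }
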
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
index c771b2ff5a..4fe1c0bbf9 100644
--- a/deps/v8/src/ic/accessor-assembler.h
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -31,16 +31,16 @@ class AccessorAssembler : public CodeStubAssembler {
void GenerateKeyedLoadIC();
void GenerateKeyedLoadICTrampoline();
void GenerateKeyedLoadIC_Megamorphic();
- void GenerateStoreIC(LanguageMode language_mode);
- void GenerateStoreICTrampoline(LanguageMode language_mode);
+ void GenerateStoreIC();
+ void GenerateStoreICTrampoline();
void GenerateLoadICProtoArray(bool throw_reference_error_if_nonexistent);
void GenerateLoadGlobalIC(TypeofMode typeof_mode);
void GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode);
- void GenerateKeyedStoreIC(LanguageMode language_mode);
- void GenerateKeyedStoreICTrampoline(LanguageMode language_mode);
+ void GenerateKeyedStoreIC();
+ void GenerateKeyedStoreICTrampoline();
void TryProbeStubCache(StubCache* stub_cache, Node* receiver, Node* name,
Label* if_handler, Variable* var_handler,
@@ -96,6 +96,8 @@ class AccessorAssembler : public CodeStubAssembler {
ElementSupport support_elements = kOnlyProperties);
void JumpIfDataProperty(Node* details, Label* writable, Label* readonly);
+ void BranchIfStrictMode(Node* vector, Node* slot, Label* if_strict);
+
private:
// Stub generation entry points.
@@ -112,8 +114,10 @@ class AccessorAssembler : public CodeStubAssembler {
void LoadGlobalIC(const LoadICParameters* p, TypeofMode typeof_mode);
void KeyedLoadIC(const LoadICParameters* p);
void KeyedLoadICGeneric(const LoadICParameters* p);
- void StoreIC(const StoreICParameters* p, LanguageMode language_mode);
- void KeyedStoreIC(const StoreICParameters* p, LanguageMode language_mode);
+ void StoreIC(const StoreICParameters* p);
+ void StoreGlobalIC_PropertyCellCase(Node* property_cell, Node* value,
+ ExitPoint* exit_point, Label* miss);
+ void KeyedStoreIC(const StoreICParameters* p);
// IC dispatcher behavior.
@@ -148,6 +152,9 @@ class AccessorAssembler : public CodeStubAssembler {
Variable* var_double_value, Label* rebox_double,
ExitPoint* exit_point);
+ void EmitAccessCheck(Node* expected_native_context, Node* context,
+ Node* receiver, Label* can_access, Label* miss);
+
Node* EmitLoadICProtoArrayCheck(const LoadICParameters* p, Node* handler,
Node* handler_length, Node* handler_flags,
Label* miss);
@@ -175,6 +182,9 @@ class AccessorAssembler : public CodeStubAssembler {
Representation representation, Node* value,
Node* transition, Label* miss);
+ void HandleStoreToProxy(const StoreICParameters* p, Node* proxy, Label* miss,
+ ElementSupport support_elements);
+
// KeyedLoadIC_Generic implementation.
void GenericElementLoad(Node* receiver, Node* receiver_map,
diff --git a/deps/v8/src/ic/arm/handler-compiler-arm.cc b/deps/v8/src/ic/arm/handler-compiler-arm.cc
index c34d92472f..ac5d3ecc22 100644
--- a/deps/v8/src/ic/arm/handler-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/handler-compiler-arm.cc
@@ -47,9 +47,9 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
__ push(value());
if (accessor_index >= 0) {
- DCHECK(!holder.is(scratch));
- DCHECK(!receiver.is(scratch));
- DCHECK(!value().is(scratch));
+ DCHECK(holder != scratch);
+ DCHECK(receiver != scratch);
+ DCHECK(value() != scratch);
// Call the JavaScript setter with receiver and value on the stack.
if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
@@ -110,7 +110,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
DCHECK(name->IsUniqueName());
- DCHECK(!receiver.is(scratch0));
+ DCHECK(receiver != scratch0);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
__ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
@@ -176,14 +176,14 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
Handle<Map> receiver_map, Register receiver, Register scratch_in,
bool is_store, Register store_parameter, Register accessor_holder,
int accessor_index) {
- DCHECK(!accessor_holder.is(scratch_in));
- DCHECK(!receiver.is(scratch_in));
+ DCHECK(accessor_holder != scratch_in);
+ DCHECK(receiver != scratch_in);
__ push(accessor_holder);
__ push(receiver);
// Write the arguments to stack frame.
if (is_store) {
- DCHECK(!receiver.is(store_parameter));
- DCHECK(!scratch_in.is(store_parameter));
+ DCHECK(receiver != store_parameter);
+ DCHECK(scratch_in != store_parameter);
__ push(store_parameter);
}
DCHECK(optimization.is_simple_api_call());
@@ -289,9 +289,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
Handle<Map> receiver_map = map();
// Make sure there's no overlap between holder and object registers.
- DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
- !scratch2.is(scratch1));
+ DCHECK(scratch1 != object_reg && scratch1 != holder_reg);
+ DCHECK(scratch2 != object_reg && scratch2 != holder_reg &&
+ scratch2 != scratch1);
Handle<Cell> validity_cell =
Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
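
The DCHECK rewrites throughout these handler-compiler files (DCHECK(!a.is(b)) becoming DCHECK(a != b)) track V8's switch from a Register::is() method to ordinary comparison operators, so alias checks read like scalar comparisons. A minimal sketch of the pattern, assuming a code-based register identity:

    struct Register {
      int code;
      constexpr bool operator==(const Register& other) const {
        return code == other.code;
      }
      constexpr bool operator!=(const Register& other) const {
        return !(*this == other);
      }
    };

    // DCHECK(receiver != scratch0) now needs no helper method.
    static_assert(Register{0} != Register{1}, "distinct codes compare unequal");
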
@@ -366,9 +366,8 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ b(&success);
__ bind(miss);
- DCHECK(kind() == Code::LOAD_IC);
PopVectorAndSlot();
- TailCallBuiltin(masm(), MissBuiltin(kind()));
+ TailCallBuiltin(masm(), Builtins::kLoadIC_Miss);
__ bind(&success);
}
}
@@ -380,7 +379,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
__ b(&success);
GenerateRestoreName(miss, name);
PopVectorAndSlot();
- TailCallBuiltin(masm(), MissBuiltin(kind()));
+ TailCallBuiltin(masm(), Builtins::kStoreIC_Miss);
__ bind(&success);
}
}
@@ -419,7 +418,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
- return GetCode(kind(), name);
+ return GetCode(name);
}
diff --git a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
index e74b0ea455..ee3a5b9245 100644
--- a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
@@ -28,15 +28,13 @@ void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
StoreWithVectorDescriptor::kVector);
STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
StoreTransitionDescriptor::kVector);
- __ Push(slot);
- __ Push(vector);
+ __ Push(slot, vector);
}
void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
MacroAssembler* masm = this->masm();
- __ Pop(vector);
- __ Pop(slot);
+ __ Pop(vector, slot);
}
@@ -64,8 +62,8 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
Register map = scratch1;
__ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
- __ Tst(scratch0, kInterceptorOrAccessCheckNeededMask);
- __ B(ne, miss_label);
+ __ TestAndBranchIfAnySet(scratch0, kInterceptorOrAccessCheckNeededMask,
+ miss_label);
// Check that receiver is a JSObject.
__ Ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
@@ -193,10 +191,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Save context register
- __ Push(cp);
- // Save value register, so we can restore it later.
- __ Push(value());
+ // Save context and value registers, so we can restore them later.
+ __ Push(cp, value());
if (accessor_index >= 0) {
DCHECK(!AreAliased(holder, scratch));
@@ -222,10 +218,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
}
// We have to return the passed value, not the return value of the setter.
- __ Pop(x0);
-
- // Restore context register.
- __ Pop(cp);
+ // Also, restore the context register.
+ __ Pop(x0, cp);
}
__ Ret();
}
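
These arm64 cleanups merge adjacent pushes into one Push(a, b) call and pair it with a single Pop. As the hunks show (Push(slot, vector) with Pop(vector, slot), and Push(cp, value()) with Pop(x0, cp)), the first Pop operand receives the most recently pushed value. A small model of that convention:

    #include <cassert>
    #include <stack>

    // Models the convention: Push(a, b) behaves like Push(a); Push(b);
    // and the first Pop operand receives the top of the stack.
    void Push(std::stack<int>& s, int a, int b) { s.push(a); s.push(b); }
    void Pop(std::stack<int>& s, int& first, int& second) {
      first = s.top(); s.pop();   // last value pushed
      second = s.top(); s.pop();
    }

    int main() {
      std::stack<int> s;
      int slot = 1, vector = 2, v = 0, sl = 0;
      Push(s, slot, vector);
      Pop(s, v, sl);  // v == vector, sl == slot
      assert(v == 2 && sl == 1);
    }
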
@@ -282,7 +276,7 @@ void PropertyHandlerCompiler::GenerateAccessCheck(
}
__ B(ne, miss);
- __ bind(&done);
+ __ Bind(&done);
}
Register PropertyHandlerCompiler::CheckPrototypes(
@@ -369,9 +363,8 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
__ B(&success);
__ Bind(miss);
- DCHECK(kind() == Code::LOAD_IC);
PopVectorAndSlot();
- TailCallBuiltin(masm(), MissBuiltin(kind()));
+ TailCallBuiltin(masm(), Builtins::kLoadIC_Miss);
__ Bind(&success);
}
@@ -385,7 +378,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
GenerateRestoreName(miss, name);
PopVectorAndSlot();
- TailCallBuiltin(masm(), MissBuiltin(kind()));
+ TailCallBuiltin(masm(), Builtins::kStoreIC_Miss);
__ Bind(&success);
}
@@ -416,14 +409,18 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ Mov(scratch1(), Operand(cell));
}
__ Mov(scratch2(), Operand(name));
- __ Push(receiver(), holder_reg, scratch1(), scratch2(), value());
- __ Push(Smi::FromInt(language_mode));
+ {
+ UseScratchRegisterScope temps(this->masm());
+ Register temp = temps.AcquireX();
+ __ Mov(temp, Smi::FromInt(language_mode));
+ __ Push(receiver(), holder_reg, scratch1(), scratch2(), value(), temp);
+ }
// Do tail-call to the runtime system.
__ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
- return GetCode(kind(), name);
+ return GetCode(name);
}
@@ -431,4 +428,4 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
} // namespace internal
} // namespace v8
-#endif // V8_TARGET_ARCH_IA32
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/ic/binary-op-assembler.cc b/deps/v8/src/ic/binary-op-assembler.cc
index 20bbc94fa7..cfe7317884 100644
--- a/deps/v8/src/ic/binary-op-assembler.cc
+++ b/deps/v8/src/ic/binary-op-assembler.cc
@@ -14,13 +14,12 @@ using compiler::Node;
Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
Node* rhs, Node* slot_id,
Node* feedback_vector,
- Node* function,
bool rhs_is_smi) {
// Shared entry for floating point addition.
Label do_fadd(this), if_lhsisnotnumber(this, Label::kDeferred),
check_rhsisoddball(this, Label::kDeferred),
call_with_oddball_feedback(this), call_with_any_feedback(this),
- call_add_stub(this), end(this);
+ call_add_stub(this), end(this), bigint(this, Label::kDeferred);
VARIABLE(var_fadd_lhs, MachineRepresentation::kFloat64);
VARIABLE(var_fadd_rhs, MachineRepresentation::kFloat64);
VARIABLE(var_type_feedback, MachineRepresentation::kTaggedSigned);
@@ -149,10 +148,20 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
BIND(&if_lhsisnotoddball);
{
- // Exit unless {lhs} is a string
- GotoIfNot(IsStringInstanceType(lhs_instance_type),
- &call_with_any_feedback);
+ Label lhs_is_string(this), lhs_is_bigint(this);
+ GotoIf(IsStringInstanceType(lhs_instance_type), &lhs_is_string);
+ GotoIf(IsBigIntInstanceType(lhs_instance_type), &lhs_is_bigint);
+ Goto(&call_with_any_feedback);
+
+ BIND(&lhs_is_bigint);
+ {
+ // Label "bigint" handles BigInt + {anything except string}.
+ GotoIf(TaggedIsSmi(rhs), &bigint);
+ Branch(IsStringInstanceType(LoadInstanceType(rhs)),
+ &call_with_any_feedback, &bigint);
+ }
+ BIND(&lhs_is_string);
// Check if the {rhs} is a smi, and exit the string check early if it is.
GotoIf(TaggedIsSmi(rhs), &call_with_any_feedback);
@@ -179,10 +188,19 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
Node* rhs_instance_type = LoadInstanceType(rhs);
Node* rhs_is_oddball =
Word32Equal(rhs_instance_type, Int32Constant(ODDBALL_TYPE));
- Branch(rhs_is_oddball, &call_with_oddball_feedback,
+ GotoIf(rhs_is_oddball, &call_with_oddball_feedback);
+ Branch(IsBigIntInstanceType(rhs_instance_type), &bigint,
&call_with_any_feedback);
}
+ BIND(&bigint);
+ {
+ var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kBigInt));
+ var_result.Bind(CallRuntime(Runtime::kBigIntBinaryOp, context, lhs, rhs,
+ SmiConstant(Token::ADD)));
+ Goto(&end);
+ }
+
BIND(&call_with_oddball_feedback);
{
var_type_feedback.Bind(
@@ -203,18 +221,18 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
}
BIND(&end);
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_id, function);
+ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_id);
return var_result.value();
}
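
Both new BigInt paths bind BinaryOperationFeedback::kBigInt and defer the actual arithmetic to Runtime::kBigIntBinaryOp. Since UpdateFeedback combines the new feedback into the slot with a bitwise OR, one BigInt operand permanently widens what the slot has seen. A toy model of that monotonic lattice (bit assignments are illustrative):

    #include <cstdint>

    // Illustrative feedback bits (the real ones live in
    // BinaryOperationFeedback); feedback only ever grows via bitwise OR.
    enum Feedback : uint32_t {
      kNone = 0, kSignedSmall = 1 << 0, kNumber = 1 << 1,
      kBigIntFb = 1 << 2, kAny = 0xFF
    };

    constexpr uint32_t UpdateFeedback(uint32_t slot, uint32_t seen) {
      return slot | seen;
    }

    // Earlier feedback survives: after seeing a BigInt, kNumber is still set.
    static_assert((UpdateFeedback(kNumber, kBigIntFb) & kNumber) != 0,
                  "feedback widens monotonically");
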
Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
Node* context, Node* lhs, Node* rhs, Node* slot_id, Node* feedback_vector,
- Node* function, const SmiOperation& smiOperation,
- const FloatOperation& floatOperation, Token::Value opcode,
- bool rhs_is_smi) {
+ const SmiOperation& smiOperation, const FloatOperation& floatOperation,
+ Token::Value opcode, bool rhs_is_smi) {
Label do_float_operation(this), end(this), call_stub(this),
check_rhsisoddball(this, Label::kDeferred), call_with_any_feedback(this),
- if_lhsisnotnumber(this, Label::kDeferred);
+ if_lhsisnotnumber(this, Label::kDeferred),
+ if_bigint(this, Label::kDeferred);
VARIABLE(var_float_lhs, MachineRepresentation::kFloat64);
VARIABLE(var_float_rhs, MachineRepresentation::kFloat64);
VARIABLE(var_type_feedback, MachineRepresentation::kTaggedSigned);
@@ -284,7 +302,7 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
}
{
- // Perform a floating point subtraction.
+ // Perform a floating point operation.
var_float_lhs.Bind(LoadHeapNumberValue(lhs));
var_float_rhs.Bind(SmiToFloat64(rhs));
Goto(&do_float_operation);
@@ -306,6 +324,7 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
// No checks on rhs are done yet. We just know lhs is not a number or Smi.
// Check if lhs is an oddball.
Node* lhs_instance_type = LoadInstanceType(lhs);
+ GotoIf(IsBigIntInstanceType(lhs_instance_type), &if_bigint);
Node* lhs_is_oddball =
Word32Equal(lhs_instance_type, Int32Constant(ODDBALL_TYPE));
GotoIfNot(lhs_is_oddball, &call_with_any_feedback);
@@ -336,6 +355,7 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
// Check if rhs is an oddball. At this point we know lhs is either a
// Smi or number or oddball and rhs is not a number or Smi.
Node* rhs_instance_type = LoadInstanceType(rhs);
+ GotoIf(IsBigIntInstanceType(rhs_instance_type), &if_bigint);
Node* rhs_is_oddball =
Word32Equal(rhs_instance_type, Int32Constant(ODDBALL_TYPE));
GotoIfNot(rhs_is_oddball, &call_with_any_feedback);
@@ -345,6 +365,15 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
Goto(&call_stub);
}
+ // This handles the case where at least one input is a BigInt.
+ BIND(&if_bigint);
+ {
+ var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kBigInt));
+ var_result.Bind(CallRuntime(Runtime::kBigIntBinaryOp, context, lhs, rhs,
+ SmiConstant(opcode)));
+ Goto(&end);
+ }
+
BIND(&call_with_any_feedback);
{
var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
@@ -375,14 +404,13 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
}
BIND(&end);
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_id, function);
+ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_id);
return var_result.value();
}
Node* BinaryOpAssembler::Generate_SubtractWithFeedback(Node* context, Node* lhs,
Node* rhs, Node* slot_id,
Node* feedback_vector,
- Node* function,
bool rhs_is_smi) {
auto smiFunction = [=](Node* lhs, Node* rhs, Variable* var_type_feedback) {
VARIABLE(var_result, MachineRepresentation::kTagged);
@@ -423,14 +451,13 @@ Node* BinaryOpAssembler::Generate_SubtractWithFeedback(Node* context, Node* lhs,
return Float64Sub(lhs, rhs);
};
return Generate_BinaryOperationWithFeedback(
- context, lhs, rhs, slot_id, feedback_vector, function, smiFunction,
- floatFunction, Token::SUB, rhs_is_smi);
+ context, lhs, rhs, slot_id, feedback_vector, smiFunction, floatFunction,
+ Token::SUB, rhs_is_smi);
}
Node* BinaryOpAssembler::Generate_MultiplyWithFeedback(Node* context, Node* lhs,
Node* rhs, Node* slot_id,
Node* feedback_vector,
- Node* function,
bool rhs_is_smi) {
auto smiFunction = [=](Node* lhs, Node* rhs, Variable* var_type_feedback) {
Node* result = SmiMul(lhs, rhs);
@@ -443,13 +470,13 @@ Node* BinaryOpAssembler::Generate_MultiplyWithFeedback(Node* context, Node* lhs,
return Float64Mul(lhs, rhs);
};
return Generate_BinaryOperationWithFeedback(
- context, lhs, rhs, slot_id, feedback_vector, function, smiFunction,
- floatFunction, Token::MUL, rhs_is_smi);
+ context, lhs, rhs, slot_id, feedback_vector, smiFunction, floatFunction,
+ Token::MUL, rhs_is_smi);
}
Node* BinaryOpAssembler::Generate_DivideWithFeedback(
Node* context, Node* dividend, Node* divisor, Node* slot_id,
- Node* feedback_vector, Node* function, bool rhs_is_smi) {
+ Node* feedback_vector, bool rhs_is_smi) {
auto smiFunction = [=](Node* lhs, Node* rhs, Variable* var_type_feedback) {
VARIABLE(var_result, MachineRepresentation::kTagged);
// If rhs is known to be an Smi (for DivSmi) we want to fast path Smi
@@ -477,13 +504,13 @@ Node* BinaryOpAssembler::Generate_DivideWithFeedback(
return Float64Div(lhs, rhs);
};
return Generate_BinaryOperationWithFeedback(
- context, dividend, divisor, slot_id, feedback_vector, function,
- smiFunction, floatFunction, Token::DIV, rhs_is_smi);
+ context, dividend, divisor, slot_id, feedback_vector, smiFunction,
+ floatFunction, Token::DIV, rhs_is_smi);
}
Node* BinaryOpAssembler::Generate_ModulusWithFeedback(
Node* context, Node* dividend, Node* divisor, Node* slot_id,
- Node* feedback_vector, Node* function, bool rhs_is_smi) {
+ Node* feedback_vector, bool rhs_is_smi) {
auto smiFunction = [=](Node* lhs, Node* rhs, Variable* var_type_feedback) {
Node* result = SmiMod(lhs, rhs);
var_type_feedback->Bind(SelectSmiConstant(
@@ -495,8 +522,8 @@ Node* BinaryOpAssembler::Generate_ModulusWithFeedback(
return Float64Mod(lhs, rhs);
};
return Generate_BinaryOperationWithFeedback(
- context, dividend, divisor, slot_id, feedback_vector, function,
- smiFunction, floatFunction, Token::MOD, rhs_is_smi);
+ context, dividend, divisor, slot_id, feedback_vector, smiFunction,
+ floatFunction, Token::MOD, rhs_is_smi);
}
} // namespace internal
diff --git a/deps/v8/src/ic/binary-op-assembler.h b/deps/v8/src/ic/binary-op-assembler.h
index bb37298447..625dfce79a 100644
--- a/deps/v8/src/ic/binary-op-assembler.h
+++ b/deps/v8/src/ic/binary-op-assembler.h
@@ -24,25 +24,23 @@ class BinaryOpAssembler : public CodeStubAssembler {
Node* Generate_AddWithFeedback(Node* context, Node* lhs, Node* rhs,
Node* slot_id, Node* feedback_vector,
- Node* function, bool rhs_is_smi);
+ bool rhs_is_smi);
Node* Generate_SubtractWithFeedback(Node* context, Node* lhs, Node* rhs,
Node* slot_id, Node* feedback_vector,
- Node* function, bool rhs_is_smi);
+ bool rhs_is_smi);
Node* Generate_MultiplyWithFeedback(Node* context, Node* lhs, Node* rhs,
Node* slot_id, Node* feedback_vector,
- Node* function, bool rhs_is_smi);
+ bool rhs_is_smi);
Node* Generate_DivideWithFeedback(Node* context, Node* dividend,
Node* divisor, Node* slot_id,
- Node* feedback_vector, Node* function,
- bool rhs_is_smi);
+ Node* feedback_vector, bool rhs_is_smi);
Node* Generate_ModulusWithFeedback(Node* context, Node* dividend,
Node* divisor, Node* slot_id,
- Node* feedback_vector, Node* function,
- bool rhs_is_smi);
+ Node* feedback_vector, bool rhs_is_smi);
private:
typedef std::function<Node*(Node*, Node*, Variable*)> SmiOperation;
@@ -50,9 +48,8 @@ class BinaryOpAssembler : public CodeStubAssembler {
Node* Generate_BinaryOperationWithFeedback(
Node* context, Node* lhs, Node* rhs, Node* slot_id, Node* feedback_vector,
- Node* function, const SmiOperation& smiOperation,
- const FloatOperation& floatOperation, Token::Value opcode,
- bool rhs_is_smi);
+ const SmiOperation& smiOperation, const FloatOperation& floatOperation,
+ Token::Value opcode, bool rhs_is_smi);
};
} // namespace internal
diff --git a/deps/v8/src/ic/handler-compiler.cc b/deps/v8/src/ic/handler-compiler.cc
index b4aff8ec55..749f2fa963 100644
--- a/deps/v8/src/ic/handler-compiler.cc
+++ b/deps/v8/src/ic/handler-compiler.cc
@@ -15,24 +15,14 @@
namespace v8 {
namespace internal {
-Handle<Code> PropertyHandlerCompiler::Find(Handle<Name> name,
- Handle<Map> stub_holder,
- Code::Kind kind) {
- Code::Flags flags = Code::ComputeHandlerFlags(kind);
- Code* code = stub_holder->LookupInCodeCache(*name, flags);
- if (code == nullptr) return Handle<Code>();
- return handle(code);
-}
-
-Handle<Code> PropertyHandlerCompiler::GetCode(Code::Kind kind,
- Handle<Name> name) {
- Code::Flags flags = Code::ComputeHandlerFlags(kind);
-
+Handle<Code> PropertyHandlerCompiler::GetCode(Handle<Name> name) {
// Create code object in the heap.
CodeDesc desc;
masm()->GetCode(isolate(), &desc);
- Handle<Code> code = factory()->NewCode(desc, flags, masm()->CodeObject());
- if (code->IsCodeStubOrIC()) code->set_stub_key(CodeStub::NoCacheKey());
+ Handle<Code> code =
+ factory()->NewCode(desc, Code::STUB, masm()->CodeObject());
+ DCHECK(code->is_stub());
+ code->set_stub_key(CodeStub::NoCacheKey());
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code_stubs) {
char* raw_name = !name.is_null() && name->IsString()
@@ -94,18 +84,26 @@ Register NamedStoreHandlerCompiler::FrontendHeader(Register object_reg,
miss);
}
+// The ICs that don't pass slot and vector through the stack have to
+// save/restore them in the dispatcher.
+bool PropertyHandlerCompiler::ShouldPushPopSlotAndVector() {
+ switch (type()) {
+ case LOAD:
+ return true;
+ case STORE:
+ return !StoreWithVectorDescriptor::kPassLastArgsOnStack;
+ }
+ UNREACHABLE();
+ return false;
+}
Register PropertyHandlerCompiler::Frontend(Handle<Name> name) {
Label miss;
- if (IC::ShouldPushPopSlotAndVector(kind())) {
- PushVectorAndSlot();
- }
+ if (ShouldPushPopSlotAndVector()) PushVectorAndSlot();
Register reg = FrontendHeader(receiver(), name, &miss);
FrontendFooter(name, &miss);
// The footer consumes the vector and slot from the stack if miss occurs.
- if (IC::ShouldPushPopSlotAndVector(kind())) {
- DiscardVectorAndSlot();
- }
+ if (ShouldPushPopSlotAndVector()) DiscardVectorAndSlot();
return reg;
}
@@ -119,7 +117,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
Register holder = Frontend(name);
GenerateApiAccessorCall(masm(), call_optimization, map(), receiver(),
scratch2(), false, no_reg, holder, accessor_index);
- return GetCode(kind(), name);
+ return GetCode(name);
}
Handle<Code> NamedStoreHandlerCompiler::CompileStoreViaSetter(
@@ -129,7 +127,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreViaSetter(
GenerateStoreViaSetter(masm(), map(), receiver(), holder, accessor_index,
expected_arguments, scratch2());
- return GetCode(kind(), name);
+ return GetCode(name);
}
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
@@ -146,7 +144,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
GenerateApiAccessorCall(masm(), call_optimization, handle(object->map()),
receiver(), scratch2(), true, value(), holder,
accessor_index);
- return GetCode(kind(), name);
+ return GetCode(name);
}
diff --git a/deps/v8/src/ic/handler-compiler.h b/deps/v8/src/ic/handler-compiler.h
index 98c12ed7c0..2fe5870ef1 100644
--- a/deps/v8/src/ic/handler-compiler.h
+++ b/deps/v8/src/ic/handler-compiler.h
@@ -13,16 +13,17 @@ namespace internal {
class CallOptimization;
class PropertyHandlerCompiler : public PropertyAccessCompiler {
- public:
- static Handle<Code> Find(Handle<Name> name, Handle<Map> map, Code::Kind kind);
-
protected:
- PropertyHandlerCompiler(Isolate* isolate, Code::Kind kind, Handle<Map> map,
+ PropertyHandlerCompiler(Isolate* isolate, Type type, Handle<Map> map,
Handle<JSObject> holder)
- : PropertyAccessCompiler(isolate, kind), map_(map), holder_(holder) {}
+ : PropertyAccessCompiler(isolate, type), map_(map), holder_(holder) {}
virtual ~PropertyHandlerCompiler() {}
+ // The ICs that don't pass slot and vector through the stack have to
+ // save/restore them in the dispatcher.
+ bool ShouldPushPopSlotAndVector();
+
virtual Register FrontendHeader(Register object_reg, Handle<Name> name,
Label* miss) {
UNREACHABLE();
@@ -99,7 +100,7 @@ class PropertyHandlerCompiler : public PropertyAccessCompiler {
Register scratch1, Register scratch2,
Handle<Name> name, Label* miss);
- Handle<Code> GetCode(Code::Kind kind, Handle<Name> name);
+ Handle<Code> GetCode(Handle<Name> name);
Handle<Map> map() const { return map_; }
Handle<JSObject> holder() const { return holder_; }
@@ -113,7 +114,7 @@ class NamedLoadHandlerCompiler : public PropertyHandlerCompiler {
public:
NamedLoadHandlerCompiler(Isolate* isolate, Handle<Map> map,
Handle<JSObject> holder)
- : PropertyHandlerCompiler(isolate, Code::LOAD_IC, map, holder) {}
+ : PropertyHandlerCompiler(isolate, LOAD, map, holder) {}
virtual ~NamedLoadHandlerCompiler() {}
@@ -141,7 +142,7 @@ class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
explicit NamedStoreHandlerCompiler(Isolate* isolate, Handle<Map> map,
Handle<JSObject> holder)
- : PropertyHandlerCompiler(isolate, Code::STORE_IC, map, holder) {
+ : PropertyHandlerCompiler(isolate, STORE, map, holder) {
#ifdef DEBUG
if (Descriptor::kPassLastArgsOnStack) {
ZapStackArgumentsRegisterAliases();
diff --git a/deps/v8/src/ic/handler-configuration-inl.h b/deps/v8/src/ic/handler-configuration-inl.h
index e008de39a4..dc1d595723 100644
--- a/deps/v8/src/ic/handler-configuration-inl.h
+++ b/deps/v8/src/ic/handler-configuration-inl.h
@@ -108,11 +108,28 @@ Handle<Smi> LoadHandler::LoadElement(Isolate* isolate,
return handle(Smi::FromInt(config), isolate);
}
+Handle<Smi> StoreHandler::StoreGlobalProxy(Isolate* isolate) {
+ int config = KindBits::encode(kStoreGlobalProxy);
+ return handle(Smi::FromInt(config), isolate);
+}
+
Handle<Smi> StoreHandler::StoreNormal(Isolate* isolate) {
int config = KindBits::encode(kStoreNormal);
return handle(Smi::FromInt(config), isolate);
}
+Handle<Smi> StoreHandler::StoreProxy(Isolate* isolate) {
+ int config = KindBits::encode(kProxy);
+ return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Smi> StoreHandler::EnableAccessCheckOnReceiver(Isolate* isolate,
+ Handle<Smi> smi_handler) {
+ int config = smi_handler->value();
+ config = DoAccessCheckOnReceiverBits::update(config, true);
+ return handle(Smi::FromInt(config), isolate);
+}
+
Handle<Smi> StoreHandler::StoreField(Isolate* isolate, Kind kind,
int descriptor, FieldIndex field_index,
Representation representation,
@@ -177,19 +194,24 @@ Handle<Smi> StoreHandler::TransitionToConstant(Isolate* isolate,
}
// static
-WeakCell* StoreHandler::GetTuple3TransitionCell(Object* tuple3_handler) {
- STATIC_ASSERT(kTransitionCellOffset == Tuple3::kValue1Offset);
- WeakCell* cell = WeakCell::cast(Tuple3::cast(tuple3_handler)->value1());
+WeakCell* StoreHandler::GetTransitionCell(Object* handler) {
+ if (handler->IsTuple3()) {
+ STATIC_ASSERT(kTransitionOrHolderCellOffset == Tuple3::kValue1Offset);
+ WeakCell* cell = WeakCell::cast(Tuple3::cast(handler)->value1());
+ DCHECK(!cell->cleared());
+ return cell;
+ }
+
+ DCHECK(handler->IsFixedArray());
+ WeakCell* cell = WeakCell::cast(
+ FixedArray::cast(handler)->get(kTransitionMapOrHolderCellIndex));
DCHECK(!cell->cleared());
return cell;
}
// static
-WeakCell* StoreHandler::GetArrayTransitionCell(Object* array_handler) {
- WeakCell* cell = WeakCell::cast(
- FixedArray::cast(array_handler)->get(kTransitionCellIndex));
- DCHECK(!cell->cleared());
- return cell;
+bool StoreHandler::IsHandler(Object* maybe_handler) {
+ return maybe_handler->IsFixedArray() || maybe_handler->IsTuple3();
}
} // namespace internal
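
GetTransitionCell now accepts both store-handler shapes, which works because of a deliberate layout pun asserted earlier in this patch: the Tuple3 slot at kTransitionOrHolderCellOffset overlaps FixedArray's length field, so probing that one slot both discriminates the shape and, for arrays, yields the length (a Smi). Schematically, with stand-in types:

    #include <cstdint>

    // Stand-in for a tagged slot: either a Smi or a heap object (WeakCell).
    struct Tagged { bool is_smi; intptr_t smi_value; };

    enum class Shape { kTuple3, kFixedArray };

    Shape Discriminate(const Tagged& transition_or_length_slot,
                       intptr_t* array_length_out) {
      if (transition_or_length_slot.is_smi) {  // FixedArray::length is a Smi
        *array_length_out = transition_or_length_slot.smi_value;
        return Shape::kFixedArray;
      }
      return Shape::kTuple3;  // otherwise: the transition/holder WeakCell
    }
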
diff --git a/deps/v8/src/ic/handler-configuration.cc b/deps/v8/src/ic/handler-configuration.cc
index 7071941bd7..b294c864a9 100644
--- a/deps/v8/src/ic/handler-configuration.cc
+++ b/deps/v8/src/ic/handler-configuration.cc
@@ -4,78 +4,432 @@
#include "src/ic/handler-configuration.h"
+#include "src/code-stubs.h"
#include "src/ic/handler-configuration-inl.h"
#include "src/transitions.h"
namespace v8 {
namespace internal {
-// |name| can be nullptr if no name/details check needs to be performed.
-Object* StoreHandler::ValidTuple3HandlerOrNull(Object* handler, Name* name,
- Handle<Map>* out_transition) {
- DCHECK(handler->IsTuple3());
- // Step 1: Check validity cell.
- STATIC_ASSERT(kValidityCellOffset == Tuple3::kValue3Offset);
- Object* raw_validity_cell = Tuple3::cast(handler)->value3();
- Smi* valid = Smi::FromInt(Map::kPrototypeChainValid);
- // |raw_validity_cell| can be Smi::kZero if no validity cell is required
- // (which counts as valid).
- if (raw_validity_cell->IsCell() &&
- Cell::cast(raw_validity_cell)->value() != valid) {
- return nullptr;
- }
- // Step 2 (optional): Check transition key.
- WeakCell* target_cell = StoreHandler::GetTuple3TransitionCell(handler);
- if (name != nullptr) {
- if (!TransitionsAccessor::IsMatchingMap(target_cell, name, kData, NONE)) {
- return nullptr;
+namespace {
+
+template <bool fill_array = true>
+int InitPrototypeChecks(Isolate* isolate, Handle<Map> receiver_map,
+ Handle<JSReceiver> holder, Handle<Name> name,
+ Handle<FixedArray> array, int first_index) {
+ if (!holder.is_null() && holder->map() == *receiver_map) return 0;
+
+ HandleScope scope(isolate);
+ int checks_count = 0;
+
+ if (receiver_map->IsPrimitiveMap() || receiver_map->IsJSGlobalProxyMap()) {
+ // The validity cell check for primitive and global proxy receivers does
+ // not guarantee that a given native context ever had access to another
+ // native context. However, a handler created for one native context could
+ // be used in another native context through the megamorphic stub cache.
+ // So we record the original native context to which this handler
+ // corresponds.
+ if (fill_array) {
+ Handle<Context> native_context = isolate->native_context();
+ array->set(first_index + checks_count, native_context->self_weak_cell());
}
+ checks_count++;
+
+ } else if (receiver_map->IsJSGlobalObjectMap()) {
+ // If we are creating a handler for [Load/Store]GlobalIC then we need to
+ // check that the property did not appear in the global object.
+ if (fill_array) {
+ Handle<JSGlobalObject> global = isolate->global_object();
+ Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
+ global, name, PropertyCellType::kInvalidated);
+ DCHECK(cell->value()->IsTheHole(isolate));
+ Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
+ array->set(first_index + checks_count, *weak_cell);
+ }
+ checks_count++;
}
- // Step 3: Check if the transition target is deprecated.
- Map* transition = Map::cast(target_cell->value());
- if (transition->is_deprecated()) return nullptr;
- *out_transition = handle(transition);
- return handler;
+
+ // Create/count an entry for each global or dictionary prototype that
+ // appears in the prototype chain from the receiver up to the holder.
+ PrototypeIterator::WhereToEnd end = name->IsPrivate()
+ ? PrototypeIterator::END_AT_NON_HIDDEN
+ : PrototypeIterator::END_AT_NULL;
+ for (PrototypeIterator iter(receiver_map, end); !iter.IsAtEnd();
+ iter.Advance()) {
+ Handle<JSReceiver> current =
+ PrototypeIterator::GetCurrent<JSReceiver>(iter);
+ if (holder.is_identical_to(current)) break;
+ Handle<Map> current_map(current->map(), isolate);
+
+ if (current_map->IsJSGlobalObjectMap()) {
+ if (fill_array) {
+ Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(current);
+ Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
+ global, name, PropertyCellType::kInvalidated);
+ DCHECK(cell->value()->IsTheHole(isolate));
+ Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
+ array->set(first_index + checks_count, *weak_cell);
+ }
+ checks_count++;
+
+ } else if (current_map->is_dictionary_map()) {
+ DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
+ if (fill_array) {
+ DCHECK_EQ(NameDictionary::kNotFound,
+ current->property_dictionary()->FindEntry(name));
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate);
+ array->set(first_index + checks_count, *weak_cell);
+ }
+ checks_count++;
+ }
+ }
+ return checks_count;
+}
+
+// Returns 0 if the validity cell check alone is enough to ensure that the
+// prototype chain from |receiver_map| to |holder| did not change.
+// If |holder| is an empty handle, the full prototype chain is checked.
+// Returns -1 if the handler has to be compiled; otherwise returns the
+// number of prototype checks.
+int GetPrototypeCheckCount(Isolate* isolate, Handle<Map> receiver_map,
+ Handle<JSReceiver> holder, Handle<Name> name) {
+ return InitPrototypeChecks<false>(isolate, receiver_map, holder, name,
+ Handle<FixedArray>(), 0);
+}
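
InitPrototypeChecks is deliberately instantiated twice: with fill_array = false it is a dry run that only counts the required check slots (which is all GetPrototypeCheckCount does), and with fill_array = true it fills an array that was sized from the first pass. The count/allocate/fill idiom in miniature:

    #include <vector>

    // fill_array = false: dry run that only counts; true: also writes.
    template <bool fill_array = true>
    int InitChecks(const std::vector<int>& prototypes, std::vector<int>* out,
                   int first_index) {
      int count = 0;
      for (int p : prototypes) {
        if (p % 2 == 0) continue;  // stand-in for "no check needed here"
        if (fill_array) (*out)[first_index + count] = p;
        ++count;
      }
      return count;
    }

    int CountChecks(const std::vector<int>& prototypes) {
      return InitChecks<false>(prototypes, nullptr, 0);
    }
    // Usage: int n = CountChecks(ps);
    //        std::vector<int> a(first_index + n);
    //        InitChecks<true>(ps, &a, first_index);
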
+
+enum class HolderCellRequest {
+ kGlobalPropertyCell,
+ kHolder,
+};
+
+Handle<WeakCell> HolderCell(Isolate* isolate, Handle<JSReceiver> holder,
+ Handle<Name> name, HolderCellRequest request) {
+ if (request == HolderCellRequest::kGlobalPropertyCell) {
+ DCHECK(holder->IsJSGlobalObject());
+ Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(holder);
+ GlobalDictionary* dict = global->global_dictionary();
+ int number = dict->FindEntry(name);
+ DCHECK_NE(NameDictionary::kNotFound, number);
+ Handle<PropertyCell> cell(dict->CellAt(number), isolate);
+ return isolate->factory()->NewWeakCell(cell);
+ }
+ return Map::GetOrCreatePrototypeWeakCell(holder, isolate);
}
-Object* StoreHandler::ValidFixedArrayHandlerOrNull(
- Object* raw_handler, Name* name, Handle<Map>* out_transition) {
- DCHECK(raw_handler->IsFixedArray());
- FixedArray* handler = FixedArray::cast(raw_handler);
- // Step 1: Check validity cell.
- Object* value = Cell::cast(handler->get(kValidityCellIndex))->value();
- if (value != Smi::FromInt(Map::kPrototypeChainValid)) return nullptr;
- // Step 2: Check transition key.
- WeakCell* target_cell = StoreHandler::GetArrayTransitionCell(handler);
- if (!TransitionsAccessor::IsMatchingMap(target_cell, name, kData, NONE)) {
- return nullptr;
- }
- // Step 3: Check prototypes.
- Heap* heap = handler->GetHeap();
- Isolate* isolate = heap->isolate();
- Handle<Name> name_handle(name, isolate);
- for (int i = kFirstPrototypeIndex; i < handler->length(); i++) {
- // This mirrors AccessorAssembler::CheckPrototype.
- WeakCell* prototype_cell = WeakCell::cast(handler->get(i));
- if (prototype_cell->cleared()) return nullptr;
- HeapObject* maybe_prototype = HeapObject::cast(prototype_cell->value());
- if (maybe_prototype->IsPropertyCell()) {
- Object* value = PropertyCell::cast(maybe_prototype)->value();
- if (value != heap->the_hole_value()) return nullptr;
+} // namespace
+
+// static
+Handle<Object> LoadHandler::LoadFromPrototype(Isolate* isolate,
+ Handle<Map> receiver_map,
+ Handle<JSReceiver> holder,
+ Handle<Name> name,
+ Handle<Smi> smi_handler) {
+ int checks_count =
+ GetPrototypeCheckCount(isolate, receiver_map, holder, name);
+ DCHECK_LE(0, checks_count);
+
+ if (receiver_map->IsPrimitiveMap() ||
+ receiver_map->is_access_check_needed()) {
+ DCHECK(!receiver_map->is_dictionary_map());
+ DCHECK_LE(1, checks_count); // For native context.
+ smi_handler = EnableAccessCheckOnReceiver(isolate, smi_handler);
+ } else if (receiver_map->is_dictionary_map() &&
+ !receiver_map->IsJSGlobalObjectMap()) {
+ smi_handler = EnableLookupOnReceiver(isolate, smi_handler);
+ }
+
+ Handle<Cell> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate);
+ DCHECK(!validity_cell.is_null());
+
+ // The LoadIC dispatcher expects a PropertyCell as the "holder" for the
+ // kGlobal handler kind.
+ HolderCellRequest request = GetHandlerKind(*smi_handler) == kGlobal
+ ? HolderCellRequest::kGlobalPropertyCell
+ : HolderCellRequest::kHolder;
+
+ Handle<WeakCell> holder_cell = HolderCell(isolate, holder, name, request);
+
+ if (checks_count == 0) {
+ return isolate->factory()->NewTuple3(holder_cell, smi_handler,
+ validity_cell, TENURED);
+ }
+ Handle<FixedArray> handler_array(isolate->factory()->NewFixedArray(
+ kFirstPrototypeIndex + checks_count, TENURED));
+ handler_array->set(kSmiHandlerIndex, *smi_handler);
+ handler_array->set(kValidityCellIndex, *validity_cell);
+ handler_array->set(kHolderCellIndex, *holder_cell);
+ InitPrototypeChecks(isolate, receiver_map, holder, name, handler_array,
+ kFirstPrototypeIndex);
+ return handler_array;
+}
+
+// static
+Handle<Object> LoadHandler::LoadFullChain(Isolate* isolate,
+ Handle<Map> receiver_map,
+ Handle<Object> holder,
+ Handle<Name> name,
+ Handle<Smi> smi_handler) {
+ Handle<JSReceiver> end; // null handle
+ int checks_count = GetPrototypeCheckCount(isolate, receiver_map, end, name);
+ DCHECK_LE(0, checks_count);
+
+ if (receiver_map->IsPrimitiveMap() ||
+ receiver_map->is_access_check_needed()) {
+ DCHECK(!receiver_map->is_dictionary_map());
+ DCHECK_LE(1, checks_count); // For native context.
+ smi_handler = EnableAccessCheckOnReceiver(isolate, smi_handler);
+ } else if (receiver_map->is_dictionary_map() &&
+ !receiver_map->IsJSGlobalObjectMap()) {
+ smi_handler = EnableLookupOnReceiver(isolate, smi_handler);
+ }
+
+ Handle<Object> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate);
+ if (validity_cell.is_null()) {
+ DCHECK_EQ(0, checks_count);
+    // Lookup on the receiver isn't supported for a simple smi handler.
+ if (!LookupOnReceiverBits::decode(smi_handler->value())) return smi_handler;
+ validity_cell = handle(Smi::kZero, isolate);
+ }
+
+ Factory* factory = isolate->factory();
+ if (checks_count == 0) {
+ return factory->NewTuple3(holder, smi_handler, validity_cell, TENURED);
+ }
+ Handle<FixedArray> handler_array(factory->NewFixedArray(
+ LoadHandler::kFirstPrototypeIndex + checks_count, TENURED));
+ handler_array->set(kSmiHandlerIndex, *smi_handler);
+ handler_array->set(kValidityCellIndex, *validity_cell);
+ handler_array->set(kHolderCellIndex, *holder);
+ InitPrototypeChecks(isolate, receiver_map, end, name, handler_array,
+ kFirstPrototypeIndex);
+ return handler_array;
+}
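
Both builders above emit one of two handler shapes. The following standalone
sketch (hypothetical stand-in types, not V8's actual classes) models those
shapes, matching the Tuple3 value slots and FixedArray indices used in this
patch:

#include <vector>

struct Object {};  // stand-in for a tagged V8 value

// checks_count == 0: the handler is a fixed triple (Tuple3).
struct Tuple3Handler {
  Object* holder_or_transition_cell;  // value1
  Object* smi_handler;                // value2
  Object* validity_cell;              // value3; Smi::kZero when not required
};

// checks_count > 0: a FixedArray with a fixed prefix, followed by one weak
// cell per global or dictionary-mode prototype that must be re-checked.
struct ArrayHandler {
  Object* smi_handler;                   // kSmiHandlerIndex == 0
  Object* validity_cell;                 // kValidityCellIndex == 1
  Object* holder_or_transition_cell;     // index 2
  std::vector<Object*> prototype_cells;  // kFirstPrototypeIndex == 3 onward
};

int main() {
  ArrayHandler handler;
  handler.prototype_cells.resize(2);  // e.g. two dictionary-mode prototypes
}
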
+
+// static
+Handle<Object> StoreHandler::StoreElementTransition(
+ Isolate* isolate, Handle<Map> receiver_map, Handle<Map> transition,
+ KeyedAccessStoreMode store_mode) {
+ bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ ElementsKind elements_kind = receiver_map->elements_kind();
+ Handle<Code> stub = ElementsTransitionAndStoreStub(
+ isolate, elements_kind, transition->elements_kind(),
+ is_js_array, store_mode)
+ .GetCode();
+ Handle<Object> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate);
+ if (validity_cell.is_null()) {
+ validity_cell = handle(Smi::kZero, isolate);
+ }
+ Handle<WeakCell> cell = Map::WeakCellForMap(transition);
+ return isolate->factory()->NewTuple3(cell, stub, validity_cell, TENURED);
+}
+
+// static
+Handle<Object> StoreHandler::StoreTransition(Isolate* isolate,
+ Handle<Map> receiver_map,
+ Handle<JSObject> holder,
+ Handle<HeapObject> transition,
+ Handle<Name> name) {
+ Handle<Smi> smi_handler;
+ Handle<WeakCell> transition_cell;
+
+ if (transition->IsMap()) {
+ Handle<Map> transition_map = Handle<Map>::cast(transition);
+ if (transition_map->is_dictionary_map()) {
+ smi_handler = StoreNormal(isolate);
} else {
- DCHECK(maybe_prototype->map()->is_dictionary_map());
- // Do a negative dictionary lookup.
- NameDictionary* dict =
- JSObject::cast(maybe_prototype)->property_dictionary();
- int number = dict->FindEntry(isolate, name_handle);
- if (number != NameDictionary::kNotFound) return nullptr;
+ int descriptor = transition_map->LastAdded();
+ Handle<DescriptorArray> descriptors(
+ transition_map->instance_descriptors());
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ DCHECK(!representation.IsNone());
+
+ // Declarative handlers don't support access checks.
+ DCHECK(!transition_map->is_access_check_needed());
+
+ DCHECK_EQ(kData, details.kind());
+ if (details.location() == kDescriptor) {
+ smi_handler = TransitionToConstant(isolate, descriptor);
+
+ } else {
+ DCHECK_EQ(kField, details.location());
+ bool extend_storage = Map::cast(transition_map->GetBackPointer())
+ ->unused_property_fields() == 0;
+
+ FieldIndex index =
+ FieldIndex::ForDescriptor(*transition_map, descriptor);
+ smi_handler = TransitionToField(isolate, descriptor, index,
+ representation, extend_storage);
+ }
+ }
+    // |holder| is either the receiver, if the property is non-existent, or
+    // one of the prototypes.
+ DCHECK(!holder.is_null());
+ bool is_nonexistent = holder->map() == transition_map->GetBackPointer();
+ if (is_nonexistent) holder = Handle<JSObject>::null();
+ transition_cell = Map::WeakCellForMap(transition_map);
+
+ } else {
+ DCHECK(transition->IsPropertyCell());
+ if (receiver_map->IsJSGlobalObjectMap()) {
+ // TODO(ishell): this must be handled by StoreGlobalIC once it's finished.
+ return StoreGlobal(isolate, Handle<PropertyCell>::cast(transition));
+ } else {
+ DCHECK(receiver_map->IsJSGlobalProxyMap());
+ smi_handler = StoreGlobalProxy(isolate);
+ transition_cell = isolate->factory()->NewWeakCell(transition);
+ }
+ }
+
+ int checks_count =
+ GetPrototypeCheckCount(isolate, receiver_map, holder, name);
+
+ DCHECK_LE(0, checks_count);
+ DCHECK(!receiver_map->IsJSGlobalObjectMap());
+
+ if (receiver_map->is_access_check_needed()) {
+ DCHECK(!receiver_map->is_dictionary_map());
+ DCHECK_LE(1, checks_count); // For native context.
+ smi_handler = EnableAccessCheckOnReceiver(isolate, smi_handler);
+ }
+
+ Handle<Object> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate);
+ if (validity_cell.is_null()) {
+ DCHECK_EQ(0, checks_count);
+ validity_cell = handle(Smi::kZero, isolate);
+ }
+
+ Factory* factory = isolate->factory();
+ if (checks_count == 0) {
+ return factory->NewTuple3(transition_cell, smi_handler, validity_cell,
+ TENURED);
+ }
+ Handle<FixedArray> handler_array(
+ factory->NewFixedArray(kFirstPrototypeIndex + checks_count, TENURED));
+ handler_array->set(kSmiHandlerIndex, *smi_handler);
+ handler_array->set(kValidityCellIndex, *validity_cell);
+ handler_array->set(kTransitionMapOrHolderCellIndex, *transition_cell);
+ InitPrototypeChecks(isolate, receiver_map, holder, name, handler_array,
+ kFirstPrototypeIndex);
+ return handler_array;
+}
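
As a reading aid, here is a runnable distillation (plain enums rather than V8
types; only the branch structure mirrors the code above) of how
StoreTransition picks a Smi-handler for a map transition:

#include <cassert>

enum Location { kDescriptor, kField };
enum SmiHandlerKind { kStoreNormal, kTransitionToConstant, kTransitionToField };

// |storage_full| models the extend_storage bit above: the back-pointer map
// has no unused property fields left, so the store must grow the backing
// store.
SmiHandlerKind SelectTransitionHandler(bool target_is_dictionary_map,
                                       Location location, bool storage_full) {
  if (target_is_dictionary_map) return kStoreNormal;
  if (location == kDescriptor) return kTransitionToConstant;
  (void)storage_full;  // encoded into the handler, not part of kind selection
  return kTransitionToField;
}

int main() {
  assert(SelectTransitionHandler(true, kField, false) == kStoreNormal);
  assert(SelectTransitionHandler(false, kDescriptor, false) ==
         kTransitionToConstant);
  assert(SelectTransitionHandler(false, kField, true) == kTransitionToField);
}
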
+
+// static
+Handle<Object> StoreHandler::StoreGlobal(Isolate* isolate,
+ Handle<PropertyCell> cell) {
+ return isolate->factory()->NewWeakCell(cell);
+}
+
+// static
+Handle<Object> StoreHandler::StoreProxy(Isolate* isolate,
+ Handle<Map> receiver_map,
+ Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ Handle<Name> name) {
+ Handle<Smi> smi_handler = StoreProxy(isolate);
+
+ if (receiver.is_identical_to(proxy)) return smi_handler;
+
+ int checks_count = GetPrototypeCheckCount(isolate, receiver_map, proxy, name);
+
+ DCHECK_LE(0, checks_count);
+
+ if (receiver_map->is_access_check_needed()) {
+ DCHECK(!receiver_map->is_dictionary_map());
+ DCHECK_LE(1, checks_count); // For native context.
+ smi_handler = EnableAccessCheckOnReceiver(isolate, smi_handler);
+ }
+
+ Handle<Object> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate);
+ if (validity_cell.is_null()) {
+ DCHECK_EQ(0, checks_count);
+ validity_cell = handle(Smi::kZero, isolate);
+ }
+
+ Factory* factory = isolate->factory();
+ Handle<WeakCell> holder_cell = factory->NewWeakCell(proxy);
+
+ if (checks_count == 0) {
+ return factory->NewTuple3(holder_cell, smi_handler, validity_cell, TENURED);
+ }
+ Handle<FixedArray> handler_array(
+ factory->NewFixedArray(kFirstPrototypeIndex + checks_count, TENURED));
+ handler_array->set(kSmiHandlerIndex, *smi_handler);
+ handler_array->set(kValidityCellIndex, *validity_cell);
+ handler_array->set(kTransitionMapOrHolderCellIndex, *holder_cell);
+ InitPrototypeChecks(isolate, receiver_map, proxy, name, handler_array,
+ kFirstPrototypeIndex);
+ return handler_array;
+}
+
+Object* StoreHandler::ValidHandlerOrNull(Object* raw_handler, Name* name,
+ Handle<Map>* out_transition) {
+ STATIC_ASSERT(kValidityCellOffset == Tuple3::kValue3Offset);
+
+ Smi* valid = Smi::FromInt(Map::kPrototypeChainValid);
+
+ if (raw_handler->IsTuple3()) {
+ // Check validity cell.
+ Tuple3* handler = Tuple3::cast(raw_handler);
+
+ Object* raw_validity_cell = handler->value3();
+    // |raw_validity_cell| can be Smi::kZero if no validity cell is required
+ // (which counts as valid).
+ if (raw_validity_cell->IsCell() &&
+ Cell::cast(raw_validity_cell)->value() != valid) {
+ return nullptr;
+ }
+
+ } else {
+ DCHECK(raw_handler->IsFixedArray());
+ FixedArray* handler = FixedArray::cast(raw_handler);
+
+ // Check validity cell.
+ Object* value = Cell::cast(handler->get(kValidityCellIndex))->value();
+ if (value != valid) return nullptr;
+
+ // Check prototypes.
+ Heap* heap = handler->GetHeap();
+ Isolate* isolate = heap->isolate();
+ Handle<Name> name_handle(name, isolate);
+ for (int i = kFirstPrototypeIndex; i < handler->length(); i++) {
+ // This mirrors AccessorAssembler::CheckPrototype.
+ WeakCell* prototype_cell = WeakCell::cast(handler->get(i));
+ if (prototype_cell->cleared()) return nullptr;
+ HeapObject* maybe_prototype = HeapObject::cast(prototype_cell->value());
+ if (maybe_prototype->IsPropertyCell()) {
+ Object* value = PropertyCell::cast(maybe_prototype)->value();
+ if (value != heap->the_hole_value()) return nullptr;
+ } else {
+ DCHECK(maybe_prototype->map()->is_dictionary_map());
+ // Do a negative dictionary lookup.
+ NameDictionary* dict =
+ JSObject::cast(maybe_prototype)->property_dictionary();
+ int number = dict->FindEntry(isolate, name_handle);
+ if (number != NameDictionary::kNotFound) {
+ PropertyDetails details = dict->DetailsAt(number);
+ if (details.IsReadOnly() || details.kind() == kAccessor) {
+ return nullptr;
+ }
+ break;
+ }
+ }
}
}
- // Step 4: Check if the transition target is deprecated.
+
+ // Check if the transition target is deprecated.
+ WeakCell* target_cell = GetTransitionCell(raw_handler);
Map* transition = Map::cast(target_cell->value());
if (transition->is_deprecated()) return nullptr;
*out_transition = handle(transition);
- return handler;
+ return raw_handler;
}
} // namespace internal
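
For orientation, a self-contained model (ordinary containers instead of V8's
weak cells and dictionaries) of the prototype walk that ValidHandlerOrNull
performs, including the relaxed dictionary-lookup case this patch introduces:

#include <cassert>
#include <string>
#include <unordered_map>
#include <vector>

enum PropertyKind { kDataProperty, kAccessorProperty };
struct Details { bool read_only; PropertyKind kind; };

struct PrototypeCheck {
  bool weak_cell_cleared;      // the recorded prototype was collected
  bool is_global_cell;         // global-object case: an empty PropertyCell
  bool cell_holds_hole;        // true while the property is still absent
  std::unordered_map<std::string, Details> dict;  // dictionary-mode props
};

bool PrototypeChecksStillPass(const std::vector<PrototypeCheck>& checks,
                              const std::string& name) {
  for (const PrototypeCheck& c : checks) {
    if (c.weak_cell_cleared) return false;
    if (c.is_global_cell) {
      if (!c.cell_holds_hole) return false;  // property appeared on global
    } else {
      auto it = c.dict.find(name);
      if (it != c.dict.end()) {
        // New in this patch: a hit only invalidates the handler if the
        // property is read-only or an accessor; otherwise stop walking.
        if (it->second.read_only || it->second.kind == kAccessorProperty)
          return false;
        break;
      }
    }
  }
  return true;
}

int main() {
  std::vector<PrototypeCheck> checks(1);
  checks[0].is_global_cell = true;
  checks[0].cell_holds_hole = true;
  assert(PrototypeChecksStillPass(checks, "x"));
}
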
diff --git a/deps/v8/src/ic/handler-configuration.h b/deps/v8/src/ic/handler-configuration.h
index e970de754c..87ff45a46a 100644
--- a/deps/v8/src/ic/handler-configuration.h
+++ b/deps/v8/src/ic/handler-configuration.h
@@ -132,15 +132,22 @@ class LoadHandler {
// dictionary.
static inline Handle<Smi> LoadModuleExport(Isolate* isolate, int index);
- // Sets DoAccessCheckOnReceiverBits in given Smi-handler. The receiver
- // check is a part of a prototype chain check.
- static inline Handle<Smi> EnableAccessCheckOnReceiver(
- Isolate* isolate, Handle<Smi> smi_handler);
-
- // Sets LookupOnReceiverBits in given Smi-handler. The receiver
- // check is a part of a prototype chain check.
- static inline Handle<Smi> EnableLookupOnReceiver(Isolate* isolate,
- Handle<Smi> smi_handler);
+ // Creates a data handler that represents a load of a non-existent property.
+ // {holder} is the object from which the property is loaded. If no holder is
+ // needed (e.g., for "nonexistent"), null_value() may be passed in.
+ static Handle<Object> LoadFullChain(Isolate* isolate,
+ Handle<Map> receiver_map,
+ Handle<Object> holder, Handle<Name> name,
+ Handle<Smi> smi_handler);
+
+  // Creates a data handler that represents a prototype chain check followed
+  // by the given Smi-handler, which encodes a load from the holder.
+  // Can be used only if GetPrototypeCheckCount() returns a non-negative value.
+ static Handle<Object> LoadFromPrototype(Isolate* isolate,
+ Handle<Map> receiver_map,
+ Handle<JSReceiver> holder,
+ Handle<Name> name,
+ Handle<Smi> smi_handler);
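
A brief, hedged usage sketch of the two entry points (the call shapes come
from the ic.cc changes later in this diff; variable names are illustrative):

// Miss on a property that exists nowhere on the chain: full-chain handler
// with a null holder (see LoadIC::UpdateCaches below).
//   Handle<Smi> smi_handler = LoadHandler::LoadNonExistent(isolate);
//   Handle<Object> handler = LoadHandler::LoadFullChain(
//       isolate, receiver_map, isolate->factory()->null_value(), name,
//       smi_handler);
//
// Property found on a known holder somewhere on the chain:
//   Handle<Object> handler = LoadHandler::LoadFromPrototype(
//       isolate, receiver_map, holder, name, smi_handler);
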
// Creates a Smi-handler for loading a non-existent property. Works only as
// a part of prototype chain check.
@@ -151,6 +158,17 @@ class LoadHandler {
ElementsKind elements_kind,
bool convert_hole_to_undefined,
bool is_js_array);
+
+ private:
+  // Sets DoAccessCheckOnReceiverBits in the given Smi-handler. The receiver
+  // check is part of a prototype chain check.
+ static inline Handle<Smi> EnableAccessCheckOnReceiver(
+ Isolate* isolate, Handle<Smi> smi_handler);
+
+  // Sets LookupOnReceiverBits in the given Smi-handler. The receiver
+  // check is part of a prototype chain check.
+ static inline Handle<Smi> EnableLookupOnReceiver(Isolate* isolate,
+ Handle<Smi> smi_handler);
};
// A set of bit fields representing Smi handlers for stores.
@@ -160,21 +178,33 @@ class StoreHandler {
kStoreElement,
kStoreField,
kStoreConstField,
- kStoreNormal,
- kTransitionToField,
// TODO(ishell): remove once constant field tracking is done.
- kTransitionToConstant = kStoreConstField
+ kTransitionToConstant = kStoreConstField,
+ kTransitionToField,
+ kStoreGlobalProxy,
+ kStoreNormal,
+ kProxy,
+ kKindsNumber // Keep last
};
class KindBits : public BitField<Kind, 0, 3> {};
enum FieldRepresentation { kSmi, kDouble, kHeapObject, kTagged };
+ static inline bool IsHandler(Object* maybe_handler);
+
+  // Applicable to the kStoreGlobalProxy and kProxy kinds.
+
+  // Defines whether an access rights check should be done on the receiver
+  // object.
+ class DoAccessCheckOnReceiverBits
+ : public BitField<bool, KindBits::kNext, 1> {};
+
// Applicable to kStoreField, kTransitionToField and kTransitionToConstant
// kinds.
// Index of a value entry in the descriptor array.
class DescriptorBits
- : public BitField<unsigned, KindBits::kNext, kDescriptorIndexBitCount> {};
+ : public BitField<unsigned, DoAccessCheckOnReceiverBits::kNext,
+ kDescriptorIndexBitCount> {};
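
The chained kNext pattern above is easier to see in a standalone
reimplementation. This minimal BitField (not v8::base's template; 10 stands
in for kDescriptorIndexBitCount) packs the same three fields into one
integer:

#include <cassert>
#include <cstdint>

template <typename T, int kShift, int kSize>
struct BitField {
  static const int kNext = kShift + kSize;
  static const uint32_t kMask = ((1u << kSize) - 1) << kShift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> kShift);
  }
};

enum Kind { kStoreElement, kStoreField };  // abbreviated
using KindBits = BitField<Kind, 0, 3>;
using DoAccessCheckOnReceiverBits = BitField<bool, KindBits::kNext, 1>;
using DescriptorBits =
    BitField<unsigned, DoAccessCheckOnReceiverBits::kNext, 10>;

int main() {
  uint32_t handler = KindBits::encode(kStoreField) |
                     DoAccessCheckOnReceiverBits::encode(true) |
                     DescriptorBits::encode(7);
  assert(KindBits::decode(handler) == kStoreField);
  assert(DoAccessCheckOnReceiverBits::decode(handler));
  assert(DescriptorBits::decode(handler) == 7u);
}
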
//
// Encoding when KindBits contains kTransitionToConstant.
//
@@ -199,34 +229,68 @@ class StoreHandler {
  // The layout of a Tuple3 handler representing a transitioning store
// when prototype chain checks do not include non-existing lookups or access
// checks.
- static const int kTransitionCellOffset = Tuple3::kValue1Offset;
+ static const int kTransitionOrHolderCellOffset = Tuple3::kValue1Offset;
static const int kSmiHandlerOffset = Tuple3::kValue2Offset;
static const int kValidityCellOffset = Tuple3::kValue3Offset;
- static inline WeakCell* GetTuple3TransitionCell(Object* tuple3_handler);
- static Object* ValidTuple3HandlerOrNull(Object* handler, Name* name,
- Handle<Map>* out_transition);
+ static inline WeakCell* GetTransitionCell(Object* handler);
+ static Object* ValidHandlerOrNull(Object* handler, Name* name,
+ Handle<Map>* out_transition);
// The layout of an array handler representing a transitioning store
// when prototype chain checks include non-existing lookups and access checks.
static const int kSmiHandlerIndex = 0;
static const int kValidityCellIndex = 1;
- static const int kTransitionCellIndex = 2;
+ static const int kTransitionMapOrHolderCellIndex = 2;
static const int kFirstPrototypeIndex = 3;
- static inline WeakCell* GetArrayTransitionCell(Object* array_handler);
- static Object* ValidFixedArrayHandlerOrNull(Object* raw_handler, Name* name,
- Handle<Map>* out_transition);
-
// Creates a Smi-handler for storing a field to fast object.
static inline Handle<Smi> StoreField(Isolate* isolate, int descriptor,
FieldIndex field_index,
PropertyConstness constness,
Representation representation);
+ static Handle<Object> StoreTransition(Isolate* isolate,
+ Handle<Map> receiver_map,
+ Handle<JSObject> holder,
+ Handle<HeapObject> transition,
+ Handle<Name> name);
+
+ static Handle<Object> StoreElementTransition(Isolate* isolate,
+ Handle<Map> receiver_map,
+ Handle<Map> transition,
+ KeyedAccessStoreMode store_mode);
+
+ static Handle<Object> StoreProxy(Isolate* isolate, Handle<Map> receiver_map,
+ Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ Handle<Name> name);
+
+ // Creates a handler for storing a property to the property cell of a global
+ // object.
+ static Handle<Object> StoreGlobal(Isolate* isolate,
+ Handle<PropertyCell> cell);
+
+ // Creates a Smi-handler for storing a property to a global proxy object.
+ static inline Handle<Smi> StoreGlobalProxy(Isolate* isolate);
+
// Creates a Smi-handler for storing a property to a slow object.
static inline Handle<Smi> StoreNormal(Isolate* isolate);
+ // Creates a Smi-handler for storing a property on a proxy.
+ static inline Handle<Smi> StoreProxy(Isolate* isolate);
+
+ private:
+  // Sets DoAccessCheckOnReceiverBits in the given Smi-handler. The receiver
+  // check is part of a prototype chain check.
+ static inline Handle<Smi> EnableAccessCheckOnReceiver(
+ Isolate* isolate, Handle<Smi> smi_handler);
+
+ static inline Handle<Smi> StoreField(Isolate* isolate, Kind kind,
+ int descriptor, FieldIndex field_index,
+ Representation representation,
+ bool extend_storage);
+
// Creates a Smi-handler for transitioning store to a field.
static inline Handle<Smi> TransitionToField(Isolate* isolate, int descriptor,
FieldIndex field_index,
@@ -237,12 +301,6 @@ class StoreHandler {
// case the only thing that needs to be done is an update of a map).
static inline Handle<Smi> TransitionToConstant(Isolate* isolate,
int descriptor);
-
- private:
- static inline Handle<Smi> StoreField(Isolate* isolate, Kind kind,
- int descriptor, FieldIndex field_index,
- Representation representation,
- bool extend_storage);
};
} // namespace internal
diff --git a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
index 94f7c0fa63..9e9a9c58a7 100644
--- a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
@@ -63,7 +63,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
DCHECK(name->IsUniqueName());
- DCHECK(!receiver.is(scratch0));
+ DCHECK(receiver != scratch0);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1);
__ IncrementCounter(counters->negative_lookups_miss(), 1);
@@ -107,7 +107,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
Handle<Map> receiver_map, Register receiver, Register scratch,
bool is_store, Register store_parameter, Register accessor_holder,
int accessor_index) {
- DCHECK(!accessor_holder.is(scratch));
+ DCHECK(accessor_holder != scratch);
// Copy return value.
__ pop(scratch);
@@ -218,9 +218,9 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
__ push(value());
if (accessor_index >= 0) {
- DCHECK(!holder.is(scratch));
- DCHECK(!receiver.is(scratch));
- DCHECK(!value().is(scratch));
+ DCHECK(holder != scratch);
+ DCHECK(receiver != scratch);
+ DCHECK(value() != scratch);
// Call the JavaScript setter with receiver and value on the stack.
if (map->IsJSGlobalObjectMap()) {
__ mov(scratch,
@@ -296,9 +296,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
Handle<Map> receiver_map = map();
// Make sure there's no overlap between holder and object registers.
- DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
- !scratch2.is(scratch1));
+ DCHECK(scratch1 != object_reg && scratch1 != holder_reg);
+ DCHECK(scratch2 != object_reg && scratch2 != holder_reg &&
+ scratch2 != scratch1);
Handle<Cell> validity_cell =
Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
@@ -373,11 +373,11 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ jmp(&success);
__ bind(miss);
- if (IC::ShouldPushPopSlotAndVector(kind())) {
- DCHECK(kind() == Code::LOAD_IC);
+ if (ShouldPushPopSlotAndVector()) {
+ DCHECK_EQ(LOAD, type());
PopVectorAndSlot();
}
- TailCallBuiltin(masm(), MissBuiltin(kind()));
+ TailCallBuiltin(masm(), Builtins::kLoadIC_Miss);
__ bind(&success);
}
}
@@ -388,8 +388,8 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ jmp(&success);
GenerateRestoreName(miss, name);
- DCHECK(!IC::ShouldPushPopSlotAndVector(kind()));
- TailCallBuiltin(masm(), MissBuiltin(kind()));
+ DCHECK(!ShouldPushPopSlotAndVector());
+ TailCallBuiltin(masm(), Builtins::kStoreIC_Miss);
__ bind(&success);
}
}
@@ -433,7 +433,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
- return GetCode(kind(), name);
+ return GetCode(name);
}
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index ea5aa6c5f6..d73ea2a759 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -43,7 +43,7 @@ Address IC::raw_constant_pool() const {
bool IC::IsHandler(Object* object) {
return (object->IsSmi() && (object != nullptr)) || object->IsTuple2() ||
object->IsTuple3() || object->IsFixedArray() || object->IsWeakCell() ||
- (object->IsCode() && Code::cast(object)->is_handler());
+ object->IsCode();
}
bool IC::AddressIsDeoptimizedCode() const {
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index e8a5ec191a..09920241ee 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -225,23 +225,9 @@ IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus)
DCHECK_NOT_NULL(nexus);
kind_ = nexus->kind();
state_ = nexus->StateFromFeedback();
- extra_ic_state_ = kNoExtraICState;
old_state_ = state_;
}
-// The ICs that don't pass slot and vector through the stack have to
-// save/restore them in the dispatcher.
-bool IC::ShouldPushPopSlotAndVector(Code::Kind kind) {
- if (kind == Code::LOAD_IC || kind == Code::LOAD_GLOBAL_IC ||
- kind == Code::KEYED_LOAD_IC) {
- return true;
- }
- if (kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC) {
- return !StoreWithVectorDescriptor::kPassLastArgsOnStack;
- }
- return false;
-}
-
JSFunction* IC::GetHostFunction() const {
// Compute the JavaScript frame for the frame pointer of this IC
// structure. We need this to be able to find the function
@@ -411,7 +397,7 @@ void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
}
void IC::ConfigureVectorState(Handle<Name> name, MapHandles const& maps,
- List<Handle<Object>>* handlers) {
+ ObjectHandles* handlers) {
DCHECK(!IsLoadGlobalIC());
// Non-keyed ICs don't track the name explicitly.
if (!is_keyed()) name = Handle<Name>::null();
@@ -515,15 +501,18 @@ static bool AddOneReceiverMapIfMissing(MapHandles* receiver_maps,
bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Object> handler) {
DCHECK(IsHandler(*handler));
- if (is_keyed() && state() != RECOMPUTE_HANDLER) return false;
+ if (is_keyed() && state() != RECOMPUTE_HANDLER) {
+ if (nexus()->FindFirstName() != *name) return false;
+ }
Handle<Map> map = receiver_map();
MapHandles maps;
- List<Handle<Object>> handlers;
+ ObjectHandles handlers;
TargetMaps(&maps);
int number_of_maps = static_cast<int>(maps.size());
int deprecated_maps = 0;
int handler_to_overwrite = -1;
+ if (!nexus()->FindHandlers(&handlers, number_of_maps)) return false;
for (int i = 0; i < number_of_maps; i++) {
Handle<Map> current_map = maps.at(i);
@@ -531,6 +520,15 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Object> handler) {
// Filter out deprecated maps to ensure their instances get migrated.
++deprecated_maps;
} else if (map.is_identical_to(current_map)) {
+ // If both map and handler stayed the same (and the name is also the
+ // same as checked above, for keyed accesses), we're not progressing
+ // in the lattice and need to go MEGAMORPHIC instead. There's one
+      // exception to this rule: in RECOMPUTE_HANDLER state we do allow
+      // migrating to a new handler.
+ if (handler.is_identical_to(handlers[i]) &&
+ state() != RECOMPUTE_HANDLER) {
+ return false;
+ }
// If the receiver type is already in the polymorphic IC, this indicates
      // there was a prototype chain failure. In that case, just overwrite the
// handler.
@@ -548,23 +546,20 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Object> handler) {
if (number_of_maps == 0 && state() != MONOMORPHIC && state() != POLYMORPHIC) {
return false;
}
- if (!nexus()->FindHandlers(&handlers, static_cast<int>(maps.size()))) {
- return false;
- }
number_of_valid_maps++;
- if (number_of_valid_maps > 1 && is_keyed()) return false;
if (number_of_valid_maps == 1) {
ConfigureVectorState(name, receiver_map(), handler);
} else {
+ if (is_keyed() && nexus()->FindFirstName() != *name) return false;
if (handler_to_overwrite >= 0) {
- handlers.Set(handler_to_overwrite, handler);
+ handlers[handler_to_overwrite] = handler;
if (!map.is_identical_to(maps.at(handler_to_overwrite))) {
maps[handler_to_overwrite] = map;
}
} else {
maps.push_back(map);
- handlers.Add(handler);
+ handlers.push_back(handler);
}
ConfigureVectorState(name, maps, &handlers);
@@ -581,7 +576,7 @@ void IC::UpdateMonomorphicIC(Handle<Object> handler, Handle<Name> name) {
void IC::CopyICToMegamorphicCache(Handle<Name> name) {
MapHandles maps;
- List<Handle<Object>> handlers;
+ ObjectHandles handlers;
TargetMaps(&maps);
if (!nexus()->FindHandlers(&handlers, static_cast<int>(maps.size()))) return;
for (int i = 0; i < static_cast<int>(maps.size()); i++) {
@@ -623,10 +618,8 @@ void IC::PatchCache(Handle<Name> name, Handle<Object> handler) {
}
// Fall through.
case POLYMORPHIC:
+ if (UpdatePolymorphicIC(name, handler)) break;
if (!is_keyed() || state() == RECOMPUTE_HANDLER) {
- if (UpdatePolymorphicIC(name, handler)) break;
- // For keyed stubs, we can't know whether old handlers were for the
- // same key.
CopyICToMegamorphicCache(name);
}
ConfigureVectorState(MEGAMORPHIC, name);
@@ -647,204 +640,6 @@ Handle<Smi> LoadIC::SimpleFieldLoad(Isolate* isolate, FieldIndex index) {
return LoadHandler::LoadField(isolate, index);
}
-namespace {
-
-template <bool fill_array = true>
-int InitPrototypeChecks(Isolate* isolate, Handle<Map> receiver_map,
- Handle<JSReceiver> holder, Handle<Name> name,
- Handle<FixedArray> array, int first_index) {
- if (!holder.is_null() && holder->map() == *receiver_map) return 0;
-
- HandleScope scope(isolate);
- int checks_count = 0;
-
- if (receiver_map->IsPrimitiveMap() || receiver_map->IsJSGlobalProxyMap()) {
- // The validity cell check for primitive and global proxy receivers does
- // not guarantee that certain native context ever had access to other
- // native context. However, a handler created for one native context could
- // be used in other native context through the megamorphic stub cache.
- // So we record the original native context to which this handler
- // corresponds.
- if (fill_array) {
- Handle<Context> native_context = isolate->native_context();
- array->set(first_index + checks_count, native_context->self_weak_cell());
- }
- checks_count++;
-
- } else if (receiver_map->IsJSGlobalObjectMap()) {
- // If we are creating a handler for [Load/Store]GlobalIC then we need to
- // check that the property did not appear in the global object.
- if (fill_array) {
- Handle<JSGlobalObject> global = isolate->global_object();
- Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
- global, name, PropertyCellType::kInvalidated);
- DCHECK(cell->value()->IsTheHole(isolate));
- Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
- array->set(first_index + checks_count, *weak_cell);
- }
- checks_count++;
- }
-
- // Create/count entries for each global or dictionary prototype appeared in
- // the prototype chain contains from receiver till holder.
- PrototypeIterator::WhereToEnd end = name->IsPrivate()
- ? PrototypeIterator::END_AT_NON_HIDDEN
- : PrototypeIterator::END_AT_NULL;
- for (PrototypeIterator iter(receiver_map, end); !iter.IsAtEnd();
- iter.Advance()) {
- Handle<JSReceiver> current =
- PrototypeIterator::GetCurrent<JSReceiver>(iter);
- if (holder.is_identical_to(current)) break;
- Handle<Map> current_map(current->map(), isolate);
-
- if (current_map->IsJSGlobalObjectMap()) {
- if (fill_array) {
- Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(current);
- Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
- global, name, PropertyCellType::kInvalidated);
- DCHECK(cell->value()->IsTheHole(isolate));
- Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
- array->set(first_index + checks_count, *weak_cell);
- }
- checks_count++;
-
- } else if (current_map->is_dictionary_map()) {
- DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- if (fill_array) {
- DCHECK_EQ(NameDictionary::kNotFound,
- current->property_dictionary()->FindEntry(name));
- Handle<WeakCell> weak_cell =
- Map::GetOrCreatePrototypeWeakCell(current, isolate);
- array->set(first_index + checks_count, *weak_cell);
- }
- checks_count++;
- }
- }
- return checks_count;
-}
-
-// Returns 0 if the validity cell check is enough to ensure that the
-// prototype chain from |receiver_map| till |holder| did not change.
-// If the |holder| is an empty handle then the full prototype chain is
-// checked.
-// Returns -1 if the handler has to be compiled or the number of prototype
-// checks otherwise.
-int GetPrototypeCheckCount(Isolate* isolate, Handle<Map> receiver_map,
- Handle<JSReceiver> holder, Handle<Name> name) {
- return InitPrototypeChecks<false>(isolate, receiver_map, holder, name,
- Handle<FixedArray>(), 0);
-}
-
-enum class HolderCellRequest {
- kGlobalPropertyCell,
- kHolder,
-};
-
-Handle<WeakCell> HolderCell(Isolate* isolate, Handle<JSReceiver> holder,
- Handle<Name> name, HolderCellRequest request) {
- if (request == HolderCellRequest::kGlobalPropertyCell) {
- DCHECK(holder->IsJSGlobalObject());
- Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(holder);
- GlobalDictionary* dict = global->global_dictionary();
- int number = dict->FindEntry(name);
- DCHECK_NE(NameDictionary::kNotFound, number);
- Handle<PropertyCell> cell(dict->CellAt(number), isolate);
- return isolate->factory()->NewWeakCell(cell);
- }
- return Map::GetOrCreatePrototypeWeakCell(holder, isolate);
-}
-
-} // namespace
-
-Handle<Object> LoadIC::LoadFromPrototype(Handle<Map> receiver_map,
- Handle<JSReceiver> holder,
- Handle<Name> name,
- Handle<Smi> smi_handler) {
- int checks_count =
- GetPrototypeCheckCount(isolate(), receiver_map, holder, name);
- DCHECK_LE(0, checks_count);
-
- if (receiver_map->IsPrimitiveMap() ||
- receiver_map->is_access_check_needed()) {
- DCHECK(!receiver_map->is_dictionary_map());
- DCHECK_LE(1, checks_count); // For native context.
- smi_handler =
- LoadHandler::EnableAccessCheckOnReceiver(isolate(), smi_handler);
- } else if (receiver_map->is_dictionary_map() &&
- !receiver_map->IsJSGlobalObjectMap()) {
- smi_handler = LoadHandler::EnableLookupOnReceiver(isolate(), smi_handler);
- }
-
- Handle<Cell> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
- DCHECK(!validity_cell.is_null());
-
- // LoadIC dispatcher expects PropertyCell as a "holder" in case of kGlobal
- // handler kind.
- HolderCellRequest request =
- LoadHandler::GetHandlerKind(*smi_handler) == LoadHandler::kGlobal
- ? HolderCellRequest::kGlobalPropertyCell
- : HolderCellRequest::kHolder;
-
- Handle<WeakCell> holder_cell = HolderCell(isolate(), holder, name, request);
-
- if (checks_count == 0) {
- return isolate()->factory()->NewTuple3(holder_cell, smi_handler,
- validity_cell);
- }
- Handle<FixedArray> handler_array(isolate()->factory()->NewFixedArray(
- LoadHandler::kFirstPrototypeIndex + checks_count, TENURED));
- handler_array->set(LoadHandler::kSmiHandlerIndex, *smi_handler);
- handler_array->set(LoadHandler::kValidityCellIndex, *validity_cell);
- handler_array->set(LoadHandler::kHolderCellIndex, *holder_cell);
- InitPrototypeChecks(isolate(), receiver_map, holder, name, handler_array,
- LoadHandler::kFirstPrototypeIndex);
- return handler_array;
-}
-
-Handle<Object> LoadIC::LoadFullChain(Handle<Map> receiver_map,
- Handle<Object> holder, Handle<Name> name,
- Handle<Smi> smi_handler) {
- Handle<JSReceiver> end; // null handle
- int checks_count = GetPrototypeCheckCount(isolate(), receiver_map, end, name);
- DCHECK_LE(0, checks_count);
-
- if (receiver_map->IsPrimitiveMap() ||
- receiver_map->is_access_check_needed()) {
- DCHECK(!receiver_map->is_dictionary_map());
- DCHECK_LE(1, checks_count); // For native context.
- smi_handler =
- LoadHandler::EnableAccessCheckOnReceiver(isolate(), smi_handler);
- } else if (receiver_map->is_dictionary_map() &&
- !receiver_map->IsJSGlobalObjectMap()) {
- smi_handler = LoadHandler::EnableLookupOnReceiver(isolate(), smi_handler);
- }
-
- Handle<Object> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
- if (validity_cell.is_null()) {
- DCHECK_EQ(0, checks_count);
- // Lookup on receiver isn't supported in case of a simple smi handler.
- if (!LoadHandler::LookupOnReceiverBits::decode(smi_handler->value())) {
- return smi_handler;
- }
- validity_cell = handle(Smi::kZero, isolate());
- }
-
- Factory* factory = isolate()->factory();
- if (checks_count == 0) {
- return factory->NewTuple3(holder, smi_handler, validity_cell);
- }
- Handle<FixedArray> handler_array(factory->NewFixedArray(
- LoadHandler::kFirstPrototypeIndex + checks_count, TENURED));
- handler_array->set(LoadHandler::kSmiHandlerIndex, *smi_handler);
- handler_array->set(LoadHandler::kValidityCellIndex, *validity_cell);
- handler_array->set(LoadHandler::kHolderCellIndex, *holder);
- InitPrototypeChecks(isolate(), receiver_map, end, name, handler_array,
- LoadHandler::kFirstPrototypeIndex);
- return handler_array;
-}
-
void LoadIC::UpdateCaches(LookupIterator* lookup) {
if (state() == UNINITIALIZED && !IsLoadGlobalIC()) {
// This is the first time we execute this inline cache. Set the target to
@@ -861,8 +656,9 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
} else if (!lookup->IsFound()) {
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNonexistentDH);
Handle<Smi> smi_handler = LoadHandler::LoadNonExistent(isolate());
- code = LoadFullChain(receiver_map(), isolate()->factory()->null_value(),
- lookup->name(), smi_handler);
+ code = LoadHandler::LoadFullChain(isolate(), receiver_map(),
+ isolate()->factory()->null_value(),
+ lookup->name(), smi_handler);
} else {
if (IsLoadGlobalIC()) {
if (lookup->TryLookupCachedProperty()) {
@@ -917,39 +713,7 @@ Handle<Object> IC::ComputeHandler(LookupIterator* lookup) {
return shared_handler;
}
- Handle<Code> handler = PropertyHandlerCompiler::Find(
- lookup->name(), receiver_map(), handler_kind());
- // Use the cached value if it exists, and if it is different from the
- // handler that just missed.
- if (!handler.is_null()) {
- Handle<Object> current_handler;
- if (maybe_handler_.ToHandle(&current_handler)) {
- if (!current_handler.is_identical_to(handler)) {
- TraceHandlerCacheHitStats(lookup);
- return handler;
- }
- } else {
- // maybe_handler_ is only populated for MONOMORPHIC and POLYMORPHIC ICs.
- // In MEGAMORPHIC case, check if the handler in the megamorphic stub
- // cache (which just missed) is different from the cached handler.
- if (state() == MEGAMORPHIC && lookup->GetReceiver()->IsHeapObject()) {
- Map* map = Handle<HeapObject>::cast(lookup->GetReceiver())->map();
- Object* megamorphic_cached_handler =
- stub_cache()->Get(*lookup->name(), map);
- if (megamorphic_cached_handler != *handler) {
- TraceHandlerCacheHitStats(lookup);
- return handler;
- }
- } else {
- TraceHandlerCacheHitStats(lookup);
- return handler;
- }
- }
- }
-
- handler = CompileHandler(lookup);
- Map::UpdateCodeCache(receiver_map(), lookup->name(), handler);
- return handler;
+ return CompileHandler(lookup);
}
Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
@@ -962,9 +726,8 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
if (receiver->IsStringWrapper() &&
*lookup->name() == isolate()->heap()->length_string()) {
- TRACE_HANDLER_STATS(isolate(), LoadIC_StringLengthStub);
- StringLengthStub string_length_stub(isolate());
- return string_length_stub.GetCode();
+ TRACE_HANDLER_STATS(isolate(), LoadIC_StringLength);
+ return BUILTIN_CODE(isolate(), LoadIC_StringLength);
}
// Use specialized code for getting prototype of functions.
@@ -997,7 +760,8 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
holder_ref = Map::GetOrCreatePrototypeWeakCell(holder, isolate());
}
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNonMaskingInterceptorDH);
- return LoadFullChain(map, holder_ref, lookup->name(), smi_handler);
+ return LoadHandler::LoadFullChain(isolate(), map, holder_ref,
+ lookup->name(), smi_handler);
}
if (receiver_is_holder) {
@@ -1007,7 +771,8 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
}
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadInterceptorFromPrototypeDH);
- return LoadFromPrototype(map, holder, lookup->name(), smi_handler);
+ return LoadHandler::LoadFromPrototype(isolate(), map, holder,
+ lookup->name(), smi_handler);
}
case LookupIterator::ACCESSOR: {
@@ -1094,7 +859,8 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNormalFromPrototypeDH);
}
- return LoadFromPrototype(map, holder, lookup->name(), smi_handler);
+ return LoadHandler::LoadFromPrototype(isolate(), map, holder,
+ lookup->name(), smi_handler);
}
Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(accessors);
@@ -1112,7 +878,8 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterDH);
if (receiver_is_holder) return smi_handler;
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterFromPrototypeDH);
- return LoadFromPrototype(map, holder, lookup->name(), smi_handler);
+ return LoadHandler::LoadFromPrototype(isolate(), map, holder,
+ lookup->name(), smi_handler);
}
case LookupIterator::DATA: {
@@ -1126,7 +893,8 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
// global object.
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadGlobalDH);
smi_handler = LoadHandler::LoadGlobal(isolate());
- return LoadFromPrototype(map, holder, lookup->name(), smi_handler);
+ return LoadHandler::LoadFromPrototype(isolate(), map, holder,
+ lookup->name(), smi_handler);
}
DCHECK(!holder->IsJSGlobalObject());
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNormalDH);
@@ -1153,7 +921,8 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
if (receiver_is_holder) return smi_handler;
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantFromPrototypeDH);
}
- return LoadFromPrototype(map, holder, lookup->name(), smi_handler);
+ return LoadHandler::LoadFromPrototype(isolate(), map, holder,
+ lookup->name(), smi_handler);
}
case LookupIterator::INTEGER_INDEXED_EXOTIC:
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadIntegerIndexedExoticDH);
@@ -1165,7 +934,8 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
if (receiver_is_holder_proxy) {
return smi_handler;
}
- return LoadFromPrototype(map, holder_proxy, lookup->name(), smi_handler);
+ return LoadHandler::LoadFromPrototype(isolate(), map, holder_proxy,
+ lookup->name(), smi_handler);
}
case LookupIterator::ACCESS_CHECK:
case LookupIterator::NOT_FOUND:
@@ -1221,8 +991,7 @@ static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) {
void KeyedLoadIC::UpdateLoadElement(Handle<HeapObject> receiver) {
Handle<Map> receiver_map(receiver->map(), isolate());
- DCHECK(receiver_map->instance_type() != JS_VALUE_TYPE &&
- receiver_map->instance_type() != JS_PROXY_TYPE); // Checked by caller.
+ DCHECK(receiver_map->instance_type() != JS_VALUE_TYPE); // Checked by caller.
MapHandles target_receiver_maps;
TargetMaps(&target_receiver_maps);
@@ -1251,6 +1020,7 @@ void KeyedLoadIC::UpdateLoadElement(Handle<HeapObject> receiver) {
// miss again and it will become polymorphic and support both the
// untransitioned and transitioned maps.
if (state() == MONOMORPHIC && !receiver->IsString() &&
+ !receiver->IsJSProxy() &&
IsMoreGeneralElementsKindTransition(
target_receiver_maps.at(0)->elements_kind(),
Handle<JSObject>::cast(receiver)->GetElementsKind())) {
@@ -1276,12 +1046,12 @@ void KeyedLoadIC::UpdateLoadElement(Handle<HeapObject> receiver) {
return;
}
- List<Handle<Object>> handlers(static_cast<int>(target_receiver_maps.size()));
+ ObjectHandles handlers;
+ handlers.reserve(target_receiver_maps.size());
LoadElementPolymorphicHandlers(&target_receiver_maps, &handlers);
DCHECK_LE(1, target_receiver_maps.size());
if (target_receiver_maps.size() == 1) {
- ConfigureVectorState(Handle<Name>(), target_receiver_maps[0],
- handlers.at(0));
+ ConfigureVectorState(Handle<Name>(), target_receiver_maps[0], handlers[0]);
} else {
ConfigureVectorState(Handle<Name>(), target_receiver_maps, &handlers);
}
@@ -1304,6 +1074,9 @@ Handle<Object> KeyedLoadIC::LoadElementHandler(Handle<Map> receiver_map) {
TRACE_HANDLER_STATS(isolate(), KeyedLoadIC_SlowStub);
return BUILTIN_CODE(isolate(), KeyedLoadIC_Slow);
}
+ if (instance_type == JS_PROXY_TYPE) {
+ return LoadHandler::LoadProxy(isolate());
+ }
ElementsKind elements_kind = receiver_map->elements_kind();
if (IsSloppyArgumentsElementsKind(elements_kind)) {
@@ -1328,8 +1101,8 @@ Handle<Object> KeyedLoadIC::LoadElementHandler(Handle<Map> receiver_map) {
convert_hole_to_undefined, is_js_array);
}
-void KeyedLoadIC::LoadElementPolymorphicHandlers(
- MapHandles* receiver_maps, List<Handle<Object>>* handlers) {
+void KeyedLoadIC::LoadElementPolymorphicHandlers(MapHandles* receiver_maps,
+ ObjectHandles* handlers) {
// Filter out deprecated maps to ensure their instances get migrated.
receiver_maps->erase(
std::remove_if(
@@ -1347,7 +1120,7 @@ void KeyedLoadIC::LoadElementPolymorphicHandlers(
receiver_map->NotifyLeafMapLayoutChange();
}
}
- handlers->Add(LoadElementHandler(receiver_map));
+ handlers->push_back(LoadElementHandler(receiver_map));
}
}
@@ -1376,7 +1149,7 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
Object);
} else if (FLAG_use_ic && !object->IsAccessCheckNeeded() &&
!object->IsJSValue()) {
- if ((object->IsJSObject() && key->IsSmi()) ||
+ if ((object->IsJSReceiver() && key->IsSmi()) ||
(object->IsString() && key->IsNumber())) {
UpdateLoadElement(Handle<HeapObject>::cast(object));
if (is_vector_set()) {
@@ -1385,7 +1158,7 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
}
}
- if (!is_vector_set()) {
+ if (vector_needs_update()) {
ConfigureVectorState(MEGAMORPHIC, key);
TRACE_IC("LoadIC", key);
}
@@ -1404,6 +1177,7 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
JSReceiver::StoreFromKeyed store_mode) {
// Disable ICs for non-JSObjects for now.
Handle<Object> object = it->GetReceiver();
+ if (object->IsJSProxy()) return true;
if (!object->IsJSObject()) return false;
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
DCHECK(!receiver->map()->is_deprecated());
@@ -1414,7 +1188,7 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
case LookupIterator::TRANSITION:
UNREACHABLE();
case LookupIterator::JSPROXY:
- return false;
+ return true;
case LookupIterator::INTERCEPTOR: {
Handle<JSObject> holder = it->GetHolder<JSObject>();
InterceptorInfo* info = holder->GetNamedInterceptor();
@@ -1516,7 +1290,7 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
JSReceiver::StoreFromKeyed store_mode) {
// TODO(verwaest): Let SetProperty do the migration, since storing a property
// might deprecate the current map again, if value does not fit.
- if (MigrateDeprecated(object) || object->IsJSProxy()) {
+ if (MigrateDeprecated(object)) {
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), result,
@@ -1592,95 +1366,19 @@ void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
TRACE_IC("StoreIC", lookup->name());
}
-Handle<Object> StoreIC::StoreTransition(Handle<Map> receiver_map,
- Handle<JSObject> holder,
- Handle<Map> transition,
- Handle<Name> name) {
- Handle<Object> smi_handler;
- if (transition->is_dictionary_map()) {
- smi_handler = StoreHandler::StoreNormal(isolate());
- } else {
- int descriptor = transition->LastAdded();
- Handle<DescriptorArray> descriptors(transition->instance_descriptors());
- PropertyDetails details = descriptors->GetDetails(descriptor);
- Representation representation = details.representation();
- DCHECK(!representation.IsNone());
-
- // Declarative handlers don't support access checks.
- DCHECK(!transition->is_access_check_needed());
-
- DCHECK_EQ(kData, details.kind());
- if (details.location() == kDescriptor) {
- smi_handler = StoreHandler::TransitionToConstant(isolate(), descriptor);
-
- } else {
- DCHECK_EQ(kField, details.location());
- bool extend_storage =
- Map::cast(transition->GetBackPointer())->unused_property_fields() ==
- 0;
-
- FieldIndex index = FieldIndex::ForDescriptor(*transition, descriptor);
- smi_handler = StoreHandler::TransitionToField(
- isolate(), descriptor, index, representation, extend_storage);
- }
- }
- // |holder| is either a receiver if the property is non-existent or
- // one of the prototypes.
- DCHECK(!holder.is_null());
- bool is_nonexistent = holder->map() == transition->GetBackPointer();
- if (is_nonexistent) holder = Handle<JSObject>::null();
-
- int checks_count =
- GetPrototypeCheckCount(isolate(), receiver_map, holder, name);
-
- DCHECK_LE(0, checks_count);
- DCHECK(!receiver_map->IsJSGlobalObjectMap());
-
- Handle<Object> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
- if (validity_cell.is_null()) {
- DCHECK_EQ(0, checks_count);
- validity_cell = handle(Smi::kZero, isolate());
- }
-
- Handle<WeakCell> transition_cell = Map::WeakCellForMap(transition);
-
- Factory* factory = isolate()->factory();
- if (checks_count == 0) {
- return factory->NewTuple3(transition_cell, smi_handler, validity_cell);
- }
- Handle<FixedArray> handler_array(factory->NewFixedArray(
- StoreHandler::kFirstPrototypeIndex + checks_count, TENURED));
- handler_array->set(StoreHandler::kSmiHandlerIndex, *smi_handler);
- handler_array->set(StoreHandler::kValidityCellIndex, *validity_cell);
- handler_array->set(StoreHandler::kTransitionCellIndex, *transition_cell);
- InitPrototypeChecks(isolate(), receiver_map, holder, name, handler_array,
- StoreHandler::kFirstPrototypeIndex);
- return handler_array;
-}
-
-namespace {
-
-Handle<Object> StoreGlobal(Isolate* isolate, Handle<PropertyCell> cell) {
- return isolate->factory()->NewWeakCell(cell);
-}
-
-} // namespace
-
Handle<Object> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
- DCHECK_NE(LookupIterator::JSPROXY, lookup->state());
-
- // This is currently guaranteed by checks in StoreIC::Store.
- Handle<JSObject> receiver = Handle<JSObject>::cast(lookup->GetReceiver());
- Handle<JSObject> holder = lookup->GetHolder<JSObject>();
- DCHECK(!receiver->IsAccessCheckNeeded() || lookup->name()->IsPrivate());
-
switch (lookup->state()) {
case LookupIterator::TRANSITION: {
- auto store_target = lookup->GetStoreTarget();
+ Handle<JSObject> holder = lookup->GetHolder<JSObject>();
+
+ Handle<JSObject> store_target = lookup->GetStoreTarget();
if (store_target->IsJSGlobalObject()) {
TRACE_HANDLER_STATS(isolate(), StoreIC_StoreGlobalTransitionDH);
- return StoreGlobal(isolate(), lookup->transition_cell());
+
+ Handle<Object> handler = StoreHandler::StoreTransition(
+ isolate(), receiver_map(), store_target, lookup->transition_cell(),
+ lookup->name());
+ return handler;
}
// Currently not handled by CompileStoreTransition.
if (!holder->HasFastProperties()) {
@@ -1692,14 +1390,17 @@ Handle<Object> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
DCHECK(lookup->IsCacheableTransition());
Handle<Map> transition = lookup->transition_map();
TRACE_HANDLER_STATS(isolate(), StoreIC_StoreTransitionDH);
- Handle<Object> handler =
- StoreTransition(receiver_map(), holder, transition, lookup->name());
+ Handle<Object> handler = StoreHandler::StoreTransition(
+ isolate(), receiver_map(), holder, transition, lookup->name());
TransitionsAccessor(receiver_map())
.UpdateHandler(*lookup->name(), *handler);
return handler;
}
case LookupIterator::INTERCEPTOR: {
+ Handle<JSObject> holder = lookup->GetHolder<JSObject>();
+ USE(holder);
+
DCHECK(!holder->GetNamedInterceptor()->setter()->IsUndefined(isolate()));
TRACE_HANDLER_STATS(isolate(), StoreIC_StoreInterceptorStub);
StoreInterceptorStub stub(isolate());
@@ -1707,6 +1408,11 @@ Handle<Object> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
}
case LookupIterator::ACCESSOR: {
+ // This is currently guaranteed by checks in StoreIC::Store.
+ Handle<JSObject> receiver = Handle<JSObject>::cast(lookup->GetReceiver());
+ Handle<JSObject> holder = lookup->GetHolder<JSObject>();
+ DCHECK(!receiver->IsAccessCheckNeeded() || lookup->name()->IsPrivate());
+
if (!holder->HasFastProperties()) {
TRACE_GENERIC_IC("accessor on slow map");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
@@ -1757,11 +1463,18 @@ Handle<Object> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
}
case LookupIterator::DATA: {
+ // This is currently guaranteed by checks in StoreIC::Store.
+ Handle<JSObject> receiver = Handle<JSObject>::cast(lookup->GetReceiver());
+ USE(receiver);
+ Handle<JSObject> holder = lookup->GetHolder<JSObject>();
+ DCHECK(!receiver->IsAccessCheckNeeded() || lookup->name()->IsPrivate());
+
DCHECK_EQ(kData, lookup->property_details().kind());
if (lookup->is_dictionary_holder()) {
if (holder->IsJSGlobalObject()) {
TRACE_HANDLER_STATS(isolate(), StoreIC_StoreGlobalDH);
- return StoreGlobal(isolate(), lookup->GetPropertyCell());
+ return StoreHandler::StoreGlobal(isolate(),
+ lookup->GetPropertyCell());
}
TRACE_HANDLER_STATS(isolate(), StoreIC_StoreNormalDH);
DCHECK(holder.is_identical_to(receiver));
@@ -1789,10 +1502,16 @@ Handle<Object> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return slow_stub();
}
+ case LookupIterator::JSPROXY: {
+ Handle<JSReceiver> receiver =
+ Handle<JSReceiver>::cast(lookup->GetReceiver());
+ Handle<JSProxy> holder = lookup->GetHolder<JSProxy>();
+ return StoreHandler::StoreProxy(isolate(), receiver_map(), holder,
+ receiver, lookup->name());
+ }
case LookupIterator::INTEGER_INDEXED_EXOTIC:
case LookupIterator::ACCESS_CHECK:
- case LookupIterator::JSPROXY:
case LookupIterator::NOT_FOUND:
UNREACHABLE();
}
@@ -1873,7 +1592,8 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
// transition to a different GetNonTransitioningStoreMode IC that handles a
// superset of the original IC. Handle those here if the receiver map hasn't
// changed or it has transitioned to a more general kind.
- KeyedAccessStoreMode old_store_mode = GetKeyedAccessStoreMode();
+ KeyedAccessStoreMode old_store_mode;
+ old_store_mode = GetKeyedAccessStoreMode();
Handle<Map> previous_receiver_map = target_receiver_maps.at(0);
if (state() == MONOMORPHIC) {
Handle<Map> transitioned_receiver_map = receiver_map;
@@ -1959,13 +1679,13 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
}
}
- List<Handle<Object>> handlers(static_cast<int>(target_receiver_maps.size()));
+ ObjectHandles handlers;
+ handlers.reserve(target_receiver_maps.size());
StoreElementPolymorphicHandlers(&target_receiver_maps, &handlers, store_mode);
if (target_receiver_maps.size() == 0) {
ConfigureVectorState(PREMONOMORPHIC, Handle<Name>());
} else if (target_receiver_maps.size() == 1) {
- ConfigureVectorState(Handle<Name>(), target_receiver_maps[0],
- handlers.at(0));
+ ConfigureVectorState(Handle<Name>(), target_receiver_maps[0], handlers[0]);
} else {
ConfigureVectorState(Handle<Name>(), target_receiver_maps, &handlers);
}
@@ -2008,6 +1728,10 @@ Handle<Object> KeyedStoreIC::StoreElementHandler(
store_mode == STORE_NO_TRANSITION_HANDLE_COW);
DCHECK(!receiver_map->DictionaryElementsInPrototypeChainOnly());
+ if (receiver_map->IsJSProxyMap()) {
+ return StoreHandler::StoreProxy(isolate());
+ }
+
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
Handle<Code> stub;
@@ -2028,11 +1752,11 @@ Handle<Object> KeyedStoreIC::StoreElementHandler(
Handle<Object> validity_cell =
Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
if (validity_cell.is_null()) return stub;
- return isolate()->factory()->NewTuple2(validity_cell, stub);
+ return isolate()->factory()->NewTuple2(validity_cell, stub, TENURED);
}
void KeyedStoreIC::StoreElementPolymorphicHandlers(
- MapHandles* receiver_maps, List<Handle<Object>>* handlers,
+ MapHandles* receiver_maps, ObjectHandles* handlers,
KeyedAccessStoreMode store_mode) {
DCHECK(store_mode == STANDARD_STORE ||
store_mode == STORE_AND_GROW_NO_TRANSITION ||
@@ -2048,7 +1772,7 @@ void KeyedStoreIC::StoreElementPolymorphicHandlers(
for (Handle<Map> receiver_map : *receiver_maps) {
Handle<Object> handler;
- Handle<Map> transitioned_map;
+ Handle<Map> transition;
if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE ||
receiver_map->DictionaryElementsInPrototypeChainOnly()) {
@@ -2065,7 +1789,7 @@ void KeyedStoreIC::StoreElementPolymorphicHandlers(
if (receiver_map->is_stable()) {
receiver_map->NotifyLeafMapLayoutChange();
}
- transitioned_map = handle(tmap);
+ transition = handle(tmap);
}
}
@@ -2074,30 +1798,17 @@ void KeyedStoreIC::StoreElementPolymorphicHandlers(
// Site Tracking to do a better job of ensuring the data types are what
// they need to be. Not all the elements are in place yet, pessimistic
// elements transitions are still important for performance.
- if (!transitioned_map.is_null()) {
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- ElementsKind elements_kind = receiver_map->elements_kind();
+ if (!transition.is_null()) {
TRACE_HANDLER_STATS(isolate(),
KeyedStoreIC_ElementsTransitionAndStoreStub);
- Handle<Code> stub =
- ElementsTransitionAndStoreStub(isolate(), elements_kind,
- transitioned_map->elements_kind(),
- is_js_array, store_mode)
- .GetCode();
- Handle<Object> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
- if (validity_cell.is_null()) {
- validity_cell = handle(Smi::kZero, isolate());
- }
- Handle<WeakCell> transition = Map::WeakCellForMap(transitioned_map);
- handler =
- isolate()->factory()->NewTuple3(transition, stub, validity_cell);
+ handler = StoreHandler::StoreElementTransition(isolate(), receiver_map,
+ transition, store_mode);
} else {
handler = StoreElementHandler(receiver_map, store_mode);
}
}
DCHECK(!handler.is_null());
- handlers->Add(handler);
+ handlers->push_back(handler);
}
}
@@ -2190,7 +1901,7 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
StoreIC::Store(object, Handle<Name>::cast(key), value,
JSReceiver::MAY_BE_STORE_FROM_KEYED),
Object);
- if (!is_vector_set()) {
+ if (vector_needs_update()) {
ConfigureVectorState(MEGAMORPHIC, key);
TRACE_GENERIC_IC("unhandled internalized string key");
TRACE_IC("StoreIC", key);
@@ -2219,15 +1930,17 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
bool is_arguments = false;
bool key_is_valid_index = false;
KeyedAccessStoreMode store_mode = STANDARD_STORE;
- if (use_ic && object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (use_ic && object->IsJSReceiver()) {
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
old_receiver_map = handle(receiver->map(), isolate());
is_arguments = receiver->IsJSArgumentsObject();
- if (!is_arguments) {
- key_is_valid_index = key->IsSmi() && Smi::ToInt(*key) >= 0;
+ bool is_proxy = receiver->IsJSProxy();
+ key_is_valid_index = key->IsSmi() && Smi::ToInt(*key) >= 0;
+ if (!is_arguments && !is_proxy) {
if (key_is_valid_index) {
uint32_t index = static_cast<uint32_t>(Smi::ToInt(*key));
- store_mode = GetStoreMode(receiver, index, value);
+ Handle<JSObject> receiver_object = Handle<JSObject>::cast(object);
+ store_mode = GetStoreMode(receiver_object, index, value);
}
}
}
@@ -2263,7 +1976,7 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
}
}
- if (!is_vector_set()) {
+ if (vector_needs_update()) {
ConfigureVectorState(MEGAMORPHIC, key);
}
TRACE_IC("StoreIC", key);
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index f69985c0a3..175b1f42fd 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -5,6 +5,8 @@
#ifndef V8_IC_H_
#define V8_IC_H_
+#include <vector>
+
#include "src/factory.h"
#include "src/feedback-vector.h"
#include "src/macro-assembler.h"
@@ -58,10 +60,6 @@ class IC {
IsKeyedStoreIC();
}
- // The ICs that don't pass slot and vector through the stack have to
- // save/restore them in the dispatcher.
- static bool ShouldPushPopSlotAndVector(Code::Kind kind);
-
static inline bool IsHandler(Object* object);
  // Notify the IC system that feedback has changed.
@@ -85,6 +83,11 @@ class IC {
Address address);
bool is_vector_set() { return vector_set_; }
+ bool vector_needs_update() {
+ return (!vector_set_ &&
+ (state() != MEGAMORPHIC ||
+ Smi::ToInt(nexus()->GetFeedbackExtra()) != ELEMENT));
+ }
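
A tiny runnable model of the predicate just added (enum values are stand-ins;
in V8, ELEMENT comes from the feedback slot's extra data). Presumably the
point is that once a slot is already generic for element accesses, rewriting
it to MEGAMORPHIC on every further miss is redundant work:

#include <cassert>

enum State { MONOMORPHIC, POLYMORPHIC, MEGAMORPHIC };
enum IcCheckType { ELEMENT, PROPERTY };

bool VectorNeedsUpdate(bool vector_set, State state, IcCheckType extra) {
  return !vector_set && (state != MEGAMORPHIC || extra != ELEMENT);
}

int main() {
  assert(!VectorNeedsUpdate(true, MONOMORPHIC, PROPERTY));  // already set
  assert(!VectorNeedsUpdate(false, MEGAMORPHIC, ELEMENT));  // already generic
  assert(VectorNeedsUpdate(false, MEGAMORPHIC, PROPERTY));
}
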
// Configure for most states.
void ConfigureVectorState(IC::State new_state, Handle<Object> key);
@@ -93,7 +96,7 @@ class IC {
Handle<Object> handler);
// Configure the vector for POLYMORPHIC.
void ConfigureVectorState(Handle<Name> name, MapHandles const& maps,
- List<Handle<Object>>* handlers);
+ ObjectHandles* handlers);
char TransitionMarkFromState(IC::State state);
void TraceIC(const char* type, Handle<Object> name);
@@ -133,15 +136,8 @@ class IC {
bool IsStoreOwnIC() const { return IsStoreOwnICKind(kind_); }
bool IsKeyedStoreIC() const { return IsKeyedStoreICKind(kind_); }
bool is_keyed() const { return IsKeyedLoadIC() || IsKeyedStoreIC(); }
- Code::Kind handler_kind() const {
- if (IsAnyLoad()) return Code::LOAD_IC;
- DCHECK(IsAnyStore());
- return Code::STORE_IC;
- }
bool ShouldRecomputeHandler(Handle<String> name);
- ExtraICState extra_ic_state() const { return extra_ic_state_; }
-
Handle<Map> receiver_map() { return receiver_map_; }
void update_receiver_map(Handle<Object> receiver) {
if (receiver->IsSmi()) {
@@ -207,7 +203,6 @@ class IC {
Handle<Map> receiver_map_;
MaybeHandle<Object> maybe_handler_;
- ExtraICState extra_ic_state_;
MapHandles target_maps_;
bool target_maps_set_;
@@ -264,19 +259,6 @@ class LoadIC : public IC {
// Creates a data handler that represents a load of a field by given index.
static Handle<Smi> SimpleFieldLoad(Isolate* isolate, FieldIndex index);
- // Creates a data handler that represents a prototype chain check followed
- // by given Smi-handler that encoded a load from the holder.
- // Can be used only if GetPrototypeCheckCount() returns non negative value.
- Handle<Object> LoadFromPrototype(Handle<Map> receiver_map,
- Handle<JSReceiver> holder, Handle<Name> name,
- Handle<Smi> smi_handler);
-
- // Creates a data handler that represents a load of a non-existent property.
- // {holder} is the object from which the property is loaded. If no holder is
- // needed (e.g., for "nonexistent"), null_value() may be passed in.
- Handle<Object> LoadFullChain(Handle<Map> receiver_map, Handle<Object> holder,
- Handle<Name> name, Handle<Smi> smi_handler);
-
friend class IC;
friend class NamedLoadHandlerCompiler;
};
@@ -314,7 +296,7 @@ class KeyedLoadIC : public LoadIC {
Handle<Object> LoadElementHandler(Handle<Map> receiver_map);
void LoadElementPolymorphicHandlers(MapHandles* receiver_maps,
- List<Handle<Object>>* handlers);
+ ObjectHandles* handlers);
};
@@ -353,10 +335,6 @@ class StoreIC : public IC {
Handle<Code> CompileHandler(LookupIterator* lookup) override;
private:
- Handle<Object> StoreTransition(Handle<Map> receiver_map,
- Handle<JSObject> holder,
- Handle<Map> transition, Handle<Name> name);
-
friend class IC;
bool created_new_transition_ = false;
@@ -403,7 +381,7 @@ class KeyedStoreIC : public StoreIC {
KeyedAccessStoreMode store_mode);
void StoreElementPolymorphicHandlers(MapHandles* receiver_maps,
- List<Handle<Object>>* handlers,
+ ObjectHandles* handlers,
KeyedAccessStoreMode store_mode);
friend class IC;
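The handler lists move from V8's List<Handle<Object>> to the ObjectHandles alias (note the new <vector> include above), so call sites switch from Add() to push_back(). A sketch of the assumed alias with stand-in types:

    #include <vector>

    template <typename T>
    struct Handle {};  // stand-in for v8::internal::Handle<T>
    struct Object;     // stand-in for v8::internal::Object

    // Assumed definition; the actual alias is declared elsewhere in V8.
    using ObjectHandles = std::vector<Handle<Object>>;

    void AddHandler(ObjectHandles* handlers, Handle<Object> handler) {
      handlers->push_back(handler);  // previously: handlers->Add(handler)
    }
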
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index d2a82f7077..7ff72bb72f 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -7,6 +7,7 @@
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
#include "src/contexts.h"
+#include "src/feedback-vector.h"
#include "src/ic/accessor-assembler.h"
#include "src/interface-descriptors.h"
#include "src/isolate.h"
@@ -22,9 +23,9 @@ class KeyedStoreGenericAssembler : public AccessorAssembler {
explicit KeyedStoreGenericAssembler(compiler::CodeAssemblerState* state)
: AccessorAssembler(state) {}
- void KeyedStoreGeneric(LanguageMode language_mode);
+ void KeyedStoreGeneric();
- void StoreIC_Uninitialized(LanguageMode language_mode);
+ void StoreIC_Uninitialized();
private:
enum UpdateLength {
@@ -41,7 +42,6 @@ class KeyedStoreGenericAssembler : public AccessorAssembler {
void EmitGenericPropertyStore(Node* receiver, Node* receiver_map,
const StoreICParameters* p, Label* slow,
- LanguageMode language_mode,
UseStubCache use_stub_cache = kUseStubCache);
void BranchIfPrototypesHaveNonFastElements(Node* receiver_map,
@@ -86,16 +86,15 @@ class KeyedStoreGenericAssembler : public AccessorAssembler {
Node* value, Label* slow);
};
-void KeyedStoreGenericGenerator::Generate(compiler::CodeAssemblerState* state,
- LanguageMode language_mode) {
+void KeyedStoreGenericGenerator::Generate(compiler::CodeAssemblerState* state) {
KeyedStoreGenericAssembler assembler(state);
- assembler.KeyedStoreGeneric(language_mode);
+ assembler.KeyedStoreGeneric();
}
void StoreICUninitializedGenerator::Generate(
- compiler::CodeAssemblerState* state, LanguageMode language_mode) {
+ compiler::CodeAssemblerState* state) {
KeyedStoreGenericAssembler assembler(state);
- assembler.StoreIC_Uninitialized(language_mode);
+ assembler.StoreIC_Uninitialized();
}
void KeyedStoreGenericAssembler::BranchIfPrototypesHaveNonFastElements(
@@ -746,7 +745,7 @@ void KeyedStoreGenericAssembler::OverwriteExistingFastProperty(
void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
Node* receiver, Node* receiver_map, const StoreICParameters* p, Label* slow,
- LanguageMode language_mode, UseStubCache use_stub_cache) {
+ UseStubCache use_stub_cache) {
VARIABLE(var_accessor_pair, MachineRepresentation::kTagged);
VARIABLE(var_accessor_holder, MachineRepresentation::kTagged);
Label stub_cache(this), fast_properties(this), dictionary_properties(this),
@@ -812,14 +811,14 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&tuple3);
{
var_transition_cell.Bind(LoadObjectField(
- maybe_handler, StoreHandler::kTransitionCellOffset));
+ maybe_handler, StoreHandler::kTransitionOrHolderCellOffset));
Goto(&check_key);
}
BIND(&fixedarray);
{
var_transition_cell.Bind(LoadFixedArrayElement(
- maybe_handler, StoreHandler::kTransitionCellIndex));
+ maybe_handler, StoreHandler::kTransitionMapOrHolderCellIndex));
Goto(&check_key);
}
@@ -915,27 +914,31 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&not_callable);
{
- if (language_mode == STRICT) {
+ Label strict(this);
+ BranchIfStrictMode(p->vector, p->slot, &strict);
+ Return(p->value);
+
+ BIND(&strict);
+ {
Node* message = SmiConstant(MessageTemplate::kNoSetterInCallback);
TailCallRuntime(Runtime::kThrowTypeError, p->context, message, p->name,
var_accessor_holder.value());
- } else {
- DCHECK_EQ(SLOPPY, language_mode);
- Return(p->value);
}
}
}
BIND(&readonly);
{
- if (language_mode == STRICT) {
+ Label strict(this);
+ BranchIfStrictMode(p->vector, p->slot, &strict);
+ Return(p->value);
+
+ BIND(&strict);
+ {
Node* message = SmiConstant(MessageTemplate::kStrictReadOnlyProperty);
Node* type = Typeof(p->receiver);
TailCallRuntime(Runtime::kThrowTypeError, p->context, message, p->name,
type, p->receiver);
- } else {
- DCHECK_EQ(SLOPPY, language_mode);
- Return(p->value);
}
}
@@ -960,7 +963,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
}
}
-void KeyedStoreGenericAssembler::KeyedStoreGeneric(LanguageMode language_mode) {
+void KeyedStoreGenericAssembler::KeyedStoreGeneric() {
typedef StoreWithVectorDescriptor Descriptor;
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -998,19 +1001,25 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric(LanguageMode language_mode) {
Comment("key is unique name");
StoreICParameters p(context, receiver, var_unique.value(), value, slot,
vector);
- EmitGenericPropertyStore(receiver, receiver_map, &p, &slow, language_mode);
+ EmitGenericPropertyStore(receiver, receiver_map, &p, &slow);
}
BIND(&slow);
{
Comment("KeyedStoreGeneric_slow");
+ VARIABLE(var_language_mode, MachineRepresentation::kTaggedSigned,
+ SmiConstant(STRICT));
+ Label call_runtime(this);
+ BranchIfStrictMode(vector, slot, &call_runtime);
+ var_language_mode.Bind(SmiConstant(SLOPPY));
+ Goto(&call_runtime);
+ BIND(&call_runtime);
TailCallRuntime(Runtime::kSetProperty, context, receiver, name, value,
- SmiConstant(language_mode));
+ var_language_mode.value());
}
}
-void KeyedStoreGenericAssembler::StoreIC_Uninitialized(
- LanguageMode language_mode) {
+void KeyedStoreGenericAssembler::StoreIC_Uninitialized() {
typedef StoreWithVectorDescriptor Descriptor;
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -1037,7 +1046,7 @@ void KeyedStoreGenericAssembler::StoreIC_Uninitialized(
SKIP_WRITE_BARRIER, 0, SMI_PARAMETERS);
StoreICParameters p(context, receiver, name, value, slot, vector);
- EmitGenericPropertyStore(receiver, receiver_map, &p, &miss, language_mode,
+ EmitGenericPropertyStore(receiver, receiver_map, &p, &miss,
kDontUseStubCache);
BIND(&miss);
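With the LanguageMode parameter gone, the stub decides strict vs. sloppy at runtime: BranchIfStrictMode(vector, slot, &strict) jumps to the strict label, and the fall-through is the sloppy path. A plain-C++ sketch of the two patterns above, with a boolean standing in for the feedback-vector lookup:

    enum LanguageMode { SLOPPY, STRICT };

    // Slow path: default to STRICT, downgrade on the sloppy fall-through.
    LanguageMode RuntimeLanguageMode(bool feedback_is_strict) {
      return feedback_is_strict ? STRICT : SLOPPY;
    }

    // Readonly / not-callable paths: silently succeed in sloppy mode,
    // signal a TypeError in strict mode.
    bool StoreToReadOnly(bool feedback_is_strict) {
      if (feedback_is_strict) {
        // TailCallRuntime(Runtime::kThrowTypeError, ...)
        return false;
      }
      return true;  // Return(p->value)
    }
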
diff --git a/deps/v8/src/ic/keyed-store-generic.h b/deps/v8/src/ic/keyed-store-generic.h
index 70f87f83ad..4d82840be3 100644
--- a/deps/v8/src/ic/keyed-store-generic.h
+++ b/deps/v8/src/ic/keyed-store-generic.h
@@ -16,14 +16,12 @@ class CodeAssemblerState;
class KeyedStoreGenericGenerator {
public:
- static void Generate(compiler::CodeAssemblerState* state,
- LanguageMode language_mode);
+ static void Generate(compiler::CodeAssemblerState* state);
};
class StoreICUninitializedGenerator {
public:
- static void Generate(compiler::CodeAssemblerState* state,
- LanguageMode language_mode);
+ static void Generate(compiler::CodeAssemblerState* state);
};
} // namespace internal
diff --git a/deps/v8/src/ic/mips/handler-compiler-mips.cc b/deps/v8/src/ic/mips/handler-compiler-mips.cc
index dbbe04e16d..d9edc30ba6 100644
--- a/deps/v8/src/ic/mips/handler-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/handler-compiler-mips.cc
@@ -44,9 +44,9 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
__ Push(cp, value());
if (accessor_index >= 0) {
- DCHECK(!holder.is(scratch));
- DCHECK(!receiver.is(scratch));
- DCHECK(!value().is(scratch));
+ DCHECK(holder != scratch);
+ DCHECK(receiver != scratch);
+ DCHECK(value() != scratch);
// Call the JavaScript setter with receiver and value on the stack.
if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
@@ -103,7 +103,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
DCHECK(name->IsUniqueName());
- DCHECK(!receiver.is(scratch0));
+ DCHECK(receiver != scratch0);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
__ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
@@ -167,14 +167,14 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
Handle<Map> receiver_map, Register receiver, Register scratch_in,
bool is_store, Register store_parameter, Register accessor_holder,
int accessor_index) {
- DCHECK(!accessor_holder.is(scratch_in));
- DCHECK(!receiver.is(scratch_in));
+ DCHECK(accessor_holder != scratch_in);
+ DCHECK(receiver != scratch_in);
__ push(accessor_holder);
__ push(receiver);
// Write the arguments to stack frame.
if (is_store) {
- DCHECK(!receiver.is(store_parameter));
- DCHECK(!scratch_in.is(store_parameter));
+ DCHECK(receiver != store_parameter);
+ DCHECK(scratch_in != store_parameter);
__ push(store_parameter);
}
DCHECK(optimization.is_simple_api_call());
@@ -275,9 +275,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
Handle<Map> receiver_map = map();
// Make sure there's no overlap between holder and object registers.
- DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
- !scratch2.is(scratch1));
+ DCHECK(scratch1 != object_reg && scratch1 != holder_reg);
+ DCHECK(scratch2 != object_reg && scratch2 != holder_reg &&
+ scratch2 != scratch1);
Handle<Cell> validity_cell =
Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
@@ -352,9 +352,8 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ Branch(&success);
__ bind(miss);
- DCHECK(kind() == Code::LOAD_IC);
PopVectorAndSlot();
- TailCallBuiltin(masm(), MissBuiltin(kind()));
+ TailCallBuiltin(masm(), Builtins::kLoadIC_Miss);
__ bind(&success);
}
}
@@ -366,7 +365,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
__ Branch(&success);
GenerateRestoreName(miss, name);
PopVectorAndSlot();
- TailCallBuiltin(masm(), MissBuiltin(kind()));
+ TailCallBuiltin(masm(), Builtins::kStoreIC_Miss);
__ bind(&success);
}
}
@@ -398,7 +397,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
- return GetCode(kind(), name);
+ return GetCode(name);
}
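The DCHECK rewrites in this and the following platform files rely on Register now being comparable with == and != instead of exposing an is() method. A minimal stand-in for how such operators can be defined (V8's actual Register carries more state than a code):

    struct Register {
      int code;
    };
    constexpr bool operator==(Register a, Register b) {
      return a.code == b.code;
    }
    constexpr bool operator!=(Register a, Register b) { return !(a == b); }
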
diff --git a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
index 71574a684c..f528ac9fdb 100644
--- a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
@@ -44,9 +44,9 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
__ Push(cp, value());
if (accessor_index >= 0) {
- DCHECK(!holder.is(scratch));
- DCHECK(!receiver.is(scratch));
- DCHECK(!value().is(scratch));
+ DCHECK(holder != scratch);
+ DCHECK(receiver != scratch);
+ DCHECK(value() != scratch);
// Call the JavaScript setter with receiver and value on the stack.
if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
@@ -103,7 +103,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
DCHECK(name->IsUniqueName());
- DCHECK(!receiver.is(scratch0));
+ DCHECK(receiver != scratch0);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
__ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
@@ -167,14 +167,14 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
Handle<Map> receiver_map, Register receiver, Register scratch_in,
bool is_store, Register store_parameter, Register accessor_holder,
int accessor_index) {
- DCHECK(!accessor_holder.is(scratch_in));
- DCHECK(!receiver.is(scratch_in));
+ DCHECK(accessor_holder != scratch_in);
+ DCHECK(receiver != scratch_in);
__ push(accessor_holder);
__ push(receiver);
// Write the arguments to stack frame.
if (is_store) {
- DCHECK(!receiver.is(store_parameter));
- DCHECK(!scratch_in.is(store_parameter));
+ DCHECK(receiver != store_parameter);
+ DCHECK(scratch_in != store_parameter);
__ push(store_parameter);
}
DCHECK(optimization.is_simple_api_call());
@@ -275,9 +275,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
Handle<Map> receiver_map = map();
// Make sure there's no overlap between holder and object registers.
- DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
- !scratch2.is(scratch1));
+ DCHECK(scratch1 != object_reg && scratch1 != holder_reg);
+ DCHECK(scratch2 != object_reg && scratch2 != holder_reg &&
+ scratch2 != scratch1);
Handle<Cell> validity_cell =
Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
@@ -352,9 +352,8 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ Branch(&success);
__ bind(miss);
- DCHECK(kind() == Code::LOAD_IC);
PopVectorAndSlot();
- TailCallBuiltin(masm(), MissBuiltin(kind()));
+ TailCallBuiltin(masm(), Builtins::kLoadIC_Miss);
__ bind(&success);
}
}
@@ -366,7 +365,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
__ Branch(&success);
GenerateRestoreName(miss, name);
PopVectorAndSlot();
- TailCallBuiltin(masm(), MissBuiltin(kind()));
+ TailCallBuiltin(masm(), Builtins::kStoreIC_Miss);
__ bind(&success);
}
}
@@ -398,7 +397,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
- return GetCode(kind(), name);
+ return GetCode(name);
}
diff --git a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
index f833c07e4d..30686e6184 100644
--- a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
@@ -45,9 +45,9 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
__ Push(cp, value());
if (accessor_index >= 0) {
- DCHECK(!holder.is(scratch));
- DCHECK(!receiver.is(scratch));
- DCHECK(!value().is(scratch));
+ DCHECK(holder != scratch);
+ DCHECK(receiver != scratch);
+ DCHECK(value() != scratch);
// Call the JavaScript setter with receiver and value on the stack.
if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
@@ -104,7 +104,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
DCHECK(name->IsUniqueName());
- DCHECK(!receiver.is(scratch0));
+ DCHECK(receiver != scratch0);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
__ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
@@ -172,14 +172,14 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
Handle<Map> receiver_map, Register receiver, Register scratch_in,
bool is_store, Register store_parameter, Register accessor_holder,
int accessor_index) {
- DCHECK(!accessor_holder.is(scratch_in));
- DCHECK(!receiver.is(scratch_in));
+ DCHECK(accessor_holder != scratch_in);
+ DCHECK(receiver != scratch_in);
__ push(accessor_holder);
__ push(receiver);
// Write the arguments to stack frame.
if (is_store) {
- DCHECK(!receiver.is(store_parameter));
- DCHECK(!scratch_in.is(store_parameter));
+ DCHECK(receiver != store_parameter);
+ DCHECK(scratch_in != store_parameter);
__ push(store_parameter);
}
DCHECK(optimization.is_simple_api_call());
@@ -285,9 +285,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
Handle<Map> receiver_map = map();
// Make sure there's no overlap between holder and object registers.
- DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
- !scratch2.is(scratch1));
+ DCHECK(scratch1 != object_reg && scratch1 != holder_reg);
+ DCHECK(scratch2 != object_reg && scratch2 != holder_reg &&
+ scratch2 != scratch1);
Handle<Cell> validity_cell =
Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
@@ -367,9 +367,8 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ b(&success);
__ bind(miss);
- DCHECK(kind() == Code::LOAD_IC);
PopVectorAndSlot();
- TailCallBuiltin(masm(), MissBuiltin(kind()));
+ TailCallBuiltin(masm(), Builtins::kLoadIC_Miss);
__ bind(&success);
}
}
@@ -381,7 +380,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
__ b(&success);
GenerateRestoreName(miss, name);
PopVectorAndSlot();
- TailCallBuiltin(masm(), MissBuiltin(kind()));
+ TailCallBuiltin(masm(), Builtins::kStoreIC_Miss);
__ bind(&success);
}
}
@@ -414,7 +413,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
- return GetCode(kind(), name);
+ return GetCode(name);
}
diff --git a/deps/v8/src/ic/s390/handler-compiler-s390.cc b/deps/v8/src/ic/s390/handler-compiler-s390.cc
index fa42ee9b16..4fd0013ac0 100644
--- a/deps/v8/src/ic/s390/handler-compiler-s390.cc
+++ b/deps/v8/src/ic/s390/handler-compiler-s390.cc
@@ -44,9 +44,9 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
__ Push(cp, value());
if (accessor_index >= 0) {
- DCHECK(!holder.is(scratch));
- DCHECK(!receiver.is(scratch));
- DCHECK(!value().is(scratch));
+ DCHECK(holder != scratch);
+ DCHECK(receiver != scratch);
+ DCHECK(value() != scratch);
// Call the JavaScript setter with receiver and value on the stack.
if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
@@ -100,7 +100,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
DCHECK(name->IsUniqueName());
- DCHECK(!receiver.is(scratch0));
+ DCHECK(receiver != scratch0);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
__ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
@@ -165,14 +165,14 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
Handle<Map> receiver_map, Register receiver, Register scratch_in,
bool is_store, Register store_parameter, Register accessor_holder,
int accessor_index) {
- DCHECK(!accessor_holder.is(scratch_in));
- DCHECK(!receiver.is(scratch_in));
+ DCHECK(accessor_holder != scratch_in);
+ DCHECK(receiver != scratch_in);
__ Push(accessor_holder);
__ Push(receiver);
// Write the arguments to stack frame.
if (is_store) {
- DCHECK(!receiver.is(store_parameter));
- DCHECK(!scratch_in.is(store_parameter));
+ DCHECK(receiver != store_parameter);
+ DCHECK(scratch_in != store_parameter);
__ Push(store_parameter);
}
DCHECK(optimization.is_simple_api_call());
@@ -277,9 +277,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
Handle<Map> receiver_map = map();
// Make sure there's no overlap between holder and object registers.
- DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
- !scratch2.is(scratch1));
+ DCHECK(scratch1 != object_reg && scratch1 != holder_reg);
+ DCHECK(scratch2 != object_reg && scratch2 != holder_reg &&
+ scratch2 != scratch1);
Handle<Cell> validity_cell =
Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
@@ -353,9 +353,8 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ b(&success);
__ bind(miss);
- DCHECK(kind() == Code::LOAD_IC);
PopVectorAndSlot();
- TailCallBuiltin(masm(), MissBuiltin(kind()));
+ TailCallBuiltin(masm(), Builtins::kLoadIC_Miss);
__ bind(&success);
}
}
@@ -366,7 +365,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
__ b(&success);
GenerateRestoreName(miss, name);
PopVectorAndSlot();
- TailCallBuiltin(masm(), MissBuiltin(kind()));
+ TailCallBuiltin(masm(), Builtins::kStoreIC_Miss);
__ bind(&success);
}
}
@@ -399,7 +398,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
- return GetCode(kind(), name);
+ return GetCode(name);
}
Register NamedStoreHandlerCompiler::value() {
diff --git a/deps/v8/src/ic/stub-cache.cc b/deps/v8/src/ic/stub-cache.cc
index 46ac580a70..ecdf8c83e1 100644
--- a/deps/v8/src/ic/stub-cache.cc
+++ b/deps/v8/src/ic/stub-cache.cc
@@ -13,8 +13,7 @@
namespace v8 {
namespace internal {
-StubCache::StubCache(Isolate* isolate, Code::Kind ic_kind)
- : isolate_(isolate), ic_kind_(ic_kind) {
+StubCache::StubCache(Isolate* isolate) : isolate_(isolate) {
// Ensure the nullptr (aka Smi::kZero) which StubCache::Get() returns
  // when the entry is not found is not considered a handler.
DCHECK(!IC::IsHandler(nullptr));
@@ -26,6 +25,35 @@ void StubCache::Initialize() {
Clear();
}
+// Hash algorithm for the primary table. This algorithm is replicated in
+// assembler for every architecture. Returns an index into the table that
+// is scaled by 1 << kCacheIndexShift.
+int StubCache::PrimaryOffset(Name* name, Map* map) {
+ STATIC_ASSERT(kCacheIndexShift == Name::kHashShift);
+ // Compute the hash of the name (use entire hash field).
+ DCHECK(name->HasHashCode());
+ uint32_t field = name->hash_field();
+ // Using only the low bits in 64-bit mode is unlikely to increase the
+ // risk of collision even if the heap is spread over an area larger than
+ // 4Gb (and not at all if it isn't).
+ uint32_t map_low32bits =
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
+ // Base the offset on a simple combination of name and map.
+ uint32_t key = (map_low32bits + field) ^ kPrimaryMagic;
+ return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
+}
+
+// Hash algorithm for the secondary table. This algorithm is replicated in
+// assembler for every architecture. Returns an index into the table that
+// is scaled by 1 << kCacheIndexShift.
+int StubCache::SecondaryOffset(Name* name, int seed) {
+ // Use the seed from the primary cache in the secondary cache.
+ uint32_t name_low32bits =
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
+ uint32_t key = (seed - name_low32bits) + kSecondaryMagic;
+ return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
+}
+
#ifdef DEBUG
namespace {
@@ -37,15 +65,7 @@ bool CommonStubCacheChecks(StubCache* stub_cache, Name* name, Map* map,
DCHECK(!name->GetHeap()->InNewSpace(handler));
DCHECK(name->IsUniqueName());
DCHECK(name->HasHashCode());
- if (handler) {
- DCHECK(IC::IsHandler(handler));
- if (handler->IsCode()) {
- Code::Flags code_flags = Code::cast(handler)->flags();
- Code::Kind ic_code_kind = stub_cache->ic_kind();
- DCHECK_EQ(ic_code_kind, Code::ExtractExtraICStateFromFlags(code_flags));
- DCHECK_EQ(Code::HANDLER, Code::ExtractKindFromFlags(code_flags));
- }
- }
+ if (handler) DCHECK(IC::IsHandler(handler));
return true;
}
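For reference, the two offset computations moved into this file can be exercised in isolation. In the sketch below the magic constants, shift, and table sizes are placeholders, not V8's actual values; only the combining arithmetic mirrors PrimaryOffset/SecondaryOffset above:

    #include <cstdint>

    constexpr int kCacheIndexShift = 3;                // placeholder
    constexpr int kPrimaryTableSize = 2048;            // placeholder, 2^n
    constexpr int kSecondaryTableSize = 512;           // placeholder, 2^n
    constexpr uint32_t kPrimaryMagic = 0x3d532433u;    // placeholder
    constexpr uint32_t kSecondaryMagic = 0xb16ca6e5u;  // placeholder

    int PrimaryOffset(uint32_t name_hash_field, uintptr_t map_address) {
      // Low map bits plus the name hash, scrambled by the primary magic.
      uint32_t map_low32bits = static_cast<uint32_t>(map_address);
      uint32_t key = (map_low32bits + name_hash_field) ^ kPrimaryMagic;
      return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
    }

    int SecondaryOffset(uintptr_t name_address, int seed) {
      // The primary offset is reused as the seed for the secondary probe.
      uint32_t name_low32bits = static_cast<uint32_t>(name_address);
      uint32_t key = (seed - name_low32bits) + kSecondaryMagic;
      return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
    }
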
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
index 74b5715883..4b3144b9ad 100644
--- a/deps/v8/src/ic/stub-cache.h
+++ b/deps/v8/src/ic/stub-cache.h
@@ -6,12 +6,11 @@
#define V8_STUB_CACHE_H_
#include "src/macro-assembler.h"
+#include "src/objects/name.h"
namespace v8 {
namespace internal {
-class SmallMapList;
-
// The stub cache is used for megamorphic property accesses.
// It maps (map, name, type) to property access handlers. The cache does not
// need explicit invalidation when a prototype chain is modified, since the
@@ -74,7 +73,6 @@ class StubCache {
}
Isolate* isolate() { return isolate_; }
- Code::Kind ic_kind() const { return ic_kind_; }
// Setting the entry size such that the index is shifted by Name::kHashShift
// is convenient; shifting down the length field (to extract the hash code)
@@ -99,7 +97,7 @@ class StubCache {
}
// The constructor is made public only for the purposes of testing.
- StubCache(Isolate* isolate, Code::Kind ic_kind);
+ explicit StubCache(Isolate* isolate);
private:
// The stub cache has a primary and secondary level. The two levels have
@@ -112,31 +110,12 @@ class StubCache {
// Hash algorithm for the primary table. This algorithm is replicated in
// assembler for every architecture. Returns an index into the table that
// is scaled by 1 << kCacheIndexShift.
- static int PrimaryOffset(Name* name, Map* map) {
- STATIC_ASSERT(kCacheIndexShift == Name::kHashShift);
- // Compute the hash of the name (use entire hash field).
- DCHECK(name->HasHashCode());
- uint32_t field = name->hash_field();
- // Using only the low bits in 64-bit mode is unlikely to increase the
- // risk of collision even if the heap is spread over an area larger than
- // 4Gb (and not at all if it isn't).
- uint32_t map_low32bits =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
- // Base the offset on a simple combination of name and map.
- uint32_t key = (map_low32bits + field) ^ kPrimaryMagic;
- return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
- }
+ static int PrimaryOffset(Name* name, Map* map);
// Hash algorithm for the secondary table. This algorithm is replicated in
// assembler for every architecture. Returns an index into the table that
// is scaled by 1 << kCacheIndexShift.
- static int SecondaryOffset(Name* name, int seed) {
- // Use the seed from the primary cache in the secondary cache.
- uint32_t name_low32bits =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
- uint32_t key = (seed - name_low32bits) + kSecondaryMagic;
- return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
- }
+ static int SecondaryOffset(Name* name, int seed);
// Compute the entry for a given offset in exactly the same way as
  // we do in generated code. We generate a hash code that already
@@ -153,7 +132,6 @@ class StubCache {
Entry primary_[kPrimaryTableSize];
Entry secondary_[kSecondaryTableSize];
Isolate* isolate_;
- Code::Kind ic_kind_;
friend class Isolate;
friend class SCTableReference;
diff --git a/deps/v8/src/ic/x64/handler-compiler-x64.cc b/deps/v8/src/ic/x64/handler-compiler-x64.cc
index ee8fa90600..99b2a7a41c 100644
--- a/deps/v8/src/ic/x64/handler-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/handler-compiler-x64.cc
@@ -48,7 +48,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
DCHECK(name->IsUniqueName());
- DCHECK(!receiver.is(scratch0));
+ DCHECK(receiver != scratch0);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1);
__ IncrementCounter(counters->negative_lookups_miss(), 1);
@@ -90,7 +90,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
Handle<Map> receiver_map, Register receiver, Register scratch,
bool is_store, Register store_parameter, Register accessor_holder,
int accessor_index) {
- DCHECK(!accessor_holder.is(scratch));
+ DCHECK(accessor_holder != scratch);
DCHECK(optimization.is_simple_api_call());
__ PopReturnAddressTo(scratch);
@@ -100,8 +100,8 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ Push(receiver);
// Write the arguments to stack frame.
if (is_store) {
- DCHECK(!receiver.is(store_parameter));
- DCHECK(!scratch.is(store_parameter));
+ DCHECK(receiver != store_parameter);
+ DCHECK(scratch != store_parameter);
__ Push(store_parameter);
}
__ PushReturnAddressFrom(scratch);
@@ -194,9 +194,9 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
__ Push(value());
if (accessor_index >= 0) {
- DCHECK(!holder.is(scratch));
- DCHECK(!receiver.is(scratch));
- DCHECK(!value().is(scratch));
+ DCHECK(holder != scratch);
+ DCHECK(receiver != scratch);
+ DCHECK(value() != scratch);
// Call the JavaScript setter with receiver and value on the stack.
if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
@@ -279,9 +279,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
Handle<Map> receiver_map = map();
// Make sure there's no overlap between holder and object registers.
- DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
- !scratch2.is(scratch1));
+ DCHECK(scratch1 != object_reg && scratch1 != holder_reg);
+ DCHECK(scratch2 != object_reg && scratch2 != holder_reg &&
+ scratch2 != scratch1);
Handle<Cell> validity_cell =
Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
@@ -358,9 +358,8 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ jmp(&success);
__ bind(miss);
- DCHECK(kind() == Code::LOAD_IC);
PopVectorAndSlot();
- TailCallBuiltin(masm(), MissBuiltin(kind()));
+ TailCallBuiltin(masm(), Builtins::kLoadIC_Miss);
__ bind(&success);
}
}
@@ -372,7 +371,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
__ jmp(&success);
GenerateRestoreName(miss, name);
PopVectorAndSlot();
- TailCallBuiltin(masm(), MissBuiltin(kind()));
+ TailCallBuiltin(masm(), Builtins::kStoreIC_Miss);
__ bind(&success);
}
}
@@ -406,7 +405,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
- return GetCode(kind(), name);
+ return GetCode(name);
}
diff --git a/deps/v8/src/inspector/BUILD.gn b/deps/v8/src/inspector/BUILD.gn
index d9d3ac879a..2ebf561135 100644
--- a/deps/v8/src/inspector/BUILD.gn
+++ b/deps/v8/src/inspector/BUILD.gn
@@ -120,7 +120,6 @@ v8_source_set("inspector") {
"inspected-context.h",
"remote-object-id.cc",
"remote-object-id.h",
- "script-breakpoint.h",
"search-util.cc",
"search-util.h",
"string-16.cc",
diff --git a/deps/v8/src/inspector/injected-script-source.js b/deps/v8/src/inspector/injected-script-source.js
index bb8264f0f5..c6222f80a3 100644
--- a/deps/v8/src/inspector/injected-script-source.js
+++ b/deps/v8/src/inspector/injected-script-source.js
@@ -77,17 +77,6 @@ function toStringDescription(obj)
}
/**
- * @param {number|string} obj
- * @return {boolean}
- */
-function isUInt32(obj)
-{
- if (typeof obj === "number")
- return obj >>> 0 === obj && (obj > 0 || 1 / obj > 0);
- return "" + (obj >>> 0) === obj;
-}
-
-/**
* FireBug's array detection.
* @param {*} obj
* @return {boolean}
@@ -101,7 +90,8 @@ function isArrayLike(obj)
if (!InjectedScriptHost.objectHasOwnProperty(/** @type {!Object} */ (obj), "length"))
return false;
var len = InjectedScriptHost.getProperty(obj, "length");
- return typeof len === "number" && isUInt32(len);
+      // Check that len is a uint32; the "1 / len > 0" test rejects -0.
+ return typeof len === "number" && len >>> 0 === len && (len > 0 || 1 / len > 0);
}
return false;
}
@@ -184,16 +174,18 @@ InjectedScript.primitiveTypes = {
* @type {!Object<string, string>}
* @const
*/
-InjectedScript.closureTypes = { __proto__: null };
-InjectedScript.closureTypes["local"] = "Local";
-InjectedScript.closureTypes["closure"] = "Closure";
-InjectedScript.closureTypes["catch"] = "Catch";
-InjectedScript.closureTypes["block"] = "Block";
-InjectedScript.closureTypes["script"] = "Script";
-InjectedScript.closureTypes["with"] = "With Block";
-InjectedScript.closureTypes["global"] = "Global";
-InjectedScript.closureTypes["eval"] = "Eval";
-InjectedScript.closureTypes["module"] = "Module";
+InjectedScript.closureTypes = {
+ "local": "Local",
+ "closure": "Closure",
+ "catch": "Catch",
+ "block": "Block",
+ "script": "Script",
+ "with": "With Block",
+ "global": "Global",
+ "eval": "Eval",
+ "module": "Module",
+ __proto__: null
+};
InjectedScript.prototype = {
/**
@@ -254,6 +246,7 @@ InjectedScript.prototype = {
columns = [columns];
if (InjectedScriptHost.subtype(columns) === "array") {
columnNames = [];
+ InjectedScriptHost.nullifyPrototype(columnNames);
for (var i = 0; i < columns.length; ++i)
columnNames[i] = toString(columns[i]);
}
diff --git a/deps/v8/src/inspector/injected-script.cc b/deps/v8/src/inspector/injected-script.cc
index cd594be5b8..4f24d25698 100644
--- a/deps/v8/src/inspector/injected-script.cc
+++ b/deps/v8/src/inspector/injected-script.cc
@@ -272,8 +272,8 @@ std::unique_ptr<InjectedScript> InjectedScript::create(
   // The function is expected to create and configure an InjectedScript
   // instance that is going to be used by the inspector.
- String16 injectedScriptSource(
- reinterpret_cast<const char*>(InjectedScriptSource_js),
+ StringView injectedScriptSource(
+ reinterpret_cast<const uint8_t*>(InjectedScriptSource_js),
sizeof(InjectedScriptSource_js));
v8::Local<v8::Value> value;
if (!inspectedContext->inspector()
diff --git a/deps/v8/src/inspector/inspector.gypi b/deps/v8/src/inspector/inspector.gypi
index bb96b0f610..d6443283f5 100644
--- a/deps/v8/src/inspector/inspector.gypi
+++ b/deps/v8/src/inspector/inspector.gypi
@@ -39,7 +39,6 @@
'inspector/inspected-context.h',
'inspector/remote-object-id.cc',
'inspector/remote-object-id.h',
- 'inspector/script-breakpoint.h',
'inspector/search-util.cc',
'inspector/search-util.h',
'inspector/string-16.cc',
diff --git a/deps/v8/src/inspector/js_protocol.json b/deps/v8/src/inspector/js_protocol.json
index 439de72c10..df7db67cdb 100644
--- a/deps/v8/src/inspector/js_protocol.json
+++ b/deps/v8/src/inspector/js_protocol.json
@@ -242,14 +242,16 @@
{
"name": "callFunctionOn",
"parameters": [
- { "name": "objectId", "$ref": "RemoteObjectId", "description": "Identifier of the object to call function on." },
{ "name": "functionDeclaration", "type": "string", "description": "Declaration of the function to call." },
+ { "name": "objectId", "$ref": "RemoteObjectId", "optional": true, "description": "Identifier of the object to call function on. Either objectId or executionContextId should be specified." },
{ "name": "arguments", "type": "array", "items": { "$ref": "CallArgument", "description": "Call argument." }, "optional": true, "description": "Call arguments. All call arguments must belong to the same JavaScript world as the target object." },
{ "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
{ "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object which should be sent by value." },
{ "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
{ "name": "userGesture", "type": "boolean", "optional": true, "experimental": true, "description": "Whether execution should be treated as initiated by user in the UI." },
- { "name": "awaitPromise", "type": "boolean", "optional":true, "description": "Whether execution should <code>await</code> for resulting value and return once awaited promise is resolved." }
+        { "name": "awaitPromise", "type": "boolean", "optional": true, "description": "Whether execution should <code>await</code> for resulting value and return once awaited promise is resolved." },
+ { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies execution context which global object will be used to call function on. Either executionContextId or objectId should be specified." },
+ { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects. If objectGroup is not specified and objectId is, objectGroup will be inherited from object." }
],
"returns": [
{ "name": "result", "$ref": "RemoteObject", "description": "Call result." },
@@ -387,7 +389,7 @@
"description": "Issued when unhandled exception was revoked.",
"parameters": [
{ "name": "reason", "type": "string", "description": "Reason describing why exception was revoked." },
- { "name": "exceptionId", "type": "integer", "description": "The id of revoked exception, as reported in <code>exceptionUnhandled</code>." }
+ { "name": "exceptionId", "type": "integer", "description": "The id of revoked exception, as reported in <code>exceptionThrown</code>." }
]
},
{
@@ -455,6 +457,7 @@
{ "name": "functionName", "type": "string", "description": "Name of the JavaScript function called on this call frame." },
{ "name": "functionLocation", "$ref": "Location", "optional": true, "experimental": true, "description": "Location in the source code." },
{ "name": "location", "$ref": "Location", "description": "Location in the source code." },
+ { "name": "url", "type": "string", "description": "JavaScript script name or url." },
{ "name": "scopeChain", "type": "array", "items": { "$ref": "Scope" }, "description": "Scope chain for this call frame." },
{ "name": "this", "$ref": "Runtime.RemoteObject", "description": "<code>this</code> object for this call frame." },
{ "name": "returnValue", "$ref": "Runtime.RemoteObject", "optional": true, "description": "The value being returned, if the function is at return point." }
@@ -524,6 +527,7 @@
{ "name": "lineNumber", "type": "integer", "description": "Line number to set breakpoint at." },
{ "name": "url", "type": "string", "optional": true, "description": "URL of the resources to set breakpoint on." },
{ "name": "urlRegex", "type": "string", "optional": true, "description": "Regex pattern for the URLs of the resources to set breakpoints on. Either <code>url</code> or <code>urlRegex</code> must be specified." },
+ { "name": "scriptHash", "type": "string", "optional": true, "experimental": true, "description": "Script hash of the resources to set breakpoint on." },
{ "name": "columnNumber", "type": "integer", "optional": true, "description": "Offset in the line to set breakpoint at." },
{ "name": "condition", "type": "string", "optional": true, "description": "Expression to use as a breakpoint condition. When specified, debugger will only stop on the breakpoint if this expression evaluates to true." }
],
@@ -888,6 +892,34 @@
{ "name": "functions", "type": "array", "items": { "$ref": "FunctionCoverage" }, "description": "Functions contained in the script that has coverage data." }
],
"experimental": true
+ },
+ { "id": "TypeObject",
+ "type": "object",
+ "description": "Describes a type collected during runtime.",
+ "properties": [
+ { "name": "name", "type": "string", "description": "Name of a type collected with type profiling." }
+ ],
+ "experimental": true
+ },
+ { "id": "TypeProfileEntry",
+ "type": "object",
+ "description": "Source offset and types for a parameter or return value.",
+ "properties": [
+ { "name": "offset", "type": "integer", "description": "Source offset of the parameter or end of function for return values." },
+ { "name": "types", "type": "array", "items": {"$ref": "TypeObject"}, "description": "The types for this parameter or return value."}
+ ],
+ "experimental": true
+ },
+ {
+ "id": "ScriptTypeProfile",
+ "type": "object",
+ "description": "Type profile data collected during runtime for a JavaScript script.",
+ "properties": [
+ { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "JavaScript script id." },
+ { "name": "url", "type": "string", "description": "JavaScript script name or url." },
+ { "name": "entries", "type": "array", "items": { "$ref": "TypeProfileEntry" }, "description": "Type profile entries for parameters and return values of the functions in the script." }
+ ],
+ "experimental": true
}
],
"commands": [
@@ -942,6 +974,24 @@
],
"description": "Collect coverage data for the current isolate. The coverage data may be incomplete due to garbage collection.",
"experimental": true
+ },
+ {
+ "name": "startTypeProfile",
+ "description": "Enable type profile.",
+ "experimental": true
+ },
+ {
+ "name": "stopTypeProfile",
+ "description": "Disable type profile. Disabling releases type profile data collected so far.",
+ "experimental": true
+ },
+ {
+ "name": "takeTypeProfile",
+ "returns": [
+ { "name": "result", "type": "array", "items": { "$ref": "ScriptTypeProfile" }, "description": "Type profile for all scripts since startTypeProfile() was turned on." }
+ ],
+ "description": "Collect type profile.",
+ "experimental": true
}
],
"events": [
diff --git a/deps/v8/src/inspector/script-breakpoint.h b/deps/v8/src/inspector/script-breakpoint.h
deleted file mode 100644
index a981b1626c..0000000000
--- a/deps/v8/src/inspector/script-breakpoint.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (C) 2009 Apple Inc. All rights reserved.
- * Copyright (C) 2009 Google Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- * its contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef V8_INSPECTOR_SCRIPTBREAKPOINT_H_
-#define V8_INSPECTOR_SCRIPTBREAKPOINT_H_
-
-#include "src/inspector/string-16.h"
-
-namespace v8_inspector {
-
-struct ScriptBreakpoint {
- ScriptBreakpoint() {}
-
- ScriptBreakpoint(String16 script_id, int line_number, int column_number,
- String16 condition)
- : script_id(std::move(script_id)),
- line_number(line_number),
- column_number(column_number),
- condition(std::move(condition)) {}
-
- String16 script_id;
- int line_number = 0;
- int column_number = 0;
- String16 condition;
-};
-
-} // namespace v8_inspector
-
-#endif // V8_INSPECTOR_SCRIPTBREAKPOINT_H_
diff --git a/deps/v8/src/inspector/search-util.cc b/deps/v8/src/inspector/search-util.cc
index b05d7a07ec..d2550ad9e4 100644
--- a/deps/v8/src/inspector/search-util.cc
+++ b/deps/v8/src/inspector/search-util.cc
@@ -15,7 +15,7 @@ namespace {
String16 findMagicComment(const String16& content, const String16& name,
bool multiline) {
- DCHECK(name.find("=") == String16::kNotFound);
+ DCHECK_EQ(String16::kNotFound, name.find("="));
size_t length = content.length();
size_t nameLength = name.length();
diff --git a/deps/v8/src/inspector/string-util.cc b/deps/v8/src/inspector/string-util.cc
index 95d4247d14..d591daf38e 100644
--- a/deps/v8/src/inspector/string-util.cc
+++ b/deps/v8/src/inspector/string-util.cc
@@ -12,7 +12,7 @@ namespace v8_inspector {
v8::Local<v8::String> toV8String(v8::Isolate* isolate, const String16& string) {
if (string.isEmpty()) return v8::String::Empty(isolate);
- DCHECK(string.length() < v8::String::kMaxLength);
+ DCHECK_GT(v8::String::kMaxLength, string.length());
return v8::String::NewFromTwoByte(
isolate, reinterpret_cast<const uint16_t*>(string.characters16()),
v8::NewStringType::kNormal, static_cast<int>(string.length()))
@@ -22,7 +22,7 @@ v8::Local<v8::String> toV8String(v8::Isolate* isolate, const String16& string) {
v8::Local<v8::String> toV8StringInternalized(v8::Isolate* isolate,
const String16& string) {
if (string.isEmpty()) return v8::String::Empty(isolate);
- DCHECK(string.length() < v8::String::kMaxLength);
+ DCHECK_GT(v8::String::kMaxLength, string.length());
return v8::String::NewFromTwoByte(
isolate, reinterpret_cast<const uint16_t*>(string.characters16()),
v8::NewStringType::kInternalized,
@@ -39,7 +39,7 @@ v8::Local<v8::String> toV8StringInternalized(v8::Isolate* isolate,
v8::Local<v8::String> toV8String(v8::Isolate* isolate,
const StringView& string) {
if (!string.length()) return v8::String::Empty(isolate);
- DCHECK(string.length() < v8::String::kMaxLength);
+ DCHECK_GT(v8::String::kMaxLength, string.length());
if (string.is8Bit())
return v8::String::NewFromOneByte(
isolate, reinterpret_cast<const uint8_t*>(string.characters8()),
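The single-operand DCHECK(a < b) forms here and in the files below become DCHECK_GT/DCHECK_EQ, whose advantage is that the failure message can include both operand values. A toy illustration of the mechanism (not V8's actual macro):

    #include <cstdio>
    #include <cstdlib>

    #define TOY_DCHECK_GT(lhs, rhs)                                       \
      do {                                                                \
        if (!((lhs) > (rhs))) {                                           \
          std::fprintf(stderr, "Check failed: %s > %s (%lld vs. %lld)\n", \
                       #lhs, #rhs, static_cast<long long>(lhs),           \
                       static_cast<long long>(rhs));                      \
          std::abort();                                                   \
        }                                                                 \
      } while (0)
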
diff --git a/deps/v8/src/inspector/v8-console-agent-impl.cc b/deps/v8/src/inspector/v8-console-agent-impl.cc
index 6b0e12a77b..96ffdc593c 100644
--- a/deps/v8/src/inspector/v8-console-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-console-agent-impl.cc
@@ -70,7 +70,7 @@ void V8ConsoleAgentImpl::reportAllMessages() {
bool V8ConsoleAgentImpl::reportMessage(V8ConsoleMessage* message,
bool generatePreview) {
- DCHECK(message->origin() == V8MessageOrigin::kConsole);
+ DCHECK_EQ(V8MessageOrigin::kConsole, message->origin());
message->reportToFrontend(&m_frontend);
m_frontend.flush();
return m_session->inspector()->hasConsoleMessageStorage(
diff --git a/deps/v8/src/inspector/v8-console-message.cc b/deps/v8/src/inspector/v8-console-message.cc
index fa740bbc4e..5da18e538f 100644
--- a/deps/v8/src/inspector/v8-console-message.cc
+++ b/deps/v8/src/inspector/v8-console-message.cc
@@ -204,7 +204,7 @@ void V8ConsoleMessage::setLocation(const String16& url, unsigned lineNumber,
void V8ConsoleMessage::reportToFrontend(
protocol::Console::Frontend* frontend) const {
- DCHECK(m_origin == V8MessageOrigin::kConsole);
+ DCHECK_EQ(V8MessageOrigin::kConsole, m_origin);
String16 level = protocol::Console::ConsoleMessage::LevelEnum::Log;
if (m_type == ConsoleAPIType::kDebug || m_type == ConsoleAPIType::kCount ||
m_type == ConsoleAPIType::kTimeEnd)
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
index 7ed7f76fb7..fb535f0f24 100644
--- a/deps/v8/src/inspector/v8-console.cc
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -618,7 +618,7 @@ void V8Console::queryObjectsCallback(
void V8Console::inspectedObject(const v8::FunctionCallbackInfo<v8::Value>& info,
int sessionId, unsigned num) {
- DCHECK(num < V8InspectorSessionImpl::kInspectedObjectBufferSize);
+ DCHECK_GT(V8InspectorSessionImpl::kInspectedObjectBufferSize, num);
v8::debug::ConsoleCallArguments args(info);
ConsoleHelper helper(args, v8::debug::ConsoleContext(), m_inspector);
if (V8InspectorSessionImpl* session = helper.session(sessionId)) {
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index e78466c99f..1b15f04c1a 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -11,7 +11,6 @@
#include "src/inspector/inspected-context.h"
#include "src/inspector/protocol/Protocol.h"
#include "src/inspector/remote-object-id.h"
-#include "src/inspector/script-breakpoint.h"
#include "src/inspector/search-util.h"
#include "src/inspector/string-util.h"
#include "src/inspector/v8-debugger-script.h"
@@ -37,20 +36,16 @@ using protocol::Runtime::RemoteObject;
using protocol::Debugger::Scope;
namespace DebuggerAgentState {
-static const char javaScriptBreakpoints[] = "javaScriptBreakopints";
static const char pauseOnExceptionsState[] = "pauseOnExceptionsState";
static const char asyncCallStackDepth[] = "asyncCallStackDepth";
static const char blackboxPattern[] = "blackboxPattern";
static const char debuggerEnabled[] = "debuggerEnabled";
static const char skipAllPauses[] = "skipAllPauses";
-// Breakpoint properties.
-static const char url[] = "url";
-static const char isRegex[] = "isRegex";
-static const char lineNumber[] = "lineNumber";
-static const char columnNumber[] = "columnNumber";
-static const char condition[] = "condition";
-static const char hint[] = "hint";
+static const char breakpointsByRegex[] = "breakpointsByRegex";
+static const char breakpointsByUrl[] = "breakpointsByUrl";
+static const char breakpointsByScriptHash[] = "breakpointsByScriptHash";
+static const char breakpointHints[] = "breakpointHints";
} // namespace DebuggerAgentState
@@ -64,67 +59,89 @@ static const intptr_t kBreakpointHintMaxSearchOffset = 80 * 10;
namespace {
-void TranslateWasmStackTraceLocations(Array<CallFrame>* stackTrace,
- WasmTranslation* wasmTranslation) {
- for (size_t i = 0, e = stackTrace->length(); i != e; ++i) {
- protocol::Debugger::Location* location = stackTrace->get(i)->getLocation();
- String16 scriptId = location->getScriptId();
- int lineNumber = location->getLineNumber();
- int columnNumber = location->getColumnNumber(-1);
-
- if (!wasmTranslation->TranslateWasmScriptLocationToProtocolLocation(
- &scriptId, &lineNumber, &columnNumber)) {
- continue;
- }
-
+void TranslateLocation(protocol::Debugger::Location* location,
+ WasmTranslation* wasmTranslation) {
+ String16 scriptId = location->getScriptId();
+ int lineNumber = location->getLineNumber();
+ int columnNumber = location->getColumnNumber(-1);
+ if (wasmTranslation->TranslateWasmScriptLocationToProtocolLocation(
+ &scriptId, &lineNumber, &columnNumber)) {
location->setScriptId(std::move(scriptId));
location->setLineNumber(lineNumber);
location->setColumnNumber(columnNumber);
}
}
-String16 breakpointIdSuffix(V8DebuggerAgentImpl::BreakpointSource source) {
- switch (source) {
- case V8DebuggerAgentImpl::UserBreakpointSource:
- break;
- case V8DebuggerAgentImpl::DebugCommandBreakpointSource:
- return ":debug";
- case V8DebuggerAgentImpl::MonitorCommandBreakpointSource:
- return ":monitor";
- }
- return String16();
-}
-
-String16 generateBreakpointId(const ScriptBreakpoint& breakpoint,
- V8DebuggerAgentImpl::BreakpointSource source) {
+enum class BreakpointType {
+ kByUrl = 1,
+ kByUrlRegex,
+ kByScriptHash,
+ kByScriptId,
+ kDebugCommand,
+ kMonitorCommand
+};
+
+String16 generateBreakpointId(BreakpointType type,
+ const String16& scriptSelector, int lineNumber,
+ int columnNumber) {
String16Builder builder;
- builder.append(breakpoint.script_id);
+ builder.appendNumber(static_cast<int>(type));
+ builder.append(':');
+ builder.appendNumber(lineNumber);
builder.append(':');
- builder.appendNumber(breakpoint.line_number);
+ builder.appendNumber(columnNumber);
builder.append(':');
- builder.appendNumber(breakpoint.column_number);
- builder.append(breakpointIdSuffix(source));
+ builder.append(scriptSelector);
return builder.toString();
}
+bool parseBreakpointId(const String16& breakpointId, BreakpointType* type,
+ String16* scriptSelector = nullptr,
+ int* lineNumber = nullptr, int* columnNumber = nullptr) {
+ size_t typeLineSeparator = breakpointId.find(':');
+ if (typeLineSeparator == String16::kNotFound) return false;
+ size_t lineColumnSeparator = breakpointId.find(':', typeLineSeparator + 1);
+ if (lineColumnSeparator == String16::kNotFound) return false;
+ size_t columnSelectorSeparator =
+ breakpointId.find(':', lineColumnSeparator + 1);
+ if (columnSelectorSeparator == String16::kNotFound) return false;
+
+ if (type) {
+ int rawType = breakpointId.substring(0, typeLineSeparator).toInteger();
+ if (rawType < static_cast<int>(BreakpointType::kByUrl) ||
+ rawType > static_cast<int>(BreakpointType::kMonitorCommand)) {
+ return false;
+ }
+ *type = static_cast<BreakpointType>(rawType);
+ }
+ if (scriptSelector) {
+ *scriptSelector = breakpointId.substring(columnSelectorSeparator + 1);
+ }
+ if (lineNumber) {
+ *lineNumber = breakpointId
+ .substring(typeLineSeparator + 1,
+ lineColumnSeparator - typeLineSeparator - 1)
+ .toInteger();
+ }
+ if (columnNumber) {
+ *columnNumber =
+ breakpointId
+ .substring(lineColumnSeparator + 1,
+ columnSelectorSeparator - lineColumnSeparator - 1)
+ .toInteger();
+ }
+ return true;
+}
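parseBreakpointId above is the inverse of generateBreakpointId: ids take the form "<type>:<line>:<column>:<selector>", and the selector (which may itself contain ':') is everything after the third separator. A std::string sketch of the round-trip, without the range validation the real parser performs:

    #include <string>

    std::string MakeBreakpointId(int type, int line, int column,
                                 const std::string& selector) {
      return std::to_string(type) + ":" + std::to_string(line) + ":" +
             std::to_string(column) + ":" + selector;
    }

    bool ParseBreakpointId(const std::string& id, int* type, int* line,
                           int* column, std::string* selector) {
      size_t a = id.find(':');
      if (a == std::string::npos) return false;
      size_t b = id.find(':', a + 1);
      if (b == std::string::npos) return false;
      size_t c = id.find(':', b + 1);
      if (c == std::string::npos) return false;
      *type = std::stoi(id.substr(0, a));
      *line = std::stoi(id.substr(a + 1, b - a - 1));
      *column = std::stoi(id.substr(b + 1, c - b - 1));
      *selector = id.substr(c + 1);
      return true;
    }

    // MakeBreakpointId(1, 12, 0, "https://example.com/app.js") yields
    // "1:12:0:https://example.com/app.js".
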
+
bool positionComparator(const std::pair<int, int>& a,
const std::pair<int, int>& b) {
if (a.first != b.first) return a.first < b.first;
return a.second < b.second;
}
-std::unique_ptr<protocol::Debugger::Location> buildProtocolLocation(
- const String16& scriptId, int lineNumber, int columnNumber) {
- return protocol::Debugger::Location::create()
- .setScriptId(scriptId)
- .setLineNumber(lineNumber)
- .setColumnNumber(columnNumber)
- .build();
-}
-
-String16 breakpointHint(const V8DebuggerScript& script,
- const ScriptBreakpoint& breakpoint) {
- int offset = script.offset(breakpoint.line_number, breakpoint.column_number);
+String16 breakpointHint(const V8DebuggerScript& script, int lineNumber,
+ int columnNumber) {
+ int offset = script.offset(lineNumber, columnNumber);
if (offset == V8DebuggerScript::kNoOffset) return String16();
const String16& source = script.source();
String16 hint =
@@ -138,11 +155,12 @@ String16 breakpointHint(const V8DebuggerScript& script,
}
void adjustBreakpointLocation(const V8DebuggerScript& script,
- const String16& hint,
- ScriptBreakpoint* breakpoint) {
+ const String16& hint, int* lineNumber,
+ int* columnNumber) {
+ if (*lineNumber < script.startLine() || *lineNumber > script.endLine())
+ return;
if (hint.isEmpty()) return;
- intptr_t sourceOffset =
- script.offset(breakpoint->line_number, breakpoint->column_number);
+ intptr_t sourceOffset = script.offset(*lineNumber, *columnNumber);
if (sourceOffset == V8DebuggerScript::kNoOffset) return;
intptr_t searchRegionOffset = std::max(
@@ -168,8 +186,8 @@ void adjustBreakpointLocation(const V8DebuggerScript& script,
v8::debug::Location hintPosition =
script.location(static_cast<int>(bestMatch));
if (hintPosition.IsEmpty()) return;
- breakpoint->line_number = hintPosition.GetLineNumber();
- breakpoint->column_number = hintPosition.GetColumnNumber();
+ *lineNumber = hintPosition.GetLineNumber();
+ *columnNumber = hintPosition.GetColumnNumber();
}
String16 breakLocationType(v8::debug::BreakLocationType type) {
@@ -308,6 +326,16 @@ bool liveEditExceptionToDetails(
return true;
}
+protocol::DictionaryValue* getOrCreateObject(protocol::DictionaryValue* object,
+ const String16& key) {
+ protocol::DictionaryValue* value = object->getObject(key);
+ if (value) return value;
+ std::unique_ptr<protocol::DictionaryValue> newDictionary =
+ protocol::DictionaryValue::create();
+ value = newDictionary.get();
+ object->setObject(key, std::move(newDictionary));
+ return value;
+}
} // namespace
V8DebuggerAgentImpl::V8DebuggerAgentImpl(
@@ -355,8 +383,11 @@ Response V8DebuggerAgentImpl::enable() {
Response V8DebuggerAgentImpl::disable() {
if (!enabled()) return Response::OK();
- m_state->setObject(DebuggerAgentState::javaScriptBreakpoints,
- protocol::DictionaryValue::create());
+ m_state->remove(DebuggerAgentState::breakpointsByRegex);
+ m_state->remove(DebuggerAgentState::breakpointsByUrl);
+ m_state->remove(DebuggerAgentState::breakpointsByScriptHash);
+ m_state->remove(DebuggerAgentState::breakpointHints);
+
m_state->setInteger(DebuggerAgentState::pauseOnExceptionsState,
v8::debug::NoBreakOnException);
m_state->setInteger(DebuggerAgentState::asyncCallStackDepth, 0);
@@ -371,12 +402,11 @@ Response V8DebuggerAgentImpl::disable() {
m_blackboxPattern.reset();
resetBlackboxedStateCache();
m_scripts.clear();
- for (const auto& it : m_breakpointIdToDebuggerBreakpointIds) {
- for (const auto& id : it.second) {
- v8::debug::RemoveBreakpoint(m_isolate, id);
- }
+ for (const auto& it : m_debuggerBreakpointIdToBreakpointId) {
+ v8::debug::RemoveBreakpoint(m_isolate, it.first);
}
m_breakpointIdToDebuggerBreakpointIds.clear();
+ m_debuggerBreakpointIdToBreakpointId.clear();
m_debugger->setAsyncCallStackDepth(this, 0);
clearBreakDetails();
m_skipAllPauses = false;
@@ -433,83 +463,105 @@ Response V8DebuggerAgentImpl::setSkipAllPauses(bool skip) {
return Response::OK();
}
-static std::unique_ptr<protocol::DictionaryValue>
-buildObjectForBreakpointCookie(const String16& url, int lineNumber,
- int columnNumber, const String16& condition,
- bool isRegex, const String16& hint) {
- std::unique_ptr<protocol::DictionaryValue> breakpointObject =
- protocol::DictionaryValue::create();
- breakpointObject->setString(DebuggerAgentState::url, url);
- breakpointObject->setInteger(DebuggerAgentState::lineNumber, lineNumber);
- breakpointObject->setInteger(DebuggerAgentState::columnNumber, columnNumber);
- breakpointObject->setString(DebuggerAgentState::condition, condition);
- breakpointObject->setBoolean(DebuggerAgentState::isRegex, isRegex);
- if (!hint.isEmpty()) {
- breakpointObject->setString(DebuggerAgentState::hint, hint);
- }
- return breakpointObject;
-}
-
-static bool matches(V8InspectorImpl* inspector, const String16& url,
- const String16& pattern, bool isRegex) {
- if (isRegex) {
- V8Regex regex(inspector, pattern, true);
- return regex.match(url) != -1;
+static bool matches(V8InspectorImpl* inspector, const V8DebuggerScript& script,
+ BreakpointType type, const String16& selector) {
+ switch (type) {
+ case BreakpointType::kByUrl:
+ return script.sourceURL() == selector;
+ case BreakpointType::kByScriptHash:
+ return script.hash() == selector;
+ case BreakpointType::kByUrlRegex: {
+ V8Regex regex(inspector, selector, true);
+ return regex.match(script.sourceURL()) != -1;
+ }
+ default:
+ UNREACHABLE();
+ return false;
}
- return url == pattern;
}
Response V8DebuggerAgentImpl::setBreakpointByUrl(
int lineNumber, Maybe<String16> optionalURL,
- Maybe<String16> optionalURLRegex, Maybe<int> optionalColumnNumber,
- Maybe<String16> optionalCondition, String16* outBreakpointId,
+ Maybe<String16> optionalURLRegex, Maybe<String16> optionalScriptHash,
+ Maybe<int> optionalColumnNumber, Maybe<String16> optionalCondition,
+ String16* outBreakpointId,
std::unique_ptr<protocol::Array<protocol::Debugger::Location>>* locations) {
*locations = Array<protocol::Debugger::Location>::create();
- if (optionalURL.isJust() == optionalURLRegex.isJust())
- return Response::Error("Either url or urlRegex must be specified.");
- String16 url = optionalURL.isJust() ? optionalURL.fromJust()
- : optionalURLRegex.fromJust();
+ int specified = (optionalURL.isJust() ? 1 : 0) +
+ (optionalURLRegex.isJust() ? 1 : 0) +
+ (optionalScriptHash.isJust() ? 1 : 0);
+ if (specified != 1) {
+ return Response::Error(
+ "Either url or urlRegex or scriptHash must be specified.");
+ }
int columnNumber = 0;
if (optionalColumnNumber.isJust()) {
columnNumber = optionalColumnNumber.fromJust();
if (columnNumber < 0) return Response::Error("Incorrect column number");
}
- String16 condition = optionalCondition.fromMaybe("");
- bool isRegex = optionalURLRegex.isJust();
-
- String16 breakpointId = (isRegex ? "/" + url + "/" : url) + ":" +
- String16::fromInteger(lineNumber) + ":" +
- String16::fromInteger(columnNumber);
- protocol::DictionaryValue* breakpointsCookie =
- m_state->getObject(DebuggerAgentState::javaScriptBreakpoints);
- if (!breakpointsCookie) {
- std::unique_ptr<protocol::DictionaryValue> newValue =
- protocol::DictionaryValue::create();
- breakpointsCookie = newValue.get();
- m_state->setObject(DebuggerAgentState::javaScriptBreakpoints,
- std::move(newValue));
- }
- if (breakpointsCookie->get(breakpointId))
+
+ BreakpointType type = BreakpointType::kByUrl;
+ String16 selector;
+ if (optionalURLRegex.isJust()) {
+ selector = optionalURLRegex.fromJust();
+ type = BreakpointType::kByUrlRegex;
+ } else if (optionalURL.isJust()) {
+ selector = optionalURL.fromJust();
+ type = BreakpointType::kByUrl;
+ } else if (optionalScriptHash.isJust()) {
+ selector = optionalScriptHash.fromJust();
+ type = BreakpointType::kByScriptHash;
+ }
+
+ String16 condition = optionalCondition.fromMaybe(String16());
+ String16 breakpointId =
+ generateBreakpointId(type, selector, lineNumber, columnNumber);
+ protocol::DictionaryValue* breakpoints;
+ switch (type) {
+ case BreakpointType::kByUrlRegex:
+ breakpoints =
+ getOrCreateObject(m_state, DebuggerAgentState::breakpointsByRegex);
+ break;
+ case BreakpointType::kByUrl:
+ breakpoints = getOrCreateObject(
+ getOrCreateObject(m_state, DebuggerAgentState::breakpointsByUrl),
+ selector);
+ break;
+ case BreakpointType::kByScriptHash:
+ breakpoints = getOrCreateObject(
+ getOrCreateObject(m_state,
+ DebuggerAgentState::breakpointsByScriptHash),
+ selector);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ if (breakpoints->get(breakpointId)) {
return Response::Error("Breakpoint at specified location already exists.");
+ }
String16 hint;
- ScriptBreakpoint breakpoint(String16(), lineNumber, columnNumber, condition);
for (const auto& script : m_scripts) {
- if (!matches(m_inspector, script.second->sourceURL(), url, isRegex))
- continue;
- breakpoint.script_id = script.first;
- std::unique_ptr<protocol::Debugger::Location> location =
- resolveBreakpoint(breakpointId, breakpoint, UserBreakpointSource, hint);
- if (!isRegex) hint = breakpointHint(*script.second, breakpoint);
+ if (!matches(m_inspector, *script.second, type, selector)) continue;
+ if (!hint.isEmpty()) {
+ adjustBreakpointLocation(*script.second, hint, &lineNumber,
+ &columnNumber);
+ }
+ std::unique_ptr<protocol::Debugger::Location> location = setBreakpointImpl(
+ breakpointId, script.first, condition, lineNumber, columnNumber);
+ if (type != BreakpointType::kByUrlRegex) {
+ hint = breakpointHint(*script.second, lineNumber, columnNumber);
+ }
if (location) (*locations)->addItem(std::move(location));
}
-
- breakpointsCookie->setObject(
- breakpointId,
- buildObjectForBreakpointCookie(url, lineNumber, columnNumber, condition,
- isRegex, hint));
-
+ breakpoints->setString(breakpointId, condition);
+ if (!hint.isEmpty()) {
+ protocol::DictionaryValue* breakpointHints =
+ getOrCreateObject(m_state, DebuggerAgentState::breakpointHints);
+ breakpointHints->setString(breakpointId, hint);
+ }
*outBreakpointId = breakpointId;
return Response::OK();
}
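The rewritten setBreakpointByUrl replaces the single javaScriptBreakpoints cookie with per-type buckets: regex breakpoints share one flat dictionary (every new script must be matched against every pattern anyway), while URL and script-hash breakpoints are nested under their selector so didParseSource can fetch exactly the candidates for a new script. A simplified sketch of that layout, assuming std::map in place of the protocol dictionaries:

#include <map>
#include <string>

enum class BreakpointType { kByUrl, kByUrlRegex, kByScriptHash };

// Conditions keyed by breakpoint id, bucketed as in the patch.
struct PersistedBreakpoints {
  std::map<std::string, std::string> byRegex;                        // flat
  std::map<std::string, std::map<std::string, std::string>> byUrl;   // per URL
  std::map<std::string, std::map<std::string, std::string>> byHash;  // per hash
};

std::map<std::string, std::string>& bucketFor(PersistedBreakpoints& state,
                                              BreakpointType type,
                                              const std::string& selector) {
  switch (type) {
    case BreakpointType::kByUrlRegex:
      return state.byRegex;
    case BreakpointType::kByUrl:
      return state.byUrl[selector];
    case BreakpointType::kByScriptHash:
      return state.byHash[selector];
  }
  return state.byRegex;  // unreachable
}

int main() {
  PersistedBreakpoints state;
  bucketFor(state, BreakpointType::kByUrl, "https://example.com/app.js")
      ["1:10:0:https://example.com/app.js"] = "x > 0";
}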
@@ -518,18 +570,17 @@ Response V8DebuggerAgentImpl::setBreakpoint(
std::unique_ptr<protocol::Debugger::Location> location,
Maybe<String16> optionalCondition, String16* outBreakpointId,
std::unique_ptr<protocol::Debugger::Location>* actualLocation) {
- ScriptBreakpoint breakpoint(
- location->getScriptId(), location->getLineNumber(),
- location->getColumnNumber(0), optionalCondition.fromMaybe(String16()));
-
- String16 breakpointId =
- generateBreakpointId(breakpoint, UserBreakpointSource);
+ String16 breakpointId = generateBreakpointId(
+ BreakpointType::kByScriptId, location->getScriptId(),
+ location->getLineNumber(), location->getColumnNumber(0));
if (m_breakpointIdToDebuggerBreakpointIds.find(breakpointId) !=
m_breakpointIdToDebuggerBreakpointIds.end()) {
return Response::Error("Breakpoint at specified location already exists.");
}
- *actualLocation = resolveBreakpoint(
- breakpointId, breakpoint, UserBreakpointSource, /* hint */ String16());
+ *actualLocation = setBreakpointImpl(breakpointId, location->getScriptId(),
+ optionalCondition.fromMaybe(String16()),
+ location->getLineNumber(),
+ location->getColumnNumber(0));
if (!*actualLocation) return Response::Error("Could not resolve breakpoint");
*outBreakpointId = breakpointId;
return Response::OK();
@@ -537,9 +588,37 @@ Response V8DebuggerAgentImpl::setBreakpoint(
Response V8DebuggerAgentImpl::removeBreakpoint(const String16& breakpointId) {
if (!enabled()) return Response::Error(kDebuggerNotEnabled);
- protocol::DictionaryValue* breakpointsCookie =
- m_state->getObject(DebuggerAgentState::javaScriptBreakpoints);
- if (breakpointsCookie) breakpointsCookie->remove(breakpointId);
+ BreakpointType type;
+ String16 selector;
+ if (!parseBreakpointId(breakpointId, &type, &selector)) {
+ return Response::OK();
+ }
+ protocol::DictionaryValue* breakpoints = nullptr;
+ switch (type) {
+ case BreakpointType::kByUrl: {
+ protocol::DictionaryValue* breakpointsByUrl =
+ m_state->getObject(DebuggerAgentState::breakpointsByUrl);
+ if (breakpointsByUrl) {
+ breakpoints = breakpointsByUrl->getObject(selector);
+ }
+ } break;
+ case BreakpointType::kByScriptHash: {
+ protocol::DictionaryValue* breakpointsByScriptHash =
+ m_state->getObject(DebuggerAgentState::breakpointsByScriptHash);
+ if (breakpointsByScriptHash) {
+ breakpoints = breakpointsByScriptHash->getObject(selector);
+ }
+ } break;
+ case BreakpointType::kByUrlRegex:
+ breakpoints = m_state->getObject(DebuggerAgentState::breakpointsByRegex);
+ break;
+ default:
+ break;
+ }
+ if (breakpoints) breakpoints->remove(breakpointId);
+ protocol::DictionaryValue* breakpointHints =
+ m_state->getObject(DebuggerAgentState::breakpointHints);
+ if (breakpointHints) breakpointHints->remove(breakpointId);
removeBreakpointImpl(breakpointId);
return Response::OK();
}
@@ -555,7 +634,7 @@ void V8DebuggerAgentImpl::removeBreakpointImpl(const String16& breakpointId) {
}
for (const auto& id : debuggerBreakpointIdsIterator->second) {
v8::debug::RemoveBreakpoint(m_isolate, id);
- m_serverBreakpoints.erase(id);
+ m_debuggerBreakpointIdToBreakpointId.erase(id);
}
m_breakpointIdToDebuggerBreakpointIds.erase(breakpointId);
}
@@ -675,58 +754,42 @@ bool V8DebuggerAgentImpl::acceptsPause(bool isOOMBreak) const {
}
std::unique_ptr<protocol::Debugger::Location>
-V8DebuggerAgentImpl::resolveBreakpoint(const String16& breakpointId,
- const ScriptBreakpoint& breakpoint,
- BreakpointSource source,
- const String16& hint) {
+V8DebuggerAgentImpl::setBreakpointImpl(const String16& breakpointId,
+ const String16& scriptId,
+ const String16& condition,
+ int lineNumber, int columnNumber) {
v8::HandleScope handles(m_isolate);
DCHECK(enabled());
- // FIXME: remove these checks once crbug.com/520702 is resolved.
- CHECK(!breakpointId.isEmpty());
- CHECK(!breakpoint.script_id.isEmpty());
- ScriptsMap::iterator scriptIterator = m_scripts.find(breakpoint.script_id);
+
+ ScriptsMap::iterator scriptIterator = m_scripts.find(scriptId);
if (scriptIterator == m_scripts.end()) return nullptr;
V8DebuggerScript* script = scriptIterator->second.get();
- if (breakpoint.line_number < script->startLine() ||
- script->endLine() < breakpoint.line_number)
+ if (lineNumber < script->startLine() || script->endLine() < lineNumber) {
return nullptr;
-
- // Translate from protocol location to v8 location for the debugger.
- ScriptBreakpoint translatedBreakpoint = breakpoint;
- adjustBreakpointLocation(*script, hint, &translatedBreakpoint);
- m_debugger->wasmTranslation()->TranslateProtocolLocationToWasmScriptLocation(
- &translatedBreakpoint.script_id, &translatedBreakpoint.line_number,
- &translatedBreakpoint.column_number);
+ }
v8::debug::BreakpointId debuggerBreakpointId;
- v8::debug::Location location(translatedBreakpoint.line_number,
- translatedBreakpoint.column_number);
+ v8::debug::Location location(lineNumber, columnNumber);
int contextId = script->executionContextId();
InspectedContext* inspected = m_inspector->getContext(contextId);
if (!inspected) return nullptr;
{
v8::Context::Scope contextScope(inspected->context());
- if (!script->setBreakpoint(translatedBreakpoint.condition, &location,
- &debuggerBreakpointId)) {
+ if (!script->setBreakpoint(condition, &location, &debuggerBreakpointId)) {
return nullptr;
}
}
- int actualLineNumber = location.GetLineNumber();
- int actualColumnNumber = location.GetColumnNumber();
-
- // Translate back from v8 location to protocol location for the return value.
- m_debugger->wasmTranslation()->TranslateWasmScriptLocationToProtocolLocation(
- &translatedBreakpoint.script_id, &actualLineNumber, &actualColumnNumber);
-
- m_serverBreakpoints[debuggerBreakpointId] =
- std::make_pair(breakpointId, source);
- CHECK(!breakpointId.isEmpty());
+ m_debuggerBreakpointIdToBreakpointId[debuggerBreakpointId] = breakpointId;
m_breakpointIdToDebuggerBreakpointIds[breakpointId].push_back(
debuggerBreakpointId);
- return buildProtocolLocation(translatedBreakpoint.script_id, actualLineNumber,
- actualColumnNumber);
+
+ return protocol::Debugger::Location::create()
+ .setScriptId(scriptId)
+ .setLineNumber(location.GetLineNumber())
+ .setColumnNumber(location.GetColumnNumber())
+ .build();
}
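setBreakpointImpl also simplifies the bookkeeping: m_serverBreakpoints, which paired every debugger id with a (breakpointId, source) tuple, becomes a plain reverse map, since the source is now recoverable from the id's type prefix. One logical breakpoint may resolve in several scripts, so the forward map stays one-to-many. A reduced sketch of the two maps and the removal path:

#include <map>
#include <string>
#include <vector>

using DebuggerBreakpointId = int;  // stand-in for v8::debug::BreakpointId

std::map<std::string, std::vector<DebuggerBreakpointId>> breakpointToDebuggerIds;
std::map<DebuggerBreakpointId, std::string> debuggerIdToBreakpoint;

void recordResolution(const std::string& breakpointId, DebuggerBreakpointId id) {
  breakpointToDebuggerIds[breakpointId].push_back(id);
  debuggerIdToBreakpoint[id] = breakpointId;
}

// Mirrors removeBreakpointImpl above: drop every per-script resolution and
// both directions of the mapping.
void removeBreakpoint(const std::string& breakpointId) {
  auto it = breakpointToDebuggerIds.find(breakpointId);
  if (it == breakpointToDebuggerIds.end()) return;
  for (DebuggerBreakpointId id : it->second) debuggerIdToBreakpoint.erase(id);
  breakpointToDebuggerIds.erase(it);
}

int main() {
  recordResolution("1:10:4:https://example.com/app.js", 7);
  removeBreakpoint("1:10:4:https://example.com/app.js");
}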
Response V8DebuggerAgentImpl::searchInContent(
@@ -1107,8 +1170,6 @@ Response V8DebuggerAgentImpl::currentCallFrames(
String16 callFrameId =
RemoteCallFrameId::serialize(contextId, frameOrdinal);
- v8::Local<v8::debug::Script> script = iterator->GetScript();
- DCHECK(!script.IsEmpty());
v8::debug::Location loc = iterator->GetSourceLocation();
std::unique_ptr<Array<Scope>> scopes;
@@ -1128,15 +1189,29 @@ Response V8DebuggerAgentImpl::currentCallFrames(
.build();
}
+ v8::Local<v8::debug::Script> script = iterator->GetScript();
+ DCHECK(!script.IsEmpty());
+ std::unique_ptr<protocol::Debugger::Location> location =
+ protocol::Debugger::Location::create()
+ .setScriptId(String16::fromInteger(script->Id()))
+ .setLineNumber(loc.GetLineNumber())
+ .setColumnNumber(loc.GetColumnNumber())
+ .build();
+ TranslateLocation(location.get(), m_debugger->wasmTranslation());
+ String16 scriptId = String16::fromInteger(script->Id());
+ ScriptsMap::iterator scriptIterator =
+ m_scripts.find(location->getScriptId());
+ String16 url;
+ if (scriptIterator != m_scripts.end()) {
+ url = scriptIterator->second->sourceURL();
+ }
+
auto frame =
CallFrame::create()
.setCallFrameId(callFrameId)
.setFunctionName(toProtocolString(iterator->GetFunctionName()))
- .setLocation(protocol::Debugger::Location::create()
- .setScriptId(String16::fromInteger(script->Id()))
- .setLineNumber(loc.GetLineNumber())
- .setColumnNumber(loc.GetColumnNumber())
- .build())
+ .setLocation(std::move(location))
+ .setUrl(url)
.setScopeChain(std::move(scopes))
.setThis(std::move(receiver))
.build();
@@ -1161,8 +1236,6 @@ Response V8DebuggerAgentImpl::currentCallFrames(
}
(*result)->addItem(std::move(frame));
}
- TranslateWasmStackTraceLocations(result->get(),
- m_debugger->wasmTranslation());
return Response::OK();
}
@@ -1243,40 +1316,54 @@ void V8DebuggerAgentImpl::didParseSource(
static_cast<int>(scriptRef->source().length()), std::move(stackTrace));
}
- if (scriptURL.isEmpty() || !success) return;
-
- protocol::DictionaryValue* breakpointsCookie =
- m_state->getObject(DebuggerAgentState::javaScriptBreakpoints);
- if (!breakpointsCookie) return;
+ if (!success) return;
- for (size_t i = 0; i < breakpointsCookie->size(); ++i) {
- auto cookie = breakpointsCookie->at(i);
- protocol::DictionaryValue* breakpointObject =
- protocol::DictionaryValue::cast(cookie.second);
- bool isRegex;
- breakpointObject->getBoolean(DebuggerAgentState::isRegex, &isRegex);
- String16 url;
- breakpointObject->getString(DebuggerAgentState::url, &url);
- if (!matches(m_inspector, scriptURL, url, isRegex)) continue;
- ScriptBreakpoint breakpoint;
- breakpoint.script_id = scriptId;
- breakpointObject->getInteger(DebuggerAgentState::lineNumber,
- &breakpoint.line_number);
- breakpointObject->getInteger(DebuggerAgentState::columnNumber,
- &breakpoint.column_number);
- breakpointObject->getString(DebuggerAgentState::condition,
- &breakpoint.condition);
- String16 hint;
- bool hasHint = breakpointObject->getString(DebuggerAgentState::hint, &hint);
- std::unique_ptr<protocol::Debugger::Location> location =
- resolveBreakpoint(cookie.first, breakpoint, UserBreakpointSource, hint);
- if (!hasHint) {
- hint = breakpointHint(*scriptRef, breakpoint);
- if (!hint.isEmpty())
- breakpointObject->setString(DebuggerAgentState::hint, hint);
+ std::vector<protocol::DictionaryValue*> potentialBreakpoints;
+ if (!scriptURL.isEmpty()) {
+ protocol::DictionaryValue* breakpointsByUrl =
+ m_state->getObject(DebuggerAgentState::breakpointsByUrl);
+ if (breakpointsByUrl) {
+ potentialBreakpoints.push_back(breakpointsByUrl->getObject(scriptURL));
+ }
+ potentialBreakpoints.push_back(
+ m_state->getObject(DebuggerAgentState::breakpointsByRegex));
+ }
+ protocol::DictionaryValue* breakpointsByScriptHash =
+ m_state->getObject(DebuggerAgentState::breakpointsByScriptHash);
+ if (breakpointsByScriptHash) {
+ potentialBreakpoints.push_back(
+ breakpointsByScriptHash->getObject(scriptRef->hash()));
+ }
+ protocol::DictionaryValue* breakpointHints =
+ m_state->getObject(DebuggerAgentState::breakpointHints);
+ for (auto breakpoints : potentialBreakpoints) {
+ if (!breakpoints) continue;
+ for (size_t i = 0; i < breakpoints->size(); ++i) {
+ auto breakpointWithCondition = breakpoints->at(i);
+ String16 breakpointId = breakpointWithCondition.first;
+
+ BreakpointType type;
+ String16 selector;
+ int lineNumber = 0;
+ int columnNumber = 0;
+ parseBreakpointId(breakpointId, &type, &selector, &lineNumber,
+ &columnNumber);
+
+ if (!matches(m_inspector, *scriptRef, type, selector)) continue;
+ String16 condition;
+ breakpointWithCondition.second->asString(&condition);
+ String16 hint;
+ bool hasHint =
+ breakpointHints && breakpointHints->getString(breakpointId, &hint);
+ if (hasHint) {
+ adjustBreakpointLocation(*scriptRef, hint, &lineNumber, &columnNumber);
+ }
+ std::unique_ptr<protocol::Debugger::Location> location =
+ setBreakpointImpl(breakpointId, scriptId, condition, lineNumber,
+ columnNumber);
+ if (location)
+ m_frontend.breakpointResolved(breakpointId, std::move(location));
}
- if (location)
- m_frontend.breakpointResolved(cookie.first, std::move(location));
}
}
@@ -1319,22 +1406,18 @@ void V8DebuggerAgentImpl::didPause(
std::unique_ptr<Array<String16>> hitBreakpointIds = Array<String16>::create();
- bool hasDebugCommandBreakpointReason = false;
for (const auto& id : hitBreakpoints) {
- DebugServerBreakpointToBreakpointIdAndSourceMap::iterator
- breakpointIterator = m_serverBreakpoints.find(id);
- if (breakpointIterator != m_serverBreakpoints.end()) {
- const String16& localId = breakpointIterator->second.first;
- hitBreakpointIds->addItem(localId);
-
- BreakpointSource source = breakpointIterator->second.second;
- if (!hasDebugCommandBreakpointReason &&
- source == DebugCommandBreakpointSource) {
- hasDebugCommandBreakpointReason = true;
- hitReasons.push_back(std::make_pair(
- protocol::Debugger::Paused::ReasonEnum::DebugCommand, nullptr));
- }
+ auto breakpointIterator = m_debuggerBreakpointIdToBreakpointId.find(id);
+ if (breakpointIterator == m_debuggerBreakpointIdToBreakpointId.end()) {
+ continue;
}
+ const String16& breakpointId = breakpointIterator->second;
+ hitBreakpointIds->addItem(breakpointId);
+ BreakpointType type;
+ parseBreakpointId(breakpointId, &type);
+ if (type != BreakpointType::kDebugCommand) continue;
+ hitReasons.push_back(std::make_pair(
+ protocol::Debugger::Paused::ReasonEnum::DebugCommand, nullptr));
}
for (size_t i = 0; i < m_breakReason.size(); ++i) {
@@ -1403,17 +1486,26 @@ void V8DebuggerAgentImpl::setBreakpointAt(const String16& scriptId,
int lineNumber, int columnNumber,
BreakpointSource source,
const String16& condition) {
- ScriptBreakpoint breakpoint(scriptId, lineNumber, columnNumber, condition);
- String16 breakpointId = generateBreakpointId(breakpoint, source);
- resolveBreakpoint(breakpointId, breakpoint, source, /* hint */ String16());
+ String16 breakpointId = generateBreakpointId(
+ source == DebugCommandBreakpointSource ? BreakpointType::kDebugCommand
+ : BreakpointType::kMonitorCommand,
+ scriptId, lineNumber, columnNumber);
+ if (m_breakpointIdToDebuggerBreakpointIds.find(breakpointId) !=
+ m_breakpointIdToDebuggerBreakpointIds.end()) {
+ return;
+ }
+ setBreakpointImpl(breakpointId, scriptId, condition, lineNumber,
+ columnNumber);
}
void V8DebuggerAgentImpl::removeBreakpointAt(const String16& scriptId,
int lineNumber, int columnNumber,
BreakpointSource source) {
- removeBreakpointImpl(generateBreakpointId(
- ScriptBreakpoint(scriptId, lineNumber, columnNumber, String16()),
- source));
+ String16 breakpointId = generateBreakpointId(
+ source == DebugCommandBreakpointSource ? BreakpointType::kDebugCommand
+ : BreakpointType::kMonitorCommand,
+ scriptId, lineNumber, columnNumber);
+ removeBreakpointImpl(breakpointId);
}
void V8DebuggerAgentImpl::reset() {
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.h b/deps/v8/src/inspector/v8-debugger-agent-impl.h
index 6f6a397e07..306e5fb48c 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.h
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.h
@@ -16,8 +16,6 @@
namespace v8_inspector {
struct ScriptBreakpoint;
-class JavaScriptCallFrame;
-class PromiseTracker;
class V8Debugger;
class V8DebuggerScript;
class V8InspectorImpl;
@@ -47,8 +45,9 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
Response setSkipAllPauses(bool skip) override;
Response setBreakpointByUrl(
int lineNumber, Maybe<String16> optionalURL,
- Maybe<String16> optionalURLRegex, Maybe<int> optionalColumnNumber,
- Maybe<String16> optionalCondition, String16*,
+ Maybe<String16> optionalURLRegex, Maybe<String16> optionalScriptHash,
+ Maybe<int> optionalColumnNumber, Maybe<String16> optionalCondition,
+ String16*,
std::unique_ptr<protocol::Array<protocol::Debugger::Location>>* locations)
override;
Response setBreakpoint(
@@ -150,9 +149,9 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
void setPauseOnExceptionsImpl(int);
- std::unique_ptr<protocol::Debugger::Location> resolveBreakpoint(
- const String16& breakpointId, const ScriptBreakpoint&, BreakpointSource,
- const String16& hint);
+ std::unique_ptr<protocol::Debugger::Location> setBreakpointImpl(
+ const String16& breakpointId, const String16& scriptId,
+ const String16& condition, int lineNumber, int columnNumber);
void removeBreakpointImpl(const String16& breakpointId);
void clearBreakDetails();
@@ -168,10 +167,8 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
protocol::HashMap<String16, std::unique_ptr<V8DebuggerScript>>;
using BreakpointIdToDebuggerBreakpointIdsMap =
protocol::HashMap<String16, std::vector<v8::debug::BreakpointId>>;
- using DebugServerBreakpointToBreakpointIdAndSourceMap =
- protocol::HashMap<v8::debug::BreakpointId,
- std::pair<String16, BreakpointSource>>;
- using MuteBreakpoins = protocol::HashMap<String16, std::pair<String16, int>>;
+ using DebuggerBreakpointIdToBreakpointIdMap =
+ protocol::HashMap<v8::debug::BreakpointId, String16>;
V8InspectorImpl* m_inspector;
V8Debugger* m_debugger;
@@ -182,7 +179,7 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
v8::Isolate* m_isolate;
ScriptsMap m_scripts;
BreakpointIdToDebuggerBreakpointIdsMap m_breakpointIdToDebuggerBreakpointIds;
- DebugServerBreakpointToBreakpointIdAndSourceMap m_serverBreakpoints;
+ DebuggerBreakpointIdToBreakpointIdMap m_debuggerBreakpointIdToBreakpointId;
using BreakReason =
std::pair<String16, std::unique_ptr<protocol::DictionaryValue>>;
diff --git a/deps/v8/src/inspector/v8-debugger-script.cc b/deps/v8/src/inspector/v8-debugger-script.cc
index 5f1a75e166..6ec7f32c89 100644
--- a/deps/v8/src/inspector/v8-debugger-script.cc
+++ b/deps/v8/src/inspector/v8-debugger-script.cc
@@ -229,6 +229,13 @@ class ActualScript : public V8DebuggerScript {
return m_script.Get(m_isolate)->GetSourceLocation(offset);
}
+ bool setBreakpoint(const String16& condition, v8::debug::Location* location,
+ int* id) const override {
+ v8::HandleScope scope(m_isolate);
+ return script()->SetBreakpoint(toV8String(m_isolate, condition), location,
+ id);
+ }
+
private:
String16 GetNameOrSourceUrl(v8::Local<v8::debug::Script> script) {
v8::Local<v8::String> name;
@@ -318,6 +325,22 @@ class WasmVirtualScript : public V8DebuggerScript {
return v8::debug::Location();
}
+ bool setBreakpoint(const String16& condition, v8::debug::Location* location,
+ int* id) const override {
+ v8::HandleScope scope(m_isolate);
+ v8::Local<v8::debug::Script> script = m_script.Get(m_isolate);
+ String16 v8ScriptId = String16::fromInteger(script->Id());
+
+ TranslateProtocolLocationToV8Location(m_wasmTranslation, location,
+ scriptId(), v8ScriptId);
+ if (location->IsEmpty()) return false;
+ if (!script->SetBreakpoint(toV8String(m_isolate, condition), location, id))
+ return false;
+ TranslateV8LocationToProtocolLocation(m_wasmTranslation, location,
+ v8ScriptId, scriptId());
+ return true;
+ }
+
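Making setBreakpoint virtual lets the wasm fake scripts hide coordinate translation from the agent: the protocol location lives in a per-function virtual script, V8 only knows the underlying wasm script, so the override maps in, sets, and maps back. A toy sketch of that sandwich pattern (the translate helpers are stand-ins, not the real inspector functions):

#include <iostream>

struct Location {
  int line = 0;
  int column = 0;
  bool empty = false;
};

// Stand-ins for TranslateProtocolLocationToV8Location and its inverse; the
// offset is arbitrary and only demonstrates the coordinate change.
bool translateToV8(Location* loc) { loc->line += 100; return !loc->empty; }
void translateBack(Location* loc) { loc->line -= 100; }
bool setV8Breakpoint(Location*) { return true; }

bool setBreakpointTranslated(Location* loc) {
  if (!translateToV8(loc)) return false;    // protocol -> v8 coordinates
  if (!setV8Breakpoint(loc)) return false;  // set at the v8 location
  translateBack(loc);                       // v8 -> protocol for the caller
  return true;
}

int main() {
  Location loc{3, 0, false};
  std::cout << (setBreakpointTranslated(&loc) ? "set" : "failed") << "\n";
}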
private:
static const String16& emptyString() {
static const String16 singleEmptyString;
diff --git a/deps/v8/src/inspector/v8-debugger-script.h b/deps/v8/src/inspector/v8-debugger-script.h
index d10f9bfd68..f1e28184b5 100644
--- a/deps/v8/src/inspector/v8-debugger-script.h
+++ b/deps/v8/src/inspector/v8-debugger-script.h
@@ -84,8 +84,8 @@ class V8DebuggerScript {
virtual int offset(int lineNumber, int columnNumber) const = 0;
virtual v8::debug::Location location(int offset) const = 0;
- bool setBreakpoint(const String16& condition, v8::debug::Location* location,
- int* id) const;
+ virtual bool setBreakpoint(const String16& condition,
+ v8::debug::Location* location, int* id) const = 0;
protected:
V8DebuggerScript(v8::Isolate*, String16 id, String16 url);
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index bc5437c6d4..3e321a4275 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -6,7 +6,6 @@
#include "src/inspector/inspected-context.h"
#include "src/inspector/protocol/Protocol.h"
-#include "src/inspector/script-breakpoint.h"
#include "src/inspector/string-util.h"
#include "src/inspector/v8-debugger-agent-impl.h"
#include "src/inspector/v8-inspector-impl.h"
@@ -563,9 +562,6 @@ std::shared_ptr<AsyncStackTrace> V8Debugger::currentAsyncCreation() {
v8::MaybeLocal<v8::Value> V8Debugger::getTargetScopes(
v8::Local<v8::Context> context, v8::Local<v8::Value> value,
ScopeTargetKind kind) {
- if (!enabled()) {
- UNREACHABLE();
- }
v8::Local<v8::Value> scopesValue;
std::unique_ptr<v8::debug::ScopeIterator> iterator;
switch (kind) {
@@ -582,7 +578,7 @@ v8::MaybeLocal<v8::Value> V8Debugger::getTargetScopes(
m_isolate, v8::Local<v8::Object>::Cast(value));
break;
}
-
+ if (!iterator) return v8::MaybeLocal<v8::Value>();
v8::Local<v8::Array> result = v8::Array::New(m_isolate);
if (!result->SetPrototype(context, v8::Null(m_isolate)).FromMaybe(false)) {
return v8::MaybeLocal<v8::Value>();
@@ -664,7 +660,6 @@ v8::MaybeLocal<v8::Array> V8Debugger::internalProperties(
toV8StringInternalized(m_isolate, "[[GeneratorLocation]]"));
createDataProperty(context, properties, properties->Length(), location);
}
- if (!enabled()) return properties;
v8::Local<v8::Value> scopes;
if (generatorScopes(context, value).ToLocal(&scopes)) {
createDataProperty(context, properties, properties->Length(),
@@ -672,13 +667,10 @@ v8::MaybeLocal<v8::Array> V8Debugger::internalProperties(
createDataProperty(context, properties, properties->Length(), scopes);
}
}
- if (!enabled()) return properties;
if (value->IsFunction()) {
v8::Local<v8::Function> function = value.As<v8::Function>();
- v8::Local<v8::Value> boundFunction = function->GetBoundFunction();
v8::Local<v8::Value> scopes;
- if (boundFunction->IsUndefined() &&
- functionScopes(context, function).ToLocal(&scopes)) {
+ if (functionScopes(context, function).ToLocal(&scopes)) {
createDataProperty(context, properties, properties->Length(),
toV8StringInternalized(m_isolate, "[[Scopes]]"));
createDataProperty(context, properties, properties->Length(), scopes);
diff --git a/deps/v8/src/inspector/v8-debugger.h b/deps/v8/src/inspector/v8-debugger.h
index 6e2328d97b..945b2fd115 100644
--- a/deps/v8/src/inspector/v8-debugger.h
+++ b/deps/v8/src/inspector/v8-debugger.h
@@ -21,7 +21,6 @@
namespace v8_inspector {
class AsyncStackTrace;
-struct ScriptBreakpoint;
class StackFrame;
class V8Debugger;
class V8DebuggerAgentImpl;
diff --git a/deps/v8/src/inspector/v8-injected-script-host.cc b/deps/v8/src/inspector/v8-injected-script-host.cc
index 2702aa6cf9..b970087917 100644
--- a/deps/v8/src/inspector/v8-injected-script-host.cc
+++ b/deps/v8/src/inspector/v8-injected-script-host.cc
@@ -105,7 +105,7 @@ v8::Local<v8::Object> V8InjectedScriptHost::create(
void V8InjectedScriptHost::nullifyPrototypeCallback(
const v8::FunctionCallbackInfo<v8::Value>& info) {
- CHECK(info.Length() == 1);
+ CHECK_EQ(1, info.Length());
DCHECK(info[0]->IsObject());
if (!info[0]->IsObject()) return;
v8::Isolate* isolate = info.GetIsolate();
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
index 0c1c2e2af2..2675216143 100644
--- a/deps/v8/src/inspector/v8-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
@@ -26,6 +26,7 @@ static const char profilerEnabled[] = "profilerEnabled";
static const char preciseCoverageStarted[] = "preciseCoverageStarted";
static const char preciseCoverageCallCount[] = "preciseCoverageCallCount";
static const char preciseCoverageDetailed[] = "preciseCoverageDetailed";
+static const char typeProfileStarted[] = "typeProfileStarted";
}
namespace {
@@ -244,9 +245,8 @@ void V8ProfilerAgentImpl::restore() {
false)) {
bool callCount = m_state->booleanProperty(
ProfilerAgentState::preciseCoverageCallCount, false);
- bool detailed =
- m_state->booleanProperty(ProfilerAgentState::preciseCoverageDetailed,
- v8::internal::FLAG_block_coverage);
+ bool detailed = m_state->booleanProperty(
+ ProfilerAgentState::preciseCoverageDetailed, false);
startPreciseCoverage(Maybe<bool>(callCount), Maybe<bool>(detailed));
}
}
@@ -282,7 +282,7 @@ Response V8ProfilerAgentImpl::startPreciseCoverage(Maybe<bool> callCount,
Maybe<bool> detailed) {
if (!m_enabled) return Response::Error("Profiler is not enabled");
bool callCountValue = callCount.fromMaybe(false);
- bool detailedValue = detailed.fromMaybe(v8::internal::FLAG_block_coverage);
+ bool detailedValue = detailed.fromMaybe(false);
m_state->setBoolean(ProfilerAgentState::preciseCoverageStarted, true);
m_state->setBoolean(ProfilerAgentState::preciseCoverageCallCount,
callCountValue);
@@ -396,6 +396,76 @@ Response V8ProfilerAgentImpl::getBestEffortCoverage(
return coverageToProtocol(m_isolate, coverage, out_result);
}
+namespace {
+std::unique_ptr<protocol::Array<protocol::Profiler::ScriptTypeProfile>>
+typeProfileToProtocol(v8::Isolate* isolate,
+ const v8::debug::TypeProfile& type_profile) {
+ std::unique_ptr<protocol::Array<protocol::Profiler::ScriptTypeProfile>>
+ result = protocol::Array<protocol::Profiler::ScriptTypeProfile>::create();
+ for (size_t i = 0; i < type_profile.ScriptCount(); i++) {
+ v8::debug::TypeProfile::ScriptData script_data =
+ type_profile.GetScriptData(i);
+ v8::Local<v8::debug::Script> script = script_data.GetScript();
+ std::unique_ptr<protocol::Array<protocol::Profiler::TypeProfileEntry>>
+ entries =
+ protocol::Array<protocol::Profiler::TypeProfileEntry>::create();
+
+ for (const auto& entry : script_data.Entries()) {
+ std::unique_ptr<protocol::Array<protocol::Profiler::TypeObject>> types =
+ protocol::Array<protocol::Profiler::TypeObject>::create();
+ for (const auto& type : entry.Types()) {
+ types->addItem(protocol::Profiler::TypeObject::create()
+ .setName(toProtocolString(
+ type.FromMaybe(v8::Local<v8::String>())))
+ .build());
+ }
+ entries->addItem(protocol::Profiler::TypeProfileEntry::create()
+ .setOffset(entry.SourcePosition())
+ .setTypes(std::move(types))
+ .build());
+ }
+ String16 url;
+ v8::Local<v8::String> name;
+ if (script->Name().ToLocal(&name) || script->SourceURL().ToLocal(&name)) {
+ url = toProtocolString(name);
+ }
+ result->addItem(protocol::Profiler::ScriptTypeProfile::create()
+ .setScriptId(String16::fromInteger(script->Id()))
+ .setUrl(url)
+ .setEntries(std::move(entries))
+ .build());
+ }
+ return result;
+}
+} // anonymous namespace
+
+Response V8ProfilerAgentImpl::startTypeProfile() {
+ m_state->setBoolean(ProfilerAgentState::typeProfileStarted, true);
+ v8::debug::TypeProfile::SelectMode(m_isolate,
+ v8::debug::TypeProfile::kCollect);
+ return Response::OK();
+}
+
+Response V8ProfilerAgentImpl::stopTypeProfile() {
+ m_state->setBoolean(ProfilerAgentState::typeProfileStarted, false);
+ v8::debug::TypeProfile::SelectMode(m_isolate, v8::debug::TypeProfile::kNone);
+ return Response::OK();
+}
+
+Response V8ProfilerAgentImpl::takeTypeProfile(
+ std::unique_ptr<protocol::Array<protocol::Profiler::ScriptTypeProfile>>*
+ out_result) {
+ if (!m_state->booleanProperty(ProfilerAgentState::typeProfileStarted,
+ false)) {
+ return Response::Error("Type profile has not been started.");
+ }
+ v8::HandleScope handle_scope(m_isolate);
+ v8::debug::TypeProfile type_profile =
+ v8::debug::TypeProfile::Collect(m_isolate);
+ *out_result = typeProfileToProtocol(m_isolate, type_profile);
+ return Response::OK();
+}
+
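The three new Profiler methods wrap a small v8::debug surface. An outline of the lifecycle, using only the calls visible in this patch (isolate setup, flag handling, and the script run under test are elided, so this is a sketch rather than a buildable test):

#include "src/debug/debug-interface.h"

void DumpTypeProfile(v8::Isolate* isolate) {
  // startTypeProfile: begin collecting.
  v8::debug::TypeProfile::SelectMode(isolate, v8::debug::TypeProfile::kCollect);
  // ... run the JavaScript under test here ...
  // takeTypeProfile: snapshot what has been collected so far.
  v8::debug::TypeProfile profile = v8::debug::TypeProfile::Collect(isolate);
  for (size_t i = 0; i < profile.ScriptCount(); i++) {
    v8::debug::TypeProfile::ScriptData data = profile.GetScriptData(i);
    for (const auto& entry : data.Entries()) {
      // entry.SourcePosition() is the source offset; entry.Types() holds the
      // observed type names as v8::MaybeLocal<v8::String>.
    }
  }
  // stopTypeProfile: stop collecting.
  v8::debug::TypeProfile::SelectMode(isolate, v8::debug::TypeProfile::kNone);
}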
String16 V8ProfilerAgentImpl::nextProfileId() {
return String16::fromInteger(
v8::base::Relaxed_AtomicIncrement(&s_lastProfileId, 1));
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.h b/deps/v8/src/inspector/v8-profiler-agent-impl.h
index 0ac80f866a..e758a900fa 100644
--- a/deps/v8/src/inspector/v8-profiler-agent-impl.h
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.h
@@ -48,6 +48,12 @@ class V8ProfilerAgentImpl : public protocol::Profiler::Backend {
std::unique_ptr<protocol::Array<protocol::Profiler::ScriptCoverage>>*
out_result) override;
+ Response startTypeProfile() override;
+ Response stopTypeProfile() override;
+ Response takeTypeProfile(
+ std::unique_ptr<protocol::Array<protocol::Profiler::ScriptTypeProfile>>*
+ out_result) override;
+
void consoleProfile(const String16& title);
void consoleProfileEnd(const String16& title);
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.cc b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
index bdae0ef7a1..8ecfbc5791 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -35,7 +35,6 @@
#include "src/inspector/inspected-context.h"
#include "src/inspector/protocol/Protocol.h"
#include "src/inspector/remote-object-id.h"
-#include "src/inspector/string-util.h"
#include "src/inspector/v8-console-message.h"
#include "src/inspector/v8-debugger-agent-impl.h"
#include "src/inspector/v8-debugger.h"
@@ -104,6 +103,97 @@ bool wrapEvaluateResultAsync(InjectedScript* injectedScript,
return false;
}
+void innerCallFunctionOn(
+ V8InspectorSessionImpl* session, InjectedScript::Scope& scope,
+ v8::Local<v8::Value> recv, const String16& expression,
+ Maybe<protocol::Array<protocol::Runtime::CallArgument>> optionalArguments,
+ bool silent, bool returnByValue, bool generatePreview, bool userGesture,
+ bool awaitPromise, const String16& objectGroup,
+ std::unique_ptr<V8RuntimeAgentImpl::CallFunctionOnCallback> callback) {
+ V8InspectorImpl* inspector = session->inspector();
+
+ std::unique_ptr<v8::Local<v8::Value>[]> argv = nullptr;
+ int argc = 0;
+ if (optionalArguments.isJust()) {
+ protocol::Array<protocol::Runtime::CallArgument>* arguments =
+ optionalArguments.fromJust();
+ argc = static_cast<int>(arguments->length());
+ argv.reset(new v8::Local<v8::Value>[argc]);
+ for (int i = 0; i < argc; ++i) {
+ v8::Local<v8::Value> argumentValue;
+ Response response = scope.injectedScript()->resolveCallArgument(
+ arguments->get(i), &argumentValue);
+ if (!response.isSuccess()) {
+ callback->sendFailure(response);
+ return;
+ }
+ argv[i] = argumentValue;
+ }
+ }
+
+ if (silent) scope.ignoreExceptionsAndMuteConsole();
+ if (userGesture) scope.pretendUserGesture();
+
+ v8::MaybeLocal<v8::Value> maybeFunctionValue;
+ v8::Local<v8::Script> functionScript;
+ if (inspector
+ ->compileScript(scope.context(), "(" + expression + ")", String16())
+ .ToLocal(&functionScript)) {
+ v8::MicrotasksScope microtasksScope(inspector->isolate(),
+ v8::MicrotasksScope::kRunMicrotasks);
+ maybeFunctionValue = functionScript->Run(scope.context());
+ }
+ // Re-initialize after running client's code, as it could have destroyed
+ // context or session.
+ Response response = scope.initialize();
+ if (!response.isSuccess()) {
+ callback->sendFailure(response);
+ return;
+ }
+
+ if (scope.tryCatch().HasCaught()) {
+ wrapEvaluateResultAsync(scope.injectedScript(), maybeFunctionValue,
+ scope.tryCatch(), objectGroup, false, false,
+ callback.get());
+ return;
+ }
+
+ v8::Local<v8::Value> functionValue;
+ if (!maybeFunctionValue.ToLocal(&functionValue) ||
+ !functionValue->IsFunction()) {
+ callback->sendFailure(
+ Response::Error("Given expression does not evaluate to a function"));
+ return;
+ }
+
+ v8::MaybeLocal<v8::Value> maybeResultValue;
+ {
+ v8::MicrotasksScope microtasksScope(inspector->isolate(),
+ v8::MicrotasksScope::kRunMicrotasks);
+ maybeResultValue = functionValue.As<v8::Function>()->Call(
+ scope.context(), recv, argc, argv.get());
+ }
+ // Re-initialize after running client's code, as it could have destroyed
+ // context or session.
+ response = scope.initialize();
+ if (!response.isSuccess()) {
+ callback->sendFailure(response);
+ return;
+ }
+
+ if (!awaitPromise || scope.tryCatch().HasCaught()) {
+ wrapEvaluateResultAsync(scope.injectedScript(), maybeResultValue,
+ scope.tryCatch(), objectGroup, returnByValue,
+ generatePreview, callback.get());
+ return;
+ }
+
+ scope.injectedScript()->addPromiseCallback(
+ session, maybeResultValue, objectGroup, returnByValue, generatePreview,
+ EvaluateCallbackWrapper<V8RuntimeAgentImpl::CallFunctionOnCallback>::wrap(
+ std::move(callback)));
+}
+
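innerCallFunctionOn factors the body of callFunctionOn out so that both the objectId and the executionContextId paths below can share it. Its core trick: wrap the expression in parentheses, compile and run it so a function literal evaluates to a callable, then invoke that on the chosen receiver. The same idea reduced to public V8 API (isolate, context, and inspector plumbing elided):

#include <v8.h>

v8::MaybeLocal<v8::Value> CallExpressionOn(v8::Local<v8::Context> context,
                                           v8::Local<v8::String> wrappedExpr,
                                           v8::Local<v8::Value> recv, int argc,
                                           v8::Local<v8::Value>* argv) {
  // wrappedExpr is assumed to already be "(" + expression + ")".
  v8::Local<v8::Script> script;
  if (!v8::Script::Compile(context, wrappedExpr).ToLocal(&script))
    return v8::MaybeLocal<v8::Value>();
  v8::Local<v8::Value> fn;
  if (!script->Run(context).ToLocal(&fn) || !fn->IsFunction())
    return v8::MaybeLocal<v8::Value>();  // "does not evaluate to a function"
  return fn.As<v8::Function>()->Call(context, recv, argc, argv);
}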
Response ensureContext(V8InspectorImpl* inspector, int contextGroupId,
Maybe<int> executionContextId, int* contextId) {
if (executionContextId.isJust()) {
@@ -218,100 +308,59 @@ void V8RuntimeAgentImpl::awaitPromise(
}
void V8RuntimeAgentImpl::callFunctionOn(
- const String16& objectId, const String16& expression,
+ const String16& expression, Maybe<String16> objectId,
Maybe<protocol::Array<protocol::Runtime::CallArgument>> optionalArguments,
Maybe<bool> silent, Maybe<bool> returnByValue, Maybe<bool> generatePreview,
Maybe<bool> userGesture, Maybe<bool> awaitPromise,
+ Maybe<int> executionContextId, Maybe<String16> objectGroup,
std::unique_ptr<CallFunctionOnCallback> callback) {
- InjectedScript::ObjectScope scope(m_session, objectId);
- Response response = scope.initialize();
- if (!response.isSuccess()) {
- callback->sendFailure(response);
- return;
- }
-
- std::unique_ptr<v8::Local<v8::Value>[]> argv = nullptr;
- int argc = 0;
- if (optionalArguments.isJust()) {
- protocol::Array<protocol::Runtime::CallArgument>* arguments =
- optionalArguments.fromJust();
- argc = static_cast<int>(arguments->length());
- argv.reset(new v8::Local<v8::Value>[argc]);
- for (int i = 0; i < argc; ++i) {
- v8::Local<v8::Value> argumentValue;
- response = scope.injectedScript()->resolveCallArgument(arguments->get(i),
- &argumentValue);
- if (!response.isSuccess()) {
- callback->sendFailure(response);
- return;
- }
- argv[i] = argumentValue;
- }
- }
-
- if (silent.fromMaybe(false)) scope.ignoreExceptionsAndMuteConsole();
- if (userGesture.fromMaybe(false)) scope.pretendUserGesture();
-
- v8::MaybeLocal<v8::Value> maybeFunctionValue;
- v8::Local<v8::Script> functionScript;
- if (m_inspector
- ->compileScript(scope.context(), "(" + expression + ")", String16())
- .ToLocal(&functionScript)) {
- v8::MicrotasksScope microtasksScope(m_inspector->isolate(),
- v8::MicrotasksScope::kRunMicrotasks);
- maybeFunctionValue = functionScript->Run(scope.context());
- }
- // Re-initialize after running client's code, as it could have destroyed
- // context or session.
- response = scope.initialize();
- if (!response.isSuccess()) {
- callback->sendFailure(response);
- return;
- }
-
- if (scope.tryCatch().HasCaught()) {
- wrapEvaluateResultAsync(scope.injectedScript(), maybeFunctionValue,
- scope.tryCatch(), scope.objectGroupName(), false,
- false, callback.get());
+ if (objectId.isJust() && executionContextId.isJust()) {
+ callback->sendFailure(Response::Error(
+ "ObjectId must not be specified together with executionContextId"));
return;
}
-
- v8::Local<v8::Value> functionValue;
- if (!maybeFunctionValue.ToLocal(&functionValue) ||
- !functionValue->IsFunction()) {
- callback->sendFailure(
- Response::Error("Given expression does not evaluate to a function"));
+ if (!objectId.isJust() && !executionContextId.isJust()) {
+ callback->sendFailure(Response::Error(
+ "Either ObjectId or executionContextId must be specified"));
return;
}
-
- v8::MaybeLocal<v8::Value> maybeResultValue;
- {
- v8::MicrotasksScope microtasksScope(m_inspector->isolate(),
- v8::MicrotasksScope::kRunMicrotasks);
- maybeResultValue = functionValue.As<v8::Function>()->Call(
- scope.context(), scope.object(), argc, argv.get());
- }
- // Re-initialize after running client's code, as it could have destroyed
- // context or session.
- response = scope.initialize();
- if (!response.isSuccess()) {
- callback->sendFailure(response);
- return;
- }
-
- if (!awaitPromise.fromMaybe(false) || scope.tryCatch().HasCaught()) {
- wrapEvaluateResultAsync(scope.injectedScript(), maybeResultValue,
- scope.tryCatch(), scope.objectGroupName(),
- returnByValue.fromMaybe(false),
- generatePreview.fromMaybe(false), callback.get());
- return;
+ if (objectId.isJust()) {
+ InjectedScript::ObjectScope scope(m_session, objectId.fromJust());
+ Response response = scope.initialize();
+ if (!response.isSuccess()) {
+ callback->sendFailure(response);
+ return;
+ }
+ innerCallFunctionOn(
+ m_session, scope, scope.object(), expression,
+ std::move(optionalArguments), silent.fromMaybe(false),
+ returnByValue.fromMaybe(false), generatePreview.fromMaybe(false),
+ userGesture.fromMaybe(false), awaitPromise.fromMaybe(false),
+ objectGroup.isJust() ? objectGroup.fromMaybe(String16())
+ : scope.objectGroupName(),
+ std::move(callback));
+ } else {
+ int contextId = 0;
+ Response response =
+ ensureContext(m_inspector, m_session->contextGroupId(),
+ std::move(executionContextId.fromJust()), &contextId);
+ if (!response.isSuccess()) {
+ callback->sendFailure(response);
+ return;
+ }
+ InjectedScript::ContextScope scope(m_session, contextId);
+ response = scope.initialize();
+ if (!response.isSuccess()) {
+ callback->sendFailure(response);
+ return;
+ }
+ innerCallFunctionOn(
+ m_session, scope, scope.context()->Global(), expression,
+ std::move(optionalArguments), silent.fromMaybe(false),
+ returnByValue.fromMaybe(false), generatePreview.fromMaybe(false),
+ userGesture.fromMaybe(false), awaitPromise.fromMaybe(false),
+ objectGroup.fromMaybe(""), std::move(callback));
}
-
- scope.injectedScript()->addPromiseCallback(
- m_session, maybeResultValue, scope.objectGroupName(),
- returnByValue.fromMaybe(false), generatePreview.fromMaybe(false),
- EvaluateCallbackWrapper<CallFunctionOnCallback>::wrap(
- std::move(callback)));
}
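With the new signature, exactly one of objectId and executionContextId selects the receiver: an ObjectScope with the resolved object, or a ContextScope with that context's global object. A self-contained sketch of just the validation half, with std::optional standing in for protocol::Maybe (error strings verbatim from the patch):

#include <iostream>
#include <optional>
#include <string>

std::optional<std::string> validateTarget(
    const std::optional<std::string>& objectId,
    const std::optional<int>& executionContextId) {
  if (objectId && executionContextId)
    return "ObjectId must not be specified together with executionContextId";
  if (!objectId && !executionContextId)
    return "Either ObjectId or executionContextId must be specified";
  return std::nullopt;  // valid: dispatch to ObjectScope or ContextScope
}

int main() {
  if (auto error = validateTarget(std::nullopt, std::nullopt))
    std::cout << *error << "\n";  // prints the "Either ..." message
}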
Response V8RuntimeAgentImpl::getProperties(
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.h b/deps/v8/src/inspector/v8-runtime-agent-impl.h
index 6f3b98cf44..1d5067b560 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.h
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.h
@@ -69,11 +69,12 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
Maybe<bool> generatePreview,
std::unique_ptr<AwaitPromiseCallback>) override;
void callFunctionOn(
- const String16& objectId, const String16& expression,
+ const String16& expression, Maybe<String16> objectId,
Maybe<protocol::Array<protocol::Runtime::CallArgument>> optionalArguments,
Maybe<bool> silent, Maybe<bool> returnByValue,
Maybe<bool> generatePreview, Maybe<bool> userGesture,
- Maybe<bool> awaitPromise,
+ Maybe<bool> awaitPromise, Maybe<int> executionContextId,
+ Maybe<String16> objectGroup,
std::unique_ptr<CallFunctionOnCallback>) override;
Response releaseObject(const String16& objectId) override;
Response getProperties(
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.cc b/deps/v8/src/inspector/v8-stack-trace-impl.cc
index 9db6b47caf..12ddd96b58 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.cc
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.cc
@@ -96,8 +96,8 @@ StackFrame::StackFrame(v8::Local<v8::StackFrame> v8Frame)
m_sourceURL(toProtocolString(v8Frame->GetScriptNameOrSourceURL())),
m_lineNumber(v8Frame->GetLineNumber() - 1),
m_columnNumber(v8Frame->GetColumn() - 1) {
- DCHECK(m_lineNumber + 1 != v8::Message::kNoLineNumberInfo);
- DCHECK(m_columnNumber + 1 != v8::Message::kNoColumnInfo);
+ DCHECK_NE(v8::Message::kNoLineNumberInfo, m_lineNumber + 1);
+ DCHECK_NE(v8::Message::kNoColumnInfo, m_columnNumber + 1);
}
void StackFrame::translate(WasmTranslation* wasmTranslation) {
diff --git a/deps/v8/src/inspector/wasm-translation.cc b/deps/v8/src/inspector/wasm-translation.cc
index 6d763e83d5..431573d842 100644
--- a/deps/v8/src/inspector/wasm-translation.cc
+++ b/deps/v8/src/inspector/wasm-translation.cc
@@ -7,27 +7,13 @@
#include <algorithm>
#include "src/debug/debug-interface.h"
-#include "src/inspector/protocol/Debugger.h"
-#include "src/inspector/script-breakpoint.h"
#include "src/inspector/string-util.h"
#include "src/inspector/v8-debugger-agent-impl.h"
#include "src/inspector/v8-debugger-script.h"
#include "src/inspector/v8-debugger.h"
#include "src/inspector/v8-inspector-impl.h"
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wheader-hygiene"
-#endif
-
-using namespace v8_inspector;
-using namespace v8;
-
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic pop
-#endif
+namespace v8_inspector {
class WasmTranslation::TranslatorImpl {
public:
@@ -44,7 +30,7 @@ class WasmTranslation::TranslatorImpl {
column(column) {}
};
- virtual void Init(Isolate*, WasmTranslation*, V8DebuggerAgentImpl*) = 0;
+ virtual void Init(v8::Isolate*, WasmTranslation*, V8DebuggerAgentImpl*) = 0;
virtual void Translate(TransLocation*) = 0;
virtual void TranslateBack(TransLocation*) = 0;
virtual ~TranslatorImpl() {}
@@ -56,23 +42,24 @@ class WasmTranslation::TranslatorImpl {
class WasmTranslation::TranslatorImpl::RawTranslator
: public WasmTranslation::TranslatorImpl {
public:
- void Init(Isolate*, WasmTranslation*, V8DebuggerAgentImpl*) {}
+ void Init(v8::Isolate*, WasmTranslation*, V8DebuggerAgentImpl*) {}
void Translate(TransLocation*) {}
void TranslateBack(TransLocation*) {}
};
class WasmTranslation::TranslatorImpl::DisassemblingTranslator
: public WasmTranslation::TranslatorImpl {
- using OffsetTable = debug::WasmDisassembly::OffsetTable;
+ using OffsetTable = v8::debug::WasmDisassembly::OffsetTable;
public:
- DisassemblingTranslator(Isolate* isolate, Local<debug::WasmScript> script)
+ DisassemblingTranslator(v8::Isolate* isolate,
+ v8::Local<v8::debug::WasmScript> script)
: script_(isolate, script) {}
- void Init(Isolate* isolate, WasmTranslation* translation,
+ void Init(v8::Isolate* isolate, WasmTranslation* translation,
V8DebuggerAgentImpl* agent) override {
// Register fake scripts for each function in this wasm module/script.
- Local<debug::WasmScript> script = script_.Get(isolate);
+ v8::Local<v8::debug::WasmScript> script = script_.Get(isolate);
int num_functions = script->NumFunctions();
int num_imported_functions = script->NumImportedFunctions();
DCHECK_LE(0, num_imported_functions);
@@ -163,7 +150,7 @@ class WasmTranslation::TranslatorImpl::DisassemblingTranslator
private:
String16 GetFakeScriptUrl(v8::Isolate* isolate, int func_index) {
- Local<debug::WasmScript> script = script_.Get(isolate);
+ v8::Local<v8::debug::WasmScript> script = script_.Get(isolate);
String16 script_name = toProtocolString(script->Name().ToLocalChecked());
int numFunctions = script->NumFunctions();
int numImported = script->NumImportedFunctions();
@@ -195,9 +182,10 @@ class WasmTranslation::TranslatorImpl::DisassemblingTranslator
String16 fake_script_id = GetFakeScriptId(underlyingScriptId, func_idx);
String16 fake_script_url = GetFakeScriptUrl(isolate, func_idx);
- v8::Local<debug::WasmScript> script = script_.Get(isolate);
+ v8::Local<v8::debug::WasmScript> script = script_.Get(isolate);
// TODO(clemensh): Generate disassembly lazily when queried by the frontend.
- debug::WasmDisassembly disassembly = script->DisassembleFunction(func_idx);
+ v8::debug::WasmDisassembly disassembly =
+ script->DisassembleFunction(func_idx);
DCHECK_EQ(0, offset_tables_.count(func_idx));
offset_tables_.insert(
@@ -254,7 +242,7 @@ class WasmTranslation::TranslatorImpl::DisassemblingTranslator
return &inserted.first->second;
}
- Global<debug::WasmScript> script_;
+ v8::Global<v8::debug::WasmScript> script_;
  // We expect to disassemble only a subset of the functions, so store them
  // in a map instead of an array.
@@ -267,7 +255,7 @@ WasmTranslation::WasmTranslation(v8::Isolate* isolate)
WasmTranslation::~WasmTranslation() { Clear(); }
-void WasmTranslation::AddScript(Local<debug::WasmScript> script,
+void WasmTranslation::AddScript(v8::Local<v8::debug::WasmScript> script,
V8DebuggerAgentImpl* agent) {
std::unique_ptr<TranslatorImpl> impl;
switch (mode_) {
@@ -338,3 +326,5 @@ void WasmTranslation::AddFakeScript(const String16& scriptId,
DCHECK_EQ(0, fake_scripts_.count(scriptId));
fake_scripts_.insert(std::make_pair(scriptId, translator));
}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/interface-descriptors.cc b/deps/v8/src/interface-descriptors.cc
index b0cafc48e2..94ad2efc72 100644
--- a/deps/v8/src/interface-descriptors.cc
+++ b/deps/v8/src/interface-descriptors.cc
@@ -15,7 +15,7 @@ void CallInterfaceDescriptorData::InitializePlatformSpecific(
register_param_count_ = register_parameter_count;
// InterfaceDescriptor owns a copy of the registers array.
- register_params_.reset(NewArray<Register>(register_parameter_count));
+ register_params_.reset(NewArray<Register>(register_parameter_count, no_reg));
for (int i = 0; i < register_parameter_count; i++) {
register_params_[i] = registers[i];
}
@@ -101,8 +101,9 @@ const Register FastNewArgumentsDescriptor::TargetRegister() {
void RecordWriteDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
MachineType machine_types[] = {MachineType::TaggedPointer(),
- MachineType::Pointer(),
- MachineType::Pointer()};
+ MachineType::Pointer(), MachineType::Pointer(),
+ MachineType::TaggedSigned(),
+ MachineType::TaggedSigned()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
}
@@ -386,8 +387,9 @@ void GrowArrayElementsDescriptor::InitializePlatformSpecific(
void NewArgumentsElementsDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
- // kFrame, kLength
+ // kFrame, kLength, kMappedCount
MachineType const kMachineTypes[] = {MachineType::Pointer(),
+ MachineType::TaggedSigned(),
MachineType::TaggedSigned()};
data->InitializePlatformIndependent(arraysize(kMachineTypes), 0,
kMachineTypes);
@@ -395,27 +397,7 @@ void NewArgumentsElementsDescriptor::InitializePlatformIndependent(
void NewArgumentsElementsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, 2);
-}
-
-void FastCloneRegExpDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kClosure, kLiteralIndex, kPattern, kFlags
- MachineType machine_types[] = {
- MachineType::AnyTagged(), MachineType::TaggedSigned(),
- MachineType::AnyTagged(), MachineType::AnyTagged()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void FastCloneShallowArrayDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kClosure, kLiteralIndex, kConstantElements
- MachineType machine_types[] = {MachineType::AnyTagged(),
- MachineType::TaggedSigned(),
- MachineType::AnyTagged()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
+ DefaultInitializePlatformSpecific(data, 3);
}
void CallTrampolineDescriptor::InitializePlatformIndependent(
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index 7b67e795b4..e6cdf7fdd2 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -37,9 +37,6 @@ class PlatformInterfaceDescriptor;
V(TypeConversion) \
V(TypeConversionStackParameter) \
V(Typeof) \
- V(FastCloneRegExp) \
- V(FastCloneShallowArray) \
- V(FastCloneShallowObject) \
V(CallFunction) \
V(CallVarargs) \
V(CallForwardVarargs) \
@@ -343,7 +340,7 @@ static const int kMaxBuiltinRegisterParams = 5;
kStackParameterCount = kArity + 1 \
};
-class VoidDescriptor : public CallInterfaceDescriptor {
+class V8_EXPORT_PRIVATE VoidDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(VoidDescriptor, CallInterfaceDescriptor)
};
@@ -519,7 +516,7 @@ class FastNewArgumentsDescriptor : public CallInterfaceDescriptor {
class RecordWriteDescriptor final : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kObject, kSlot, kIsolate)
+ DEFINE_PARAMETERS(kObject, kSlot, kIsolate, kRememberedSet, kFPMode)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(RecordWriteDescriptor,
CallInterfaceDescriptor)
};
@@ -560,29 +557,6 @@ class TypeofDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(TypeofDescriptor, CallInterfaceDescriptor)
};
-
-class FastCloneRegExpDescriptor : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS(kClosure, kLiteralIndex, kPattern, kFlags)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(FastCloneRegExpDescriptor,
- CallInterfaceDescriptor)
-};
-
-
-class FastCloneShallowArrayDescriptor : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS(kClosure, kLiteralIndex, kConstantElements)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(FastCloneShallowArrayDescriptor,
- CallInterfaceDescriptor)
-};
-
-
-class FastCloneShallowObjectDescriptor : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS(kClosure, kLiteralIndex, kBoilerplateDescription, kFlags)
- DECLARE_DESCRIPTOR(FastCloneShallowObjectDescriptor, CallInterfaceDescriptor)
-};
-
class CallTrampolineDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kFunction, kActualArgumentsCount)
@@ -834,7 +808,7 @@ class GrowArrayElementsDescriptor : public CallInterfaceDescriptor {
class NewArgumentsElementsDescriptor final : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kFrame, kLength)
+ DEFINE_PARAMETERS(kFrame, kLength, kMappedCount)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(NewArgumentsElementsDescriptor,
CallInterfaceDescriptor)
};
diff --git a/deps/v8/src/interpreter/block-coverage-builder.h b/deps/v8/src/interpreter/block-coverage-builder.h
index dc1b4d704b..150d46c49c 100644
--- a/deps/v8/src/interpreter/block-coverage-builder.h
+++ b/deps/v8/src/interpreter/block-coverage-builder.h
@@ -29,7 +29,7 @@ class BlockCoverageBuilder final : public ZoneObject {
static constexpr int kNoCoverageArraySlot = -1;
- int AllocateBlockCoverageSlot(AstNode* node, SourceRangeKind kind) {
+ int AllocateBlockCoverageSlot(ZoneObject* node, SourceRangeKind kind) {
AstNodeSourceRanges* ranges = source_range_map_->Find(node);
if (ranges == nullptr) return kNoCoverageArraySlot;
@@ -46,7 +46,7 @@ class BlockCoverageBuilder final : public ZoneObject {
builder_->IncBlockCounter(coverage_array_slot);
}
- void IncrementBlockCounter(AstNode* node, SourceRangeKind kind) {
+ void IncrementBlockCounter(ZoneObject* node, SourceRangeKind kind) {
int slot = AllocateBlockCoverageSlot(node, kind);
IncrementBlockCounter(slot);
}
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.cc b/deps/v8/src/interpreter/bytecode-array-accessor.cc
index f597981514..64b7a219c0 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.cc
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.cc
@@ -159,6 +159,14 @@ Runtime::FunctionId BytecodeArrayAccessor::GetRuntimeIdOperand(
return static_cast<Runtime::FunctionId>(raw_id);
}
+uint32_t BytecodeArrayAccessor::GetNativeContextIndexOperand(
+ int operand_index) const {
+ OperandType operand_type =
+ Bytecodes::GetOperandType(current_bytecode(), operand_index);
+ DCHECK(operand_type == OperandType::kNativeContextIndex);
+ return GetUnsignedOperand(operand_index, operand_type);
+}
+
Runtime::FunctionId BytecodeArrayAccessor::GetIntrinsicIdOperand(
int operand_index) const {
OperandType operand_type =
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.h b/deps/v8/src/interpreter/bytecode-array-accessor.h
index e465a5c881..42185feeca 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.h
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.h
@@ -85,6 +85,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
int GetRegisterOperandRange(int operand_index) const;
Runtime::FunctionId GetRuntimeIdOperand(int operand_index) const;
Runtime::FunctionId GetIntrinsicIdOperand(int operand_index) const;
+ uint32_t GetNativeContextIndexOperand(int operand_index) const;
Handle<Object> GetConstantAtIndex(int offset) const;
Handle<Object> GetConstantForIndexOperand(int operand_index) const;
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index a5cf2b0370..09789fe8b2 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -428,13 +428,26 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperationSmiLiteral(
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::CountOperation(Token::Value op,
+BytecodeArrayBuilder& BytecodeArrayBuilder::UnaryOperation(Token::Value op,
int feedback_slot) {
- if (op == Token::Value::ADD) {
- OutputInc(feedback_slot);
- } else {
- DCHECK_EQ(op, Token::Value::SUB);
- OutputDec(feedback_slot);
+ switch (op) {
+ case Token::Value::INC:
+ OutputInc(feedback_slot);
+ break;
+ case Token::Value::DEC:
+ OutputDec(feedback_slot);
+ break;
+ case Token::Value::ADD:
+ OutputToNumber(feedback_slot);
+ break;
+ case Token::Value::SUB:
+ OutputNegate(feedback_slot);
+ break;
+ case Token::Value::BIT_NOT:
+ OutputBitwiseNot(feedback_slot);
+ break;
+ default:
+ UNREACHABLE();
}
return *this;
}
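
The new UnaryOperation entry point maps each unary token onto its own bytecode instead of special-casing only increment and decrement, with unary plus lowered to a bare ToNumber conversion. A minimal standalone sketch of the same dispatch shape (assumed simplified Token and Bytecode enums, not V8's real types):

    #include <cassert>
    #include <iostream>

    enum class Token { INC, DEC, ADD, SUB, BIT_NOT };
    enum class Bytecode { kInc, kDec, kToNumber, kNegate, kBitwiseNot };

    // Mirrors the switch in BytecodeArrayBuilder::UnaryOperation: one
    // bytecode per token, with unary + emitting only a conversion.
    Bytecode SelectUnaryBytecode(Token op) {
      switch (op) {
        case Token::INC:     return Bytecode::kInc;
        case Token::DEC:     return Bytecode::kDec;
        case Token::ADD:     return Bytecode::kToNumber;   // +x converts only
        case Token::SUB:     return Bytecode::kNegate;     // -x
        case Token::BIT_NOT: return Bytecode::kBitwiseNot; // ~x
      }
      assert(false && "unreachable");  // models UNREACHABLE()
      return Bytecode::kToNumber;
    }

    int main() {
      assert(SelectUnaryBytecode(Token::SUB) == Bytecode::kNegate);
      std::cout << "unary dispatch ok\n";
    }
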
@@ -821,7 +834,6 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreDataPropertyInLiteral(
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CollectTypeProfile(int position) {
- DCHECK(FLAG_type_profile);
OutputCollectTypeProfile(position);
return *this;
}
@@ -829,20 +841,17 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CollectTypeProfile(int position) {
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
Register object, size_t name_index, int feedback_slot,
LanguageMode language_mode) {
+#if DEBUG
// Ensure that language mode is in sync with the IC slot kind if the function
// literal is available (not a unit test case).
- // TODO(ishell): check only in debug mode.
if (literal_) {
FeedbackSlot slot = FeedbackVector::ToSlot(feedback_slot);
- CHECK_EQ(GetLanguageModeFromSlotKind(feedback_vector_spec()->GetKind(slot)),
- language_mode);
- }
- if (language_mode == SLOPPY) {
- OutputStaNamedPropertySloppy(object, name_index, feedback_slot);
- } else {
- DCHECK_EQ(language_mode, STRICT);
- OutputStaNamedPropertyStrict(object, name_index, feedback_slot);
+ DCHECK_EQ(
+ GetLanguageModeFromSlotKind(feedback_vector_spec()->GetKind(slot)),
+ language_mode);
}
+#endif
+ OutputStaNamedProperty(object, name_index, feedback_slot);
return *this;
}
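
Sloppy and strict named stores now share a single StaNamedProperty bytecode; the language mode travels with the feedback slot kind, so the old CHECK shrinks to a debug-only consistency assertion. A hedged sketch of that invariant (assumed two-value enums standing in for V8's slot kinds):

    #include <cassert>

    enum class LanguageMode { kSloppy, kStrict };
    enum class SlotKind { kStoreNamedSloppy, kStoreNamedStrict };

    LanguageMode GetLanguageModeFromSlotKind(SlotKind kind) {
      return kind == SlotKind::kStoreNamedStrict ? LanguageMode::kStrict
                                                 : LanguageMode::kSloppy;
    }

    // One emission path for both modes; builder and feedback vector spec
    // must agree, which is now checked only in debug builds.
    void EmitStaNamedProperty(SlotKind slot_kind, LanguageMode mode) {
    #ifdef DEBUG
      assert(GetLanguageModeFromSlotKind(slot_kind) == mode);
    #endif
      (void)slot_kind;
      (void)mode;
      // ... a single OutputStaNamedProperty(...) would follow here.
    }

    int main() {
      EmitStaNamedProperty(SlotKind::kStoreNamedStrict, LanguageMode::kStrict);
    }
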
@@ -856,14 +865,15 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedOwnProperty(
Register object, const AstRawString* name, int feedback_slot) {
size_t name_index = GetConstantPoolEntry(name);
+#if DEBUG
// Ensure that the store operation is in sync with the IC slot kind if
// the function literal is available (not a unit test case).
- // TODO(ishell): check only in debug mode.
if (literal_) {
FeedbackSlot slot = FeedbackVector::ToSlot(feedback_slot);
- CHECK_EQ(FeedbackSlotKind::kStoreOwnNamed,
- feedback_vector_spec()->GetKind(slot));
+ DCHECK_EQ(FeedbackSlotKind::kStoreOwnNamed,
+ feedback_vector_spec()->GetKind(slot));
}
+#endif
OutputStaNamedOwnProperty(object, name_index, feedback_slot);
return *this;
}
@@ -871,20 +881,17 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedOwnProperty(
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
Register object, Register key, int feedback_slot,
LanguageMode language_mode) {
+#if DEBUG
// Ensure that language mode is in sync with the IC slot kind if the function
// literal is available (not a unit test case).
- // TODO(ishell): check only in debug mode.
if (literal_) {
FeedbackSlot slot = FeedbackVector::ToSlot(feedback_slot);
- CHECK_EQ(GetLanguageModeFromSlotKind(feedback_vector_spec()->GetKind(slot)),
- language_mode);
- }
- if (language_mode == SLOPPY) {
- OutputStaKeyedPropertySloppy(object, key, feedback_slot);
- } else {
- DCHECK_EQ(language_mode, STRICT);
- OutputStaKeyedPropertyStrict(object, key, feedback_slot);
+ DCHECK_EQ(
+ GetLanguageModeFromSlotKind(feedback_vector_spec()->GetKind(slot)),
+ language_mode);
}
+#endif
+ OutputStaKeyedProperty(object, key, feedback_slot);
return *this;
}
@@ -982,6 +989,12 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateEmptyObjectLiteral() {
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::GetTemplateObject(
+ size_t template_object_description_entry) {
+ OutputGetTemplateObject(template_object_description_entry);
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::PushContext(Register context) {
OutputPushContext(context);
return *this;
@@ -1002,9 +1015,8 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::ToName(Register out) {
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::ToNumber(Register out,
- int feedback_slot) {
- OutputToNumber(out, feedback_slot);
+BytecodeArrayBuilder& BytecodeArrayBuilder::ToNumber(int feedback_slot) {
+ OutputToNumber(feedback_slot);
return *this;
}
@@ -1180,6 +1192,11 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::ReThrow() {
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::Abort(BailoutReason reason) {
+ OutputAbort(reason);
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::Return() {
OutputReturn();
return_seen_in_block_ = true;
@@ -1214,10 +1231,15 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::IncBlockCounter(
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::ForInEnumerate(Register receiver) {
+ OutputForInEnumerate(receiver);
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::ForInPrepare(
- Register receiver, RegisterList cache_info_triple) {
+ RegisterList cache_info_triple, int feedback_slot) {
DCHECK_EQ(3, cache_info_triple.register_count());
- OutputForInPrepare(receiver, cache_info_triple);
+ OutputForInPrepare(cache_info_triple, feedback_slot);
return *this;
}
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index 664599b907..e536b98cda 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -227,6 +227,11 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
Register output);
BytecodeArrayBuilder& CreateEmptyObjectLiteral();
+ // Gets or creates the template for a TemplateObjectDescription which will
+ // be inserted at constant pool index |template_object_description_entry|.
+ BytecodeArrayBuilder& GetTemplateObject(
+ size_t template_object_description_entry);
+
// Push the context in accumulator as the new context, and store in register
// |context|.
BytecodeArrayBuilder& PushContext(Register context);
@@ -304,20 +309,22 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
Register arg,
RegisterList return_pair);
- // Call the JS runtime function with |context_index| and arguments |args|.
+ // Call the JS runtime function with |context_index| and arguments |args|;
+ // no receiver is passed, as it is implicitly set to undefined.
BytecodeArrayBuilder& CallJSRuntime(int context_index, RegisterList args);
// Operators (register holds the lhs value, accumulator holds the rhs value).
// Type feedback will be recorded in the |feedback_slot|
BytecodeArrayBuilder& BinaryOperation(Token::Value binop, Register reg,
int feedback_slot);
+ // Same as above, but with the lhs in the accumulator and the rhs in |literal|.
BytecodeArrayBuilder& BinaryOperationSmiLiteral(Token::Value binop,
Smi* literal,
int feedback_slot);
- // Count Operators (value stored in accumulator).
+ // Unary and Count Operators (value stored in accumulator).
// Type feedback will be recorded in the |feedback_slot|
- BytecodeArrayBuilder& CountOperation(Token::Value op, int feedback_slot);
+ BytecodeArrayBuilder& UnaryOperation(Token::Value op, int feedback_slot);
enum class ToBooleanMode {
kConvertToBoolean, // Perform ToBoolean conversion on accumulator.
@@ -351,7 +358,9 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// Converts accumulator and stores result in register |out|.
BytecodeArrayBuilder& ToObject(Register out);
BytecodeArrayBuilder& ToName(Register out);
- BytecodeArrayBuilder& ToNumber(Register out, int feedback_slot);
+
+ // Converts accumulator and stores result back in accumulator.
+ BytecodeArrayBuilder& ToNumber(int feedback_slot);
// Flow Control.
BytecodeArrayBuilder& Bind(BytecodeLabel* label);
@@ -384,6 +393,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
BytecodeArrayBuilder& Throw();
BytecodeArrayBuilder& ReThrow();
+ BytecodeArrayBuilder& Abort(BailoutReason reason);
BytecodeArrayBuilder& Return();
BytecodeArrayBuilder& ThrowReferenceErrorIfHole(const AstRawString* name);
BytecodeArrayBuilder& ThrowSuperNotCalledIfHole();
@@ -396,8 +406,9 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
BytecodeArrayBuilder& IncBlockCounter(int slot);
// Complex flow control.
- BytecodeArrayBuilder& ForInPrepare(Register receiver,
- RegisterList cache_info_triple);
+ BytecodeArrayBuilder& ForInEnumerate(Register receiver);
+ BytecodeArrayBuilder& ForInPrepare(RegisterList cache_info_triple,
+ int feedback_slot);
BytecodeArrayBuilder& ForInContinue(Register index, Register cache_length);
BytecodeArrayBuilder& ForInNext(Register receiver, Register index,
RegisterList cache_type_array_pair,
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc
index 67a52dcdcb..e467e1d527 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.cc
+++ b/deps/v8/src/interpreter/bytecode-array-writer.cc
@@ -153,6 +153,7 @@ void BytecodeArrayWriter::UpdateExitSeenInBlock(Bytecode bytecode) {
case Bytecode::kReturn:
case Bytecode::kThrow:
case Bytecode::kReThrow:
+ case Bytecode::kAbort:
case Bytecode::kJump:
case Bytecode::kJumpConstant:
exit_seen_in_block_ = true;
diff --git a/deps/v8/src/interpreter/bytecode-decoder.cc b/deps/v8/src/interpreter/bytecode-decoder.cc
index d5b64629f7..fb2ae376ea 100644
--- a/deps/v8/src/interpreter/bytecode-decoder.cc
+++ b/deps/v8/src/interpreter/bytecode-decoder.cc
@@ -6,6 +6,7 @@
#include <iomanip>
+#include "src/contexts.h"
#include "src/interpreter/interpreter-intrinsics.h"
#include "src/objects-inl.h"
@@ -69,6 +70,7 @@ uint32_t BytecodeDecoder::DecodeUnsignedOperand(const uint8_t* operand_start,
}
namespace {
+
const char* NameForRuntimeId(uint32_t idx) {
switch (idx) {
#define CASE(name, nargs, ressize) \
@@ -82,6 +84,19 @@ const char* NameForRuntimeId(uint32_t idx) {
UNREACHABLE();
}
}
+
+const char* NameForNativeContextIndex(uint32_t idx) {
+ switch (idx) {
+#define CASE(index_name, type, name) \
+ case Context::index_name: \
+ return #name;
+ NATIVE_CONTEXT_FIELDS(CASE)
+#undef CASE
+ default:
+ UNREACHABLE();
+ }
+}
+
} // anonymous namespace
// static
@@ -139,6 +154,11 @@ std::ostream& BytecodeDecoder::Decode(std::ostream& os,
os << "[" << NameForRuntimeId(IntrinsicsHelper::ToRuntimeId(id)) << "]";
break;
}
+ case interpreter::OperandType::kNativeContextIndex: {
+ auto id = DecodeUnsignedOperand(operand_start, op_type, operand_scale);
+ os << "[" << NameForNativeContextIndex(id) << "]";
+ break;
+ }
case interpreter::OperandType::kRuntimeId:
os << "[" << NameForRuntimeId(DecodeUnsignedOperand(
operand_start, op_type, operand_scale))
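
NameForNativeContextIndex turns an operand back into a printable name by re-expanding the NATIVE_CONTEXT_FIELDS X-macro, the same trick NameForRuntimeId already uses. The pattern in miniature, with a hypothetical three-entry field list (not V8's):

    #include <cstdio>

    // Hypothetical field list in the same X-macro style.
    #define DEMO_FIELDS(V)             \
      V(ARRAY_FUNCTION_INDEX, array)   \
      V(OBJECT_FUNCTION_INDEX, object) \
      V(PROMISE_RESOLVE_INDEX, promise_resolve)

    enum DemoIndex {
    #define ENUM_ENTRY(index_name, name) index_name,
      DEMO_FIELDS(ENUM_ENTRY)
    #undef ENUM_ENTRY
    };

    // One case per field; #name stringizes the identifier for printing.
    const char* NameForIndex(int idx) {
      switch (idx) {
    #define CASE(index_name, name) \
      case index_name:             \
        return #name;
        DEMO_FIELDS(CASE)
    #undef CASE
        default:
          return "<unknown>";
      }
    }

    int main() { std::printf("%s\n", NameForIndex(PROMISE_RESOLVE_INDEX)); }
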
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index f9e3664cfc..ac5367b7e5 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -17,6 +17,7 @@
#include "src/interpreter/bytecode-register-allocator.h"
#include "src/interpreter/control-flow-builders.h"
#include "src/objects-inl.h"
+#include "src/objects/debug-objects.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/token.h"
@@ -677,7 +678,8 @@ class BytecodeGenerator::GlobalDeclarationsBuilder final : public ZoneObject {
declarations_.push_back(Declaration(name, slot, nullptr));
}
- Handle<FixedArray> AllocateDeclarations(CompilationInfo* info) {
+ Handle<FixedArray> AllocateDeclarations(CompilationInfo* info,
+ Handle<Script> script) {
DCHECK(has_constant_pool_entry_);
int array_index = 0;
Handle<FixedArray> data = info->isolate()->factory()->NewFixedArray(
@@ -688,8 +690,8 @@ class BytecodeGenerator::GlobalDeclarationsBuilder final : public ZoneObject {
if (func == nullptr) {
initial_value = info->isolate()->factory()->undefined_value();
} else {
- initial_value = Compiler::GetSharedFunctionInfo(func, info->script(),
- info->isolate());
+ initial_value =
+ Compiler::GetSharedFunctionInfo(func, script, info->isolate());
}
// Return a null handle if any initial values can't be created. Caller
@@ -785,6 +787,7 @@ BytecodeGenerator::BytecodeGenerator(CompilationInfo* info)
native_function_literals_(0, info->zone()),
object_literals_(0, info->zone()),
array_literals_(0, info->zone()),
+ template_objects_(0, info->zone()),
execution_control_(nullptr),
execution_context_(nullptr),
execution_result_(nullptr),
@@ -795,16 +798,16 @@ BytecodeGenerator::BytecodeGenerator(CompilationInfo* info)
catch_prediction_(HandlerTable::UNCAUGHT) {
DCHECK_EQ(closure_scope(), closure_scope()->GetClosureScope());
if (info->has_source_range_map()) {
- DCHECK(FLAG_block_coverage);
block_coverage_builder_ = new (zone())
BlockCoverageBuilder(zone(), builder(), info->source_range_map());
}
}
-Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(Isolate* isolate) {
+Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(
+ Isolate* isolate, Handle<Script> script) {
DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
- AllocateDeferredConstants(isolate);
+ AllocateDeferredConstants(isolate, script);
if (block_coverage_builder_) {
info()->set_coverage_info(
@@ -825,11 +828,12 @@ Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(Isolate* isolate) {
return bytecode_array;
}
-void BytecodeGenerator::AllocateDeferredConstants(Isolate* isolate) {
+void BytecodeGenerator::AllocateDeferredConstants(Isolate* isolate,
+ Handle<Script> script) {
// Build global declaration pair arrays.
for (GlobalDeclarationsBuilder* globals_builder : global_declarations_) {
Handle<FixedArray> declarations =
- globals_builder->AllocateDeclarations(info());
+ globals_builder->AllocateDeclarations(info(), script);
if (declarations.is_null()) return SetStackOverflow();
builder()->SetDeferredConstantPoolEntry(
globals_builder->constant_pool_entry(), declarations);
@@ -839,7 +843,7 @@ void BytecodeGenerator::AllocateDeferredConstants(Isolate* isolate) {
for (std::pair<FunctionLiteral*, size_t> literal : function_literals_) {
FunctionLiteral* expr = literal.first;
Handle<SharedFunctionInfo> shared_info =
- Compiler::GetSharedFunctionInfo(expr, info()->script(), isolate);
+ Compiler::GetSharedFunctionInfo(expr, script, isolate);
if (shared_info.is_null()) return SetStackOverflow();
builder()->SetDeferredConstantPoolEntry(literal.second, shared_info);
}
@@ -876,6 +880,14 @@ void BytecodeGenerator::AllocateDeferredConstants(Isolate* isolate) {
array_literal->GetOrBuildConstantElements(isolate);
builder()->SetDeferredConstantPoolEntry(literal.second, constant_elements);
}
+
+ // Build template literals.
+ for (std::pair<GetTemplateObject*, size_t> literal : template_objects_) {
+ GetTemplateObject* get_template_object = literal.first;
+ Handle<TemplateObjectDescription> description =
+ get_template_object->GetOrBuildDescription(isolate);
+ builder()->SetDeferredConstantPoolEntry(literal.second, description);
+ }
}
void BytecodeGenerator::GenerateBytecode(uintptr_t stack_limit) {
@@ -1025,7 +1037,7 @@ void BytecodeGenerator::VisitIterationHeader(int first_suspend_id,
.JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &not_resuming);
// Otherwise this is an error.
- BuildAbort(BailoutReason::kInvalidJumpTableIndex);
+ builder()->Abort(BailoutReason::kInvalidJumpTableIndex);
builder()->Bind(&not_resuming);
}
@@ -1057,7 +1069,7 @@ void BytecodeGenerator::BuildGeneratorPrologue() {
}
// We fall through when the generator state is not in the jump table.
// TODO(leszeks): Only generate this for debug builds.
- BuildAbort(BailoutReason::kInvalidJumpTableIndex);
+ builder()->Abort(BailoutReason::kInvalidJumpTableIndex);
// This is a regular call.
builder()
@@ -1368,11 +1380,6 @@ void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
}
}
-void BytecodeGenerator::VisitCaseClause(CaseClause* clause) {
- // Handled entirely in VisitSwitchStatement.
- UNREACHABLE();
-}
-
void BytecodeGenerator::VisitIterationBody(IterationStatement* stmt,
LoopBuilder* loop_builder) {
loop_builder->LoopBody();
@@ -1525,6 +1532,7 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
}
BytecodeLabel subject_null_label, subject_undefined_label;
+ FeedbackSlot slot = stmt->ForInFeedbackSlot();
// Prepare the state for executing ForIn.
builder()->SetExpressionAsStatementPosition(stmt->subject());
@@ -1537,7 +1545,8 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Used as kRegTriple and kRegPair in ForInPrepare and ForInNext.
RegisterList triple = register_allocator()->NewRegisterList(3);
Register cache_length = triple[2];
- builder()->ForInPrepare(receiver, triple);
+ builder()->ForInEnumerate(receiver);
+ builder()->ForInPrepare(triple, feedback_index(slot));
// Set up loop counter
Register index = register_allocator()->NewRegister();
@@ -1551,7 +1560,6 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
builder()->SetExpressionAsStatementPosition(stmt->each());
builder()->ForInContinue(index, cache_length);
loop_builder.BreakIfFalse(ToBooleanMode::kAlreadyBoolean);
- FeedbackSlot slot = stmt->ForInFeedbackSlot();
builder()->ForInNext(receiver, index, triple.Truncate(2),
feedback_index(slot));
loop_builder.ContinueIfUndefined();
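
ForInPrepare no longer takes the receiver: it consumes the enumerator that the new ForInEnumerate bytecode leaves in the accumulator, and it now records feedback of its own. A standalone model of the resulting loop protocol (assumption: a std::map stands in for the receiver and its enum cache):

    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    int main() {
      std::map<std::string, int> receiver = {{"a", 1}, {"b", 2}, {"c", 3}};

      // ForInEnumerate: compute the enumerable keys (or reuse a cached map).
      std::vector<std::string> cache_array;
      for (const auto& kv : receiver) cache_array.push_back(kv.first);

      // ForInPrepare: unpack the enumerator into the cache_info_triple
      // (cache_type, cache_array, cache_length); only two matter here.
      std::size_t cache_length = cache_array.size();

      receiver.erase("b");  // mutation while iterating

      // ForInContinue / ForInNext / ForInStep:
      for (std::size_t index = 0; index < cache_length; ++index) {
        const std::string& key = cache_array[index];
        // ForInNext yields undefined for keys gone from the receiver,
        // which ContinueIfUndefined then skips.
        if (receiver.count(key) == 0) continue;
        std::cout << key << " -> " << receiver[key] << "\n";
      }
    }
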
@@ -1738,12 +1746,11 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
BuildClassLiteralNameProperty(expr, constructor);
builder()->CallRuntime(Runtime::kToFastProperties, constructor);
// Assign to class variable.
- if (expr->class_variable_proxy() != nullptr) {
- VariableProxy* proxy = expr->class_variable_proxy();
- FeedbackSlot slot =
- expr->NeedsProxySlot() ? expr->ProxySlot() : FeedbackSlot::Invalid();
- BuildVariableAssignment(proxy->var(), Token::INIT, slot,
- HoleCheckMode::kElided);
+ if (expr->class_variable() != nullptr) {
+ DCHECK(expr->class_variable()->IsStackLocal() ||
+ expr->class_variable()->IsContextSlot());
+ BuildVariableAssignment(expr->class_variable(), Token::INIT,
+ FeedbackSlot::Invalid(), HoleCheckMode::kElided);
}
}
@@ -1928,7 +1935,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
object_literals_.push_back(std::make_pair(expr, entry));
}
// TODO(cbruni): Directly generate runtime call for literals we cannot
- // optimize once the FastCloneShallowObject stub is in sync with the TF
+ // optimize once the CreateShallowObjectLiteral stub is in sync with the TF
// optimizations.
builder()->CreateObjectLiteral(entry, literal_index, flags, literal);
@@ -2303,10 +2310,9 @@ void BytecodeGenerator::BuildAsyncReturn(int source_position) {
.CallRuntime(Runtime::kInlineAsyncGeneratorResolve, args);
} else {
DCHECK(IsAsyncFunction(info()->literal()->kind()));
- RegisterList args = register_allocator()->NewRegisterList(3);
- Register receiver = args[0];
- Register promise = args[1];
- Register return_value = args[2];
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ Register promise = args[0];
+ Register return_value = args[1];
builder()->StoreAccumulatorInRegister(return_value);
Variable* var_promise = closure_scope()->promise_var();
@@ -2315,8 +2321,6 @@ void BytecodeGenerator::BuildAsyncReturn(int source_position) {
HoleCheckMode::kElided);
builder()
->StoreAccumulatorInRegister(promise)
- .LoadUndefined()
- .StoreAccumulatorInRegister(receiver)
.CallJSRuntime(Context::PROMISE_RESOLVE_INDEX, args)
.LoadAccumulatorWithRegister(promise);
}
@@ -2326,16 +2330,6 @@ void BytecodeGenerator::BuildAsyncReturn(int source_position) {
void BytecodeGenerator::BuildReThrow() { builder()->ReThrow(); }
-void BytecodeGenerator::BuildAbort(BailoutReason bailout_reason) {
- RegisterAllocationScope register_scope(this);
- Register reason = register_allocator()->NewRegister();
- builder()
- ->LoadLiteral(Smi::FromInt(static_cast<int>(bailout_reason)))
- .StoreAccumulatorInRegister(reason)
- .CallRuntime(Runtime::kAbort, reason);
-}
-
-
void BytecodeGenerator::BuildThrowIfHole(Variable* variable) {
if (variable->is_this()) {
DCHECK(variable->mode() == CONST);
@@ -2468,7 +2462,9 @@ void BytecodeGenerator::BuildVariableAssignment(
}
void BytecodeGenerator::VisitAssignment(Assignment* expr) {
- DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
+ DCHECK(expr->target()->IsValidReferenceExpression() ||
+ (expr->op() == Token::INIT && expr->target()->IsVariableProxy() &&
+ expr->target()->AsVariableProxy()->is_this()));
Register object, key;
RegisterList super_property_args;
const AstRawString* name;
@@ -3392,10 +3388,6 @@ void BytecodeGenerator::VisitCallNew(CallNew* expr) {
void BytecodeGenerator::VisitCallRuntime(CallRuntime* expr) {
if (expr->is_jsruntime()) {
RegisterList args = register_allocator()->NewGrowableRegisterList();
- // Allocate a register for the receiver and load it with undefined.
- // TODO(leszeks): If CallJSRuntime always has an undefined receiver, use the
- // same mechanism as CallUndefinedReceiver.
- BuildPushUndefinedIntoRegisterList(&args);
VisitArguments(expr->arguments(), &args);
builder()->CallJSRuntime(expr->context_index(), args);
} else {
@@ -3462,12 +3454,14 @@ void BytecodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::Value::DELETE:
VisitDelete(expr);
break;
- case Token::Value::BIT_NOT:
case Token::Value::ADD:
case Token::Value::SUB:
- // These operators are converted to an equivalent binary operators in
- // the parser. These operators are not expected to be visited here.
- UNREACHABLE();
+ case Token::Value::BIT_NOT:
+ VisitForAccumulatorValue(expr->expression());
+ builder()->SetExpressionPosition(expr);
+ builder()->UnaryOperation(
+ expr->op(), feedback_index(expr->UnaryOperationFeedbackSlot()));
+ break;
default:
UNREACHABLE();
}
@@ -3483,13 +3477,15 @@ void BytecodeGenerator::VisitDelete(UnaryOperation* expr) {
builder()->Delete(object, language_mode());
} else if (expr->expression()->IsVariableProxy()) {
// Delete of an unqualified identifier is allowed in sloppy mode but is
- // not allowed in strict mode. Deleting 'this' is allowed in both modes.
+ // not allowed in strict mode. Deleting 'this' and 'new.target' is allowed
+ // in both modes.
VariableProxy* proxy = expr->expression()->AsVariableProxy();
- Variable* variable = proxy->var();
- DCHECK(is_sloppy(language_mode()) || variable->is_this());
- if (variable->is_this()) {
+ DCHECK(is_sloppy(language_mode()) || proxy->is_this() ||
+ proxy->is_new_target());
+ if (proxy->is_this() || proxy->is_new_target()) {
builder()->LoadTrue();
} else {
+ Variable* variable = proxy->var();
switch (variable->location()) {
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
@@ -3526,7 +3522,7 @@ void BytecodeGenerator::VisitDelete(UnaryOperation* expr) {
}
void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
- DCHECK(expr->expression()->IsValidReferenceExpressionOrThis());
+ DCHECK(expr->expression()->IsValidReferenceExpression());
// Left-hand side can only be a property, a global or a variable slot.
Property* property = expr->expression()->AsProperty();
@@ -3593,17 +3589,17 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
// Save result for postfix expressions.
FeedbackSlot count_slot = expr->CountBinaryOpFeedbackSlot();
if (is_postfix) {
- // Convert old value into a number before saving it.
old_value = register_allocator()->NewRegister();
+ // Convert old value into a number before saving it.
// TODO(ignition): Think about adding proper PostInc/PostDec bytecodes
// instead of this ToNumber + Inc/Dec dance.
builder()
- ->ToNumber(old_value, feedback_index(count_slot))
- .LoadAccumulatorWithRegister(old_value);
+ ->ToNumber(feedback_index(count_slot))
+ .StoreAccumulatorInRegister(old_value);
}
// Perform +1/-1 operation.
- builder()->CountOperation(expr->binary_op(), feedback_index(count_slot));
+ builder()->UnaryOperation(expr->op(), feedback_index(count_slot));
// Store the value.
builder()->SetExpressionPosition(expr);
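
Since ToNumber now leaves its result in the accumulator, the postfix path converts first and then spills the old value with a plain register store. The observable semantics, modeled standalone (assumption: doubles stand in for JS numbers, so the ToNumber step is a no-op here):

    #include <cassert>

    // Models `x++`: the expression's value is ToNumber(old x), while the
    // variable becomes ToNumber(old x) + 1.
    double PostfixIncrement(double& slot) {
      double acc = slot;       // load into the accumulator
      acc = acc + 0.0;         // ToNumber (real JS may convert here)
      double old_value = acc;  // StoreAccumulatorInRegister(old_value)
      acc = acc + 1.0;         // Inc
      slot = acc;              // store back through the assignment
      return old_value;        // postfix result is the pre-increment number
    }

    int main() {
      double x = 41.0;
      assert(PostfixIncrement(x) == 41.0);
      assert(x == 42.0);
    }
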
@@ -3722,8 +3718,6 @@ void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
- // TODO(rmcilroy): Special case "x * 1.0" and "x * -1" which are generated for
- // +x and -x by the parser.
FeedbackSlot slot = expr->BinaryOperationFeedbackSlot();
Expression* subexpr;
Smi* literal;
@@ -3829,6 +3823,13 @@ void BytecodeGenerator::VisitGetIterator(GetIterator* expr) {
expr->AsyncIteratorCallFeedbackSlot());
}
+void BytecodeGenerator::VisitGetTemplateObject(GetTemplateObject* expr) {
+ builder()->SetExpressionPosition(expr);
+ size_t entry = builder()->AllocateDeferredConstantPoolEntry();
+ template_objects_.push_back(std::make_pair(expr, entry));
+ builder()->GetTemplateObject(entry);
+}
+
void BytecodeGenerator::VisitThisFunction(ThisFunction* expr) {
builder()->LoadAccumulatorWithRegister(Register::function_closure());
}
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index a879d14971..15bcdddbec 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -31,7 +31,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
explicit BytecodeGenerator(CompilationInfo* info);
void GenerateBytecode(uintptr_t stack_limit);
- Handle<BytecodeArray> FinalizeBytecode(Isolate* isolate);
+ Handle<BytecodeArray> FinalizeBytecode(Isolate* isolate,
+ Handle<Script> script);
#define DECLARE_VISIT(type) void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
@@ -63,7 +64,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
enum class TypeHint { kAny, kBoolean };
void GenerateBytecodeBody();
- void AllocateDeferredConstants(Isolate* isolate);
+ void AllocateDeferredConstants(Isolate* isolate, Handle<Script> script);
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
@@ -122,7 +123,6 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildAsyncReturn(int source_position = kNoSourcePosition);
void BuildAsyncGeneratorReturn();
void BuildReThrow();
- void BuildAbort(BailoutReason bailout_reason);
void BuildHoleCheckForVariableAssignment(Variable* variable, Token::Value op);
void BuildThrowIfHole(Variable* variable);
@@ -280,6 +280,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
native_function_literals_;
ZoneVector<std::pair<ObjectLiteral*, size_t>> object_literals_;
ZoneVector<std::pair<ArrayLiteral*, size_t>> array_literals_;
+ ZoneVector<std::pair<GetTemplateObject*, size_t>> template_objects_;
ControlScope* execution_control_;
ContextScope* execution_context_;
diff --git a/deps/v8/src/interpreter/bytecode-label.h b/deps/v8/src/interpreter/bytecode-label.h
index ef031efa1c..9622c1513e 100644
--- a/deps/v8/src/interpreter/bytecode-label.h
+++ b/deps/v8/src/interpreter/bytecode-label.h
@@ -5,6 +5,8 @@
#ifndef V8_INTERPRETER_BYTECODE_LABEL_H_
#define V8_INTERPRETER_BYTECODE_LABEL_H_
+#include <algorithm>
+
#include "src/zone/zone-containers.h"
namespace v8 {
diff --git a/deps/v8/src/interpreter/bytecode-operands.h b/deps/v8/src/interpreter/bytecode-operands.h
index 62e0bf382f..9195a72467 100644
--- a/deps/v8/src/interpreter/bytecode-operands.h
+++ b/deps/v8/src/interpreter/bytecode-operands.h
@@ -35,7 +35,8 @@ namespace interpreter {
#define UNSIGNED_FIXED_SCALAR_OPERAND_TYPE_LIST(V) \
V(Flag8, OperandTypeInfo::kFixedUnsignedByte) \
V(IntrinsicId, OperandTypeInfo::kFixedUnsignedByte) \
- V(RuntimeId, OperandTypeInfo::kFixedUnsignedShort)
+ V(RuntimeId, OperandTypeInfo::kFixedUnsignedShort) \
+ V(NativeContextIndex, OperandTypeInfo::kScalableUnsignedByte)
// Carefully ordered for operand type range checks below.
#define NON_REGISTER_OPERAND_TYPE_LIST(V) \
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index 5dbf89a250..e7bcd50611 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -93,15 +93,11 @@ namespace interpreter {
OperandType::kUImm) \
\
/* Property stores (StoreIC) operations */ \
- V(StaNamedPropertySloppy, AccumulatorUse::kRead, OperandType::kReg, \
- OperandType::kIdx, OperandType::kIdx) \
- V(StaNamedPropertyStrict, AccumulatorUse::kRead, OperandType::kReg, \
+ V(StaNamedProperty, AccumulatorUse::kRead, OperandType::kReg, \
OperandType::kIdx, OperandType::kIdx) \
V(StaNamedOwnProperty, AccumulatorUse::kRead, OperandType::kReg, \
OperandType::kIdx, OperandType::kIdx) \
- V(StaKeyedPropertySloppy, AccumulatorUse::kRead, OperandType::kReg, \
- OperandType::kReg, OperandType::kIdx) \
- V(StaKeyedPropertyStrict, AccumulatorUse::kRead, OperandType::kReg, \
+ V(StaKeyedProperty, AccumulatorUse::kRead, OperandType::kReg, \
OperandType::kReg, OperandType::kIdx) \
V(StaDataPropertyInLiteral, AccumulatorUse::kRead, OperandType::kReg, \
OperandType::kReg, OperandType::kFlag8, OperandType::kIdx) \
@@ -148,6 +144,8 @@ namespace interpreter {
/* Unary Operators */ \
V(Inc, AccumulatorUse::kReadWrite, OperandType::kIdx) \
V(Dec, AccumulatorUse::kReadWrite, OperandType::kIdx) \
+ V(Negate, AccumulatorUse::kReadWrite, OperandType::kIdx) \
+ V(BitwiseNot, AccumulatorUse::kReadWrite, OperandType::kIdx) \
V(ToBooleanLogicalNot, AccumulatorUse::kReadWrite) \
V(LogicalNot, AccumulatorUse::kReadWrite) \
V(TypeOf, AccumulatorUse::kReadWrite) \
@@ -183,7 +181,7 @@ namespace interpreter {
OperandType::kRegList, OperandType::kRegCount) \
V(CallRuntimeForPair, AccumulatorUse::kNone, OperandType::kRuntimeId, \
OperandType::kRegList, OperandType::kRegCount, OperandType::kRegOutPair) \
- V(CallJSRuntime, AccumulatorUse::kWrite, OperandType::kIdx, \
+ V(CallJSRuntime, AccumulatorUse::kWrite, OperandType::kNativeContextIndex, \
OperandType::kRegList, OperandType::kRegCount) \
\
/* Intrinsics */ \
@@ -219,7 +217,7 @@ namespace interpreter {
\
/* Cast operators */ \
V(ToName, AccumulatorUse::kRead, OperandType::kRegOut) \
- V(ToNumber, AccumulatorUse::kRead, OperandType::kRegOut, OperandType::kIdx) \
+ V(ToNumber, AccumulatorUse::kReadWrite, OperandType::kIdx) \
V(ToObject, AccumulatorUse::kRead, OperandType::kRegOut) \
\
/* Literals */ \
@@ -232,6 +230,9 @@ namespace interpreter {
OperandType::kIdx, OperandType::kFlag8, OperandType::kRegOut) \
V(CreateEmptyObjectLiteral, AccumulatorUse::kWrite) \
\
+ /* Tagged templates */ \
+ V(GetTemplateObject, AccumulatorUse::kWrite, OperandType::kIdx) \
+ \
/* Closure allocation */ \
V(CreateClosure, AccumulatorUse::kWrite, OperandType::kIdx, \
OperandType::kIdx, OperandType::kFlag8) \
@@ -287,8 +288,9 @@ namespace interpreter {
OperandType::kUImm, OperandType::kImm) \
\
/* Complex flow control For..in */ \
- V(ForInPrepare, AccumulatorUse::kNone, OperandType::kReg, \
- OperandType::kRegOutTriple) \
+ V(ForInEnumerate, AccumulatorUse::kWrite, OperandType::kReg) \
+ V(ForInPrepare, AccumulatorUse::kRead, OperandType::kRegOutTriple, \
+ OperandType::kIdx) \
V(ForInContinue, AccumulatorUse::kWrite, OperandType::kReg, \
OperandType::kReg) \
V(ForInNext, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg, \
@@ -338,7 +340,10 @@ namespace interpreter {
/* Block Coverage */ \
V(IncBlockCounter, AccumulatorUse::kNone, OperandType::kIdx) \
\
- /* Illegal bytecode (terminates execution) */ \
+ /* Execution Abort (internal error) */ \
+ V(Abort, AccumulatorUse::kNone, OperandType::kIdx) \
+ \
+ /* Illegal bytecode */ \
V(Illegal, AccumulatorUse::kNone)
// List of debug break bytecodes.
@@ -773,13 +778,13 @@ class V8_EXPORT_PRIVATE Bytecodes final {
case Bytecode::kCallUndefinedReceiver0:
case Bytecode::kCallUndefinedReceiver1:
case Bytecode::kCallUndefinedReceiver2:
+ case Bytecode::kCallJSRuntime:
return ConvertReceiverMode::kNullOrUndefined;
case Bytecode::kCallAnyReceiver:
case Bytecode::kConstruct:
case Bytecode::kCallWithSpread:
case Bytecode::kConstructWithSpread:
case Bytecode::kInvokeIntrinsic:
- case Bytecode::kCallJSRuntime:
return ConvertReceiverMode::kAny;
default:
UNREACHABLE();
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index b0e55b9ae5..2db780b979 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -517,6 +517,16 @@ Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) {
return BytecodeUnsignedOperand(operand_index, operand_size);
}
+Node* InterpreterAssembler::BytecodeOperandNativeContextIndex(
+ int operand_index) {
+ DCHECK(OperandType::kNativeContextIndex ==
+ Bytecodes::GetOperandType(bytecode_, operand_index));
+ OperandSize operand_size =
+ Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+ return ChangeUint32ToWord(
+ BytecodeUnsignedOperand(operand_index, operand_size));
+}
+
Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) {
DCHECK(OperandType::kIntrinsicId ==
Bytecodes::GetOperandType(bytecode_, operand_index));
@@ -1027,7 +1037,8 @@ Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
}
void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
- Label ok(this), interrupt_check(this, Label::kDeferred), end(this);
+ Comment("[ UpdateInterruptBudget");
+
Node* budget_offset =
IntPtrConstant(BytecodeArray::kInterruptBudgetOffset - kHeapObjectTag);
@@ -1042,28 +1053,35 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
// Make sure we include the current bytecode in the budget calculation.
Node* budget_after_bytecode =
Int32Sub(old_budget, Int32Constant(CurrentBytecodeSize()));
+
if (backward) {
new_budget.Bind(Int32Sub(budget_after_bytecode, weight));
+
+ Node* condition =
+ Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0));
+ Label ok(this), interrupt_check(this, Label::kDeferred);
+ Branch(condition, &ok, &interrupt_check);
+
+ // Perform interrupt and reset budget.
+ BIND(&interrupt_check);
+ {
+ CallRuntime(Runtime::kInterrupt, GetContext());
+ new_budget.Bind(Int32Constant(Interpreter::kInterruptBudget));
+ Goto(&ok);
+ }
+
+ BIND(&ok);
} else {
+ // For a forward jump, we know we only increase the interrupt budget, so
+ // no need to check if it's below zero.
new_budget.Bind(Int32Add(budget_after_bytecode, weight));
}
- Node* condition =
- Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0));
- Branch(condition, &ok, &interrupt_check);
-
- // Perform interrupt and reset budget.
- BIND(&interrupt_check);
- {
- CallRuntime(Runtime::kInterrupt, GetContext());
- new_budget.Bind(Int32Constant(Interpreter::InterruptBudget()));
- Goto(&ok);
- }
// Update budget.
- BIND(&ok);
StoreNoWriteBarrier(MachineRepresentation::kWord32,
BytecodeArrayTaggedPointer(), budget_offset,
new_budget.value());
+ Comment("] UpdateInterruptBudget");
}
Node* InterpreterAssembler::Advance() { return Advance(CurrentBytecodeSize()); }
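
The restructured UpdateInterruptBudget only reaches the deferred interrupt path on backward jumps, since forward weights can only grow the budget. A small standalone model of the arithmetic (assumption: the constant and the runtime call are placeholders for Interpreter::kInterruptBudget and Runtime::kInterrupt):

    #include <cassert>

    constexpr int kInterruptBudget = 144 * 8;  // assumed placeholder value
    int g_interrupts = 0;

    void RuntimeInterrupt() { ++g_interrupts; }

    int UpdateInterruptBudget(int budget, int bytecode_size, int weight,
                              bool backward) {
      // Include the current bytecode in the budget calculation.
      int after_bytecode = budget - bytecode_size;
      if (backward) {
        int new_budget = after_bytecode - weight;
        if (new_budget < 0) {             // only backward jumps can exhaust it
          RuntimeInterrupt();
          new_budget = kInterruptBudget;  // reset after servicing
        }
        return new_budget;
      }
      // Forward jump: the budget only increases, so no zero check is needed.
      return after_bytecode + weight;
    }

    int main() {
      int b = UpdateInterruptBudget(/*budget=*/3, /*bytecode_size=*/2,
                                    /*weight=*/10, /*backward=*/true);
      assert(g_interrupts == 1 && b == kInterruptBudget);
    }
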
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index 6395a1b114..312fa3198d 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -62,6 +62,9 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Returns the 32-bit unsigned runtime id immediate for bytecode operand
// |operand_index| in the current bytecode.
compiler::Node* BytecodeOperandRuntimeId(int operand_index);
+ // Returns the 32-bit unsigned native context index immediate for bytecode
+ // operand |operand_index| in the current bytecode.
+ compiler::Node* BytecodeOperandNativeContextIndex(int operand_index);
// Returns the 32-bit unsigned intrinsic id immediate for bytecode operand
// |operand_index| in the current bytecode.
compiler::Node* BytecodeOperandIntrinsicId(int operand_index);
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index 0036cdd1bd..6b2e2d8190 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -9,9 +9,9 @@
#include "src/builtins/builtins-arguments-gen.h"
#include "src/builtins/builtins-constructor-gen.h"
-#include "src/builtins/builtins-forin-gen.h"
#include "src/code-events.h"
#include "src/code-factory.h"
+#include "src/debug/debug.h"
#include "src/factory.h"
#include "src/ic/accessor-assembler.h"
#include "src/ic/binary-op-assembler.h"
@@ -636,25 +636,13 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler {
}
};
-// StaNamedPropertySloppy <object> <name_index> <slot>
+// StaNamedProperty <object> <name_index> <slot>
//
-// Calls the sloppy mode StoreIC at FeedBackVector slot <slot> for <object> and
+// Calls the StoreIC at FeedbackVector slot <slot> for <object> and
// the name in constant pool entry <name_index> with the value in the
// accumulator.
-IGNITION_HANDLER(StaNamedPropertySloppy,
- InterpreterStoreNamedPropertyAssembler) {
- Callable ic = CodeFactory::StoreICInOptimizedCode(isolate(), SLOPPY);
- StaNamedProperty(ic);
-}
-
-// StaNamedPropertyStrict <object> <name_index> <slot>
-//
-// Calls the strict mode StoreIC at FeedBackVector slot <slot> for <object> and
-// the name in constant pool entry <name_index> with the value in the
-// accumulator.
-IGNITION_HANDLER(StaNamedPropertyStrict,
- InterpreterStoreNamedPropertyAssembler) {
- Callable ic = CodeFactory::StoreICInOptimizedCode(isolate(), STRICT);
+IGNITION_HANDLER(StaNamedProperty, InterpreterStoreNamedPropertyAssembler) {
+ Callable ic = Builtins::CallableFor(isolate(), Builtins::kStoreIC);
StaNamedProperty(ic);
}
@@ -668,48 +656,25 @@ IGNITION_HANDLER(StaNamedOwnProperty, InterpreterStoreNamedPropertyAssembler) {
StaNamedProperty(ic);
}
-class InterpreterStoreKeyedPropertyAssembler : public InterpreterAssembler {
- public:
- InterpreterStoreKeyedPropertyAssembler(CodeAssemblerState* state,
- Bytecode bytecode,
- OperandScale operand_scale)
- : InterpreterAssembler(state, bytecode, operand_scale) {}
-
- void StaKeyedProperty(Callable ic) {
- Node* code_target = HeapConstant(ic.code());
- Node* object_reg_index = BytecodeOperandReg(0);
- Node* object = LoadRegister(object_reg_index);
- Node* name_reg_index = BytecodeOperandReg(1);
- Node* name = LoadRegister(name_reg_index);
- Node* value = GetAccumulator();
- Node* raw_slot = BytecodeOperandIdx(2);
- Node* smi_slot = SmiTag(raw_slot);
- Node* feedback_vector = LoadFeedbackVector();
- Node* context = GetContext();
- CallStub(ic.descriptor(), code_target, context, object, name, value,
- smi_slot, feedback_vector);
- Dispatch();
- }
-};
-
-// StaKeyedPropertySloppy <object> <key> <slot>
-//
-// Calls the sloppy mode KeyStoreIC at FeedBackVector slot <slot> for <object>
-// and the key <key> with the value in the accumulator.
-IGNITION_HANDLER(StaKeyedPropertySloppy,
- InterpreterStoreKeyedPropertyAssembler) {
- Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate(), SLOPPY);
- StaKeyedProperty(ic);
-}
-
-// StaKeyedPropertyStrict <object> <key> <slot>
+// StaKeyedProperty <object> <key> <slot>
//
-// Calls the strict mode KeyStoreIC at FeedBackVector slot <slot> for <object>
-// and the key <key> with the value in the accumulator.
-IGNITION_HANDLER(StaKeyedPropertyStrict,
- InterpreterStoreKeyedPropertyAssembler) {
- Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate(), STRICT);
- StaKeyedProperty(ic);
+// Calls the KeyedStoreIC at FeedbackVector slot <slot> for <object> and
+// the key <key> with the value in the accumulator.
+IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) {
+ Callable ic = Builtins::CallableFor(isolate(), Builtins::kKeyedStoreIC);
+ Node* code_target = HeapConstant(ic.code());
+ Node* object_reg_index = BytecodeOperandReg(0);
+ Node* object = LoadRegister(object_reg_index);
+ Node* name_reg_index = BytecodeOperandReg(1);
+ Node* name = LoadRegister(name_reg_index);
+ Node* value = GetAccumulator();
+ Node* raw_slot = BytecodeOperandIdx(2);
+ Node* smi_slot = SmiTag(raw_slot);
+ Node* feedback_vector = LoadFeedbackVector();
+ Node* context = GetContext();
+ CallStub(ic.descriptor(), code_target, context, object, name, value, smi_slot,
+ feedback_vector);
+ Dispatch();
}
// StaDataPropertyInLiteral <object> <name> <flags>
@@ -856,9 +821,11 @@ class InterpreterBinaryOpAssembler : public InterpreterAssembler {
OperandScale operand_scale)
: InterpreterAssembler(state, bytecode, operand_scale) {}
- typedef Node* (BinaryOpAssembler::*BinaryOpGenerator)(
- Node* context, Node* left, Node* right, Node* slot, Node* vector,
- Node* function, bool lhs_is_smi);
+ typedef Node* (BinaryOpAssembler::*BinaryOpGenerator)(Node* context,
+ Node* left, Node* right,
+ Node* slot,
+ Node* vector,
+ bool lhs_is_smi);
void BinaryOpWithFeedback(BinaryOpGenerator generator) {
Node* reg_index = BytecodeOperandReg(0);
@@ -867,11 +834,10 @@ class InterpreterBinaryOpAssembler : public InterpreterAssembler {
Node* context = GetContext();
Node* slot_index = BytecodeOperandIdx(1);
Node* feedback_vector = LoadFeedbackVector();
- Node* function = LoadRegister(Register::function_closure());
BinaryOpAssembler binop_asm(state());
Node* result = (binop_asm.*generator)(context, lhs, rhs, slot_index,
- feedback_vector, function, false);
+ feedback_vector, false);
SetAccumulator(result);
Dispatch();
}
@@ -882,11 +848,10 @@ class InterpreterBinaryOpAssembler : public InterpreterAssembler {
Node* context = GetContext();
Node* slot_index = BytecodeOperandIdx(1);
Node* feedback_vector = LoadFeedbackVector();
- Node* function = LoadRegister(Register::function_closure());
BinaryOpAssembler binop_asm(state());
Node* result = (binop_asm.*generator)(context, lhs, rhs, slot_index,
- feedback_vector, function, true);
+ feedback_vector, true);
SetAccumulator(result);
Dispatch();
}
@@ -1033,9 +998,8 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
Node* input_feedback =
SmiOr(var_lhs_type_feedback.value(), var_rhs_type_feedback.value());
- Node* function = LoadRegister(Register::function_closure());
UpdateFeedback(SmiOr(result_type, input_feedback), feedback_vector,
- slot_index, function);
+ slot_index);
SetAccumulator(result);
Dispatch();
}
@@ -1111,9 +1075,8 @@ IGNITION_HANDLER(BitwiseOrSmi, InterpreterAssembler) {
Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
- Node* function = LoadRegister(Register::function_closure());
UpdateFeedback(SmiOr(result_type, var_lhs_type_feedback.value()),
- feedback_vector, slot_index, function);
+ feedback_vector, slot_index);
SetAccumulator(result);
Dispatch();
}
@@ -1137,9 +1100,8 @@ IGNITION_HANDLER(BitwiseXorSmi, InterpreterAssembler) {
Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
- Node* function = LoadRegister(Register::function_closure());
UpdateFeedback(SmiOr(result_type, var_lhs_type_feedback.value()),
- feedback_vector, slot_index, function);
+ feedback_vector, slot_index);
SetAccumulator(result);
Dispatch();
}
@@ -1163,9 +1125,31 @@ IGNITION_HANDLER(BitwiseAndSmi, InterpreterAssembler) {
Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
- Node* function = LoadRegister(Register::function_closure());
UpdateFeedback(SmiOr(result_type, var_lhs_type_feedback.value()),
- feedback_vector, slot_index, function);
+ feedback_vector, slot_index);
+ SetAccumulator(result);
+ Dispatch();
+}
+
+// BitwiseNot <feedback_slot>
+//
+// Perform bitwise-not on the accumulator.
+IGNITION_HANDLER(BitwiseNot, InterpreterAssembler) {
+ Node* operand = GetAccumulator();
+ Node* slot_index = BytecodeOperandIdx(0);
+ Node* feedback_vector = LoadFeedbackVector();
+ Node* context = GetContext();
+
+ Variable var_type_feedback(this, MachineRepresentation::kTaggedSigned);
+ Node* truncated_value =
+ TruncateTaggedToWord32WithFeedback(context, operand, &var_type_feedback);
+ Node* value = Word32Not(truncated_value);
+ Node* result = ChangeInt32ToTagged(value);
+ Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
+ BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber);
+ UpdateFeedback(SmiOr(result_type, var_type_feedback.value()), feedback_vector,
+ slot_index);
SetAccumulator(result);
Dispatch();
}
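
The handler truncates the operand to a 32-bit integer before inverting, which is exactly what JS ~ requires. A standalone check of that truncation (assumption: the fmod-based wrap models V8's TruncateTaggedToWord32 for finite doubles):

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // ToInt32-style truncation: toward zero, then reduce modulo 2^32.
    int32_t ToInt32(double value) {
      if (!std::isfinite(value)) return 0;
      double m = std::fmod(std::trunc(value), 4294967296.0);
      if (m < 0) m += 4294967296.0;  // wrap into [0, 2^32)
      // The uint32 -> int32 conversion is modular two's complement.
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }

    int32_t BitwiseNot(double value) { return ~ToInt32(value); }

    int main() {
      assert(BitwiseNot(3.0) == -4);  // ~3 == -4
      assert(BitwiseNot(3.9) == -4);  // fraction dropped before inverting
      assert(BitwiseNot(-1.0) == 0);  // ~(-1) == 0
    }
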
@@ -1192,9 +1176,8 @@ IGNITION_HANDLER(ShiftLeftSmi, InterpreterAssembler) {
Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
- Node* function = LoadRegister(Register::function_closure());
UpdateFeedback(SmiOr(result_type, var_lhs_type_feedback.value()),
- feedback_vector, slot_index, function);
+ feedback_vector, slot_index);
SetAccumulator(result);
Dispatch();
}
@@ -1221,9 +1204,8 @@ IGNITION_HANDLER(ShiftRightSmi, InterpreterAssembler) {
Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
- Node* function = LoadRegister(Register::function_closure());
UpdateFeedback(SmiOr(result_type, var_lhs_type_feedback.value()),
- feedback_vector, slot_index, function);
+ feedback_vector, slot_index);
SetAccumulator(result);
Dispatch();
}
@@ -1250,13 +1232,86 @@ IGNITION_HANDLER(ShiftRightLogicalSmi, InterpreterAssembler) {
Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
- Node* function = LoadRegister(Register::function_closure());
UpdateFeedback(SmiOr(result_type, var_lhs_type_feedback.value()),
- feedback_vector, slot_index, function);
+ feedback_vector, slot_index);
SetAccumulator(result);
Dispatch();
}
+// Negate <feedback_slot>
+//
+// Perform arithmetic negation on the accumulator.
+IGNITION_HANDLER(Negate, InterpreterAssembler) {
+ Node* operand = GetAccumulator();
+
+ Label end(this);
+ VARIABLE(var_type_feedback, MachineRepresentation::kTaggedSigned);
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+
+ Label if_smi(this), if_heapnumber(this), if_notnumber(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(operand), &if_smi);
+ Branch(IsHeapNumber(operand), &if_heapnumber, &if_notnumber);
+
+ BIND(&if_smi);
+ {
+ Label if_zero(this), if_min_smi(this);
+ // Return -0 if operand is 0.
+ GotoIf(SmiEqual(operand, SmiConstant(0)), &if_zero);
+
+ // Special-case the minimum smi to avoid overflow.
+ GotoIf(SmiEqual(operand, SmiConstant(Smi::kMinValue)), &if_min_smi);
+
+ // Else simply subtract operand from 0.
+ var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kSignedSmall));
+ var_result.Bind(SmiSub(SmiConstant(0), operand));
+ Goto(&end);
+
+ BIND(&if_zero);
+ var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
+ var_result.Bind(MinusZeroConstant());
+ Goto(&end);
+
+ BIND(&if_min_smi);
+ var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
+ var_result.Bind(AllocateHeapNumberWithValue(
+ Float64Constant(-static_cast<double>(Smi::kMinValue))));
+ Goto(&end);
+ }
+
+ BIND(&if_heapnumber);
+ {
+ Node* result = Float64Neg(LoadHeapNumberValue(operand));
+ var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
+ var_result.Bind(AllocateHeapNumberWithValue(result));
+ Goto(&end);
+ }
+
+ BIND(&if_notnumber);
+ {
+ Node* instance_type = LoadInstanceType(operand);
+ Node* is_oddball = Word32Equal(instance_type, Int32Constant(ODDBALL_TYPE));
+
+ var_type_feedback.Bind(
+ SelectSmiConstant(is_oddball, BinaryOperationFeedback::kNumberOrOddball,
+ BinaryOperationFeedback::kAny));
+
+ Node* context = GetContext();
+ Node* result =
+ CallBuiltin(Builtins::kMultiply, context, operand, SmiConstant(-1));
+ var_result.Bind(result);
+ Goto(&end);
+ }
+
+ BIND(&end);
+
+ Node* slot_index = BytecodeOperandIdx(0);
+ Node* feedback_vector = LoadFeedbackVector();
+ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
+
+ SetAccumulator(var_result.value());
+ Dispatch();
+}
+
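
The two smi escapes above exist because 0 and Smi::kMinValue are the only small integers whose negation is not again a small integer: -0 is not an integer value at all, and -kMinValue overflows two's complement. A standalone demonstration (assumption: a 32-bit smi payload, as on 64-bit V8):

    #include <cassert>
    #include <cstdint>
    #include <limits>

    // Assumed smi payload width; 31 bits on 32-bit platforms instead.
    constexpr int32_t kMinSmi = std::numeric_limits<int32_t>::min();

    // True if -value is still representable as a smi-style int32.
    bool NegationStaysSmi(int32_t value) {
      if (value == 0) return false;        // -0 needs a HeapNumber
      if (value == kMinSmi) return false;  // -(-2^31) == 2^31 overflows
      return true;
    }

    int main() {
      assert(NegationStaysSmi(42));
      assert(!NegationStaysSmi(0));
      assert(!NegationStaysSmi(kMinSmi));
      // The handler materializes the overflow case as a double instead:
      assert(-static_cast<double>(kMinSmi) == 2147483648.0);
    }
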
// ToName <dst>
//
// Convert the object referenced by the accumulator to a name.
@@ -1268,7 +1323,7 @@ IGNITION_HANDLER(ToName, InterpreterAssembler) {
Dispatch();
}
-// ToNumber <dst> <slot>
+// ToNumber <slot>
//
// Convert the object referenced by the accumulator to a number.
IGNITION_HANDLER(ToNumber, InterpreterAssembler) {
@@ -1307,15 +1362,13 @@ IGNITION_HANDLER(ToNumber, InterpreterAssembler) {
}
BIND(&if_done);
- StoreRegister(var_result.value(), BytecodeOperandReg(0));
// Record the type feedback collected for {object}.
- Node* slot_index = BytecodeOperandIdx(1);
+ Node* slot_index = BytecodeOperandIdx(0);
Node* feedback_vector = LoadFeedbackVector();
- Node* function = LoadRegister(Register::function_closure());
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index,
- function);
+ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
+ SetAccumulator(var_result.value());
Dispatch();
}
@@ -1449,9 +1502,7 @@ IGNITION_HANDLER(Inc, InterpreterAssembler) {
}
BIND(&end);
- Node* function = LoadRegister(Register::function_closure());
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index,
- function);
+ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
SetAccumulator(result_var.value());
Dispatch();
@@ -1574,9 +1625,7 @@ IGNITION_HANDLER(Dec, InterpreterAssembler) {
}
BIND(&end);
- Node* function = LoadRegister(Register::function_closure());
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index,
- function);
+ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
SetAccumulator(result_var.value());
Dispatch();
@@ -1878,12 +1927,10 @@ IGNITION_HANDLER(CallRuntimeForPair, InterpreterAssembler) {
// Call the JS runtime function at |context_index| with the receiver
// in register |receiver| and |arg_count| arguments in subsequent registers.
IGNITION_HANDLER(CallJSRuntime, InterpreterAssembler) {
- Node* context_index = BytecodeOperandIdx(0);
+ Node* context_index = BytecodeOperandNativeContextIndex(0);
Node* receiver_reg = BytecodeOperandReg(1);
Node* first_arg = RegisterLocation(receiver_reg);
- Node* receiver_args_count = BytecodeOperandCount(2);
- Node* receiver_count = Int32Constant(1);
- Node* args_count = Int32Sub(receiver_args_count, receiver_count);
+ Node* args_count = BytecodeOperandCount(2);
// Get the function to call from the native context.
Node* context = GetContext();
@@ -1892,7 +1939,7 @@ IGNITION_HANDLER(CallJSRuntime, InterpreterAssembler) {
// Call the function.
CallJSAndDispatch(function, context, first_arg, args_count,
- ConvertReceiverMode::kAny);
+ ConvertReceiverMode::kNullOrUndefined);
}
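
On the caller side nothing is reserved for the receiver anymore; ConvertReceiverMode::kNullOrUndefined lets the call sequence materialize undefined itself. A toy model of the register-count accounting before and after (assumed plain integers in place of Node* counts):

    #include <cassert>

    // Before: the generator pushed an explicit undefined receiver, so the
    // handler subtracted it from the register-list count.
    int ArgsCountOld(int receiver_args_count) { return receiver_args_count - 1; }

    // After: the register list holds only real arguments; the undefined
    // receiver is implied by ConvertReceiverMode::kNullOrUndefined.
    int ArgsCountNew(int args_count) { return args_count; }

    int main() {
      assert(ArgsCountOld(/*receiver + 2 args=*/3) == 2);
      assert(ArgsCountNew(/*2 args=*/2) == 2);
    }
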
// CallWithSpread <callable> <first_arg> <arg_count>
@@ -2006,9 +2053,7 @@ class InterpreterCompareOpAssembler : public InterpreterAssembler {
Node* slot_index = BytecodeOperandIdx(1);
Node* feedback_vector = LoadFeedbackVector();
- Node* function = LoadRegister(Register::function_closure());
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index,
- function);
+ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
SetAccumulator(result);
Dispatch();
}
@@ -2598,15 +2643,15 @@ IGNITION_HANDLER(SwitchOnSmiNoFeedback, InterpreterAssembler) {
// Creates a regular expression literal for literal index <literal_idx> with
// <flags> and the pattern in <pattern_idx>.
IGNITION_HANDLER(CreateRegExpLiteral, InterpreterAssembler) {
- Node* index = BytecodeOperandIdx(0);
- Node* pattern = LoadConstantPoolEntry(index);
- Node* literal_index = BytecodeOperandIdxSmi(1);
+ Node* pattern_index = BytecodeOperandIdx(0);
+ Node* pattern = LoadConstantPoolEntry(pattern_index);
+ Node* feedback_vector = LoadFeedbackVector();
+ Node* slot_id = BytecodeOperandIdx(1);
Node* flags = SmiFromWord32(BytecodeOperandFlag(2));
- Node* closure = LoadRegister(Register::function_closure());
Node* context = GetContext();
ConstructorBuiltinsAssembler constructor_assembler(state());
- Node* result = constructor_assembler.EmitFastCloneRegExp(
- closure, literal_index, pattern, flags, context);
+ Node* result = constructor_assembler.EmitCreateRegExpLiteral(
+ feedback_vector, slot_id, pattern, flags, context);
SetAccumulator(result);
Dispatch();
}
@@ -2616,8 +2661,8 @@ IGNITION_HANDLER(CreateRegExpLiteral, InterpreterAssembler) {
// Creates an array literal for literal index <literal_idx> with
// CreateArrayLiteral flags <flags> and constant elements in <element_idx>.
IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
- Node* literal_index = BytecodeOperandIdxSmi(1);
- Node* closure = LoadRegister(Register::function_closure());
+ Node* feedback_vector = LoadFeedbackVector();
+ Node* slot_id = BytecodeOperandIdx(1);
Node* context = GetContext();
Node* bytecode_flags = BytecodeOperandFlag(2);
@@ -2629,8 +2674,9 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
BIND(&fast_shallow_clone);
{
ConstructorBuiltinsAssembler constructor_assembler(state());
- Node* result = constructor_assembler.EmitFastCloneShallowArray(
- closure, literal_index, context, &call_runtime, TRACK_ALLOCATION_SITE);
+ Node* result = constructor_assembler.EmitCreateShallowArrayLiteral(
+ feedback_vector, slot_id, context, &call_runtime,
+ TRACK_ALLOCATION_SITE);
SetAccumulator(result);
Dispatch();
}
@@ -2642,8 +2688,9 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
Node* flags = SmiTag(flags_raw);
Node* index = BytecodeOperandIdx(0);
Node* constant_elements = LoadConstantPoolEntry(index);
- Node* result = CallRuntime(Runtime::kCreateArrayLiteral, context, closure,
- literal_index, constant_elements, flags);
+ Node* result =
+ CallRuntime(Runtime::kCreateArrayLiteral, context, feedback_vector,
+ SmiTag(slot_id), constant_elements, flags);
SetAccumulator(result);
Dispatch();
}
@@ -2653,12 +2700,12 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
//
// Creates an empty JSArray literal for literal index <literal_idx>.
IGNITION_HANDLER(CreateEmptyArrayLiteral, InterpreterAssembler) {
- Node* literal_index = BytecodeOperandIdxSmi(0);
- Node* closure = LoadRegister(Register::function_closure());
+ Node* feedback_vector = LoadFeedbackVector();
+ Node* slot_id = BytecodeOperandIdx(0);
Node* context = GetContext();
ConstructorBuiltinsAssembler constructor_assembler(state());
Node* result = constructor_assembler.EmitCreateEmptyArrayLiteral(
- closure, literal_index, context);
+ feedback_vector, slot_id, context);
SetAccumulator(result);
Dispatch();
}
@@ -2668,9 +2715,9 @@ IGNITION_HANDLER(CreateEmptyArrayLiteral, InterpreterAssembler) {
// Creates an object literal for literal index <literal_idx> with
// CreateObjectLiteralFlags <flags> and constant elements in <element_idx>.
IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
- Node* literal_index = BytecodeOperandIdxSmi(1);
+ Node* feedback_vector = LoadFeedbackVector();
+ Node* slot_id = BytecodeOperandIdx(1);
Node* bytecode_flags = BytecodeOperandFlag(2);
- Node* closure = LoadRegister(Register::function_closure());
// Check if we can do a fast clone or have to call the runtime.
Label if_fast_clone(this), if_not_fast_clone(this, Label::kDeferred);
@@ -2680,10 +2727,10 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
BIND(&if_fast_clone);
{
- // If we can do a fast clone do the fast-path in FastCloneShallowObjectStub.
+ // If we can do a fast clone do the fast-path in CreateShallowObjectLiteral.
ConstructorBuiltinsAssembler constructor_assembler(state());
- Node* result = constructor_assembler.EmitFastCloneShallowObject(
- &if_not_fast_clone, closure, literal_index);
+ Node* result = constructor_assembler.EmitCreateShallowObjectLiteral(
+ feedback_vector, slot_id, &if_not_fast_clone);
StoreRegister(result, BytecodeOperandReg(3));
Dispatch();
}
@@ -2699,8 +2746,9 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
bytecode_flags);
Node* flags = SmiTag(flags_raw);
- Node* result = CallRuntime(Runtime::kCreateObjectLiteral, context, closure,
- literal_index, boilerplate_description, flags);
+ Node* result =
+ CallRuntime(Runtime::kCreateObjectLiteral, context, feedback_vector,
+ SmiTag(slot_id), boilerplate_description, flags);
StoreRegister(result, BytecodeOperandReg(3));
// TODO(klaasb) build a single dispatch once the call is inlined
Dispatch();
@@ -2718,6 +2766,21 @@ IGNITION_HANDLER(CreateEmptyObjectLiteral, InterpreterAssembler) {
Dispatch();
}
+// GetTemplateObject
+//
+// Creates the template to pass for tagged templates and returns it in the
+// accumulator, creating and caching the site object on-demand as per the
+// specification.
+IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) {
+ Node* description_index = BytecodeOperandIdx(0);
+ Node* description = LoadConstantPoolEntry(description_index);
+ Node* context = GetContext();
+
+ Node* result = CallRuntime(Runtime::kGetTemplateObject, context, description);
+ SetAccumulator(result);
+ Dispatch();
+}
+
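A hedged aside: the handler above backs JavaScript tagged templates. A minimal sketch of the caching behavior its comment describes (function names are illustrative, not from the patch):

    function tag(strings) { return strings; }
    function f() { return tag`a${1}b`; }
    f() === f();  // true: the site object is created once, then cached
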
// CreateClosure <index> <slot> <tenured>
//
// Creates a new closure for SharedFunctionInfo at position |index| in the
@@ -2954,6 +3017,15 @@ IGNITION_HANDLER(ReThrow, InterpreterAssembler) {
Abort(kUnexpectedReturnFromThrow);
}
+// Abort <bailout_reason>
+//
+// Aborts execution (via a call to the runtime function).
+IGNITION_HANDLER(Abort, InterpreterAssembler) {
+ Node* reason = BytecodeOperandIdx(0);
+ CallRuntime(Runtime::kAbort, NoContextConstant(), SmiTag(reason));
+ Unreachable();
+}
+
// Return
//
// Return the value in the accumulator.
@@ -3076,51 +3148,100 @@ class InterpreterForInPrepareAssembler : public InterpreterAssembler {
}
};
-// ForInPrepare <receiver> <cache_info_triple>
+// ForInEnumerate <receiver>
+//
+// Enumerates the enumerable keys of the |receiver| and returns, in the
+// accumulator, either the map of the |receiver| (if it has a usable enum
+// cache) or a fixed array with the keys to enumerate.
+IGNITION_HANDLER(ForInEnumerate, InterpreterAssembler) {
+ Node* receiver_register = BytecodeOperandReg(0);
+ Node* receiver = LoadRegister(receiver_register);
+ Node* context = GetContext();
+
+ Label if_empty(this), if_runtime(this, Label::kDeferred);
+ Node* receiver_map = CheckEnumCache(receiver, &if_empty, &if_runtime);
+ SetAccumulator(receiver_map);
+ Dispatch();
+
+ BIND(&if_empty);
+ {
+ Node* result = EmptyFixedArrayConstant();
+ SetAccumulator(result);
+ Dispatch();
+ }
+
+ BIND(&if_runtime);
+ {
+ Node* result = CallRuntime(Runtime::kForInEnumerate, context, receiver);
+ SetAccumulator(result);
+ Dispatch();
+ }
+}
+
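A hedged sketch of the three outcomes above, in JavaScript terms (the Proxy case is an assumption about what reaches the runtime path):

    const plain = {a: 1, b: 2};
    for (const k in plain) {}              // map with a usable enum cache
    for (const k in {}) {}                 // nothing enumerable: empty fixed array
    for (const k in new Proxy({}, {})) {}  // runtime call; keys come back in a fixed array
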
+// ForInPrepare <cache_info_triple>
//
-// Returns state for for..in loop execution based on the object in the register
-// |receiver|. The object must not be null or undefined and must have been
-// converted to a receiver already.
+// Returns state for for..in loop execution based on the enumerator in
+// the accumulator register, which is the result of calling ForInEnumerate
+// on a JSReceiver object.
// The result is output in registers |cache_info_triple| to
// |cache_info_triple + 2|, with the registers holding cache_type, cache_array,
// and cache_length respectively.
IGNITION_HANDLER(ForInPrepare, InterpreterForInPrepareAssembler) {
- Node* object_register = BytecodeOperandReg(0);
- Node* output_register = BytecodeOperandReg(1);
- Node* receiver = LoadRegister(object_register);
- Node* context = GetContext();
+ Node* enumerator = GetAccumulator();
+ Node* output_register = BytecodeOperandReg(0);
+ Node* vector_index = BytecodeOperandIdx(1);
+ Node* feedback_vector = LoadFeedbackVector();
- Node* cache_type;
- Node* cache_array;
- Node* cache_length;
- Label call_runtime(this, Label::kDeferred),
- nothing_to_iterate(this, Label::kDeferred);
+ // The {enumerator} is either a Map or a FixedArray.
+ CSA_ASSERT(this, TaggedIsNotSmi(enumerator));
- ForInBuiltinsAssembler forin_assembler(state());
- std::tie(cache_type, cache_array, cache_length) =
- forin_assembler.EmitForInPrepare(receiver, context, &call_runtime,
- &nothing_to_iterate);
+ // Check if we're using an enum cache.
+ Label if_fast(this), if_slow(this);
+ Branch(IsMap(enumerator), &if_fast, &if_slow);
- BuildForInPrepareResult(output_register, cache_type, cache_array,
- cache_length);
- Dispatch();
-
- BIND(&call_runtime);
+ BIND(&if_fast);
{
- Node* result_triple =
- CallRuntime(Runtime::kForInPrepare, context, receiver);
- Node* cache_type = Projection(0, result_triple);
- Node* cache_array = Projection(1, result_triple);
- Node* cache_length = Projection(2, result_triple);
+ // Load the enumeration length and cache from the {enumerator}.
+ Node* enum_length = LoadMapEnumLength(enumerator);
+ CSA_ASSERT(this, WordNotEqual(enum_length,
+ IntPtrConstant(kInvalidEnumCacheSentinel)));
+ Node* descriptors = LoadMapDescriptors(enumerator);
+ Node* enum_cache =
+ LoadObjectField(descriptors, DescriptorArray::kEnumCacheOffset);
+ Node* enum_keys = LoadObjectField(enum_cache, EnumCache::kKeysOffset);
+
+ // Check if we have enum indices available.
+ Node* enum_indices = LoadObjectField(enum_cache, EnumCache::kIndicesOffset);
+ Node* enum_indices_length = LoadAndUntagFixedArrayBaseLength(enum_indices);
+ Node* feedback = SelectSmiConstant(
+ IntPtrLessThanOrEqual(enum_length, enum_indices_length),
+ ForInFeedback::kEnumCacheKeysAndIndices, ForInFeedback::kEnumCacheKeys);
+ UpdateFeedback(feedback, feedback_vector, vector_index);
+
+ // Construct the cache info triple.
+ Node* cache_type = enumerator;
+ Node* cache_array = enum_keys;
+ Node* cache_length = SmiTag(enum_length);
BuildForInPrepareResult(output_register, cache_type, cache_array,
cache_length);
Dispatch();
}
- BIND(&nothing_to_iterate);
+
+ BIND(&if_slow);
{
- // Receiver is null or undefined or descriptors are zero length.
- Node* zero = SmiConstant(0);
- BuildForInPrepareResult(output_register, zero, zero, zero);
+ // The {enumerator} is a FixedArray with all the keys to iterate.
+ CSA_ASSERT(this, IsFixedArray(enumerator));
+
+ // Record the fact that we hit the for-in slow-path.
+ UpdateFeedback(SmiConstant(ForInFeedback::kAny), feedback_vector,
+ vector_index);
+
+ // Construct the cache info triple.
+ Node* cache_type = enumerator;
+ Node* cache_array = enumerator;
+ Node* cache_length = LoadFixedArrayBaseLength(enumerator);
+ BuildForInPrepareResult(output_register, cache_type, cache_array,
+ cache_length);
Dispatch();
}
}
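
Another hedged sketch: the feedback recorded here (kEnumCacheKeysAndIndices, kEnumCacheKeys, or kAny) is what later lets the optimizing compiler pick a for-in strategy. Illustrative only:

    function countKeys(o) { let n = 0; for (const k in o) n++; return n; }
    countKeys({x: 1, y: 2});       // Map enumerator: enum-cache feedback recorded
    countKeys(new Proxy({}, {}));  // FixedArray enumerator: records ForInFeedback::kAny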
@@ -3150,34 +3271,15 @@ IGNITION_HANDLER(ForInNext, InterpreterAssembler) {
Branch(WordEqual(receiver_map, cache_type), &if_fast, &if_slow);
BIND(&if_fast);
{
- // Check if we need to transition to megamorphic state.
- Node* feedback_value =
- LoadFeedbackVectorSlot(feedback_vector, vector_index);
- Node* uninitialized_sentinel =
- HeapConstant(FeedbackVector::UninitializedSentinel(isolate()));
- Label if_done(this);
- GotoIfNot(WordEqual(feedback_value, uninitialized_sentinel), &if_done);
- {
- // Transition to megamorphic state.
- Node* megamorphic_sentinel =
- HeapConstant(FeedbackVector::MegamorphicSentinel(isolate()));
- StoreFeedbackVectorSlot(feedback_vector, vector_index,
- megamorphic_sentinel, SKIP_WRITE_BARRIER);
- }
- Goto(&if_done);
-
// Enum cache in use for {receiver}, the {key} is definitely valid.
- BIND(&if_done);
SetAccumulator(key);
Dispatch();
}
BIND(&if_slow);
{
- // Record the fact that we hit the for-in slow path.
- Node* generic_sentinel =
- HeapConstant(FeedbackVector::GenericSentinel(isolate()));
- StoreFeedbackVectorSlot(feedback_vector, vector_index, generic_sentinel,
- SKIP_WRITE_BARRIER);
+ // Record the fact that we hit the for-in slow-path.
+ UpdateFeedback(SmiConstant(ForInFeedback::kAny), feedback_vector,
+ vector_index);
// Need to filter the {key} for the {receiver}.
Node* context = GetContext();
@@ -3344,7 +3446,7 @@ Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
Zone zone(isolate->allocator(), ZONE_NAME);
InterpreterDispatchDescriptor descriptor(isolate);
compiler::CodeAssemblerState state(
- isolate, &zone, descriptor, Code::ComputeFlags(Code::BYTECODE_HANDLER),
+ isolate, &zone, descriptor, Code::BYTECODE_HANDLER,
Bytecodes::ToString(bytecode), Bytecodes::ReturnCount(bytecode));
switch (bytecode) {
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 0509facc3c..420f17adc6 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -7,14 +7,18 @@
#include <fstream>
#include <memory>
+#include "src/ast/prettyprinter.h"
+#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
-#include "src/counters.h"
+#include "src/counters-inl.h"
#include "src/interpreter/bytecode-generator.h"
#include "src/interpreter/bytecodes.h"
#include "src/log.h"
-#include "src/objects.h"
+#include "src/objects-inl.h"
+#include "src/objects/shared-function-info.h"
+#include "src/parsing/parse-info.h"
#include "src/setup-isolate.h"
#include "src/visitors.h"
@@ -117,13 +121,34 @@ void Interpreter::IterateDispatchTable(RootVisitor* v) {
}
}
-// static
-int Interpreter::InterruptBudget() {
- return FLAG_interrupt_budget * kCodeSizeMultiplier;
-}
-
namespace {
+void MaybePrintAst(ParseInfo* parse_info, CompilationInfo* compilation_info) {
+ Isolate* isolate = compilation_info->isolate();
+ bool print_ast = isolate->bootstrapper()->IsActive() ? FLAG_print_builtin_ast
+ : FLAG_print_ast;
+ if (!print_ast) return;
+
+ // Requires internalizing the AST, so make sure we are on the main thread and
+ // allow handle dereference and allocations.
+ // TODO(rmcilroy): Make ast-printer print ast raw strings instead of
+ // internalized strings to avoid internalizing here.
+ DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
+ AllowHandleDereference allow_deref;
+ AllowHandleAllocation allow_handles;
+ AllowHeapAllocation allow_gc;
+ parse_info->ast_value_factory()->Internalize(isolate);
+
+ OFStream os(stdout);
+ std::unique_ptr<char[]> name = compilation_info->GetDebugName();
+ os << "[generating bytecode for function: "
+     << name.get() << "]" << std::endl;
+#ifdef DEBUG
+ os << "--- AST ---" << std::endl
+ << AstPrinter(isolate).PrintProgram(parse_info->literal()) << std::endl;
+#endif // DEBUG
+}
+
bool ShouldPrintBytecode(Handle<SharedFunctionInfo> shared) {
if (!FLAG_print_bytecode) return false;
@@ -149,9 +174,7 @@ InterpreterCompilationJob::InterpreterCompilationJob(ParseInfo* parse_info,
background_execute_counter_("CompileBackgroundIgnition") {}
InterpreterCompilationJob::Status InterpreterCompilationJob::PrepareJobImpl() {
- // TODO(5203): Move code out of codegen.cc once FCG goes away.
- CodeGenerator::MakeCodePrologue(parse_info(), compilation_info(),
- "interpreter");
+ MaybePrintAst(parse_info(), compilation_info());
return SUCCEEDED;
}
@@ -184,7 +207,8 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl() {
!executed_on_background_thread() ? runtime_call_stats_ : nullptr,
&RuntimeCallStats::CompileIgnitionFinalization);
- Handle<BytecodeArray> bytecodes = generator()->FinalizeBytecode(isolate());
+ Handle<BytecodeArray> bytecodes =
+ generator()->FinalizeBytecode(isolate(), parse_info()->script());
if (generator()->HasStackOverflow()) {
return FAILED;
}
@@ -192,7 +216,7 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl() {
if (ShouldPrintBytecode(compilation_info()->shared_info())) {
OFStream os(stdout);
std::unique_ptr<char[]> name = compilation_info()->GetDebugName();
- os << "[generating bytecode for function: "
+ os << "[generated bytecode for function: "
<< compilation_info()->GetDebugName().get() << "]" << std::endl;
bytecodes->Disassemble(os);
os << std::flush;
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index 19c881e9cd..04d6435620 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -37,9 +37,6 @@ class Interpreter {
explicit Interpreter(Isolate* isolate);
virtual ~Interpreter() {}
- // Returns the interrupt budget which should be used for the profiler counter.
- static int InterruptBudget();
-
// Creates a compilation job which will generate bytecode for |literal|.
static CompilationJob* NewCompilationJob(ParseInfo* parse_info,
FunctionLiteral* literal,
@@ -64,8 +61,8 @@ class Interpreter {
return reinterpret_cast<Address>(bytecode_dispatch_counters_table_.get());
}
- // TODO(ignition): Tune code size multiplier.
- static const int kCodeSizeMultiplier = 24;
+ // The interrupt budget which should be used for the profiler counter.
+ static const int kInterruptBudget = 144 * KB;
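For reference, 144 * KB works out to 144 × 1024 = 147,456: the budget becomes a fixed constant, replacing the FLAG_interrupt_budget * kCodeSizeMultiplier computation that the removed Interpreter::InterruptBudget() performed.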
private:
friend class SetupInterpreter;
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h
index f891161aca..a914811e5c 100644
--- a/deps/v8/src/isolate-inl.h
+++ b/deps/v8/src/isolate-inl.h
@@ -47,12 +47,12 @@ bool Isolate::has_pending_exception() {
return !thread_local_top_.pending_exception_->IsTheHole(this);
}
-Object* Isolate::get_wasm_caught_exception() const {
+Object* Isolate::get_wasm_caught_exception() {
return thread_local_top_.wasm_caught_exception_;
}
-void Isolate::set_wasm_caught_exception(Object* exception_obj) {
- thread_local_top_.wasm_caught_exception_ = exception_obj;
+void Isolate::set_wasm_caught_exception(Object* exception) {
+ thread_local_top_.wasm_caught_exception_ = exception;
}
void Isolate::clear_wasm_caught_exception() {
@@ -82,24 +82,19 @@ void Isolate::clear_scheduled_exception() {
thread_local_top_.scheduled_exception_ = heap_.the_hole_value();
}
-
bool Isolate::is_catchable_by_javascript(Object* exception) {
return exception != heap()->termination_exception();
}
-bool Isolate::is_catchable_by_wasm(Object* exception) {
- if (!is_catchable_by_javascript(exception) || !exception->IsJSError())
- return false;
- HandleScope scope(this);
- Handle<Object> exception_handle(exception, this);
- return JSReceiver::HasProperty(Handle<JSReceiver>::cast(exception_handle),
- factory()->WasmExceptionTag_string())
- .IsJust();
+void Isolate::FireBeforeCallEnteredCallback() {
+ for (auto& callback : before_call_entered_callbacks_) {
+ callback(reinterpret_cast<v8::Isolate*>(this));
+ }
}
-void Isolate::FireBeforeCallEnteredCallback() {
- for (int i = 0; i < before_call_entered_callbacks_.length(); i++) {
- before_call_entered_callbacks_.at(i)(reinterpret_cast<v8::Isolate*>(this));
+void Isolate::FireMicrotasksCompletedCallback() {
+ for (auto& callback : microtasks_completed_callbacks_) {
+ callback(reinterpret_cast<v8::Isolate*>(this));
}
}
@@ -131,6 +126,11 @@ Isolate::ExceptionScope::~ExceptionScope() {
NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
#undef NATIVE_CONTEXT_FIELD_ACCESSOR
+bool Isolate::IsArrayConstructorIntact() {
+ Cell* array_constructor_cell = heap()->array_constructor_protector();
+ return array_constructor_cell->value() == Smi::FromInt(kProtectorValid);
+}
+
bool Isolate::IsArraySpeciesLookupChainIntact() {
// Note: It would be nice to have debug checks to make sure that the
// species protector is accurate, but this would be hard to do for most of
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 64c082b429..9312432763 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -9,6 +9,7 @@
#include <fstream> // NOLINT(readability/streams)
#include <sstream>
+#include "src/api.h"
#include "src/assembler-inl.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/context-slot-cache.h"
@@ -47,12 +48,12 @@
#include "src/simulator.h"
#include "src/snapshot/startup-deserializer.h"
#include "src/tracing/tracing-category-observer.h"
+#include "src/unicode-cache.h"
#include "src/v8.h"
#include "src/version.h"
#include "src/visitors.h"
#include "src/vm-state-inl.h"
#include "src/wasm/compilation-manager.h"
-#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects.h"
#include "src/zone/accounting-allocator.h"
@@ -120,6 +121,7 @@ void ThreadLocalTop::Initialize() {
void ThreadLocalTop::Free() {
+ wasm_caught_exception_ = nullptr;
// Match unmatched PopPromise calls.
while (promise_on_stack_) isolate_->PopPromise();
}
@@ -259,8 +261,8 @@ bool Isolate::IsDeferredHandle(Object** handle) {
for (DeferredHandles* deferred = deferred_handles_head_;
deferred != NULL;
deferred = deferred->next_) {
- List<Object**>* blocks = &deferred->blocks_;
- for (int i = 0; i < blocks->length(); i++) {
+ std::vector<Object**>* blocks = &deferred->blocks_;
+ for (size_t i = 0; i < blocks->size(); i++) {
Object** block_limit = (i == 0) ? deferred->first_block_limit_
: blocks->at(i) + kHandleBlockSize;
if (blocks->at(i) <= handle && handle < block_limit) return true;
@@ -368,13 +370,11 @@ void Isolate::PushCodeObjectsAndDie(unsigned int magic1, void* ptr1, void* ptr2,
namespace {
-class StackTraceHelper {
+class FrameArrayBuilder {
public:
- StackTraceHelper(Isolate* isolate, FrameSkipMode mode, Handle<Object> caller)
- : isolate_(isolate),
- mode_(mode),
- caller_(caller),
- skip_next_frame_(true) {
+ FrameArrayBuilder(Isolate* isolate, FrameSkipMode mode, int limit,
+ Handle<Object> caller)
+ : isolate_(isolate), mode_(mode), limit_(limit), caller_(caller) {
switch (mode_) {
case SKIP_FIRST:
skip_next_frame_ = true;
@@ -387,33 +387,132 @@ class StackTraceHelper {
skip_next_frame_ = false;
break;
}
- encountered_strict_function_ = false;
+
+ elements_ = isolate->factory()->NewFrameArray(Min(limit, 10));
}
+ void AppendStandardFrame(StandardFrame* frame) {
+ std::vector<FrameSummary> frames;
+ frames.reserve(FLAG_max_inlining_levels + 1);
+ frame->Summarize(&frames);
+ // A standard frame may include many summarized frames (due to inlining).
+ for (size_t i = frames.size(); i != 0 && !full(); i--) {
+ const auto& summ = frames[i - 1];
+ if (summ.IsJavaScript()) {
+ //====================================================================
+ // Handle a JavaScript frame.
+ //====================================================================
+ const auto& summary = summ.AsJavaScript();
+
+ // Filter out internal frames that we do not want to show.
+ if (!IsVisibleInStackTrace(summary.function())) continue;
+
+ Handle<AbstractCode> abstract_code = summary.abstract_code();
+ const int offset = summary.code_offset();
+
+ bool is_constructor = summary.is_constructor();
+ // Help CallSite::IsConstructor correctly detect hand-written
+ // construct stubs.
+ if (abstract_code->IsCode() &&
+ Code::cast(*abstract_code)->is_construct_stub()) {
+ is_constructor = true;
+ }
+
+ int flags = 0;
+ Handle<JSFunction> function = summary.function();
+ if (IsStrictFrame(function)) flags |= FrameArray::kIsStrict;
+ if (is_constructor) flags |= FrameArray::kIsConstructor;
+
+ elements_ = FrameArray::AppendJSFrame(
+ elements_, TheHoleToUndefined(isolate_, summary.receiver()),
+ function, abstract_code, offset, flags);
+ } else if (summ.IsWasmCompiled()) {
+ //====================================================================
+ // Handle a WASM compiled frame.
+ //====================================================================
+ const auto& summary = summ.AsWasmCompiled();
+ Handle<WasmInstanceObject> instance = summary.wasm_instance();
+ int flags = 0;
+ if (instance->compiled_module()->is_asm_js()) {
+ flags |= FrameArray::kIsAsmJsWasmFrame;
+ if (WasmCompiledFrame::cast(frame)->at_to_number_conversion()) {
+ flags |= FrameArray::kAsmJsAtNumberConversion;
+ }
+ } else {
+ flags |= FrameArray::kIsWasmFrame;
+ }
+
+ elements_ = FrameArray::AppendWasmFrame(
+ elements_, instance, summary.function_index(),
+ Handle<AbstractCode>::cast(summary.code()), summary.code_offset(),
+ flags);
+ } else if (summ.IsWasmInterpreted()) {
+ //====================================================================
+ // Handle a WASM interpreted frame.
+ //====================================================================
+ const auto& summary = summ.AsWasmInterpreted();
+ Handle<WasmInstanceObject> instance = summary.wasm_instance();
+ int flags = FrameArray::kIsWasmInterpretedFrame;
+ DCHECK(!instance->compiled_module()->is_asm_js());
+ elements_ = FrameArray::AppendWasmFrame(
+ elements_, instance, summary.function_index(),
+ Handle<AbstractCode>::null(), summary.byte_offset(), flags);
+ }
+ }
+ }
+
+ void AppendBuiltinExitFrame(BuiltinExitFrame* exit_frame) {
+ Handle<JSFunction> function = handle(exit_frame->function(), isolate_);
+
+ // Filter out internal frames that we do not want to show.
+ if (!IsVisibleInStackTrace(function)) return;
+
+ Handle<Object> receiver(exit_frame->receiver(), isolate_);
+ Handle<Code> code(exit_frame->LookupCode(), isolate_);
+ const int offset =
+ static_cast<int>(exit_frame->pc() - code->instruction_start());
+
+ int flags = 0;
+ if (IsStrictFrame(function)) flags |= FrameArray::kIsStrict;
+ if (exit_frame->IsConstructor()) flags |= FrameArray::kIsConstructor;
+
+ elements_ = FrameArray::AppendJSFrame(elements_, receiver, function,
+ Handle<AbstractCode>::cast(code),
+ offset, flags);
+ }
+
+ bool full() { return elements_->FrameCount() >= limit_; }
+
+ Handle<FrameArray> GetElements() {
+ elements_->ShrinkToFit();
+ return elements_;
+ }
+
+ private:
// Poison stack frames below the first strict mode frame.
// The stack trace API should not expose receivers and function
// objects on frames deeper than the top-most one with a strict mode
// function.
- bool IsStrictFrame(JSFunction* fun) {
+ bool IsStrictFrame(Handle<JSFunction> function) {
if (!encountered_strict_function_) {
- encountered_strict_function_ = is_strict(fun->shared()->language_mode());
+ encountered_strict_function_ =
+ is_strict(function->shared()->language_mode());
}
return encountered_strict_function_;
}
// Determines whether the given stack frame should be displayed in a stack
// trace.
- bool IsVisibleInStackTrace(JSFunction* fun) {
- return ShouldIncludeFrame(fun) && IsNotHidden(fun) &&
- IsInSameSecurityContext(fun);
+ bool IsVisibleInStackTrace(Handle<JSFunction> function) {
+ return ShouldIncludeFrame(function) && IsNotHidden(function) &&
+ IsInSameSecurityContext(function);
}
- private:
// This mechanism excludes a number of uninteresting frames from the stack
  // trace. This can be the first frame (which will be a builtin-exit frame
// for the error constructor builtin) or every frame until encountering a
// user-specified function.
- bool ShouldIncludeFrame(JSFunction* fun) {
+ bool ShouldIncludeFrame(Handle<JSFunction> function) {
switch (mode_) {
case SKIP_NONE:
return true;
@@ -422,7 +521,7 @@ class StackTraceHelper {
skip_next_frame_ = false;
return false;
case SKIP_UNTIL_SEEN:
- if (skip_next_frame_ && (fun == *caller_)) {
+ if (skip_next_frame_ && (*function == *caller_)) {
skip_next_frame_ = false;
return false;
}
@@ -431,38 +530,39 @@ class StackTraceHelper {
UNREACHABLE();
}
- bool IsNotHidden(JSFunction* fun) {
+ bool IsNotHidden(Handle<JSFunction> function) {
// Functions defined not in user scripts are not visible unless directly
// exposed, in which case the native flag is set.
// The --builtins-in-stack-traces command line flag allows including
// internal call sites in the stack trace for debugging purposes.
- if (!FLAG_builtins_in_stack_traces && !fun->shared()->IsUserJavaScript()) {
- return fun->shared()->native();
+ if (!FLAG_builtins_in_stack_traces &&
+ !function->shared()->IsUserJavaScript()) {
+ return function->shared()->native();
}
return true;
}
- bool IsInSameSecurityContext(JSFunction* fun) {
- return isolate_->context()->HasSameSecurityTokenAs(fun->context());
+ bool IsInSameSecurityContext(Handle<JSFunction> function) {
+ return isolate_->context()->HasSameSecurityTokenAs(function->context());
}
- Isolate* isolate_;
+ // TODO(jgruber): Fix all cases in which frames give us a hole value (e.g. the
+  // receiver in RegExp constructor frames).
+ Handle<Object> TheHoleToUndefined(Isolate* isolate, Handle<Object> in) {
+ return (in->IsTheHole(isolate))
+ ? Handle<Object>::cast(isolate->factory()->undefined_value())
+ : in;
+ }
+ Isolate* isolate_;
const FrameSkipMode mode_;
+ int limit_;
const Handle<Object> caller_;
- bool skip_next_frame_;
-
- bool encountered_strict_function_;
+ bool skip_next_frame_ = true;
+ bool encountered_strict_function_ = false;
+ Handle<FrameArray> elements_;
};
-// TODO(jgruber): Fix all cases in which frames give us a hole value (e.g. the
-// receiver in RegExp constructor frames.
-Handle<Object> TheHoleToUndefined(Isolate* isolate, Handle<Object> in) {
- return (in->IsTheHole(isolate))
- ? Handle<Object>::cast(isolate->factory()->undefined_value())
- : in;
-}
-
bool GetStackTraceLimit(Isolate* isolate, int* result) {
Handle<JSObject> error = isolate->error_function();
@@ -486,133 +586,38 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
int limit;
if (!GetStackTraceLimit(this, &limit)) return factory()->undefined_value();
- const int initial_size = Min(limit, 10);
- Handle<FrameArray> elements = factory()->NewFrameArray(initial_size);
+ FrameArrayBuilder builder(this, mode, limit, caller);
- StackTraceHelper helper(this, mode, caller);
-
- for (StackFrameIterator iter(this);
- !iter.done() && elements->FrameCount() < limit; iter.Advance()) {
+ for (StackFrameIterator iter(this); !iter.done() && !builder.full();
+ iter.Advance()) {
StackFrame* frame = iter.frame();
switch (frame->type()) {
- case StackFrame::JAVA_SCRIPT:
case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION:
case StackFrame::OPTIMIZED:
case StackFrame::INTERPRETED:
- case StackFrame::BUILTIN: {
- JavaScriptFrame* js_frame = JavaScriptFrame::cast(frame);
- // Set initial size to the maximum inlining level + 1 for the outermost
- // function.
- std::vector<FrameSummary> frames;
- frames.reserve(FLAG_max_inlining_levels + 1);
- js_frame->Summarize(&frames);
- for (size_t i = frames.size(); i != 0 && elements->FrameCount() < limit;
- i--) {
- const FrameSummary& summary = frames[i - 1];
- const auto& summ = summary.AsJavaScript();
- Handle<JSFunction> fun = summ.function();
-
- // Filter out internal frames that we do not want to show.
- if (!helper.IsVisibleInStackTrace(*fun)) continue;
-
- Handle<Object> recv = summary.receiver();
- Handle<AbstractCode> abstract_code = summ.abstract_code();
- const int offset = summary.code_offset();
-
- bool is_constructor = summary.is_constructor();
- if (frame->type() == StackFrame::BUILTIN) {
- // Help CallSite::IsConstructor correctly detect hand-written
- // construct stubs.
- if (Code::cast(*abstract_code)->is_construct_stub()) {
- is_constructor = true;
- }
- }
-
- int flags = 0;
- if (helper.IsStrictFrame(*fun)) flags |= FrameArray::kIsStrict;
- if (is_constructor) flags |= FrameArray::kIsConstructor;
-
- elements = FrameArray::AppendJSFrame(
- elements, TheHoleToUndefined(this, recv), fun, abstract_code,
- offset, flags);
- }
- } break;
-
- case StackFrame::BUILTIN_EXIT: {
- BuiltinExitFrame* exit_frame = BuiltinExitFrame::cast(frame);
- Handle<JSFunction> fun = handle(exit_frame->function(), this);
-
- // Filter out internal frames that we do not want to show.
- if (!helper.IsVisibleInStackTrace(*fun)) continue;
-
- Handle<Object> recv(exit_frame->receiver(), this);
- Handle<Code> code(exit_frame->LookupCode(), this);
- const int offset =
- static_cast<int>(exit_frame->pc() - code->instruction_start());
-
- int flags = 0;
- if (helper.IsStrictFrame(*fun)) flags |= FrameArray::kIsStrict;
- if (exit_frame->IsConstructor()) flags |= FrameArray::kIsConstructor;
-
- elements = FrameArray::AppendJSFrame(elements, recv, fun,
- Handle<AbstractCode>::cast(code),
- offset, flags);
- } break;
-
- case StackFrame::WASM_COMPILED: {
- WasmCompiledFrame* wasm_frame = WasmCompiledFrame::cast(frame);
- Handle<WasmInstanceObject> instance(wasm_frame->wasm_instance(), this);
- const int wasm_function_index = wasm_frame->function_index();
- Code* code = wasm_frame->unchecked_code();
- Handle<AbstractCode> abstract_code(AbstractCode::cast(code), this);
- const int offset =
- static_cast<int>(wasm_frame->pc() - code->instruction_start());
-
- int flags = 0;
- if (instance->compiled_module()->is_asm_js()) {
- flags |= FrameArray::kIsAsmJsWasmFrame;
- if (wasm_frame->at_to_number_conversion()) {
- flags |= FrameArray::kAsmJsAtNumberConversion;
- }
- } else {
- flags |= FrameArray::kIsWasmFrame;
- }
-
- elements =
- FrameArray::AppendWasmFrame(elements, instance, wasm_function_index,
- abstract_code, offset, flags);
- } break;
-
- case StackFrame::WASM_INTERPRETER_ENTRY: {
- WasmInterpreterEntryFrame* interpreter_frame =
- WasmInterpreterEntryFrame::cast(frame);
- Handle<WasmInstanceObject> instance(interpreter_frame->wasm_instance(),
- this);
- // Get the interpreted stack (<func_index, offset> pairs).
- std::vector<std::pair<uint32_t, int>> interpreted_stack =
- instance->debug_info()->GetInterpretedStack(
- interpreter_frame->fp());
-
- // interpreted_stack is bottom-up, i.e. caller before callee. We need it
- // the other way around.
- for (auto pair : base::Reversed(interpreted_stack)) {
- elements = FrameArray::AppendWasmFrame(
- elements, instance, pair.first, Handle<AbstractCode>::null(),
- pair.second, FrameArray::kIsWasmInterpretedFrame);
- if (elements->FrameCount() >= limit) break;
- }
- } break;
+ case StackFrame::BUILTIN:
+ builder.AppendStandardFrame(JavaScriptFrame::cast(frame));
+ break;
+ case StackFrame::BUILTIN_EXIT:
+ // BuiltinExitFrames are not standard frames, so they do not have
+ // Summarize(). However, they may have one JS frame worth showing.
+ builder.AppendBuiltinExitFrame(BuiltinExitFrame::cast(frame));
+ break;
+ case StackFrame::WASM_COMPILED:
+ builder.AppendStandardFrame(WasmCompiledFrame::cast(frame));
+ break;
+ case StackFrame::WASM_INTERPRETER_ENTRY:
+ builder.AppendStandardFrame(WasmInterpreterEntryFrame::cast(frame));
+ break;
default:
break;
}
}
- elements->ShrinkToFit();
-
// TODO(yangguo): Queue this structured stack trace for preprocessing on GC.
- return factory()->NewJSArrayWithElements(elements);
+ return factory()->NewJSArrayWithElements(builder.GetElements());
}
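
A hedged note on the limit plumbed through FrameArrayBuilder: it comes from GetStackTraceLimit, which reads the standard V8 extension Error.stackTraceLimit. Illustrative:

    Error.stackTraceLimit = 2;
    function a() { return new Error('x').stack; }
    function b() { return a(); }
    function c() { return b(); }
    c();  // the captured trace keeps only the two innermost frames (a and b)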
MaybeHandle<JSReceiver> Isolate::CaptureAndSetDetailedStackTrace(
@@ -667,15 +672,15 @@ class CaptureStackTraceHelper {
const FrameSummary::JavaScriptFrameSummary& summ) {
int code_offset;
Handle<ByteArray> source_position_table;
- Object* maybe_cache;
+ Handle<Object> maybe_cache;
Handle<UnseededNumberDictionary> cache;
if (!FLAG_optimize_for_size) {
code_offset = summ.code_offset();
source_position_table =
handle(summ.abstract_code()->source_position_table(), isolate_);
- maybe_cache = summ.abstract_code()->stack_frame_cache();
+ maybe_cache = handle(summ.abstract_code()->stack_frame_cache(), isolate_);
if (maybe_cache->IsUnseededNumberDictionary()) {
- cache = handle(UnseededNumberDictionary::cast(maybe_cache));
+ cache = Handle<UnseededNumberDictionary>::cast(maybe_cache);
} else {
cache = UnseededNumberDictionary::New(isolate_, 1);
}
@@ -1060,6 +1065,16 @@ void ReportBootstrappingException(Handle<Object> exception,
#endif
}
+bool Isolate::is_catchable_by_wasm(Object* exception) {
+ if (!is_catchable_by_javascript(exception) || !exception->IsJSError())
+ return false;
+ HandleScope scope(this);
+ Handle<Object> exception_handle(exception, this);
+ return JSReceiver::HasProperty(Handle<JSReceiver>::cast(exception_handle),
+ factory()->InternalizeUtf8String(
+ wasm::WasmException::kRuntimeIdStr))
+ .IsJust();
+}
Object* Isolate::Throw(Object* exception, MessageLocation* location) {
DCHECK(!has_pending_exception());
@@ -1244,6 +1259,7 @@ Object* Isolate::UnwindAndFindHandler() {
// again.
trap_handler::SetThreadInWasm();
+ set_wasm_caught_exception(exception);
return FoundHandler(nullptr, frame->LookupCode(), offset, return_sp,
frame->fp());
}
@@ -1306,7 +1322,8 @@ Object* Isolate::UnwindAndFindHandler() {
// For interpreted frame we perform a range lookup in the handler table.
if (!catchable_by_js) break;
InterpretedFrame* js_frame = static_cast<InterpretedFrame*>(frame);
- int register_slots = js_frame->GetBytecodeArray()->register_count();
+ int register_slots = InterpreterFrameConstants::RegisterStackSlotCount(
+ js_frame->GetBytecodeArray()->register_count());
int context_reg = 0; // Will contain register index holding context.
int offset =
js_frame->LookupExceptionHandlerInTable(&context_reg, nullptr);
@@ -1333,9 +1350,8 @@ Object* Isolate::UnwindAndFindHandler() {
return FoundHandler(context, code, 0, return_sp, frame->fp());
}
- case StackFrame::JAVA_SCRIPT:
case StackFrame::BUILTIN:
- // For JavaScript frames we are guaranteed not to find a handler.
+ // For builtin frames we are guaranteed not to find a handler.
if (catchable_by_js) {
CHECK_EQ(-1,
JavaScriptFrame::cast(frame)->LookupExceptionHandlerInTable(
@@ -1447,7 +1463,6 @@ Isolate::CatchType Isolate::PredictExceptionCatcher() {
} break;
// For JavaScript frames we perform a lookup in the handler table.
- case StackFrame::JAVA_SCRIPT:
case StackFrame::OPTIMIZED:
case StackFrame::INTERPRETED:
case StackFrame::BUILTIN: {
@@ -1623,21 +1638,14 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
int func_index = elements->WasmFunctionIndex(i)->value();
int code_offset = elements->Offset(i)->value();
// TODO(wasm): Clean this up (bug 5007).
- int pos = code_offset < 0
- ? (-1 - code_offset)
- : elements->Code(i)->SourcePosition(code_offset);
- if (elements->IsAsmJsWasmFrame(i)) {
- // For asm.js frames, make an additional translation step to get the
- // asm.js source position.
- bool at_to_number_conversion =
- elements->Flags(i)->value() & FrameArray::kAsmJsAtNumberConversion;
- pos = WasmCompiledModule::GetAsmJsSourcePosition(
- compiled_module, func_index, pos, at_to_number_conversion);
- } else {
- // For pure wasm, make the function-local position module-relative by
- // adding the function offset.
- pos += compiled_module->GetFunctionOffset(func_index);
- }
+ int byte_offset = code_offset < 0
+ ? (-1 - code_offset)
+ : elements->Code(i)->SourcePosition(code_offset);
+ bool is_at_number_conversion =
+ elements->IsAsmJsWasmFrame(i) &&
+ elements->Flags(i)->value() & FrameArray::kAsmJsAtNumberConversion;
+ int pos = WasmCompiledModule::GetSourcePosition(
+ compiled_module, func_index, byte_offset, is_at_number_conversion);
Handle<Script> script(compiled_module->script());
*target = MessageLocation(script, pos, pos + 1);
@@ -2032,10 +2040,24 @@ void Isolate::SetAbortOnUncaughtExceptionCallback(
abort_on_uncaught_exception_callback_ = callback;
}
+namespace {
+void AdvanceWhileDebugContext(JavaScriptFrameIterator& it, Debug* debug) {
+ if (!debug->in_debug_scope()) return;
+
+ while (!it.done()) {
+ Context* context = Context::cast(it.frame()->context());
+ if (context->native_context() == *debug->debug_context()) {
+ it.Advance();
+ } else {
+ break;
+ }
+ }
+}
+} // namespace
Handle<Context> Isolate::GetCallingNativeContext() {
JavaScriptFrameIterator it(this);
- it.AdvanceWhileDebugContext(debug_);
+ AdvanceWhileDebugContext(it, debug_);
if (it.done()) return Handle<Context>::null();
JavaScriptFrame* frame = it.frame();
Context* context = Context::cast(frame->context());
@@ -2044,7 +2066,7 @@ Handle<Context> Isolate::GetCallingNativeContext() {
Handle<Context> Isolate::GetIncumbentContext() {
JavaScriptFrameIterator it(this);
- it.AdvanceWhileDebugContext(debug_);
+ AdvanceWhileDebugContext(it, debug_);
// 1st candidate: most-recently-entered author function's context
// if it's newer than the last Context::BackupIncumbentScope entry.
@@ -2670,7 +2692,7 @@ void PrintBuiltinSizes(Isolate* isolate) {
for (int i = 0; i < Builtins::builtin_count; i++) {
const char* name = builtins->name(i);
const char* kind = Builtins::KindNameOf(i);
- Code* code = builtins->builtin(static_cast<Builtins::Name>(i));
+ Code* code = builtins->builtin(i);
PrintF(stdout, "%s Builtin, %s, %d\n", kind, name,
code->instruction_size());
}
@@ -2713,8 +2735,8 @@ bool Isolate::Init(StartupDeserializer* des) {
eternal_handles_ = new EternalHandles();
bootstrapper_ = new Bootstrapper(this);
handle_scope_implementer_ = new HandleScopeImplementer(this);
- load_stub_cache_ = new StubCache(this, Code::LOAD_IC);
- store_stub_cache_ = new StubCache(this, Code::STORE_IC);
+ load_stub_cache_ = new StubCache(this);
+ store_stub_cache_ = new StubCache(this);
materialized_object_store_ = new MaterializedObjectStore(this);
regexp_stack_ = new RegExpStack();
regexp_stack_->isolate_ = this;
@@ -2763,7 +2785,11 @@ bool Isolate::Init(StartupDeserializer* des) {
deoptimizer_data_ = new DeoptimizerData(heap()->memory_allocator());
const bool create_heap_objects = (des == NULL);
- if (create_heap_objects && !heap_.CreateHeapObjects()) {
+ if (setup_delegate_ == nullptr) {
+ setup_delegate_ = new SetupIsolateDelegate(create_heap_objects);
+ }
+
+ if (!setup_delegate_->SetupHeap(&heap_)) {
V8::FatalProcessOutOfMemory("heap object creation");
return false;
}
@@ -2776,10 +2802,7 @@ bool Isolate::Init(StartupDeserializer* des) {
InitializeThreadLocal();
bootstrapper_->Initialize(create_heap_objects);
- if (setup_delegate_ == nullptr) {
- setup_delegate_ = new SetupIsolateDelegate();
- }
- setup_delegate_->SetupBuiltins(this, create_heap_objects);
+ setup_delegate_->SetupBuiltins(this);
if (create_heap_objects) heap_.CreateFixedStubs();
if (FLAG_log_internal_timer_events) {
@@ -2803,7 +2826,7 @@ bool Isolate::Init(StartupDeserializer* des) {
if (!create_heap_objects) des->DeserializeInto(this);
load_stub_cache_->Initialize();
store_stub_cache_->Initialize();
- setup_delegate_->SetupInterpreter(interpreter_, create_heap_objects);
+ setup_delegate_->SetupInterpreter(interpreter_);
heap_.NotifyDeserializationComplete();
}
@@ -3032,16 +3055,17 @@ bool Isolate::IsInAnyContext(Object* object, uint32_t index) {
return false;
}
-bool Isolate::IsFastArrayConstructorPrototypeChainIntact() {
+bool Isolate::IsFastArrayConstructorPrototypeChainIntact(Context* context) {
PropertyCell* no_elements_cell = heap()->array_protector();
bool cell_reports_intact =
no_elements_cell->value()->IsSmi() &&
Smi::ToInt(no_elements_cell->value()) == kProtectorValid;
#ifdef DEBUG
+ Context* native_context = context->native_context();
+
Map* root_array_map =
- raw_native_context()->GetInitialJSArrayMap(GetInitialFastElementsKind());
- Context* native_context = context()->native_context();
+ native_context->GetInitialJSArrayMap(GetInitialFastElementsKind());
JSObject* initial_array_proto = JSObject::cast(
native_context->get(Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
JSObject* initial_object_proto = JSObject::cast(
@@ -3070,8 +3094,11 @@ bool Isolate::IsFastArrayConstructorPrototypeChainIntact() {
PrototypeIterator iter(this, initial_array_proto);
if (iter.IsAtEnd() || iter.GetCurrent() != initial_object_proto) {
DCHECK_EQ(false, cell_reports_intact);
+ DCHECK(!has_pending_exception());
return cell_reports_intact;
}
+ DCHECK(!has_pending_exception());
elements = initial_object_proto->elements();
if (elements != heap()->empty_fixed_array() &&
@@ -3085,12 +3112,15 @@ bool Isolate::IsFastArrayConstructorPrototypeChainIntact() {
DCHECK_EQ(false, cell_reports_intact);
return cell_reports_intact;
}
-
#endif
return cell_reports_intact;
}
+bool Isolate::IsFastArrayConstructorPrototypeChainIntact() {
+ return Isolate::IsFastArrayConstructorPrototypeChainIntact(context());
+}
+
bool Isolate::IsIsConcatSpreadableLookupChainIntact() {
Cell* is_concat_spreadable_cell = heap()->is_concat_spreadable_protector();
bool is_is_concat_spreadable_set =
@@ -3141,6 +3171,14 @@ void Isolate::InvalidateIsConcatSpreadableProtector() {
DCHECK(!IsIsConcatSpreadableLookupChainIntact());
}
+void Isolate::InvalidateArrayConstructorProtector() {
+ DCHECK(factory()->array_constructor_protector()->value()->IsSmi());
+ DCHECK(IsArrayConstructorIntact());
+ factory()->array_constructor_protector()->set_value(
+ Smi::FromInt(kProtectorInvalid));
+ DCHECK(!IsArrayConstructorIntact());
+}
+
void Isolate::InvalidateArraySpeciesProtector() {
DCHECK(factory()->species_protector()->value()->IsSmi());
DCHECK(IsArraySpeciesLookupChainIntact());
@@ -3254,39 +3292,49 @@ Handle<Symbol> Isolate::SymbolFor(Heap::RootListIndex dictionary_index,
}
void Isolate::AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback) {
- for (int i = 0; i < before_call_entered_callbacks_.length(); i++) {
- if (callback == before_call_entered_callbacks_.at(i)) return;
- }
- before_call_entered_callbacks_.Add(callback);
+ auto pos = std::find(before_call_entered_callbacks_.begin(),
+ before_call_entered_callbacks_.end(), callback);
+ if (pos != before_call_entered_callbacks_.end()) return;
+ before_call_entered_callbacks_.push_back(callback);
}
-
void Isolate::RemoveBeforeCallEnteredCallback(
BeforeCallEnteredCallback callback) {
- for (int i = 0; i < before_call_entered_callbacks_.length(); i++) {
- if (callback == before_call_entered_callbacks_.at(i)) {
- before_call_entered_callbacks_.Remove(i);
- }
- }
+ auto pos = std::find(before_call_entered_callbacks_.begin(),
+ before_call_entered_callbacks_.end(), callback);
+ if (pos == before_call_entered_callbacks_.end()) return;
+ before_call_entered_callbacks_.erase(pos);
}
-
void Isolate::AddCallCompletedCallback(CallCompletedCallback callback) {
- for (int i = 0; i < call_completed_callbacks_.length(); i++) {
- if (callback == call_completed_callbacks_.at(i)) return;
- }
- call_completed_callbacks_.Add(callback);
+ auto pos = std::find(call_completed_callbacks_.begin(),
+ call_completed_callbacks_.end(), callback);
+ if (pos != call_completed_callbacks_.end()) return;
+ call_completed_callbacks_.push_back(callback);
}
-
void Isolate::RemoveCallCompletedCallback(CallCompletedCallback callback) {
- for (int i = 0; i < call_completed_callbacks_.length(); i++) {
- if (callback == call_completed_callbacks_.at(i)) {
- call_completed_callbacks_.Remove(i);
- }
- }
+ auto pos = std::find(call_completed_callbacks_.begin(),
+ call_completed_callbacks_.end(), callback);
+ if (pos == call_completed_callbacks_.end()) return;
+ call_completed_callbacks_.erase(pos);
}
+void Isolate::AddMicrotasksCompletedCallback(
+ MicrotasksCompletedCallback callback) {
+ auto pos = std::find(microtasks_completed_callbacks_.begin(),
+ microtasks_completed_callbacks_.end(), callback);
+ if (pos != microtasks_completed_callbacks_.end()) return;
+ microtasks_completed_callbacks_.push_back(callback);
+}
+
+void Isolate::RemoveMicrotasksCompletedCallback(
+ MicrotasksCompletedCallback callback) {
+ auto pos = std::find(microtasks_completed_callbacks_.begin(),
+ microtasks_completed_callbacks_.end(), callback);
+ if (pos == microtasks_completed_callbacks_.end()) return;
+ microtasks_completed_callbacks_.erase(pos);
+}
void Isolate::FireCallCompletedCallback() {
if (!handle_scope_implementer()->CallDepthIsZero()) return;
@@ -3299,12 +3347,12 @@ void Isolate::FireCallCompletedCallback() {
if (run_microtasks) RunMicrotasks();
- if (call_completed_callbacks_.is_empty()) return;
+ if (call_completed_callbacks_.empty()) return;
// Fire callbacks. Increase call depth to prevent recursive callbacks.
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this);
v8::Isolate::SuppressMicrotaskExecutionScope suppress(isolate);
- for (int i = 0; i < call_completed_callbacks_.length(); i++) {
- call_completed_callbacks_.at(i)(isolate);
+ for (auto& callback : call_completed_callbacks_) {
+    callback(isolate);
}
}
@@ -3384,10 +3432,10 @@ void Isolate::SetPromiseRejectCallback(PromiseRejectCallback callback) {
promise_reject_callback_ = callback;
}
-
-void Isolate::ReportPromiseReject(Handle<JSObject> promise,
+void Isolate::ReportPromiseReject(Handle<JSPromise> promise,
Handle<Object> value,
v8::PromiseRejectEvent event) {
+ DCHECK_EQ(v8::Promise::kRejected, promise->status());
if (promise_reject_callback_ == NULL) return;
Handle<FixedArray> stack_trace;
if (event == v8::kPromiseRejectWithNoHandler && value->IsJSObject()) {
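A hedged JavaScript-level sketch of when this report fires (the event names come from the public v8::PromiseRejectEvent enum):

    const p = Promise.reject(new Error('boom'));  // fires kPromiseRejectWithNoHandler;
                                                  // the promise is already kRejected
    p.catch(() => {});                            // later fires kPromiseHandlerAddedAfterReject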
@@ -3564,33 +3612,6 @@ void Isolate::RunMicrotasksInternal() {
}
}
-
-void Isolate::AddMicrotasksCompletedCallback(
- MicrotasksCompletedCallback callback) {
- for (int i = 0; i < microtasks_completed_callbacks_.length(); i++) {
- if (callback == microtasks_completed_callbacks_.at(i)) return;
- }
- microtasks_completed_callbacks_.Add(callback);
-}
-
-
-void Isolate::RemoveMicrotasksCompletedCallback(
- MicrotasksCompletedCallback callback) {
- for (int i = 0; i < microtasks_completed_callbacks_.length(); i++) {
- if (callback == microtasks_completed_callbacks_.at(i)) {
- microtasks_completed_callbacks_.Remove(i);
- }
- }
-}
-
-
-void Isolate::FireMicrotasksCompletedCallback() {
- for (int i = 0; i < microtasks_completed_callbacks_.length(); i++) {
- microtasks_completed_callbacks_.at(i)(reinterpret_cast<v8::Isolate*>(this));
- }
-}
-
-
void Isolate::SetUseCounterCallback(v8::Isolate::UseCounterCallback callback) {
DCHECK(!use_counter_callback_);
use_counter_callback_ = callback;
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index aeb6685f61..44a5250808 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -8,6 +8,7 @@
#include <cstddef>
#include <memory>
#include <queue>
+#include <vector>
#include "include/v8-debug.h"
#include "src/allocation.h"
@@ -26,8 +27,6 @@
#include "src/runtime/runtime.h"
#include "src/zone/zone.h"
-class TestIsolate;
-
namespace v8 {
namespace base {
@@ -449,6 +448,7 @@ typedef std::vector<HeapObject*> DebugObjectCache;
V(bool, needs_side_effect_check, false) \
/* Current code coverage mode */ \
V(debug::Coverage::Mode, code_coverage_mode, debug::Coverage::kBestEffort) \
+ V(debug::TypeProfile::Mode, type_profile_mode, debug::TypeProfile::kNone) \
V(int, last_stack_frame_info_id, 0) \
V(int, last_console_context_id, 0) \
ISOLATE_INIT_SIMULATOR_LIST(V)
@@ -608,8 +608,8 @@ class Isolate {
inline void clear_pending_exception();
// Interface to wasm caught exception.
- inline Object* get_wasm_caught_exception() const;
- inline void set_wasm_caught_exception(Object* exception_obj);
+ inline Object* get_wasm_caught_exception();
+ inline void set_wasm_caught_exception(Object* exception);
inline void clear_wasm_caught_exception();
THREAD_LOCAL_TOP_ADDRESS(Object*, pending_exception)
@@ -646,7 +646,7 @@ class Isolate {
bool IsExternalHandlerOnTop(Object* exception);
inline bool is_catchable_by_javascript(Object* exception);
- inline bool is_catchable_by_wasm(Object* exception);
+ bool is_catchable_by_wasm(Object* exception);
// JS execution stack (see frames.h).
static Address c_entry_fp(ThreadLocalTop* thread) {
@@ -1005,6 +1005,9 @@ class Isolate {
}
bool serializer_enabled() const { return serializer_enabled_; }
+ void set_serializer_enabled_for_test(bool serializer_enabled) {
+ serializer_enabled_ = serializer_enabled;
+ }
bool snapshot_available() const {
return snapshot_blob_ != NULL && snapshot_blob_->raw_size != 0;
}
@@ -1042,6 +1045,10 @@ class Isolate {
return is_block_count_code_coverage() || is_block_binary_code_coverage();
}
+ bool is_collecting_type_profile() const {
+ return type_profile_mode() == debug::TypeProfile::kCollect;
+ }
+
void SetCodeCoverageList(Object* value);
double time_millis_since_init() {
@@ -1062,7 +1069,14 @@ class Isolate {
static const int kProtectorValid = 1;
static const int kProtectorInvalid = 0;
+ inline bool IsArrayConstructorIntact();
+
+ // The version with an explicit context parameter can be used when
+ // Isolate::context is not set up, e.g. when calling directly into C++ from
+ // CSA.
+ bool IsFastArrayConstructorPrototypeChainIntact(Context* context);
bool IsFastArrayConstructorPrototypeChainIntact();
+
inline bool IsArraySpeciesLookupChainIntact();
bool IsIsConcatSpreadableLookupChainIntact();
bool IsIsConcatSpreadableLookupChainIntact(JSReceiver* receiver);
@@ -1089,6 +1103,7 @@ class Isolate {
void UpdateArrayProtectorOnNormalizeElements(Handle<JSObject> object) {
UpdateArrayProtectorOnSetElement(object);
}
+ void InvalidateArrayConstructorProtector();
void InvalidateArraySpeciesProtector();
void InvalidateIsConcatSpreadableProtector();
void InvalidateStringLengthOverflowProtector();
@@ -1163,10 +1178,10 @@ class Isolate {
void AddMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
void RemoveMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
- void FireMicrotasksCompletedCallback();
+ inline void FireMicrotasksCompletedCallback();
void SetPromiseRejectCallback(PromiseRejectCallback callback);
- void ReportPromiseReject(Handle<JSObject> promise, Handle<Object> value,
+ void ReportPromiseReject(Handle<JSPromise> promise, Handle<Object> value,
v8::PromiseRejectEvent event);
void PromiseReactionJob(Handle<PromiseReactionJobInfo> info,
@@ -1567,14 +1582,14 @@ class Isolate {
int next_unique_sfi_id_;
#endif
- // List of callbacks before a Call starts execution.
- List<BeforeCallEnteredCallback> before_call_entered_callbacks_;
+ // Vector of callbacks before a Call starts execution.
+ std::vector<BeforeCallEnteredCallback> before_call_entered_callbacks_;
- // List of callbacks when a Call completes.
- List<CallCompletedCallback> call_completed_callbacks_;
+ // Vector of callbacks when a Call completes.
+ std::vector<CallCompletedCallback> call_completed_callbacks_;
- // List of callbacks after microtasks were run.
- List<MicrotasksCompletedCallback> microtasks_completed_callbacks_;
+ // Vector of callbacks after microtasks were run.
+ std::vector<MicrotasksCompletedCallback> microtasks_completed_callbacks_;
bool is_running_microtasks_;
v8::Isolate::UseCounterCallback use_counter_callback_;
@@ -1620,12 +1635,12 @@ class Isolate {
friend class ThreadManager;
friend class Simulator;
friend class StackGuard;
+ friend class TestIsolate;
friend class ThreadId;
friend class v8::Isolate;
friend class v8::Locker;
friend class v8::Unlocker;
friend class v8::SnapshotCreator;
- friend class ::TestIsolate;
friend v8::StartupData v8::V8::CreateSnapshotDataBlob(const char*);
friend v8::StartupData v8::V8::WarmUpSnapshotDataBlob(v8::StartupData,
const char*);
diff --git a/deps/v8/src/js/array.js b/deps/v8/src/js/array.js
index 3fdc8b98ee..7a009030c4 100644
--- a/deps/v8/src/js/array.js
+++ b/deps/v8/src/js/array.js
@@ -15,8 +15,8 @@ var GetIterator;
var GetMethod;
var GlobalArray = global.Array;
var InternalArray = utils.InternalArray;
-var MaxSimple;
-var MinSimple;
+var MathMax = global.Math.max;
+var MathMin = global.Math.min;
var ObjectHasOwnProperty = global.Object.prototype.hasOwnProperty;
var ObjectToString = global.Object.prototype.toString;
var iteratorSymbol = utils.ImportNow("iterator_symbol");
@@ -25,8 +25,6 @@ var unscopablesSymbol = utils.ImportNow("unscopables_symbol");
utils.Import(function(from) {
GetIterator = from.GetIterator;
GetMethod = from.GetMethod;
- MaxSimple = from.MaxSimple;
- MinSimple = from.MinSimple;
});
// -------------------------------------------------------------------
@@ -226,7 +224,7 @@ function SparseMove(array, start_i, del_count, len, num_additional_args) {
// Move data to new array.
var new_array = new InternalArray(
// Clamp array length to 2^32-1 to avoid early RangeError.
- MinSimple(len - del_count + num_additional_args, 0xffffffff));
+ MathMin(len - del_count + num_additional_args, 0xffffffff));
var big_indices;
var indices = %GetArrayKeys(array, len);
if (IS_NUMBER(indices)) {
@@ -616,7 +614,7 @@ function ArraySliceFallback(start, end) {
if (end_i > len) end_i = len;
}
- var result = ArraySpeciesCreate(array, MaxSimple(end_i - start_i, 0));
+ var result = ArraySpeciesCreate(array, MathMax(end_i - start_i, 0));
if (end_i < start_i) return result;
@@ -1007,6 +1005,10 @@ DEFINE_METHOD(
sort(comparefn) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.sort");
+ if (!IS_UNDEFINED(comparefn) && !IS_CALLABLE(comparefn)) {
+ throw %make_type_error(kBadSortComparisonFunction, comparefn);
+ }
+
var array = TO_OBJECT(this);
var length = TO_LENGTH(array.length);
return InnerArraySort(array, length, comparefn);
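A quick sketch of the new eager check (previously a non-callable comparefn could slip through to the sort internals):

    [3, 1, 2].sort();                 // default ordering, undefined comparefn is fine
    [3, 1, 2].sort((a, b) => a - b);  // [1, 2, 3]
    [3, 1, 2].sort(42);               // now throws TypeError before touching elements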
@@ -1085,28 +1087,28 @@ DEFINE_METHOD_LEN(
target = TO_INTEGER(target);
var to;
if (target < 0) {
- to = MaxSimple(length + target, 0);
+ to = MathMax(length + target, 0);
} else {
- to = MinSimple(target, length);
+ to = MathMin(target, length);
}
start = TO_INTEGER(start);
var from;
if (start < 0) {
- from = MaxSimple(length + start, 0);
+ from = MathMax(length + start, 0);
} else {
- from = MinSimple(start, length);
+ from = MathMin(start, length);
}
end = IS_UNDEFINED(end) ? length : TO_INTEGER(end);
var final;
if (end < 0) {
- final = MaxSimple(length + end, 0);
+ final = MathMax(length + end, 0);
} else {
- final = MinSimple(end, length);
+ final = MathMin(end, length);
}
- var count = MinSimple(final - from, length - to);
+ var count = MathMin(final - from, length - to);
var direction = 1;
if (from < to && to < (from + count)) {
direction = -1;
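The clamping above matches the spec's copyWithin index handling; a couple of worked examples:

    [1, 2, 3, 4, 5].copyWithin(0, 3);        // [4, 5, 3, 4, 5]
    [1, 2, 3, 4, 5].copyWithin(-2, -3, -1);  // [1, 2, 3, 3, 4] (negative indices clamp from the end)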
diff --git a/deps/v8/src/js/intl.js b/deps/v8/src/js/intl.js
index 5a423450d8..bc702dada1 100644
--- a/deps/v8/src/js/intl.js
+++ b/deps/v8/src/js/intl.js
@@ -31,7 +31,7 @@ var GlobalRegExp = global.RegExp;
var GlobalString = global.String;
var IntlFallbackSymbol = utils.ImportNow("intl_fallback_symbol");
var InternalArray = utils.InternalArray;
-var MaxSimple;
+var MathMax = global.Math.max;
var ObjectHasOwnProperty = global.Object.prototype.hasOwnProperty;
var ObjectKeys = global.Object.keys;
var patternSymbol = utils.ImportNow("intl_pattern_symbol");
@@ -42,7 +42,6 @@ var StringSubstring = GlobalString.prototype.substring;
utils.Import(function(from) {
ArrayJoin = from.ArrayJoin;
ArrayPush = from.ArrayPush;
- MaxSimple = from.MaxSimple;
});
// Utilities for definitions
@@ -1221,7 +1220,7 @@ function SetNumberFormatDigitOptions(internalOptions, options,
mnfdDefault);
defineWEProperty(internalOptions, 'minimumFractionDigits', mnfd);
- var mxfdActualDefault = MaxSimple(mnfd, mxfdDefault);
+ var mxfdActualDefault = MathMax(mnfd, mxfdDefault);
var mxfd = getNumberOption(options, 'maximumFractionDigits', mnfd, 20,
mxfdActualDefault);
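In effect, a larger minimumFractionDigits raises the default maximumFractionDigits via plain Math.max; for example:

    new Intl.NumberFormat('en', {minimumFractionDigits: 5}).format(1.5);
    // "1.50000" — mxfdActualDefault = max(5, 3) = 5 for the decimal style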
diff --git a/deps/v8/src/js/max-min.js b/deps/v8/src/js/max-min.js
deleted file mode 100644
index e451c09d1d..0000000000
--- a/deps/v8/src/js/max-min.js
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-function MaxSimple(a, b) {
- return a > b ? a : b;
-}
-
-function MinSimple(a, b) {
- return a > b ? b : a;
-}
-
-%SetForceInlineFlag(MaxSimple);
-%SetForceInlineFlag(MinSimple);
-
-// ----------------------------------------------------------------------------
-// Exports
-
-utils.Export(function(to) {
- to.MaxSimple = MaxSimple;
- to.MinSimple = MinSimple;
-});
-
-})
diff --git a/deps/v8/src/js/string.js b/deps/v8/src/js/string.js
index f718a64946..b066f3b3d6 100644
--- a/deps/v8/src/js/string.js
+++ b/deps/v8/src/js/string.js
@@ -17,11 +17,6 @@ var searchSymbol = utils.ImportNow("search_symbol");
//-------------------------------------------------------------------
-// ES#sec-createhtml
-function HtmlEscape(str) {
- return %RegExpInternalReplace(/"/g, TO_STRING(str), "&quot;");
-}
-
// Set up the non-enumerable functions on the String prototype object.
DEFINE_METHODS(
GlobalString.prototype,
@@ -61,112 +56,6 @@ DEFINE_METHODS(
var regexp = %RegExpCreate(pattern);
return %_Call(regexp[searchSymbol], regexp, subject);
}
-
- /* ES#sec-string.prototype.anchor */
- anchor(name) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.anchor");
- return "<a name=\"" + HtmlEscape(name) + "\">" + TO_STRING(this) +
- "</a>";
- }
-
- /* ES#sec-string.prototype.big */
- big() {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.big");
- return "<big>" + TO_STRING(this) + "</big>";
- }
-
- /* ES#sec-string.prototype.blink */
- blink() {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.blink");
- return "<blink>" + TO_STRING(this) + "</blink>";
- }
-
- /* ES#sec-string.prototype.bold */
- bold() {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.bold");
- return "<b>" + TO_STRING(this) + "</b>";
- }
-
- /* ES#sec-string.prototype.fixed */
- fixed() {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.fixed");
- return "<tt>" + TO_STRING(this) + "</tt>";
- }
-
- /* ES#sec-string.prototype.fontcolor */
- fontcolor(color) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.fontcolor");
- return "<font color=\"" + HtmlEscape(color) + "\">" + TO_STRING(this) +
- "</font>";
- }
-
- /* ES#sec-string.prototype.fontsize */
- fontsize(size) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.fontsize");
- return "<font size=\"" + HtmlEscape(size) + "\">" + TO_STRING(this) +
- "</font>";
- }
-
- /* ES#sec-string.prototype.italics */
- italics() {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.italics");
- return "<i>" + TO_STRING(this) + "</i>";
- }
-
- /* ES#sec-string.prototype.link */
- link(s) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.link");
- return "<a href=\"" + HtmlEscape(s) + "\">" + TO_STRING(this) + "</a>";
- }
-
- /* ES#sec-string.prototype.small */
- small() {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.small");
- return "<small>" + TO_STRING(this) + "</small>";
- }
-
- /* ES#sec-string.prototype.strike */
- strike() {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.strike");
- return "<strike>" + TO_STRING(this) + "</strike>";
- }
-
- /* ES#sec-string.prototype.sub */
- sub() {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.sub");
- return "<sub>" + TO_STRING(this) + "</sub>";
- }
-
- /* ES#sec-string.prototype.sup */
- sup() {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.sup");
- return "<sup>" + TO_STRING(this) + "</sup>";
- }
-
- /* ES#sec-string.prototype.repeat */
- repeat(count) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.repeat");
-
- var s = TO_STRING(this);
- var n = TO_INTEGER(count);
-
- if (n < 0 || n === INFINITY) throw %make_range_error(kInvalidCountValue);
-
- // Early return to allow an arbitrarily-large repeat of the empty string.
- if (s.length === 0) return "";
-
- // The maximum string length is stored in a smi, so a longer repeat
- // must result in a range error.
- if (n > %_StringMaxLength()) %ThrowInvalidStringLength();
-
- var r = "";
- while (true) {
- if (n & 1) r += s;
- n >>= 1;
- if (n === 0) return r;
- s += s;
- }
- }
}
);
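
The Annex B HTML wrappers and String.prototype.repeat deleted above continue to exist as builtins (presumably reimplemented in C++/CSA elsewhere in this patch). The JS-observable contract any replacement must keep, as a sketch:

    '"'.anchor('a"b');  // '<a name="a&quot;b">"</a>' -- CreateHTML escapes double
                        // quotes in the attribute value only, nothing else
    "ab".repeat(3);     // "ababab"
    "".repeat(2 ** 40); // "" -- the empty receiver short-circuits the length check
    "x".repeat(-1);     // throws RangeError: Invalid count value

The deleted repeat() is also worth noting for its shape: it builds the result by binary decomposition of the count (square the string, append on set bits), so it performs O(log n) concatenations rather than n.
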
diff --git a/deps/v8/src/js/templates.js b/deps/v8/src/js/templates.js
deleted file mode 100644
index 7236d5c130..0000000000
--- a/deps/v8/src/js/templates.js
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Called from a desugaring in the parser.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalMap = global.Map;
-var InternalArray = utils.InternalArray;
-
-// -------------------------------------------------------------------
-
-var callSiteCache = new GlobalMap;
-var mapGetFn = GlobalMap.prototype.get;
-var mapSetFn = GlobalMap.prototype.set;
-
-
-function SameCallSiteElements(rawStrings, other) {
- var length = rawStrings.length;
- var other = other.raw;
-
- if (length !== other.length) return false;
-
- for (var i = 0; i < length; ++i) {
- if (rawStrings[i] !== other[i]) return false;
- }
-
- return true;
-}
-
-
-function GetCachedCallSite(siteObj, hash) {
- var obj = %_Call(mapGetFn, callSiteCache, hash);
-
- if (IS_UNDEFINED(obj)) return;
-
- var length = obj.length;
- for (var i = 0; i < length; ++i) {
- if (SameCallSiteElements(siteObj, obj[i])) return obj[i];
- }
-}
-
-
-function SetCachedCallSite(siteObj, hash) {
- var obj = %_Call(mapGetFn, callSiteCache, hash);
- var array;
-
- if (IS_UNDEFINED(obj)) {
- array = new InternalArray(1);
- array[0] = siteObj;
- %_Call(mapSetFn, callSiteCache, hash, array);
- } else {
- obj.push(siteObj);
- }
-
- return siteObj;
-}
-
-
-function GetTemplateCallSite(siteObj, rawStrings, hash) {
- var cached = GetCachedCallSite(rawStrings, hash);
-
- if (!IS_UNDEFINED(cached)) return cached;
-
- %AddNamedProperty(siteObj, "raw", %object_freeze(rawStrings),
- READ_ONLY | DONT_ENUM | DONT_DELETE);
-
- return SetCachedCallSite(%object_freeze(siteObj), hash);
-}
-
-// ----------------------------------------------------------------------------
-// Exports
-
-%InstallToContext(["get_template_call_site", GetTemplateCallSite]);
-
-})
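
The deleted cache interned one frozen call-site object per tagged-template site, keyed by a parser-supplied hash with SameCallSiteElements resolving collisions on the raw strings; a C++ replacement presumably takes over the `get_template_call_site` hook. The contract it preserves, from JS (illustrative):

    function tag(strings) { return strings; }
    const f = () => tag`a${0}b`;
    f() === f();              // true -- repeated evaluation of one call site
                              // yields the same interned object
    Object.isFrozen(f());     // true
    Object.isFrozen(f().raw); // true
    f().raw[0];               // "a" -- the raw strings the cache is keyed on

(Whether two distinct call sites with identical raw strings share an object depends on the spec revision in force; the content-keyed cache above said yes.)
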
diff --git a/deps/v8/src/js/typedarray.js b/deps/v8/src/js/typedarray.js
index b2ace5dd44..e361f2f58d 100644
--- a/deps/v8/src/js/typedarray.js
+++ b/deps/v8/src/js/typedarray.js
@@ -25,8 +25,8 @@ var InnerArrayJoin;
var InnerArraySort;
var InnerArrayToLocaleString;
var InternalArray = utils.InternalArray;
-var MaxSimple;
-var MinSimple;
+var MathMax = global.Math.max;
+var MathMin = global.Math.min;
var iteratorSymbol = utils.ImportNow("iterator_symbol");
var speciesSymbol = utils.ImportNow("species_symbol");
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
@@ -59,8 +59,6 @@ utils.Import(function(from) {
InnerArrayJoin = from.InnerArrayJoin;
InnerArraySort = from.InnerArraySort;
InnerArrayToLocaleString = from.InnerArrayToLocaleString;
- MaxSimple = from.MaxSimple;
- MinSimple = from.MinSimple;
});
// ES2015 7.3.20
@@ -208,15 +206,15 @@ function NAMESubArray(begin, end) {
}
if (beginInt < 0) {
- beginInt = MaxSimple(0, srcLength + beginInt);
+ beginInt = MathMax(0, srcLength + beginInt);
} else {
- beginInt = MinSimple(beginInt, srcLength);
+ beginInt = MathMin(beginInt, srcLength);
}
if (endInt < 0) {
- endInt = MaxSimple(0, srcLength + endInt);
+ endInt = MathMax(0, srcLength + endInt);
} else {
- endInt = MinSimple(endInt, srcLength);
+ endInt = MathMin(endInt, srcLength);
}
if (endInt < beginInt) {
@@ -247,68 +245,8 @@ TYPED_ARRAYS(TYPED_ARRAY_SUBARRAY_CASE)
"get %TypedArray%.prototype.subarray", this);
}
);
-%SetForceInlineFlag(GlobalTypedArray.prototype.subarray);
-DEFINE_METHOD_LEN(
- GlobalTypedArray.prototype,
- set(obj, offset) {
- var intOffset = IS_UNDEFINED(offset) ? 0 : TO_INTEGER(offset);
- if (intOffset < 0) throw %make_range_error(kTypedArraySetNegativeOffset);
-
- if (intOffset > %_MaxSmi()) {
- throw %make_range_error(kTypedArraySetSourceTooLarge);
- }
-
- switch (%TypedArraySetFastCases(this, obj, intOffset)) {
- // These numbers should be synchronized with runtime-typedarray.cc.
- case 0: // TYPED_ARRAY_SET_TYPED_ARRAY_SAME_TYPE
- return;
- case 1: // TYPED_ARRAY_SET_TYPED_ARRAY_OVERLAPPING
- %_TypedArraySetFromOverlapping(this, obj, intOffset);
- return;
- case 2: // TYPED_ARRAY_SET_TYPED_ARRAY_NONOVERLAPPING
- if (intOffset === 0) {
- %TypedArrayCopyElements(this, obj, %_TypedArrayGetLength(obj));
- } else {
- %_TypedArraySetFromArrayLike(
- this, obj, %_TypedArrayGetLength(obj), intOffset);
- }
- return;
- case 3: // TYPED_ARRAY_SET_NON_TYPED_ARRAY
- var l = obj.length;
- if (IS_UNDEFINED(l)) {
- if (IS_NUMBER(obj)) {
- // For number as a first argument, throw TypeError
- // instead of silently ignoring the call, so that
- // users know they did something wrong.
- // (Consistent with Firefox and Blink/WebKit)
- throw %make_type_error(kInvalidArgument);
- }
- return;
- }
- l = TO_LENGTH(l);
- if (intOffset + l > %_TypedArrayGetLength(this)) {
- throw %make_range_error(kTypedArraySetSourceTooLarge);
- }
- %_TypedArraySetFromArrayLike(this, obj, l, intOffset);
- return;
- }
- },
- 1 /* Set function length. */
-);
-
-
-DEFINE_METHOD(
- GlobalTypedArray.prototype,
- get [toStringTagSymbol]() {
- if (!IS_TYPEDARRAY(this)) return;
- var name = %_ClassOf(this);
- if (IS_UNDEFINED(name)) return;
- return name;
- }
-);
-
// The following functions cannot be made efficient on sparse arrays while
// preserving the semantics, since the calls to the receiver function can add
// or delete elements from the array.
@@ -382,6 +320,10 @@ DEFINE_METHOD(
sort(comparefn) {
ValidateTypedArray(this, "%TypedArray%.prototype.sort");
+ if (!IS_UNDEFINED(comparefn) && !IS_CALLABLE(comparefn)) {
+ throw %make_type_error(kBadSortComparisonFunction, comparefn);
+ }
+
var length = %_TypedArrayGetLength(this);
if (IS_UNDEFINED(comparefn)) {
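
Net effect on %TypedArray%.prototype at this revision: subarray clamps with the real Math.max/Math.min, set() moves out of JS entirely, and sort() now rejects non-callable comparators before looking at the elements. A sketch (illustrative, not from the patch):

    const ta = new Int8Array([10, 20, 30, 40]);
    ta.subarray(-2);    // Int8Array [30, 40]     -- begin = Math.max(0, 4 - 2) = 2
    ta.subarray(1, 99); // Int8Array [20, 30, 40] -- end   = Math.min(99, 4)    = 4
    ta.sort(42);        // throws TypeError: The comparison function must be
                        //   either a function or undefined
    ta.set([1], -1);    // still a RangeError; the message is renamed to
                        //   "offset is out of bounds" in messages.h below
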
diff --git a/deps/v8/src/js/v8natives.js b/deps/v8/src/js/v8natives.js
index 5de98a0a09..24eb6dc1bf 100644
--- a/deps/v8/src/js/v8natives.js
+++ b/deps/v8/src/js/v8natives.js
@@ -35,23 +35,6 @@ function GetMethod(obj, p) {
throw %make_type_error(kCalledNonCallable, typeof func);
}
-// ES6 19.1.1.1
-function ObjectConstructor(x) {
- if (GlobalObject != new.target && !IS_UNDEFINED(new.target)) {
- return this;
- }
- if (IS_NULL(x) || IS_UNDEFINED(x)) return {};
- return TO_OBJECT(x);
-}
-
-
-// ----------------------------------------------------------------------------
-// Object
-
-%SetNativeFlag(GlobalObject);
-%SetCode(GlobalObject, ObjectConstructor);
-
-
// ----------------------------------------------------------------------------
// Iterator related spec functions.
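
The self-hosted Object constructor goes away here (a native builtin presumably replaces the %SetCode hookup elsewhere in the patch); the semantics it implemented are standard ES:

    Object();          // {} -- undefined and null yield a fresh empty object
    Object(null);      // {}
    typeof Object(7);  // "object" -- a Number wrapper; Object(7).valueOf() === 7
    const o = {};
    Object(o) === o;   // true -- ToObject is the identity on objects
    class C extends Object {}
    new C() instanceof C;  // true -- when new.target isn't Object itself,
                           // the constructor just returns `this`
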
diff --git a/deps/v8/src/keys.cc b/deps/v8/src/keys.cc
index 534ea864b9..27dd7ff8ca 100644
--- a/deps/v8/src/keys.cc
+++ b/deps/v8/src/keys.cc
@@ -4,7 +4,7 @@
#include "src/keys.h"
-#include "src/api-arguments.h"
+#include "src/api-arguments-inl.h"
#include "src/elements.h"
#include "src/factory.h"
#include "src/identity-map.h"
@@ -259,9 +259,9 @@ void FastKeyAccumulator::Prepare() {
}
namespace {
-static Handle<FixedArray> ReduceFixedArrayTo(Isolate* isolate,
- Handle<FixedArray> array,
- int length) {
+
+Handle<FixedArray> ReduceFixedArrayTo(Isolate* isolate,
+ Handle<FixedArray> array, int length) {
DCHECK_LE(length, array->length());
if (array->length() == length) return array;
return isolate->factory()->CopyFixedArrayUpTo(array, length);
@@ -271,76 +271,77 @@ static Handle<FixedArray> ReduceFixedArrayTo(Isolate* isolate,
// have to make sure to never directly leak the enum cache.
Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
Handle<JSObject> object) {
- Handle<Map> map(object->map());
- bool cache_enum_length = map->OnlyHasSimpleProperties();
-
- Handle<DescriptorArray> descs =
- Handle<DescriptorArray>(map->instance_descriptors(), isolate);
- int own_property_count = map->EnumLength();
- // If the enum length of the given map is set to kInvalidEnumCache, this
- // means that the map itself has never used the present enum cache. The
- // first step to using the cache is to set the enum length of the map by
- // counting the number of own descriptors that are ENUMERABLE_STRINGS.
- if (own_property_count == kInvalidEnumCacheSentinel) {
- own_property_count = map->NumberOfEnumerableProperties();
- } else {
- DCHECK_EQ(own_property_count, map->NumberOfEnumerableProperties());
- }
-
- if (descs->HasEnumCache()) {
- Handle<FixedArray> keys(descs->GetEnumCache(), isolate);
- // In case the number of properties required in the enum are actually
- // present, we can reuse the enum cache. Otherwise, this means that the
- // enum cache was generated for a previous (smaller) version of the
- // Descriptor Array. In that case we regenerate the enum cache.
- if (own_property_count <= keys->length()) {
- isolate->counters()->enum_cache_hits()->Increment();
- if (cache_enum_length) map->SetEnumLength(own_property_count);
- return ReduceFixedArrayTo(isolate, keys, own_property_count);
- }
+ Handle<Map> map(object->map(), isolate);
+ Handle<FixedArray> keys(map->instance_descriptors()->GetEnumCache()->keys(),
+ isolate);
+
+ // Check if the {map} has a valid enum length, which implies that it
+ // must have a valid enum cache as well.
+ int enum_length = map->EnumLength();
+ if (enum_length != kInvalidEnumCacheSentinel) {
+ DCHECK(map->OnlyHasSimpleProperties());
+ DCHECK_LE(enum_length, keys->length());
+ DCHECK_EQ(enum_length, map->NumberOfEnumerableProperties());
+ isolate->counters()->enum_cache_hits()->Increment();
+ return ReduceFixedArrayTo(isolate, keys, enum_length);
}
- if (descs->IsEmpty()) {
+ // Determine the actual number of enumerable properties of the {map}.
+ enum_length = map->NumberOfEnumerableProperties();
+
+ // Check if there's already a shared enum cache on the {map}s
+ // DescriptorArray with sufficient number of entries.
+ if (enum_length <= keys->length()) {
+ if (map->OnlyHasSimpleProperties()) map->SetEnumLength(enum_length);
isolate->counters()->enum_cache_hits()->Increment();
- if (cache_enum_length) map->SetEnumLength(0);
- return isolate->factory()->empty_fixed_array();
+ return ReduceFixedArrayTo(isolate, keys, enum_length);
}
+ Handle<DescriptorArray> descriptors =
+ Handle<DescriptorArray>(map->instance_descriptors(), isolate);
isolate->counters()->enum_cache_misses()->Increment();
+ int nod = map->NumberOfOwnDescriptors();
- Handle<FixedArray> storage =
- isolate->factory()->NewFixedArray(own_property_count);
- Handle<FixedArray> indices =
- isolate->factory()->NewFixedArray(own_property_count);
-
- int size = map->NumberOfOwnDescriptors();
+ // Create the keys array.
int index = 0;
-
- for (int i = 0; i < size; i++) {
- PropertyDetails details = descs->GetDetails(i);
+ bool fields_only = true;
+ keys = isolate->factory()->NewFixedArray(enum_length);
+ for (int i = 0; i < nod; i++) {
+ DisallowHeapAllocation no_gc;
+ PropertyDetails details = descriptors->GetDetails(i);
if (details.IsDontEnum()) continue;
- Object* key = descs->GetKey(i);
+ Object* key = descriptors->GetKey(i);
if (key->IsSymbol()) continue;
- storage->set(index, key);
- if (!indices.is_null()) {
- if (details.location() == kField) {
- DCHECK_EQ(kData, details.kind());
- FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
- int load_by_field_index = field_index.GetLoadByFieldIndex();
- indices->set(index, Smi::FromInt(load_by_field_index));
- } else {
- indices = Handle<FixedArray>();
- }
- }
+ keys->set(index, key);
+ if (details.location() != kField) fields_only = false;
index++;
}
- DCHECK(index == storage->length());
+ DCHECK_EQ(index, keys->length());
- DescriptorArray::SetEnumCache(descs, isolate, storage, indices);
- if (cache_enum_length) {
- map->SetEnumLength(own_property_count);
+ // Optionally also create the indices array.
+ Handle<FixedArray> indices = isolate->factory()->empty_fixed_array();
+ if (fields_only) {
+ indices = isolate->factory()->NewFixedArray(enum_length);
+ index = 0;
+ for (int i = 0; i < nod; i++) {
+ DisallowHeapAllocation no_gc;
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.IsDontEnum()) continue;
+ Object* key = descriptors->GetKey(i);
+ if (key->IsSymbol()) continue;
+ DCHECK_EQ(kData, details.kind());
+ DCHECK_EQ(kField, details.location());
+ FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
+ indices->set(index, Smi::FromInt(field_index.GetLoadByFieldIndex()));
+ index++;
+ }
+ DCHECK_EQ(index, indices->length());
}
- return storage;
+
+ DescriptorArray::SetEnumCache(descriptors, isolate, keys, indices);
+ if (map->OnlyHasSimpleProperties()) map->SetEnumLength(enum_length);
+
+ return keys;
}
template <bool fast_properties>
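
The rewrite above restructures GetFastEnumPropertyKeys around a keys-first fast path: consult the shared enum cache on the map's DescriptorArray, and only on a miss rebuild the keys (plus, when every enumerable property is a field, the load-by-field indices) in a single pass. Conceptually, what the cache memoizes (a JS-level sketch, not from the patch):

    function keysInForInOrder(o) {
      const ks = []; for (const k in o) ks.push(k); return ks;
    }
    const a = { x: 1, y: 2 };
    const b = { x: 3, y: 4 };  // same hidden class (map) as `a`
    keysInForInOrder(a);       // enum_cache_miss: computes ["x", "y"] and stores
                               // it on the DescriptorArray both objects share
    keysInForInOrder(b);       // enum_cache_hit: reuses the cached keys and,
                               // since x and y are both fields, the indices too
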
@@ -392,7 +393,7 @@ MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysFast(
return MaybeHandle<FixedArray>();
}
- // From this point on we are certiain to only collect own keys.
+ // From this point on we are certain to only collect own keys.
DCHECK(receiver_->IsJSObject());
Handle<JSObject> object = Handle<JSObject>::cast(receiver_);
@@ -456,34 +457,95 @@ namespace {
enum IndexedOrNamed { kIndexed, kNamed };
+void FilterForEnumerableProperties(Handle<JSReceiver> receiver,
+ Handle<JSObject> object,
+ Handle<InterceptorInfo> interceptor,
+ KeyAccumulator* accumulator,
+ Handle<JSObject> result,
+ IndexedOrNamed type) {
+ DCHECK(result->IsJSArray() || result->HasSloppyArgumentsElements());
+ ElementsAccessor* accessor = result->GetElementsAccessor();
+
+ uint32_t length = accessor->GetCapacity(*result, result->elements());
+ for (uint32_t i = 0; i < length; i++) {
+ if (!accessor->HasEntry(*result, i)) continue;
+
+ // args are invalid after args.Call(), create a new one in every iteration.
+ PropertyCallbackArguments args(accumulator->isolate(), interceptor->data(),
+ *receiver, *object, Object::DONT_THROW);
+
+ Handle<Object> element = accessor->Get(result, i);
+ Handle<Object> attributes;
+ if (type == kIndexed) {
+ uint32_t number;
+ CHECK(element->ToUint32(&number));
+ attributes = args.Call(
+ v8::ToCData<v8::IndexedPropertyQueryCallback>(interceptor->query()),
+ number);
+ } else {
+ CHECK(element->IsName());
+ attributes = args.Call(v8::ToCData<v8::GenericNamedPropertyQueryCallback>(
+ interceptor->query()),
+ Handle<Name>::cast(element));
+ }
+
+ if (!attributes.is_null()) {
+ int32_t value;
+ CHECK(attributes->ToInt32(&value));
+ if ((value & DONT_ENUM) == 0) {
+ accumulator->AddKey(element, DO_NOT_CONVERT);
+ }
+ }
+ }
+}
+
// Returns |true| on success, |nothing| on exception.
-template <class Callback, IndexedOrNamed type>
Maybe<bool> CollectInterceptorKeysInternal(Handle<JSReceiver> receiver,
Handle<JSObject> object,
Handle<InterceptorInfo> interceptor,
- KeyAccumulator* accumulator) {
+ KeyAccumulator* accumulator,
+ IndexedOrNamed type) {
Isolate* isolate = accumulator->isolate();
- PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
- *object, Object::DONT_THROW);
+ PropertyCallbackArguments enum_args(isolate, interceptor->data(), *receiver,
+ *object, Object::DONT_THROW);
+
Handle<JSObject> result;
if (!interceptor->enumerator()->IsUndefined(isolate)) {
- Callback enum_fun = v8::ToCData<Callback>(interceptor->enumerator());
- const char* log_tag = type == kIndexed ? "interceptor-indexed-enum"
- : "interceptor-named-enum";
- LOG(isolate, ApiObjectAccess(log_tag, *object));
- result = args.Call(enum_fun);
+ if (type == kIndexed) {
+ v8::IndexedPropertyEnumeratorCallback enum_fun =
+ v8::ToCData<v8::IndexedPropertyEnumeratorCallback>(
+ interceptor->enumerator());
+ const char* log_tag = "interceptor-indexed-enum";
+ LOG(isolate, ApiObjectAccess(log_tag, *object));
+ result = enum_args.Call(enum_fun);
+ } else {
+ DCHECK(type == kNamed);
+ v8::GenericNamedPropertyEnumeratorCallback enum_fun =
+ v8::ToCData<v8::GenericNamedPropertyEnumeratorCallback>(
+ interceptor->enumerator());
+ const char* log_tag = "interceptor-named-enum";
+ LOG(isolate, ApiObjectAccess(log_tag, *object));
+ result = enum_args.Call(enum_fun);
+ }
}
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
if (result.is_null()) return Just(true);
- accumulator->AddKeys(
- result, type == kIndexed ? CONVERT_TO_ARRAY_INDEX : DO_NOT_CONVERT);
+
+ if ((accumulator->filter() & ONLY_ENUMERABLE) &&
+ !interceptor->query()->IsUndefined(isolate)) {
+ FilterForEnumerableProperties(receiver, object, interceptor, accumulator,
+ result, type);
+ } else {
+ accumulator->AddKeys(
+ result, type == kIndexed ? CONVERT_TO_ARRAY_INDEX : DO_NOT_CONVERT);
+ }
return Just(true);
}
-template <class Callback, IndexedOrNamed type>
Maybe<bool> CollectInterceptorKeys(Handle<JSReceiver> receiver,
Handle<JSObject> object,
- KeyAccumulator* accumulator) {
+ KeyAccumulator* accumulator,
+ IndexedOrNamed type) {
Isolate* isolate = accumulator->isolate();
if (type == kIndexed) {
if (!object->HasIndexedInterceptor()) return Just(true);
@@ -498,8 +560,8 @@ Maybe<bool> CollectInterceptorKeys(Handle<JSReceiver> receiver,
!interceptor->all_can_read()) {
return Just(true);
}
- return CollectInterceptorKeysInternal<Callback, type>(
- receiver, object, interceptor, accumulator);
+ return CollectInterceptorKeysInternal(receiver, object, interceptor,
+ accumulator, type);
}
} // namespace
@@ -511,8 +573,7 @@ Maybe<bool> KeyAccumulator::CollectOwnElementIndices(
ElementsAccessor* accessor = object->GetElementsAccessor();
accessor->CollectElementIndices(object, this);
- return CollectInterceptorKeys<v8::IndexedPropertyEnumeratorCallback,
- kIndexed>(receiver, object, this);
+ return CollectInterceptorKeys(receiver, object, this, kIndexed);
}
namespace {
@@ -629,29 +690,25 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
}
}
// Add the property keys from the interceptor.
- return CollectInterceptorKeys<v8::GenericNamedPropertyEnumeratorCallback,
- kNamed>(receiver, object, this);
+ return CollectInterceptorKeys(receiver, object, this, kNamed);
}
Maybe<bool> KeyAccumulator::CollectAccessCheckInterceptorKeys(
Handle<AccessCheckInfo> access_check_info, Handle<JSReceiver> receiver,
Handle<JSObject> object) {
+ MAYBE_RETURN((CollectInterceptorKeysInternal(
+ receiver, object,
+ handle(InterceptorInfo::cast(
+ access_check_info->indexed_interceptor()),
+ isolate_),
+ this, kIndexed)),
+ Nothing<bool>());
MAYBE_RETURN(
- (CollectInterceptorKeysInternal<v8::IndexedPropertyEnumeratorCallback,
- kIndexed>(
- receiver, object,
- handle(
- InterceptorInfo::cast(access_check_info->indexed_interceptor()),
- isolate_),
- this)),
- Nothing<bool>());
- MAYBE_RETURN(
- (CollectInterceptorKeysInternal<
- v8::GenericNamedPropertyEnumeratorCallback, kNamed>(
+ (CollectInterceptorKeysInternal(
receiver, object,
handle(InterceptorInfo::cast(access_check_info->named_interceptor()),
isolate_),
- this)),
+ this, kNamed)),
Nothing<bool>());
return Just(true);
}
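
The new FilterForEnumerableProperties makes interceptor results honor the ONLY_ENUMERABLE filter by re-querying each returned key's attributes instead of trusting the enumerator callback wholesale. The closest pure-JS analogue (illustrative only — interceptors are an embedder API) is how Object.keys, unlike Object.getOwnPropertyNames, filters a Proxy's ownKeys through per-key descriptor queries:

    const p = new Proxy({}, {
      ownKeys() { return ["a", "b"]; },    // ~ the enumerator callback
      getOwnPropertyDescriptor(t, k) {     // ~ the query callback
        return { value: 1, enumerable: k !== "b", configurable: true };
      },
    });
    Object.getOwnPropertyNames(p);  // ["a", "b"] -- no enumerability filtering
    Object.keys(p);                 // ["a"]      -- DONT_ENUM-style keys dropped
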
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
index 6245814c74..fec3ab2365 100644
--- a/deps/v8/src/libplatform/default-platform.cc
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -278,6 +278,10 @@ double DefaultPlatform::MonotonicallyIncreasingTime() {
static_cast<double>(base::Time::kMicrosecondsPerSecond);
}
+double DefaultPlatform::CurrentClockTimeMillis() {
+ return base::OS::TimeCurrentMillis();
+}
+
TracingController* DefaultPlatform::GetTracingController() {
return tracing_controller_.get();
}
diff --git a/deps/v8/src/libplatform/default-platform.h b/deps/v8/src/libplatform/default-platform.h
index a5fa734218..281ca27e89 100644
--- a/deps/v8/src/libplatform/default-platform.h
+++ b/deps/v8/src/libplatform/default-platform.h
@@ -57,6 +57,7 @@ class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
void CallIdleOnForegroundThread(Isolate* isolate, IdleTask* task) override;
bool IdleTasksEnabled(Isolate* isolate) override;
double MonotonicallyIncreasingTime() override;
+ double CurrentClockTimeMillis() override;
v8::TracingController* GetTracingController() override;
StackTracePrinter GetStackTracePrinter() override;
diff --git a/deps/v8/src/list-inl.h b/deps/v8/src/list-inl.h
deleted file mode 100644
index c29a420169..0000000000
--- a/deps/v8/src/list-inl.h
+++ /dev/null
@@ -1,251 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_LIST_INL_H_
-#define V8_LIST_INL_H_
-
-#include "src/list.h"
-
-#include "src/base/macros.h"
-#include "src/base/platform/platform.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-
-template<typename T, class P>
-void List<T, P>::Add(const T& element, P alloc) {
- if (length_ < capacity_) {
- data_[length_++] = element;
- } else {
- List<T, P>::ResizeAdd(element, alloc);
- }
-}
-
-
-template<typename T, class P>
-void List<T, P>::AddAll(const List<T, P>& other, P alloc) {
- AddAll(other.ToVector(), alloc);
-}
-
-
-template<typename T, class P>
-void List<T, P>::AddAll(const Vector<T>& other, P alloc) {
- int result_length = length_ + other.length();
- if (capacity_ < result_length) Resize(result_length, alloc);
- if (std::is_fundamental<T>()) {
- memcpy(data_ + length_, other.start(), sizeof(*data_) * other.length());
- } else {
- for (int i = 0; i < other.length(); i++) data_[length_ + i] = other.at(i);
- }
- length_ = result_length;
-}
-
-
-// Use two layers of inlining so that the non-inlined function can
-// use the same implementation as the inlined version.
-template<typename T, class P>
-void List<T, P>::ResizeAdd(const T& element, P alloc) {
- ResizeAddInternal(element, alloc);
-}
-
-
-template<typename T, class P>
-void List<T, P>::ResizeAddInternal(const T& element, P alloc) {
- DCHECK(length_ >= capacity_);
- // Grow the list capacity by 100%, but make sure to let it grow
- // even when the capacity is zero (possible initial case).
- int new_capacity = 1 + 2 * capacity_;
- // Since the element reference could be an element of the list, copy
- // it out of the old backing storage before resizing.
- T temp = element;
- Resize(new_capacity, alloc);
- data_[length_++] = temp;
-}
-
-
-template<typename T, class P>
-void List<T, P>::Resize(int new_capacity, P alloc) {
- DCHECK_LE(length_, new_capacity);
- T* new_data = NewData(new_capacity, alloc);
- MemCopy(new_data, data_, length_ * sizeof(T));
- List<T, P>::DeleteData(data_);
- data_ = new_data;
- capacity_ = new_capacity;
-}
-
-
-template<typename T, class P>
-Vector<T> List<T, P>::AddBlock(T value, int count, P alloc) {
- int start = length_;
- for (int i = 0; i < count; i++) Add(value, alloc);
- return Vector<T>(&data_[start], count);
-}
-
-
-template<typename T, class P>
-void List<T, P>::Set(int index, const T& elm) {
- DCHECK(index >= 0 && index <= length_);
- data_[index] = elm;
-}
-
-
-template<typename T, class P>
-void List<T, P>::InsertAt(int index, const T& elm, P alloc) {
- DCHECK(index >= 0 && index <= length_);
- Add(elm, alloc);
- for (int i = length_ - 1; i > index; --i) {
- data_[i] = data_[i - 1];
- }
- data_[index] = elm;
-}
-
-
-template<typename T, class P>
-T List<T, P>::Remove(int i) {
- T element = at(i);
- length_--;
- while (i < length_) {
- data_[i] = data_[i + 1];
- i++;
- }
- return element;
-}
-
-
-template<typename T, class P>
-bool List<T, P>::RemoveElement(const T& elm) {
- for (int i = 0; i < length_; i++) {
- if (data_[i] == elm) {
- Remove(i);
- return true;
- }
- }
- return false;
-}
-
-template <typename T, class P>
-void List<T, P>::Swap(List<T, P>* list) {
- std::swap(data_, list->data_);
- std::swap(length_, list->length_);
- std::swap(capacity_, list->capacity_);
-}
-
-template<typename T, class P>
-void List<T, P>::Allocate(int length, P allocator) {
- DeleteData(data_);
- Initialize(length, allocator);
- length_ = length;
-}
-
-
-template<typename T, class P>
-void List<T, P>::Clear() {
- DeleteData(data_);
- // We don't call Initialize(0) since that requires passing a Zone,
- // which we don't really need.
- data_ = NULL;
- capacity_ = 0;
- length_ = 0;
-}
-
-
-template<typename T, class P>
-void List<T, P>::Rewind(int pos) {
- DCHECK(0 <= pos && pos <= length_);
- length_ = pos;
-}
-
-
-template<typename T, class P>
-void List<T, P>::Trim(P alloc) {
- if (length_ < capacity_ / 4) {
- Resize(capacity_ / 2, alloc);
- }
-}
-
-
-template<typename T, class P>
-void List<T, P>::Iterate(void (*callback)(T* x)) {
- for (int i = 0; i < length_; i++) callback(&data_[i]);
-}
-
-
-template<typename T, class P>
-template<class Visitor>
-void List<T, P>::Iterate(Visitor* visitor) {
- for (int i = 0; i < length_; i++) visitor->Apply(&data_[i]);
-}
-
-
-template<typename T, class P>
-bool List<T, P>::Contains(const T& elm) const {
- for (int i = 0; i < length_; i++) {
- if (data_[i] == elm)
- return true;
- }
- return false;
-}
-
-
-template<typename T, class P>
-int List<T, P>::CountOccurrences(const T& elm, int start, int end) const {
- int result = 0;
- for (int i = start; i <= end; i++) {
- if (data_[i] == elm) ++result;
- }
- return result;
-}
-
-
-template <typename T, class P>
-template <typename CompareFunction>
-void List<T, P>::Sort(CompareFunction cmp) {
- Sort(cmp, 0, length_);
-}
-
-
-template <typename T, class P>
-template <typename CompareFunction>
-void List<T, P>::Sort(CompareFunction cmp, size_t s, size_t l) {
- ToVector().Sort(cmp, s, l);
-#ifdef DEBUG
- for (size_t i = s + 1; i < l; i++) DCHECK(cmp(&data_[i - 1], &data_[i]) <= 0);
-#endif
-}
-
-
-template<typename T, class P>
-void List<T, P>::Sort() {
- ToVector().Sort();
-}
-
-
-template <typename T, class P>
-template <typename CompareFunction>
-void List<T, P>::StableSort(CompareFunction cmp) {
- StableSort(cmp, 0, length_);
-}
-
-
-template <typename T, class P>
-template <typename CompareFunction>
-void List<T, P>::StableSort(CompareFunction cmp, size_t s, size_t l) {
- ToVector().StableSort(cmp, s, l);
-#ifdef DEBUG
- for (size_t i = s + 1; i < l; i++) DCHECK(cmp(&data_[i - 1], &data_[i]) <= 0);
-#endif
-}
-
-
-template <typename T, class P>
-void List<T, P>::StableSort() {
- ToVector().StableSort();
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_LIST_INL_H_
diff --git a/deps/v8/src/list.h b/deps/v8/src/list.h
deleted file mode 100644
index 24784cf002..0000000000
--- a/deps/v8/src/list.h
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_LIST_H_
-#define V8_LIST_H_
-
-#include <algorithm>
-
-#include "src/checks.h"
-#include "src/vector.h"
-
-namespace v8 {
-namespace internal {
-
-template<typename T> class Vector;
-
-// ----------------------------------------------------------------------------
-// The list is a template for very light-weight lists. We are not
-// using the STL because we want full control over space and speed of
-// the code. This implementation is based on code by Robert Griesemer
-// and Rob Pike.
-//
-// The list is parameterized by the type of its elements (T) and by an
-// allocation policy (P). The policy is used for allocating lists in
-// the C free store or the zone; see zone.h.
-
-// Forward defined as
-// template <typename T,
-// class AllocationPolicy = FreeStoreAllocationPolicy> class List;
-template <typename T, class AllocationPolicy>
-class List {
- public:
- explicit List(AllocationPolicy allocator = AllocationPolicy()) {
- Initialize(0, allocator);
- }
- INLINE(explicit List(int capacity,
- AllocationPolicy allocator = AllocationPolicy())) {
- Initialize(capacity, allocator);
- }
- INLINE(~List()) { DeleteData(data_); }
-
- // Deallocates memory used by the list and leaves the list in a consistent
- // empty state.
- void Free() {
- DeleteData(data_);
- Initialize(0);
- }
-
- INLINE(void* operator new(size_t size,
- AllocationPolicy allocator = AllocationPolicy())) {
- return allocator.New(static_cast<int>(size));
- }
- INLINE(void operator delete(void* p)) {
- AllocationPolicy::Delete(p);
- }
-
- // Please the MSVC compiler. We should never have to execute this.
- INLINE(void operator delete(void* p, AllocationPolicy allocator)) {
- UNREACHABLE();
- }
-
- // Returns a reference to the element at index i. This reference is
- // not safe to use after operations that can change the list's
- // backing store (e.g. Add).
- inline T& operator[](int i) const {
- DCHECK_LE(0, i);
- DCHECK_GT(static_cast<unsigned>(length_), static_cast<unsigned>(i));
- return data_[i];
- }
- inline T& at(int i) const { return operator[](i); }
- inline T& last() const { return at(length_ - 1); }
- inline T& first() const { return at(0); }
-
- typedef T* iterator;
- inline iterator begin() const { return &data_[0]; }
- inline iterator end() const { return &data_[length_]; }
-
- INLINE(bool is_empty() const) { return length_ == 0; }
- INLINE(int length() const) { return length_; }
- INLINE(int capacity() const) { return capacity_; }
-
- Vector<T> ToVector() const { return Vector<T>(data_, length_); }
-
- Vector<const T> ToConstVector() const {
- return Vector<const T>(data_, length_);
- }
-
- // Adds a copy of the given 'element' to the end of the list,
- // expanding the list if necessary.
- void Add(const T& element, AllocationPolicy allocator = AllocationPolicy());
-
- // Add all the elements from the argument list to this list.
- void AddAll(const List<T, AllocationPolicy>& other,
- AllocationPolicy allocator = AllocationPolicy());
-
- // Add all the elements from the vector to this list.
- void AddAll(const Vector<T>& other,
- AllocationPolicy allocator = AllocationPolicy());
-
- // Inserts the element at the specific index.
- void InsertAt(int index, const T& element,
- AllocationPolicy allocator = AllocationPolicy());
-
- // Overwrites the element at the specific index.
- void Set(int index, const T& element);
-
- // Added 'count' elements with the value 'value' and returns a
- // vector that allows access to the elements. The vector is valid
- // until the next change is made to this list.
- Vector<T> AddBlock(T value, int count,
- AllocationPolicy allocator = AllocationPolicy());
-
- // Removes the i'th element without deleting it even if T is a
- // pointer type; moves all elements above i "down". Returns the
- // removed element. This function's complexity is linear in the
- // size of the list.
- T Remove(int i);
-
- // Remove the given element from the list. Returns whether or not
- // the input is included in the list in the first place.
- bool RemoveElement(const T& elm);
-
- // Removes the last element without deleting it even if T is a
- // pointer type. Returns the removed element.
- INLINE(T RemoveLast()) { return Remove(length_ - 1); }
-
- // Deletes current list contents and allocates space for 'length' elements.
- INLINE(void Allocate(int length,
- AllocationPolicy allocator = AllocationPolicy()));
-
- // Clears the list by freeing the storage memory. If you want to keep the
- // memory, use Rewind(0) instead. Be aware, that even if T is a
- // pointer type, clearing the list doesn't delete the entries.
- INLINE(void Clear());
-
- // Drops all but the first 'pos' elements from the list.
- INLINE(void Rewind(int pos));
-
- // Drop the last 'count' elements from the list.
- INLINE(void RewindBy(int count)) { Rewind(length_ - count); }
-
- // Swaps the contents of the two lists.
- INLINE(void Swap(List<T, AllocationPolicy>* list));
-
- // Halve the capacity if fill level is less than a quarter.
- INLINE(void Trim(AllocationPolicy allocator = AllocationPolicy()));
-
- bool Contains(const T& elm) const;
- int CountOccurrences(const T& elm, int start, int end) const;
-
- // Iterate through all list entries, starting at index 0.
- void Iterate(void (*callback)(T* x));
- template<class Visitor>
- void Iterate(Visitor* visitor);
-
- // Sort all list entries (using QuickSort)
- template <typename CompareFunction>
- void Sort(CompareFunction cmp, size_t start, size_t length);
- template <typename CompareFunction>
- void Sort(CompareFunction cmp);
- void Sort();
- template <typename CompareFunction>
- void StableSort(CompareFunction cmp, size_t start, size_t length);
- template <typename CompareFunction>
- void StableSort(CompareFunction cmp);
- void StableSort();
-
- INLINE(void Initialize(int capacity,
- AllocationPolicy allocator = AllocationPolicy())) {
- DCHECK(capacity >= 0);
- data_ = (capacity > 0) ? NewData(capacity, allocator) : NULL;
- capacity_ = capacity;
- length_ = 0;
- }
-
- private:
- T* data_;
- int capacity_;
- int length_;
-
- INLINE(T* NewData(int n, AllocationPolicy allocator)) {
- return static_cast<T*>(allocator.New(n * sizeof(T)));
- }
- INLINE(void DeleteData(T* data)) {
- AllocationPolicy::Delete(data);
- }
-
- // Increase the capacity of a full list, and add an element.
- // List must be full already.
- void ResizeAdd(const T& element, AllocationPolicy allocator);
-
- // Inlined implementation of ResizeAdd, shared by inlined and
- // non-inlined versions of ResizeAdd.
- void ResizeAddInternal(const T& element, AllocationPolicy allocator);
-
- // Resize the list.
- void Resize(int new_capacity, AllocationPolicy allocator);
-
- DISALLOW_COPY_AND_ASSIGN(List);
-};
-
-class Map;
-class FieldType;
-class Code;
-template<typename T> class Handle;
-typedef List<Map*> MapList;
-typedef List<Code*> CodeList;
-typedef List<Handle<Map> > MapHandleList;
-typedef List<Handle<FieldType> > TypeHandleList;
-typedef List<Handle<Code> > CodeHandleList;
-
-} // namespace internal
-} // namespace v8
-
-
-#endif // V8_LIST_H_
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 0ba024b987..006acf71b9 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -8,6 +8,7 @@
#include <memory>
#include <sstream>
+#include "src/api.h"
#include "src/bailout-reason.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
@@ -28,6 +29,7 @@
#include "src/source-position-table.h"
#include "src/string-stream.h"
#include "src/tracing/tracing-category-observer.h"
+#include "src/unicode-inl.h"
#include "src/vm-state-inl.h"
namespace v8 {
@@ -41,7 +43,6 @@ static const char* kLogEventsNames[CodeEventListener::NUMBER_OF_LOG_EVENTS] = {
static const char* ComputeMarker(SharedFunctionInfo* shared,
AbstractCode* code) {
switch (code->kind()) {
- case AbstractCode::FUNCTION:
case AbstractCode::INTERPRETED_FUNCTION:
return shared->optimization_disabled() ? "" : "~";
case AbstractCode::OPTIMIZED_FUNCTION:
@@ -262,8 +263,7 @@ PerfBasicLogger::~PerfBasicLogger() {
void PerfBasicLogger::LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo*,
const char* name, int length) {
if (FLAG_perf_basic_prof_only_functions &&
- (code->kind() != AbstractCode::FUNCTION &&
- code->kind() != AbstractCode::INTERPRETED_FUNCTION &&
+ (code->kind() != AbstractCode::INTERPRETED_FUNCTION &&
code->kind() != AbstractCode::OPTIMIZED_FUNCTION)) {
return;
}
@@ -739,7 +739,6 @@ Logger::Logger(Isolate* isolate)
perf_jit_logger_(NULL),
ll_logger_(NULL),
jit_logger_(NULL),
- listeners_(5),
is_initialized_(false) {}
Logger::~Logger() {
@@ -1283,7 +1282,7 @@ void Logger::CodeDisableOptEvent(AbstractCode* code,
void Logger::CodeMovingGCEvent() {
if (!is_logging_code_events()) return;
if (!log_->IsEnabled() || !FLAG_ll_prof) return;
- base::OS::SignalCodeMovingGC();
+ base::OS::SignalCodeMovingGC(GetRandomMmapAddr());
}
void Logger::RegExpCodeCreateEvent(AbstractCode* code, String* source) {
@@ -1358,7 +1357,7 @@ void Logger::ResourceEvent(const char* name, const char* tag) {
if (base::OS::GetUserTime(&sec, &usec) != -1) {
msg.Append("%d,%d,", sec, usec);
}
- msg.Append("%.0f", base::OS::TimeCurrentMillis());
+ msg.Append("%.0f", V8::GetCurrentPlatform()->CurrentClockTimeMillis());
msg.WriteToLogFile();
}
@@ -1389,7 +1388,7 @@ void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
// Using non-relative system time in order to be able to synchronize with
// external memory profiling events (e.g. DOM memory size).
msg.Append("heap-sample-begin,\"%s\",\"%s\",%.0f", space, kind,
- base::OS::TimeCurrentMillis());
+ V8::GetCurrentPlatform()->CurrentClockTimeMillis());
msg.WriteToLogFile();
}
@@ -1509,32 +1508,6 @@ static void AddFunctionAndCode(SharedFunctionInfo* sfi,
}
}
-class EnumerateOptimizedFunctionsVisitor: public OptimizedFunctionVisitor {
- public:
- EnumerateOptimizedFunctionsVisitor(Handle<SharedFunctionInfo>* sfis,
- Handle<AbstractCode>* code_objects,
- int* count)
- : sfis_(sfis), code_objects_(code_objects), count_(count) {}
-
- virtual void VisitFunction(JSFunction* function) {
- SharedFunctionInfo* sfi = SharedFunctionInfo::cast(function->shared());
- Object* maybe_script = sfi->script();
- if (maybe_script->IsScript()
- && !Script::cast(maybe_script)->HasValidSource()) return;
-
- DCHECK(function->abstract_code()->kind() ==
- AbstractCode::OPTIMIZED_FUNCTION);
- AddFunctionAndCode(sfi, function->abstract_code(), sfis_, code_objects_,
- *count_);
- *count_ = *count_ + 1;
- }
-
- private:
- Handle<SharedFunctionInfo>* sfis_;
- Handle<AbstractCode>* code_objects_;
- int* count_;
-};
-
static int EnumerateCompiledFunctions(Heap* heap,
Handle<SharedFunctionInfo>* sfis,
Handle<AbstractCode>* code_objects) {
@@ -1545,33 +1518,45 @@ static int EnumerateCompiledFunctions(Heap* heap,
// Iterate the heap to find shared function info objects and record
// the unoptimized code for them.
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- if (!obj->IsSharedFunctionInfo()) continue;
- SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
- if (sfi->is_compiled()
- && (!sfi->script()->IsScript()
- || Script::cast(sfi->script())->HasValidSource())) {
- // In some cases, an SFI might have (and have executing!) both bytecode
- // and baseline code, so check for both and add them both if needed.
- if (sfi->HasBytecodeArray()) {
- AddFunctionAndCode(sfi, AbstractCode::cast(sfi->bytecode_array()), sfis,
- code_objects, compiled_funcs_count);
- ++compiled_funcs_count;
- }
+ if (obj->IsSharedFunctionInfo()) {
+ SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
+ if (sfi->is_compiled() &&
+ (!sfi->script()->IsScript() ||
+ Script::cast(sfi->script())->HasValidSource())) {
+ // In some cases, an SFI might have (and have executing!) both bytecode
+ // and baseline code, so check for both and add them both if needed.
+ if (sfi->HasBytecodeArray()) {
+ AddFunctionAndCode(sfi, AbstractCode::cast(sfi->bytecode_array()),
+ sfis, code_objects, compiled_funcs_count);
+ ++compiled_funcs_count;
+ }
- if (!sfi->IsInterpreted()) {
- AddFunctionAndCode(sfi, AbstractCode::cast(sfi->code()), sfis,
+ if (!sfi->IsInterpreted()) {
+ AddFunctionAndCode(sfi, AbstractCode::cast(sfi->code()), sfis,
+ code_objects, compiled_funcs_count);
+ ++compiled_funcs_count;
+ }
+ }
+ } else if (obj->IsJSFunction()) {
+ // Given that we no longer iterate over all optimized JSFunctions, we need
+ // to take care of this here.
+ JSFunction* function = JSFunction::cast(obj);
+ SharedFunctionInfo* sfi = SharedFunctionInfo::cast(function->shared());
+ Object* maybe_script = sfi->script();
+ if (maybe_script->IsScript() &&
+ !Script::cast(maybe_script)->HasValidSource()) {
+ continue;
+ }
+ // TODO(jarin) This leaves out deoptimized code that might still be on the
+ // stack. Also note that we will not log optimized code objects that are
+ // only on a type feedback vector. We should make this more precise.
+ if (function->IsOptimized()) {
+ AddFunctionAndCode(sfi, AbstractCode::cast(function->code()), sfis,
code_objects, compiled_funcs_count);
++compiled_funcs_count;
}
}
}
-
- // Iterate all optimized functions in all contexts.
- EnumerateOptimizedFunctionsVisitor visitor(sfis,
- code_objects,
- &compiled_funcs_count);
- Deoptimizer::VisitAllOptimizedFunctions(heap->isolate(), &visitor);
-
return compiled_funcs_count;
}
@@ -1581,7 +1566,6 @@ void Logger::LogCodeObject(Object* object) {
CodeEventListener::LogEventsAndTags tag = CodeEventListener::STUB_TAG;
const char* description = "Unknown code from the snapshot";
switch (code_object->kind()) {
- case AbstractCode::FUNCTION:
case AbstractCode::INTERPRETED_FUNCTION:
case AbstractCode::OPTIMIZED_FUNCTION:
return; // We log this later using LogCompiledFunctions.
@@ -1603,34 +1587,6 @@ void Logger::LogCodeObject(Object* object) {
isolate_->builtins()->name(code_object->GetCode()->builtin_index());
tag = CodeEventListener::BUILTIN_TAG;
break;
- case AbstractCode::HANDLER:
- description = "An IC handler from the snapshot";
- tag = CodeEventListener::HANDLER_TAG;
- break;
- case AbstractCode::KEYED_LOAD_IC:
- description = "A keyed load IC from the snapshot";
- tag = CodeEventListener::KEYED_LOAD_IC_TAG;
- break;
- case AbstractCode::LOAD_IC:
- description = "A load IC from the snapshot";
- tag = CodeEventListener::LOAD_IC_TAG;
- break;
- case AbstractCode::LOAD_GLOBAL_IC:
- description = "A load global IC from the snapshot";
- tag = Logger::LOAD_GLOBAL_IC_TAG;
- break;
- case AbstractCode::STORE_IC:
- description = "A store IC from the snapshot";
- tag = CodeEventListener::STORE_IC_TAG;
- break;
- case AbstractCode::STORE_GLOBAL_IC:
- description = "A store global IC from the snapshot";
- tag = CodeEventListener::STORE_GLOBAL_IC_TAG;
- break;
- case AbstractCode::KEYED_STORE_IC:
- description = "A keyed store IC from the snapshot";
- tag = CodeEventListener::KEYED_STORE_IC_TAG;
- break;
case AbstractCode::WASM_FUNCTION:
description = "A Wasm function";
tag = CodeEventListener::STUB_TAG;
@@ -1818,7 +1774,8 @@ static void PrepareLogFileName(std::ostream& os, // NOLINT
break;
case 't':
// %t expands to the current time in milliseconds.
- os << static_cast<int64_t>(base::OS::TimeCurrentMillis());
+ os << static_cast<int64_t>(
+ V8::GetCurrentPlatform()->CurrentClockTimeMillis());
break;
case '%':
// %% expands (contracts really) to %.
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 3e4d385527..91672875ef 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -317,7 +317,6 @@ class Logger : public CodeEventListener {
LowLevelLogger* ll_logger_;
JitLogger* jit_logger_;
std::unique_ptr<ProfilerListener> profiler_listener_;
- List<CodeEventListener*> listeners_;
std::set<int> logged_source_code_;
uint32_t next_source_info_id_ = 0;
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index 08efd6ddb1..91d87ebbff 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -48,6 +48,7 @@ LookupIterator LookupIterator::PropertyOrElement(Isolate* isolate,
Handle<Object> key,
bool* success,
Configuration configuration) {
+ // TODO(mslekova): come up with a better way to avoid duplication
uint32_t index = 0;
if (key->ToArrayIndex(&index)) {
*success = true;
@@ -93,21 +94,9 @@ LookupIterator LookupIterator::ForTransitionHandler(
has_property);
if (!transition_map->is_dictionary_map()) {
- PropertyConstness new_constness = kConst;
- if (FLAG_track_constant_fields) {
- if (it.constness() == kConst) {
- DCHECK_EQ(kData, it.property_details_.kind());
- // Check that current value matches new value otherwise we should make
- // the property mutable.
- if (!it.IsConstFieldValueEqualTo(*value)) new_constness = kMutable;
- }
- } else {
- new_constness = kMutable;
- }
-
int descriptor_number = transition_map->LastAdded();
Handle<Map> new_map = Map::PrepareForDataProperty(
- transition_map, descriptor_number, new_constness, value);
+ transition_map, descriptor_number, kConst, value);
// Reload information; this is no-op if nothing changed.
it.property_details_ =
new_map->instance_descriptors()->GetDetails(descriptor_number);
diff --git a/deps/v8/src/lookup.h b/deps/v8/src/lookup.h
index 9ea2d77cf6..25c5a6cc3b 100644
--- a/deps/v8/src/lookup.h
+++ b/deps/v8/src/lookup.h
@@ -22,7 +22,7 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
kInterceptor = 1 << 0,
kPrototypeChain = 1 << 1,
- // Convenience combinations of bits.
+ // Convenience combinations of bits.
OWN_SKIP_INTERCEPTOR = 0,
OWN = kInterceptor,
PROTOTYPE_CHAIN_SKIP_INTERCEPTOR = kPrototypeChain,
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h
index 8184f95d31..fcdddbb1fa 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/macro-assembler.h
@@ -5,7 +5,8 @@
#ifndef V8_MACRO_ASSEMBLER_H_
#define V8_MACRO_ASSEMBLER_H_
-#include "src/assembler-inl.h"
+#include "src/assembler.h"
+#include "src/frames.h"
// Helper types to make boolean flag easier to read at call-site.
enum InvokeFlag {
@@ -196,7 +197,7 @@ class ParameterCount BASE_EMBEDDED {
explicit ParameterCount(Register reg) : reg_(reg), immediate_(0) {}
explicit ParameterCount(int imm) : reg_(no_reg), immediate_(imm) {}
- bool is_reg() const { return !reg_.is(no_reg); }
+ bool is_reg() const { return reg_.is_valid(); }
bool is_immediate() const { return !is_reg(); }
Register reg() const {
@@ -215,28 +216,6 @@ class ParameterCount BASE_EMBEDDED {
DISALLOW_IMPLICIT_CONSTRUCTORS(ParameterCount);
};
-
-class AllocationUtils {
- public:
- static ExternalReference GetAllocationTopReference(
- Isolate* isolate, AllocationFlags flags) {
- if ((flags & PRETENURE) != 0) {
- return ExternalReference::old_space_allocation_top_address(isolate);
- }
- return ExternalReference::new_space_allocation_top_address(isolate);
- }
-
-
- static ExternalReference GetAllocationLimitReference(
- Isolate* isolate, AllocationFlags flags) {
- if ((flags & PRETENURE) != 0) {
- return ExternalReference::old_space_allocation_limit_address(isolate);
- }
- return ExternalReference::new_space_allocation_limit_address(isolate);
- }
-};
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/managed.h b/deps/v8/src/managed.h
index ebbfe33c16..63fefdd480 100644
--- a/deps/v8/src/managed.h
+++ b/deps/v8/src/managed.h
@@ -42,7 +42,16 @@ class Managed : public Foreign {
return reinterpret_cast<Managed<CppType>*>(obj);
}
- static Handle<Managed<CppType>> New(Isolate* isolate, CppType* ptr) {
+ // Allocate a new CppType and wrap it in a Managed.
+ template <typename... Args>
+ static Handle<Managed<CppType>> Allocate(Isolate* isolate, Args&&... args) {
+ CppType* ptr = new CppType(std::forward<Args>(args)...);
+ return From(isolate, ptr);
+ }
+
+ // Create a Managed from an existing CppType*. Takes ownership of the passed
+ // object.
+ static Handle<Managed<CppType>> From(Isolate* isolate, CppType* ptr) {
FinalizerWithHandle* finalizer =
new FinalizerWithHandle(ptr, &NativeDelete);
isolate->RegisterForReleaseAtTeardown(finalizer);
diff --git a/deps/v8/src/map-updater.cc b/deps/v8/src/map-updater.cc
index 05ef5fd3e2..3a9a9caf14 100644
--- a/deps/v8/src/map-updater.cc
+++ b/deps/v8/src/map-updater.cc
@@ -123,8 +123,9 @@ Handle<Map> MapUpdater::ReconfigureToDataField(int descriptor,
new_field_type_ = field_type;
}
- GeneralizeIfTransitionableFastElementsKind(
- &new_constness_, &new_representation_, &new_field_type_);
+ Map::GeneralizeIfCanHaveTransitionableFastElementsKind(
+ isolate_, old_map_->instance_type(), &new_constness_,
+ &new_representation_, &new_field_type_);
if (TryRecofigureToDataFieldInplace() == kEnd) return result_map_;
if (FindRootMap() == kEnd) return result_map_;
@@ -158,28 +159,6 @@ Handle<Map> MapUpdater::Update() {
return result_map_;
}
-void MapUpdater::GeneralizeIfTransitionableFastElementsKind(
- PropertyConstness* constness, Representation* representation,
- Handle<FieldType>* field_type) {
- DCHECK_EQ(is_transitionable_fast_elements_kind_,
- IsTransitionableFastElementsKind(new_elements_kind_));
- if (is_transitionable_fast_elements_kind_ &&
- Map::IsInplaceGeneralizableField(*constness, *representation,
- **field_type)) {
- // We don't support propagation of field generalization through elements
- // kind transitions because they are inserted into the transition tree
- // before field transitions. In order to avoid complexity of handling
- // such a case we ensure that all maps with transitionable elements kinds
- // do not have fields that can be generalized in-place (without creation
- // of a new map).
- if (FLAG_track_constant_fields && FLAG_modify_map_inplace) {
- *constness = kMutable;
- }
- DCHECK(representation->IsHeapObject());
- *field_type = FieldType::Any(isolate_);
- }
-}
-
void MapUpdater::GeneralizeField(Handle<Map> map, int modify_index,
PropertyConstness new_constness,
Representation new_representation,
@@ -437,6 +416,7 @@ MapUpdater::State MapUpdater::FindTargetMap() {
}
Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
+ InstanceType instance_type = old_map_->instance_type();
int target_nof = target_map_->NumberOfOwnDescriptors();
Handle<DescriptorArray> target_descriptors(
target_map_->instance_descriptors(), isolate_);
@@ -518,8 +498,9 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
old_details.representation(), old_field_type, next_representation,
target_field_type, isolate_);
- GeneralizeIfTransitionableFastElementsKind(
- &next_constness, &next_representation, &next_field_type);
+ Map::GeneralizeIfCanHaveTransitionableFastElementsKind(
+ isolate_, instance_type, &next_constness, &next_representation,
+ &next_field_type);
Handle<Object> wrapped_type(Map::WrapFieldType(next_field_type));
Descriptor d;
diff --git a/deps/v8/src/map-updater.h b/deps/v8/src/map-updater.h
index a1d052261c..7c5e92f2bf 100644
--- a/deps/v8/src/map-updater.h
+++ b/deps/v8/src/map-updater.h
@@ -148,10 +148,6 @@ class MapUpdater {
Handle<DescriptorArray> descriptors, int descriptor,
PropertyLocation location, Representation representation);
- inline void GeneralizeIfTransitionableFastElementsKind(
- PropertyConstness* constness, Representation* representation,
- Handle<FieldType>* field_type);
-
void GeneralizeField(Handle<Map> map, int modify_index,
PropertyConstness new_constness,
Representation new_representation,
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index 780198dac6..ddc5124cfc 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -12,7 +12,6 @@
#include "src/keys.h"
#include "src/objects/frame-array-inl.h"
#include "src/string-builder.h"
-#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
@@ -114,6 +113,9 @@ void MessageHandler::ReportMessage(Isolate* isolate, const MessageLocation* loc,
}
if (!maybe_stringified.ToHandle(&stringified)) {
+ DCHECK(isolate->has_pending_exception());
+ isolate->clear_pending_exception();
+ isolate->set_external_caught_exception(false);
stringified =
isolate->factory()->NewStringFromAsciiChecked("exception");
}
@@ -722,9 +724,7 @@ Handle<Object> WasmStackFrame::Null() const {
bool WasmStackFrame::HasScript() const { return true; }
Handle<Script> WasmStackFrame::GetScript() const {
- return handle(
- WasmInstanceObject::cast(*wasm_instance_)->compiled_module()->script(),
- isolate_);
+ return handle(wasm_instance_->compiled_module()->script(), isolate_);
}
AsmJsWasmStackFrame::AsmJsWasmStackFrame() {}
@@ -748,15 +748,13 @@ Handle<Object> AsmJsWasmStackFrame::GetFunction() const {
}
Handle<Object> AsmJsWasmStackFrame::GetFileName() {
- Handle<Script> script =
- wasm::GetScript(Handle<JSObject>::cast(wasm_instance_));
+ Handle<Script> script(wasm_instance_->compiled_module()->script(), isolate_);
DCHECK(script->IsUserJavaScript());
return handle(script->name(), isolate_);
}
Handle<Object> AsmJsWasmStackFrame::GetScriptNameOrSourceUrl() {
- Handle<Script> script =
- wasm::GetScript(Handle<JSObject>::cast(wasm_instance_));
+ Handle<Script> script(wasm_instance_->compiled_module()->script(), isolate_);
DCHECK_EQ(Script::TYPE_NORMAL, script->type());
return ScriptNameOrSourceUrl(script, isolate_);
}
@@ -764,26 +762,24 @@ Handle<Object> AsmJsWasmStackFrame::GetScriptNameOrSourceUrl() {
int AsmJsWasmStackFrame::GetPosition() const {
DCHECK_LE(0, offset_);
int byte_offset = code_->SourcePosition(offset_);
- Handle<WasmCompiledModule> compiled_module(
- WasmInstanceObject::cast(*wasm_instance_)->compiled_module(), isolate_);
+ Handle<WasmCompiledModule> compiled_module(wasm_instance_->compiled_module(),
+ isolate_);
DCHECK_LE(0, byte_offset);
- return WasmCompiledModule::GetAsmJsSourcePosition(
+ return WasmCompiledModule::GetSourcePosition(
compiled_module, wasm_func_index_, static_cast<uint32_t>(byte_offset),
is_at_number_conversion_);
}
int AsmJsWasmStackFrame::GetLineNumber() {
DCHECK_LE(0, GetPosition());
- Handle<Script> script =
- wasm::GetScript(Handle<JSObject>::cast(wasm_instance_));
+ Handle<Script> script(wasm_instance_->compiled_module()->script(), isolate_);
DCHECK(script->IsUserJavaScript());
return Script::GetLineNumber(script, GetPosition()) + 1;
}
int AsmJsWasmStackFrame::GetColumnNumber() {
DCHECK_LE(0, GetPosition());
- Handle<Script> script =
- wasm::GetScript(Handle<JSObject>::cast(wasm_instance_));
+ Handle<Script> script(wasm_instance_->compiled_module()->script(), isolate_);
DCHECK(script->IsUserJavaScript());
return Script::GetColumnNumber(script, GetPosition()) + 1;
}
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index 24384d224b..9237f7a231 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -271,6 +271,11 @@ class ErrorUtils : public AllStatic {
T(ArrayFunctionsOnSealed, "Cannot add/remove sealed array elements") \
T(AwaitNotInAsyncFunction, "await is only valid in async function") \
T(AtomicsWaitNotAllowed, "Atomics.wait cannot be called in this context") \
+ T(BadSortComparisonFunction, \
+ "The comparison function must be either a function or undefined") \
+ T(BigIntMixedTypes, \
+ "Cannot mix BigInt and other types, use explicit conversions") \
+ T(BigIntShr, "BigInts have no unsigned right shift, use >> instead") \
T(CalledNonCallable, "% is not a function") \
T(CalledOnNonObject, "% called on non-object") \
T(CalledOnNullOrUndefined, "% called on null or undefined") \
@@ -312,6 +317,7 @@ class ErrorUtils : public AllStatic {
T(ImmutablePrototypeSet, \
"Immutable prototype object '%' cannot have their prototype set") \
T(ImportCallNotNewExpression, "Cannot use new with import") \
+ T(ImportMetaOutsideModule, "Cannot use 'import.meta' outside a module") \
T(IncompatibleMethodReceiver, "Method % called on incompatible receiver %") \
T(InstanceofNonobjectProto, \
"Function has non-object prototype '%' in instanceof check") \
@@ -464,7 +470,6 @@ class ErrorUtils : public AllStatic {
T(ReduceNoInitial, "Reduce of empty array with no initial value") \
T(RegExpFlags, \
"Cannot supply flags when constructing one RegExp from another") \
- T(RegExpInvalidReplaceString, "Invalid replacement string: '%'") \
T(RegExpNonObject, "% getter called on non-object %") \
T(RegExpNonRegExp, "% getter called on non-RegExp object") \
T(ResolverNotAFunction, "Promise resolver % is not a function") \
@@ -501,6 +506,8 @@ class ErrorUtils : public AllStatic {
T(SuperAlreadyCalled, "Super constructor may only be called once") \
T(UnsupportedSuper, "Unsupported reference to 'super'") \
/* RangeError */ \
+ T(BigIntDivZero, "Division by zero") \
+ T(BigIntTooBig, "Maximum BigInt size exceeded") \
T(DateRange, "Provided date is not in valid range.") \
T(ExpectedTimezoneID, \
"Expected Area/Location(/Location)* for time zone, got %") \
@@ -536,7 +543,7 @@ class ErrorUtils : public AllStatic {
T(ToPrecisionFormatRange, \
"toPrecision() argument must be between 1 and 100") \
T(ToRadixFormatRange, "toString() radix argument must be between 2 and 36") \
- T(TypedArraySetNegativeOffset, "Start offset is negative") \
+ T(TypedArraySetOffsetOutOfBounds, "offset is out of bounds") \
T(TypedArraySetSourceTooLarge, "Source is too large") \
T(UnsupportedTimeZone, "Unsupported time zone specified %") \
T(ValueOutOfRange, "Value % out of range for % options property %") \
@@ -545,6 +552,7 @@ class ErrorUtils : public AllStatic {
"The requested module contains conflicting star exports for name '%'") \
T(BadGetterArity, "Getter must not have any formal parameters.") \
T(BadSetterArity, "Setter must have exactly one formal parameter.") \
+ T(BigIntInvalidString, "Invalid BigInt string") \
T(ConstructorIsAccessor, "Class constructor may not be an accessor") \
T(ConstructorIsGenerator, "Class constructor may not be a generator") \
T(ConstructorIsAsync, "Class constructor may not be an async method") \
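
// Editorial sketch (not part of the patch): each T(...) entry above expands
// to a MessageTemplate enum value that runtime code raises through the usual
// V8 throw helpers; the wrapper function below is an illustrative assumption.
MaybeHandle<Object> ThrowBigIntMixedTypes(Isolate* isolate) {
  // Raises "Cannot mix BigInt and other types, use explicit conversions".
  THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kBigIntMixedTypes),
                  Object);
}
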
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 9dce4c3b4a..cbe6fcbca9 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -54,36 +54,15 @@ bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(MIPS_SIMD); }
// -----------------------------------------------------------------------------
// Operand and MemOperand.
-Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
- rm_ = no_reg;
- value_.immediate = immediate;
- rmode_ = rmode;
-}
-
-
-Operand::Operand(const ExternalReference& f) {
- rm_ = no_reg;
- value_.immediate = reinterpret_cast<int32_t>(f.address());
- rmode_ = RelocInfo::EXTERNAL_REFERENCE;
-}
-
-
-Operand::Operand(Smi* value) {
- rm_ = no_reg;
- value_.immediate = reinterpret_cast<intptr_t>(value);
- rmode_ = RelocInfo::NONE32;
-}
-
-
-Operand::Operand(Register rm) {
- rm_ = rm;
-}
-
-
bool Operand::is_reg() const {
return rm_.is_valid();
}
+int32_t Operand::immediate() const {
+ DCHECK(!is_reg());
+ DCHECK(!IsHeapObjectRequest());
+ return value_.immediate;
+}
// -----------------------------------------------------------------------------
// RelocInfo.
@@ -160,6 +139,21 @@ Address Assembler::target_address_from_return_address(Address pc) {
return pc - kCallTargetAddressOffset;
}
+void Assembler::deserialization_set_special_target_at(
+ Isolate* isolate, Address instruction_payload, Code* code, Address target) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ // On R6 the address location is shifted by one instruction.
+ set_target_address_at(
+ isolate,
+ instruction_payload - (kInstructionsFor32BitConstant - 1) * kInstrSize,
+ code, target);
+ } else {
+ set_target_address_at(
+ isolate,
+ instruction_payload - kInstructionsFor32BitConstant * kInstrSize, code,
+ target);
+ }
+}
void Assembler::set_target_internal_reference_encoded_at(Address pc,
Address target) {
@@ -385,6 +379,7 @@ void Assembler::emit(Instr x, CompactBranchType is_compact_branch) {
EmitHelper(x, is_compact_branch);
}
+EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
} // namespace internal
} // namespace v8
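
// Standalone sketch (not part of the patch) of the lui-location arithmetic in
// deserialization_set_special_target_at above, assuming the usual mips32
// values kInstructionsFor32BitConstant == 2 and kInstrSize == 4; on R6 the
// jic/jialc form leaves the payload one instruction closer to the lui.
#include <cstdint>

constexpr intptr_t LuiLocation(intptr_t instruction_payload, bool is_r6) {
  constexpr intptr_t kInstrSize = 4;
  constexpr intptr_t kInstructionsFor32BitConstant = 2;
  return is_r6 ? instruction_payload -
                     (kInstructionsFor32BitConstant - 1) * kInstrSize
               : instruction_payload -
                     kInstructionsFor32BitConstant * kInstrSize;
}
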
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index 46267ae15d..926c64d4d9 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -214,10 +214,9 @@ void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
// Implementation of Operand and MemOperand.
// See assembler-mips-inl.h for inlined constructors.
-Operand::Operand(Handle<HeapObject> handle) {
- rm_ = no_reg;
+Operand::Operand(Handle<HeapObject> handle)
+ : rm_(no_reg), rmode_(RelocInfo::EMBEDDED_OBJECT) {
value_.immediate = reinterpret_cast<intptr_t>(handle.address());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
}
Operand Operand::EmbeddedNumber(double value) {
@@ -271,31 +270,31 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
static const int kNegOffset = 0x00008000;
// addiu(sp, sp, 4) aka the Pop() operation, or part of a Pop(r)
// operation, as a post-increment of sp.
-const Instr kPopInstruction = ADDIU | (Register::kCode_sp << kRsShift) |
- (Register::kCode_sp << kRtShift) |
+const Instr kPopInstruction = ADDIU | (sp.code() << kRsShift) |
+ (sp.code() << kRtShift) |
(kPointerSize & kImm16Mask); // NOLINT
// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
-const Instr kPushInstruction = ADDIU | (Register::kCode_sp << kRsShift) |
- (Register::kCode_sp << kRtShift) |
+const Instr kPushInstruction = ADDIU | (sp.code() << kRsShift) |
+ (sp.code() << kRtShift) |
(-kPointerSize & kImm16Mask); // NOLINT
// sw(r, MemOperand(sp, 0))
const Instr kPushRegPattern =
- SW | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT
+ SW | (sp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
// lw(r, MemOperand(sp, 0))
const Instr kPopRegPattern =
- LW | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT
+ LW | (sp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
const Instr kLwRegFpOffsetPattern =
- LW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask); // NOLINT
+ LW | (fp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
const Instr kSwRegFpOffsetPattern =
- SW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask); // NOLINT
+ SW | (fp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
-const Instr kLwRegFpNegOffsetPattern = LW | (Register::kCode_fp << kRsShift) |
- (kNegOffset & kImm16Mask); // NOLINT
+const Instr kLwRegFpNegOffsetPattern =
+ LW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask); // NOLINT
-const Instr kSwRegFpNegOffsetPattern = SW | (Register::kCode_fp << kRsShift) |
- (kNegOffset & kImm16Mask); // NOLINT
+const Instr kSwRegFpNegOffsetPattern =
+ SW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask); // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xffe00000;
@@ -357,23 +356,17 @@ void Assembler::CodeTargetAlign() {
Register Assembler::GetRtReg(Instr instr) {
- Register rt;
- rt.reg_code = (instr & kRtFieldMask) >> kRtShift;
- return rt;
+ return Register::from_code((instr & kRtFieldMask) >> kRtShift);
}
Register Assembler::GetRsReg(Instr instr) {
- Register rs;
- rs.reg_code = (instr & kRsFieldMask) >> kRsShift;
- return rs;
+ return Register::from_code((instr & kRsFieldMask) >> kRsShift);
}
Register Assembler::GetRdReg(Instr instr) {
- Register rd;
- rd.reg_code = (instr & kRdFieldMask) >> kRdShift;
- return rd;
+ return Register::from_code((instr & kRdFieldMask) >> kRdShift);
}
@@ -1469,15 +1462,15 @@ void Assembler::bgez(Register rs, int16_t offset) {
void Assembler::bgezc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rt != zero_reg);
GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rs.is(zero_reg)));
- DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rs != zero_reg);
+ DCHECK(rt != zero_reg);
DCHECK(rs.code() != rt.code());
GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1485,16 +1478,16 @@ void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
void Assembler::bgec(Register rs, Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rs.is(zero_reg)));
- DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rs != zero_reg);
+ DCHECK(rt != zero_reg);
DCHECK(rs.code() != rt.code());
GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bgezal(Register rs, int16_t offset) {
- DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
- DCHECK(!(rs.is(ra)));
+ DCHECK(!IsMipsArchVariant(kMips32r6) || rs == zero_reg);
+ DCHECK(rs != ra);
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
@@ -1510,7 +1503,7 @@ void Assembler::bgtz(Register rs, int16_t offset) {
void Assembler::bgtzc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rt != zero_reg);
GenInstrImmediate(BGTZL, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1525,7 +1518,7 @@ void Assembler::blez(Register rs, int16_t offset) {
void Assembler::blezc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rt != zero_reg);
GenInstrImmediate(BLEZL, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1533,15 +1526,15 @@ void Assembler::blezc(Register rt, int16_t offset) {
void Assembler::bltzc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!rt.is(zero_reg));
+ DCHECK(rt != zero_reg);
GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rs.is(zero_reg)));
- DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rs != zero_reg);
+ DCHECK(rt != zero_reg);
DCHECK(rs.code() != rt.code());
GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1549,8 +1542,8 @@ void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
void Assembler::bltc(Register rs, Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!rs.is(zero_reg));
- DCHECK(!rt.is(zero_reg));
+ DCHECK(rs != zero_reg);
+ DCHECK(rt != zero_reg);
DCHECK(rs.code() != rt.code());
GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1564,8 +1557,8 @@ void Assembler::bltz(Register rs, int16_t offset) {
void Assembler::bltzal(Register rs, int16_t offset) {
- DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
- DCHECK(!(rs.is(ra)));
+ DCHECK(!IsMipsArchVariant(kMips32r6) || rs == zero_reg);
+ DCHECK(rs != ra);
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
@@ -1601,8 +1594,8 @@ void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
void Assembler::blezalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rt.is(zero_reg)));
- DCHECK(!(rt.is(ra)));
+ DCHECK(rt != zero_reg);
+ DCHECK(rt != ra);
GenInstrImmediate(BLEZ, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1610,16 +1603,16 @@ void Assembler::blezalc(Register rt, int16_t offset) {
void Assembler::bgezalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rt.is(zero_reg)));
- DCHECK(!(rt.is(ra)));
+ DCHECK(rt != zero_reg);
+ DCHECK(rt != ra);
GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bgezall(Register rs, int16_t offset) {
DCHECK(!IsMipsArchVariant(kMips32r6));
- DCHECK(!(rs.is(zero_reg)));
- DCHECK(!(rs.is(ra)));
+ DCHECK(rs != zero_reg);
+ DCHECK(rs != ra);
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
@@ -1628,16 +1621,16 @@ void Assembler::bgezall(Register rs, int16_t offset) {
void Assembler::bltzalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rt.is(zero_reg)));
- DCHECK(!(rt.is(ra)));
+ DCHECK(rt != zero_reg);
+ DCHECK(rt != ra);
GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bgtzalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rt.is(zero_reg)));
- DCHECK(!(rt.is(ra)));
+ DCHECK(rt != zero_reg);
+ DCHECK(rt != ra);
GenInstrImmediate(BGTZ, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1645,8 +1638,8 @@ void Assembler::bgtzalc(Register rt, int16_t offset) {
void Assembler::beqzalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rt.is(zero_reg)));
- DCHECK(!(rt.is(ra)));
+ DCHECK(rt != zero_reg);
+ DCHECK(rt != ra);
GenInstrImmediate(ADDI, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1654,8 +1647,8 @@ void Assembler::beqzalc(Register rt, int16_t offset) {
void Assembler::bnezalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rt.is(zero_reg)));
- DCHECK(!(rt.is(ra)));
+ DCHECK(rt != zero_reg);
+ DCHECK(rt != ra);
GenInstrImmediate(DADDI, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1674,7 +1667,7 @@ void Assembler::beqc(Register rs, Register rt, int16_t offset) {
void Assembler::beqzc(Register rs, int32_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rs.is(zero_reg)));
+ DCHECK(rs != zero_reg);
GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1692,7 +1685,7 @@ void Assembler::bnec(Register rs, Register rt, int16_t offset) {
void Assembler::bnezc(Register rs, int32_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rs.is(zero_reg)));
+ DCHECK(rs != zero_reg);
GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1894,7 +1887,7 @@ void Assembler::sll(Register rd,
// Don't allow nop instructions in the form sll zero_reg, zero_reg to be
// generated using the sll instruction. They must be generated using
// nop(int/NopMarkerTypes).
- DCHECK(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
+ DCHECK(coming_from_nop || !(rd == zero_reg && rt == zero_reg));
GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);
}
@@ -1983,8 +1976,8 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
}
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!src.rm().is(scratch)); // Must not overwrite the register 'base'
- // while loading 'offset'.
+ DCHECK(src.rm() != scratch); // Must not overwrite the register 'base'
+ // while loading 'offset'.
#ifdef DEBUG
// Remember the "(mis)alignment" of 'offset', it will be checked at the end.
@@ -2190,7 +2183,7 @@ void Assembler::aui(Register rt, Register rs, int32_t j) {
// This instruction uses the same opcode as 'lui'. The difference in encoding
// is that 'lui' has the zero register in the rs field.
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rs.is(zero_reg)));
+ DCHECK(rs != zero_reg);
DCHECK(is_uint16(j));
GenInstrImmediate(LUI, rs, rt, j);
}
@@ -2357,15 +2350,13 @@ void Assembler::movn(Register rd, Register rs, Register rt) {
void Assembler::movt(Register rd, Register rs, uint16_t cc) {
- Register rt;
- rt.reg_code = (cc & 0x0007) << 2 | 1;
+ Register rt = Register::from_code((cc & 0x0007) << 2 | 1);
GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
void Assembler::movf(Register rd, Register rs, uint16_t cc) {
- Register rt;
- rt.reg_code = (cc & 0x0007) << 2 | 0;
+ Register rt = Register::from_code((cc & 0x0007) << 2 | 0);
GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
@@ -2584,32 +2575,28 @@ void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK(!IsMipsArchVariant(kMips32r6));
- FPURegister ft;
- ft.reg_code = (cc & 0x0007) << 2 | 1;
+ FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1);
GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}
void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK(!IsMipsArchVariant(kMips32r6));
- FPURegister ft;
- ft.reg_code = (cc & 0x0007) << 2 | 1;
+ FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1);
GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}
void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK(!IsMipsArchVariant(kMips32r6));
- FPURegister ft;
- ft.reg_code = (cc & 0x0007) << 2 | 0;
+ FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0);
GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}
void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK(!IsMipsArchVariant(kMips32r6));
- FPURegister ft;
- ft.reg_code = (cc & 0x0007) << 2 | 0;
+ FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0);
GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}
@@ -3139,7 +3126,7 @@ MSA_BRANCH_LIST(MSA_BRANCH)
} else { \
UseScratchRegisterScope temps(this); \
Register scratch = temps.Acquire(); \
- DCHECK(!rs.rm().is(scratch)); \
+ DCHECK(rs.rm() != scratch); \
addiu(scratch, source.rm(), source.offset()); \
GenInstrMsaMI10(opcode, 0, scratch, wd); \
} \
@@ -3901,7 +3888,7 @@ void Assembler::set_target_value_at(Isolate* isolate, Address pc,
#ifdef DEBUG
// Check we have the result from a li macro-instruction, using instr pair.
Instr instr1 = instr_at(pc);
- CHECK(IsLui(instr1) && (IsOri(instr2) || IsJicOrJialc(instr2)));
+ DCHECK(IsLui(instr1) && (IsOri(instr2) || IsJicOrJialc(instr2)));
#endif
if (IsJicOrJialc(instr2)) {
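
// Standalone sketch (not part of the patch) of the instruction-pattern
// encodings above; the MIPS field values used here (ADDIU major opcode 9,
// sp = GPR 29, kRsShift = 21, kRtShift = 16) are restated assumptions.
#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kAddiuOp = 9u << 26;  // ADDIU major opcode (assumption).
  const int kRsShift = 21, kRtShift = 16;
  const uint32_t kImm16Mask = 0xffff;
  const uint32_t kSpCode = 29;  // sp is GPR 29 on MIPS (assumption).
  const uint32_t kPointerSize = 4;
  // addiu sp, sp, 4 -- the Pop() pattern the matcher constants encode.
  const uint32_t pop = kAddiuOp | (kSpCode << kRsShift) |
                       (kSpCode << kRtShift) | (kPointerSize & kImm16Mask);
  std::printf("kPopInstruction pattern: 0x%08x\n", static_cast<unsigned>(pop));
  return 0;
}
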
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 49e143fdeb..d2a9802b5e 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -200,19 +200,15 @@ const int kSafepointRegisterStackIndexMap[kNumRegs] = {kUndefIndex, // zero_reg
// -----------------------------------------------------------------------------
// Implementation of Register and FPURegister.
-struct Register {
- static constexpr int kCpRegister = 23; // cp (s7) is the 23rd register.
-
- enum Code {
-#define REGISTER_CODE(R) kCode_##R,
- GENERAL_REGISTERS(REGISTER_CODE)
+enum RegisterCode {
+#define REGISTER_CODE(R) kRegCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
- kAfterLast,
- kCode_no_reg = -1
- };
-
- static constexpr int kNumRegisters = Code::kAfterLast;
+ kRegAfterLast
+};
+class Register : public RegisterBase<Register, kRegAfterLast> {
+ public:
#if defined(V8_TARGET_LITTLE_ENDIAN)
static constexpr int kMantissaOffset = 0;
static constexpr int kExponentOffset = 4;
@@ -223,34 +219,19 @@ struct Register {
#error Unknown endianness
#endif
-
- static Register from_code(int code) {
- DCHECK_LE(0, code);
- DCHECK_GT(kNumRegisters, code);
- return Register{code};
- }
- bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
- bool is(Register reg) const { return reg_code == reg.reg_code; }
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
- int bit() const {
- DCHECK(is_valid());
- return 1 << reg_code;
- }
-
- // Unfortunately we can't make this private in a struct.
- int reg_code;
+ private:
+ friend class RegisterBase;
+ explicit constexpr Register(int code) : RegisterBase(code) {}
};
// s7: context register
// s3: lithium scratch
// s4: lithium scratch2
-#define DECLARE_REGISTER(R) constexpr Register R = {Register::kCode_##R};
+#define DECLARE_REGISTER(R) \
+ constexpr Register R = Register::from_code<kRegCode_##R>();
GENERAL_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
-constexpr Register no_reg = {Register::kCode_no_reg};
+constexpr Register no_reg = Register::no_reg();
int ToNumber(Register reg);
@@ -259,100 +240,43 @@ Register ToRegister(int num);
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
-// Coprocessor register.
-struct FPURegister {
- enum Code {
-#define REGISTER_CODE(R) kCode_##R,
- DOUBLE_REGISTERS(REGISTER_CODE)
+enum DoubleRegisterCode {
+#define REGISTER_CODE(R) kDoubleCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
- kAfterLast,
- kCode_no_reg = kInvalidFPURegister
- };
-
- static constexpr int kMaxNumRegisters = Code::kAfterLast;
-
- inline static int NumRegisters();
-
- // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
- // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
- // number of Double regs (64-bit regs, or FPU-reg-pairs).
+ kDoubleAfterLast
+};
- bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
- bool is(FPURegister reg) const { return reg_code == reg.reg_code; }
+// Coprocessor register.
+class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> {
+ public:
FPURegister low() const {
// Find low reg of a Double-reg pair, which is the reg itself.
- DCHECK(reg_code % 2 == 0); // Specified Double reg must be even.
- FPURegister reg;
- reg.reg_code = reg_code;
- DCHECK(reg.is_valid());
- return reg;
+ DCHECK(code() % 2 == 0); // Specified Double reg must be even.
+ return FPURegister::from_code(code());
}
FPURegister high() const {
// Find high reg of a Double-reg pair, which is reg + 1.
- DCHECK(reg_code % 2 == 0); // Specified Double reg must be even.
- FPURegister reg;
- reg.reg_code = reg_code + 1;
- DCHECK(reg.is_valid());
- return reg;
+ DCHECK(code() % 2 == 0); // Specified Double reg must be even.
+ return FPURegister::from_code(code() + 1);
}
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
- int bit() const {
- DCHECK(is_valid());
- return 1 << reg_code;
- }
-
- static FPURegister from_code(int code) {
- FPURegister r = {code};
- return r;
- }
- void setcode(int f) {
- reg_code = f;
- DCHECK(is_valid());
- }
- // Unfortunately we can't make this private in a struct.
- int reg_code;
+ private:
+ friend class RegisterBase;
+ explicit constexpr FPURegister(int code) : RegisterBase(code) {}
};
-// MIPS SIMD (MSA) register
-struct MSARegister {
- enum Code {
-#define REGISTER_CODE(R) kCode_##R,
- SIMD128_REGISTERS(REGISTER_CODE)
+enum MSARegisterCode {
+#define REGISTER_CODE(R) kMsaCode_##R,
+ SIMD128_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
- kAfterLast,
- kCode_no_reg = kInvalidMSARegister
- };
-
- static const int kMaxNumRegisters = Code::kAfterLast;
-
- inline static int NumRegisters();
-
- bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
- bool is(MSARegister reg) const { return reg_code == reg.reg_code; }
-
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
- int bit() const {
- DCHECK(is_valid());
- return 1 << reg_code;
- }
+ kMsaAfterLast
+};
- static MSARegister from_code(int code) {
- MSARegister r = {code};
- return r;
- }
- void setcode(int f) {
- reg_code = f;
- DCHECK(is_valid());
- }
- // Unfortunately we can't make this private in a struct.
- int reg_code;
+// MIPS SIMD (MSA) register
+class MSARegister : public RegisterBase<MSARegister, kMsaAfterLast> {
+ friend class RegisterBase;
+ explicit constexpr MSARegister(int code) : RegisterBase(code) {}
};
// A few double registers are reserved: one as a scratch register and one to
@@ -373,78 +297,22 @@ typedef FPURegister FloatRegister;
typedef FPURegister DoubleRegister;
-constexpr DoubleRegister no_freg = {kInvalidFPURegister};
-
-constexpr DoubleRegister f0 = {0}; // Return value in hard float mode.
-constexpr DoubleRegister f1 = {1};
-constexpr DoubleRegister f2 = {2};
-constexpr DoubleRegister f3 = {3};
-constexpr DoubleRegister f4 = {4};
-constexpr DoubleRegister f5 = {5};
-constexpr DoubleRegister f6 = {6};
-constexpr DoubleRegister f7 = {7};
-constexpr DoubleRegister f8 = {8};
-constexpr DoubleRegister f9 = {9};
-constexpr DoubleRegister f10 = {10};
-constexpr DoubleRegister f11 = {11};
-constexpr DoubleRegister f12 = {12}; // Arg 0 in hard float mode.
-constexpr DoubleRegister f13 = {13};
-constexpr DoubleRegister f14 = {14}; // Arg 1 in hard float mode.
-constexpr DoubleRegister f15 = {15};
-constexpr DoubleRegister f16 = {16};
-constexpr DoubleRegister f17 = {17};
-constexpr DoubleRegister f18 = {18};
-constexpr DoubleRegister f19 = {19};
-constexpr DoubleRegister f20 = {20};
-constexpr DoubleRegister f21 = {21};
-constexpr DoubleRegister f22 = {22};
-constexpr DoubleRegister f23 = {23};
-constexpr DoubleRegister f24 = {24};
-constexpr DoubleRegister f25 = {25};
-constexpr DoubleRegister f26 = {26};
-constexpr DoubleRegister f27 = {27};
-constexpr DoubleRegister f28 = {28};
-constexpr DoubleRegister f29 = {29};
-constexpr DoubleRegister f30 = {30};
-constexpr DoubleRegister f31 = {31};
+#define DECLARE_DOUBLE_REGISTER(R) \
+ constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
+DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
+#undef DECLARE_DOUBLE_REGISTER
+
+constexpr DoubleRegister no_freg = DoubleRegister::no_reg();
// SIMD registers.
typedef MSARegister Simd128Register;
-const Simd128Register no_msareg = {kInvalidMSARegister};
-
-constexpr Simd128Register w0 = {0};
-constexpr Simd128Register w1 = {1};
-constexpr Simd128Register w2 = {2};
-constexpr Simd128Register w3 = {3};
-constexpr Simd128Register w4 = {4};
-constexpr Simd128Register w5 = {5};
-constexpr Simd128Register w6 = {6};
-constexpr Simd128Register w7 = {7};
-constexpr Simd128Register w8 = {8};
-constexpr Simd128Register w9 = {9};
-constexpr Simd128Register w10 = {10};
-constexpr Simd128Register w11 = {11};
-constexpr Simd128Register w12 = {12};
-constexpr Simd128Register w13 = {13};
-constexpr Simd128Register w14 = {14};
-constexpr Simd128Register w15 = {15};
-constexpr Simd128Register w16 = {16};
-constexpr Simd128Register w17 = {17};
-constexpr Simd128Register w18 = {18};
-constexpr Simd128Register w19 = {19};
-constexpr Simd128Register w20 = {20};
-constexpr Simd128Register w21 = {21};
-constexpr Simd128Register w22 = {22};
-constexpr Simd128Register w23 = {23};
-constexpr Simd128Register w24 = {24};
-constexpr Simd128Register w25 = {25};
-constexpr Simd128Register w26 = {26};
-constexpr Simd128Register w27 = {27};
-constexpr Simd128Register w28 = {28};
-constexpr Simd128Register w29 = {29};
-constexpr Simd128Register w30 = {30};
-constexpr Simd128Register w31 = {31};
+#define DECLARE_SIMD128_REGISTER(R) \
+ constexpr Simd128Register R = Simd128Register::from_code<kMsaCode_##R>();
+SIMD128_REGISTERS(DECLARE_SIMD128_REGISTER)
+#undef DECLARE_SIMD128_REGISTER
+
+const Simd128Register no_msareg = Simd128Register::no_reg();
// Register aliases.
// cp is assumed to be a callee saved register.
@@ -518,28 +386,33 @@ class Operand BASE_EMBEDDED {
public:
// Immediate.
INLINE(explicit Operand(int32_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE32));
- INLINE(explicit Operand(const ExternalReference& f));
+ RelocInfo::Mode rmode = RelocInfo::NONE32))
+ : rm_(no_reg), rmode_(rmode) {
+ value_.immediate = immediate;
+ }
+ INLINE(explicit Operand(const ExternalReference& f))
+ : rm_(no_reg), rmode_(RelocInfo::EXTERNAL_REFERENCE) {
+ value_.immediate = reinterpret_cast<int32_t>(f.address());
+ }
INLINE(explicit Operand(const char* s));
INLINE(explicit Operand(Object** opp));
INLINE(explicit Operand(Context** cpp));
explicit Operand(Handle<HeapObject> handle);
- INLINE(explicit Operand(Smi* value));
+ INLINE(explicit Operand(Smi* value))
+ : rm_(no_reg), rmode_(RelocInfo::NONE32) {
+ value_.immediate = reinterpret_cast<intptr_t>(value);
+ }
static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
static Operand EmbeddedCode(CodeStub* stub);
// Register.
- INLINE(explicit Operand(Register rm));
+ INLINE(explicit Operand(Register rm)) : rm_(rm) {}
// Return true if this is a register operand.
INLINE(bool is_reg() const);
- inline int32_t immediate() const {
- DCHECK(!is_reg());
- DCHECK(!IsHeapObjectRequest());
- return value_.immediate;
- }
+ inline int32_t immediate() const;
bool IsImmediate() const { return !rm_.is_valid(); }
@@ -726,21 +599,7 @@ class Assembler : public AssemblerBase {
// has already deserialized the lui/ori instructions etc.
inline static void deserialization_set_special_target_at(
Isolate* isolate, Address instruction_payload, Code* code,
- Address target) {
- if (IsMipsArchVariant(kMips32r6)) {
- // On R6 the address location is shifted by one instruction
- set_target_address_at(
- isolate,
- instruction_payload -
- (kInstructionsFor32BitConstant - 1) * kInstrSize,
- code, target);
- } else {
- set_target_address_at(
- isolate,
- instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
- code, target);
- }
- }
+ Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
@@ -2375,9 +2234,7 @@ class Assembler : public AssemblerBase {
class EnsureSpace BASE_EMBEDDED {
public:
- explicit EnsureSpace(Assembler* assembler) {
- assembler->CheckBuffer();
- }
+ explicit inline EnsureSpace(Assembler* assembler);
};
class UseScratchRegisterScope {
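
// Standalone sketch (simplified assumptions, not the real v8 template) of the
// RegisterBase pattern that Register/FPURegister/MSARegister now derive from:
// code-carrying value types with from_code()/no_reg() and plain ==/!=
// replacing the old reg.is(other). The real base takes the code as a template
// parameter in from_code and keeps the subclass constructor private behind a
// friend declaration.
#include <cassert>

template <typename SubType, int kAfterLastCode>
class RegisterBaseSketch {
 public:
  static constexpr int kNumRegisters = kAfterLastCode;
  static constexpr SubType from_code(int code) { return SubType(code); }
  static constexpr SubType no_reg() { return SubType(-1); }
  constexpr bool is_valid() const {
    return 0 <= code_ && code_ < kNumRegisters;
  }
  int code() const {
    assert(is_valid());
    return code_;
  }
  int bit() const { return 1 << code(); }
  constexpr bool operator==(RegisterBaseSketch other) const {
    return code_ == other.code_;
  }
  constexpr bool operator!=(RegisterBaseSketch other) const {
    return !(*this == other);
  }
 protected:
  explicit constexpr RegisterBaseSketch(int code) : code_(code) {}
 private:
  int code_;
};

class SketchRegister : public RegisterBaseSketch<SketchRegister, 32> {
 public:
  // Public here for brevity; the real subclasses keep this private.
  explicit constexpr SketchRegister(int code) : RegisterBaseSketch(code) {}
};
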
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 414436b802..29583eca1a 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -11,6 +11,7 @@
#include "src/codegen.h"
#include "src/frame-constants.h"
#include "src/frames.h"
+#include "src/heap/heap-inl.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
@@ -44,7 +45,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
int double_offset = offset();
// Account for saved regs if input is sp.
- if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
+ if (input_reg == sp) double_offset += 3 * kPointerSize;
Register scratch =
GetRegisterThatIsNotOneOf(input_reg, result_reg);
@@ -222,7 +223,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
void MathPowStub::Generate(MacroAssembler* masm) {
const Register exponent = MathPowTaggedDescriptor::exponent();
- DCHECK(exponent.is(a2));
+ DCHECK(exponent == a2);
const DoubleRegister double_base = f2;
const DoubleRegister double_exponent = f4;
const DoubleRegister double_result = f0;
@@ -425,28 +426,9 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ AssertStackIsAligned();
- int frame_alignment = MacroAssembler::ActivationFrameAlignment();
- int frame_alignment_mask = frame_alignment - 1;
- int result_stack_size;
- if (result_size() <= 2) {
- // a0 = argc, a1 = argv, a2 = isolate
- __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
- __ mov(a1, s1);
- result_stack_size = 0;
- } else {
- DCHECK_EQ(3, result_size());
- // Allocate additional space for the result.
- result_stack_size =
- ((result_size() * kPointerSize) + frame_alignment_mask) &
- ~frame_alignment_mask;
- __ Subu(sp, sp, Operand(result_stack_size));
-
- // a0 = hidden result argument, a1 = argc, a2 = argv, a3 = isolate.
- __ li(a3, Operand(ExternalReference::isolate_address(isolate())));
- __ mov(a2, s1);
- __ mov(a1, a0);
- __ mov(a0, sp);
- }
+ // a0 = argc, a1 = argv, a2 = isolate
+ __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
+ __ mov(a1, s1);
// To let the GC traverse the return address of the exit frames, we need to
// know where the return address is. The CEntryStub is unmovable, so
@@ -469,7 +451,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ bind(&find_ra);
// This spot was reserved in EnterExitFrame.
- __ sw(ra, MemOperand(sp, result_stack_size));
+ __ sw(ra, MemOperand(sp));
// Stack space reservation moved to the branch delay slot below.
// Stack is still aligned.
@@ -482,14 +464,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
DCHECK_EQ(kNumInstructionsToJump,
masm->InstructionsGeneratedSince(&find_ra));
}
- if (result_size() > 2) {
- DCHECK_EQ(3, result_size());
- // Read result values stored on stack.
- __ lw(a0, MemOperand(v0, 2 * kPointerSize));
- __ lw(v1, MemOperand(v0, 1 * kPointerSize));
- __ lw(v0, MemOperand(v0, 0 * kPointerSize));
- }
- // Result returned in v0, v1:v0 or a0:v1:v0 - do not destroy these registers!
+
+ // Result returned in v0 or v1:v0 - do not destroy these registers!
// Check result for exception sentinel.
Label exception_returned;
@@ -515,14 +491,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// v0:v1: result
// sp: stack pointer
// fp: frame pointer
- Register argc;
- if (argv_in_register()) {
- // We don't want to pop arguments so set argc to no_reg.
- argc = no_reg;
- } else {
- // s0: still holds argc (callee-saved).
- argc = s0;
- }
+ Register argc = argv_in_register()
+ // We don't want to pop arguments so set argc to no_reg.
+ ? no_reg
+ // s0: still holds argc (callee-saved).
+ : s0;
__ LeaveExitFrame(save_doubles(), argc, true, EMIT_RETURN);
// Handling of exception.
@@ -907,7 +880,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ Lsa(tmp, properties, index, 1);
__ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
- DCHECK(!tmp.is(entity_name));
+ DCHECK(tmp != entity_name);
__ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
__ Branch(done, eq, entity_name, Operand(tmp));
@@ -1048,6 +1021,49 @@ void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
stub2.GetCode();
}
+RecordWriteStub::Mode RecordWriteStub::GetMode(Code* stub) {
+ Instr first_instruction = Assembler::instr_at(stub->instruction_start());
+ Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
+ 2 * Assembler::kInstrSize);
+
+ if (Assembler::IsBeq(first_instruction)) {
+ return INCREMENTAL;
+ }
+
+ DCHECK(Assembler::IsBne(first_instruction));
+
+ if (Assembler::IsBeq(second_instruction)) {
+ return INCREMENTAL_COMPACTION;
+ }
+
+ DCHECK(Assembler::IsBne(second_instruction));
+
+ return STORE_BUFFER_ONLY;
+}
+
+void RecordWriteStub::Patch(Code* stub, Mode mode) {
+ MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
+ stub->instruction_size(), CodeObjectRequired::kNo);
+ switch (mode) {
+ case STORE_BUFFER_ONLY:
+ DCHECK(GetMode(stub) == INCREMENTAL ||
+ GetMode(stub) == INCREMENTAL_COMPACTION);
+ PatchBranchIntoNop(&masm, 0);
+ PatchBranchIntoNop(&masm, 2 * Assembler::kInstrSize);
+ break;
+ case INCREMENTAL:
+ DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
+ PatchNopIntoBranch(&masm, 0);
+ break;
+ case INCREMENTAL_COMPACTION:
+ DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
+ PatchNopIntoBranch(&masm, 2 * Assembler::kInstrSize);
+ break;
+ }
+ DCHECK(GetMode(stub) == mode);
+ Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(),
+ 4 * Assembler::kInstrSize);
+}
// Takes the input in 3 registers: address_, value_, and object_. A pointer to
// the value has just been written into the object; now this stub makes sure
@@ -1069,11 +1085,7 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
__ nop();
if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object(),
- address(),
- value(),
- save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
}
__ Ret();
@@ -1111,11 +1123,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
InformIncrementalMarker(masm);
regs_.Restore(masm);
- __ RememberedSetHelper(object(),
- address(),
- value(),
- save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
__ bind(&dont_need_remembered_set);
}
@@ -1132,10 +1140,9 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
- Register address =
- a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
- DCHECK(!address.is(regs_.object()));
- DCHECK(!address.is(a0));
+ Register address = a0 == regs_.address() ? regs_.scratch0() : regs_.address();
+ DCHECK(address != regs_.object());
+ DCHECK(address != a0);
__ Move(address, regs_.address());
__ Move(a0, regs_.object());
__ Move(a1, address);
@@ -1148,6 +1155,9 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}
+void RecordWriteStub::Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+}
void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
@@ -1164,11 +1174,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(),
- address(),
- value(),
- save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
} else {
__ Ret();
}
@@ -1209,11 +1215,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(),
- address(),
- value(),
- save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
} else {
__ Ret();
}
@@ -1600,7 +1602,7 @@ static void CallApiFunctionAndReturn(
const int kLevelOffset = AddressOffset(
ExternalReference::handle_scope_level_address(isolate), next_address);
- DCHECK(function_address.is(a1) || function_address.is(a2));
+ DCHECK(function_address == a1 || function_address == a2);
Label profiler_disabled;
Label end_profiler_check;
@@ -1802,7 +1804,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
- DCHECK(!api_function_address.is(a0) && !scratch.is(a0));
+ DCHECK(api_function_address != a0 && scratch != a0);
// a0 = FunctionCallbackInfo&
// Arguments is after the return address.
__ Addu(a0, sp, Operand(1 * kPointerSize));
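
// Standalone sketch (not part of the patch) of the mode decoding that
// GetMode() above performs on the stub's first two branch slots; the boolean
// flags stand in for Assembler::IsBeq/IsBne on the real instructions.
enum SketchMode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

SketchMode DecodeMode(bool first_is_beq, bool second_is_beq) {
  if (first_is_beq) return INCREMENTAL;
  // Otherwise the first slot is bne (the DCHECK in GetMode asserts this).
  if (second_is_beq) return INCREMENTAL_COMPACTION;
  return STORE_BUFFER_ONLY;
}
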
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
index 4989c74ca7..f0a365bd6b 100644
--- a/deps/v8/src/mips/code-stubs-mips.h
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -103,49 +103,9 @@ class RecordWriteStub: public PlatformCodeStub {
DCHECK(Assembler::IsBeq(masm->instr_at(pos)));
}
- static Mode GetMode(Code* stub) {
- Instr first_instruction = Assembler::instr_at(stub->instruction_start());
- Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
- 2 * Assembler::kInstrSize);
+ static Mode GetMode(Code* stub);
- if (Assembler::IsBeq(first_instruction)) {
- return INCREMENTAL;
- }
-
- DCHECK(Assembler::IsBne(first_instruction));
-
- if (Assembler::IsBeq(second_instruction)) {
- return INCREMENTAL_COMPACTION;
- }
-
- DCHECK(Assembler::IsBne(second_instruction));
-
- return STORE_BUFFER_ONLY;
- }
-
- static void Patch(Code* stub, Mode mode) {
- MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
- stub->instruction_size(), CodeObjectRequired::kNo);
- switch (mode) {
- case STORE_BUFFER_ONLY:
- DCHECK(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
- PatchBranchIntoNop(&masm, 0);
- PatchBranchIntoNop(&masm, 2 * Assembler::kInstrSize);
- break;
- case INCREMENTAL:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, 0);
- break;
- case INCREMENTAL_COMPACTION:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, 2 * Assembler::kInstrSize);
- break;
- }
- DCHECK(GetMode(stub) == mode);
- Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(),
- 4 * Assembler::kInstrSize);
- }
+ static void Patch(Code* stub, Mode mode);
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
@@ -155,12 +115,11 @@ class RecordWriteStub: public PlatformCodeStub {
// the caller.
class RegisterAllocation {
public:
- RegisterAllocation(Register object,
- Register address,
- Register scratch0)
+ RegisterAllocation(Register object, Register address, Register scratch0)
: object_(object),
address_(address),
- scratch0_(scratch0) {
+ scratch0_(scratch0),
+ scratch1_(no_reg) {
DCHECK(!AreAliased(scratch0, object, address, no_reg));
scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
}
@@ -223,9 +182,7 @@ class RecordWriteStub: public PlatformCodeStub {
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
- void Activate(Code* code) override {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
- }
+ void Activate(Code* code) override;
Register object() const {
return Register::from_code(ObjectBits::decode(minor_key_));
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h
index 86df6d6e59..352dbb1181 100644
--- a/deps/v8/src/mips/constants-mips.h
+++ b/deps/v8/src/mips/constants-mips.h
@@ -387,6 +387,7 @@ const int kMsaI5Mask = ((7U << 23) | ((1 << 6) - 1));
const int kMsaMI10Mask = (15U << 2);
const int kMsaBITMask = ((7U << 23) | ((1 << 6) - 1));
const int kMsaELMMask = (15U << 22);
+const int kMsaLongerELMMask = kMsaELMMask | (63U << 16);
const int kMsa3RMask = ((7U << 23) | ((1 << 6) - 1));
const int kMsa3RFMask = ((15U << 22) | ((1 << 6) - 1));
const int kMsaVECMask = (23U << 21);
@@ -1602,7 +1603,8 @@ class InstructionGetters : public T {
}
inline int32_t MsaElmDf() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK(this->InstructionType() == InstructionBase::kRegisterType ||
+ this->InstructionType() == InstructionBase::kImmediateType);
int32_t df_n = this->Bits(21, 16);
if (((df_n >> 4) & 3U) == 0) {
return 0;
@@ -1618,7 +1620,8 @@ class InstructionGetters : public T {
}
inline int32_t MsaElmNValue() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK(this->InstructionType() == InstructionBase::kRegisterType ||
+ this->InstructionType() == InstructionBase::kImmediateType);
return this->Bits(16 + 4 - this->MsaElmDf(), 16);
}
@@ -1783,6 +1786,15 @@ InstructionBase::Type InstructionBase::InstructionType() const {
case kMsaMinor2R:
case kMsaMinor2RF:
return kRegisterType;
+ case kMsaMinorELM:
+ switch (InstructionBits() & kMsaLongerELMMask) {
+ case CFCMSA:
+ case CTCMSA:
+ case MOVE_V:
+ return kRegisterType;
+ default:
+ return kImmediateType;
+ }
default:
return kImmediateType;
}
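
// Standalone sketch (not part of the patch) of the kMsaMinorELM
// classification added above: widening kMsaELMMask by (63 << 16) pulls the
// df/n field into the comparison, which is what lets CFCMSA/CTCMSA/MOVE_V
// (register-type) be told apart from the immediate ELM forms. The opcode
// arguments below are placeholders for the real encodings.
#include <cstdint>

enum SketchType { kRegisterTypeSketch, kImmediateTypeSketch };

SketchType ClassifyElm(uint32_t instr, uint32_t cfcmsa, uint32_t ctcmsa,
                       uint32_t move_v) {
  const uint32_t kElmMask = 15u << 22;
  const uint32_t kLongerElmMask = kElmMask | (63u << 16);
  const uint32_t op = instr & kLongerElmMask;
  if (op == cfcmsa || op == ctcmsa || op == move_v) return kRegisterTypeSketch;
  return kImmediateTypeSketch;
}
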
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 27ab7de617..616224053c 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/assembler-inl.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/register-configuration.h"
@@ -26,8 +27,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
RegList restored_regs = kJSCallerSaved | kCalleeSaved;
RegList saved_regs = restored_regs | sp.bit() | ra.bit();
- const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kMaxNumRegisters;
- const int kFloatRegsSize = kFloatSize * FloatRegister::kMaxNumRegisters;
+ const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
+ const int kFloatRegsSize = kFloatSize * FloatRegister::kNumRegisters;
// Save all FPU registers before messing with them.
__ Subu(sp, sp, Operand(kDoubleRegsSize));
@@ -208,10 +209,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Ldc1(fpu_reg, MemOperand(a1, src_offset));
}
- // Push state, pc, and continuation from the last output frame.
- __ lw(t2, MemOperand(a2, FrameDescription::state_offset()));
- __ push(t2);
-
+ // Push pc and continuation from the last output frame.
__ lw(t2, MemOperand(a2, FrameDescription::pc_offset()));
__ push(t2);
__ lw(t2, MemOperand(a2, FrameDescription::continuation_offset()));
@@ -313,6 +311,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
}
}
+bool Deoptimizer::PadTopOfStackRegister() { return false; }
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index d471b656ce..21b46bbac4 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -1659,6 +1659,9 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
case kMsaMinor2RF:
DecodeTypeMsa2RF(instr);
break;
+ case kMsaMinorELM:
+ DecodeTypeMsaELM(instr);
+ break;
default:
UNREACHABLE();
}
diff --git a/deps/v8/src/mips/frame-constants-mips.cc b/deps/v8/src/mips/frame-constants-mips.cc
index 2d79bcceda..73072a212f 100644
--- a/deps/v8/src/mips/frame-constants-mips.cc
+++ b/deps/v8/src/mips/frame-constants-mips.cc
@@ -18,6 +18,10 @@ Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); }
+int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
+ return register_count;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index 5d1151e564..92e0e958b7 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -22,9 +22,14 @@ void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
void RecordWriteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // TODO(albertnetymk): Use default for now; should call
- // RestrictAllocatableRegisters like src/x64/interface-descriptors-x64.cc
- DefaultInitializePlatformSpecific(data, kParameterCount);
+ const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0};
+
+ data->RestrictAllocatableRegisters(default_stub_registers,
+ arraysize(default_stub_registers));
+
+ CHECK_LE(static_cast<size_t>(kParameterCount),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
}
const Register FastNewFunctionContextDescriptor::FunctionRegister() {
@@ -83,27 +88,6 @@ void TypeofDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-
-void FastCloneRegExpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a3, a2, a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a3, a2, a1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a3, a2, a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1};
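
// Usage sketch (not part of the patch): because RecordWriteDescriptor above
// both pins its parameters to fixed registers and restricts allocation to the
// same set, a caller can recover the stub's full clobber set; this is exactly
// what CallRecordWriteStub in macro-assembler-mips.cc below does:
//
//   Callable const callable =
//       Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
//   RegList registers = callable.descriptor().allocatable_registers();
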
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 3373dc601b..caf6b85cc7 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -9,10 +9,12 @@
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
+#include "src/callable.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
+#include "src/mips/assembler-mips-inl.h"
#include "src/mips/macro-assembler-mips.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
@@ -24,45 +26,93 @@ MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: TurboAssembler(isolate, buffer, size, create_code_object) {}
-void TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
- Register exclusion1, Register exclusion2,
- Register exclusion3) {
+TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
+ CodeObjectRequired create_code_object)
+ : Assembler(isolate, buffer, buffer_size),
+ isolate_(isolate),
+ has_double_zero_reg_set_(false) {
+ if (create_code_object == CodeObjectRequired::kYes) {
+ code_object_ =
+ Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
+ }
+}
+
+int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1,
+ Register exclusion2,
+ Register exclusion3) const {
+ int bytes = 0;
RegList exclusions = 0;
- if (!exclusion1.is(no_reg)) {
+ if (exclusion1 != no_reg) {
exclusions |= exclusion1.bit();
- if (!exclusion2.is(no_reg)) {
+ if (exclusion2 != no_reg) {
exclusions |= exclusion2.bit();
- if (!exclusion3.is(no_reg)) {
+ if (exclusion3 != no_reg) {
exclusions |= exclusion3.bit();
}
}
}
- MultiPush(kJSCallerSaved & ~exclusions);
+ RegList list = kJSCallerSaved & ~exclusions;
+ bytes += NumRegs(list) * kPointerSize;
if (fp_mode == kSaveFPRegs) {
- MultiPushFPU(kCallerSavedFPU);
+ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
+
+ return bytes;
}
-void TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
+ int bytes = 0;
+ RegList exclusions = 0;
+ if (exclusion1 != no_reg) {
+ exclusions |= exclusion1.bit();
+ if (exclusion2 != no_reg) {
+ exclusions |= exclusion2.bit();
+ if (exclusion3 != no_reg) {
+ exclusions |= exclusion3.bit();
+ }
+ }
+ }
+
+ RegList list = kJSCallerSaved & ~exclusions;
+ MultiPush(list);
+ bytes += NumRegs(list) * kPointerSize;
+
+ if (fp_mode == kSaveFPRegs) {
+ MultiPushFPU(kCallerSavedFPU);
+ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
+ }
+
+ return bytes;
+}
+
+int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+ Register exclusion2, Register exclusion3) {
+ int bytes = 0;
if (fp_mode == kSaveFPRegs) {
MultiPopFPU(kCallerSavedFPU);
+ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
RegList exclusions = 0;
- if (!exclusion1.is(no_reg)) {
+ if (exclusion1 != no_reg) {
exclusions |= exclusion1.bit();
- if (!exclusion2.is(no_reg)) {
+ if (exclusion2 != no_reg) {
exclusions |= exclusion2.bit();
- if (!exclusion3.is(no_reg)) {
+ if (exclusion3 != no_reg) {
exclusions |= exclusion3.bit();
}
}
}
- MultiPop(kJSCallerSaved & ~exclusions);
+ RegList list = kJSCallerSaved & ~exclusions;
+ MultiPop(list);
+ bytes += NumRegs(list) * kPointerSize;
+
+ return bytes;
}
void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
@@ -138,16 +188,12 @@ void MacroAssembler::InNewSpace(Register object,
// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
// The register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
-void MacroAssembler::RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register dst,
- RAStatus ra_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check,
- PointersToHereCheck pointers_to_here_check_for_value) {
+void MacroAssembler::RecordWriteField(Register object, int offset,
+ Register value, Register dst,
+ RAStatus ra_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
DCHECK(!AreAliased(value, dst, t8, object));
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
@@ -171,14 +217,8 @@ void MacroAssembler::RecordWriteField(
bind(&ok);
}
- RecordWrite(object,
- dst,
- value,
- ra_status,
- save_fp,
- remembered_set_action,
- OMIT_SMI_CHECK,
- pointers_to_here_check_for_value);
+ RecordWrite(object, dst, value, ra_status, save_fp, remembered_set_action,
+ OMIT_SMI_CHECK);
bind(&done);
@@ -190,99 +230,74 @@ void MacroAssembler::RecordWriteField(
}
}
-
-// Clobbers object, dst, map, and ra, if (ra_status == kRAHasBeenSaved)
-void MacroAssembler::RecordWriteForMap(Register object,
- Register map,
- Register dst,
- RAStatus ra_status,
- SaveFPRegsMode fp_mode) {
- if (emit_debug_code()) {
- DCHECK(!dst.is(at));
- lw(dst, FieldMemOperand(map, HeapObject::kMapOffset));
- Check(eq,
- kWrongAddressOrValuePassedToRecordWrite,
- dst,
- Operand(isolate()->factory()->meta_map()));
- }
-
- if (!FLAG_incremental_marking) {
- return;
+void TurboAssembler::SaveRegisters(RegList registers) {
+ DCHECK(NumRegs(registers) > 0);
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
}
+ MultiPush(regs);
+}
- if (emit_debug_code()) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- Check(eq, kWrongAddressOrValuePassedToRecordWrite, map, Operand(scratch));
+void TurboAssembler::RestoreRegisters(RegList registers) {
+ DCHECK(NumRegs(registers) > 0);
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
}
+ MultiPop(regs);
+}
- Label done;
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
+ // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
+ // i.e. always emit the remembered set and save FP registers in
+ // RecordWriteStub. If a large performance regression is observed, we should
+ // use these values to avoid unnecessary work.
- // A single check of the map's pages interesting flag suffices, since it is
- // only set during incremental collection, and then it's also guaranteed that
- // the from object's page's interesting flag is also set. This optimization
- // relies on the fact that maps can never be in new space.
- CheckPageFlag(map,
- map, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- eq,
- &done);
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
+ RegList registers = callable.descriptor().allocatable_registers();
- Addu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
- if (emit_debug_code()) {
- Label ok;
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- And(scratch, dst, Operand(kPointerSize - 1));
- Branch(&ok, eq, scratch, Operand(zero_reg));
- stop("Unaligned cell in write barrier");
- bind(&ok);
- }
+ SaveRegisters(registers);
+ Register object_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kObject));
+ Register slot_parameter(
+ callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
+ Register isolate_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kIsolate));
+ Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kRememberedSet));
+ Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kFPMode));
- // Record the actual write.
- if (ra_status == kRAHasNotBeenSaved) {
- push(ra);
- }
- RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
- fp_mode);
- CallStub(&stub);
- if (ra_status == kRAHasNotBeenSaved) {
- pop(ra);
- }
+ Push(object);
+ Push(address);
- bind(&done);
+ Pop(slot_parameter);
+ Pop(object_parameter);
- {
- // Count number of write barriers in generated code.
- isolate()->counters()->write_barriers_static()->Increment();
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1,
- scratch, dst);
- }
+ li(isolate_parameter, Operand(ExternalReference::isolate_address(isolate())));
+ Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
+ Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
+ Call(callable.code(), RelocInfo::CODE_TARGET);
- // Clobber clobbered registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- li(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
- li(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
- }
+ RestoreRegisters(registers);
}
-
// Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
// The register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
-void MacroAssembler::RecordWrite(
- Register object,
- Register address,
- Register value,
- RAStatus ra_status,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check,
- PointersToHereCheck pointers_to_here_check_for_value) {
+void MacroAssembler::RecordWrite(Register object, Register address,
+ Register value, RAStatus ra_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
DCHECK(!AreAliased(object, address, value, t8));
DCHECK(!AreAliased(object, address, value, t9));
@@ -308,13 +323,9 @@ void MacroAssembler::RecordWrite(
JumpIfSmi(value, &done);
}
- if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- eq,
- &done);
- }
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
CheckPageFlag(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask,
@@ -325,9 +336,13 @@ void MacroAssembler::RecordWrite(
if (ra_status == kRAHasNotBeenSaved) {
push(ra);
}
+#ifdef V8_CSA_WRITE_BARRIER
+ CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
+#else
RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
fp_mode);
CallStub(&stub);
+#endif
if (ra_status == kRAHasNotBeenSaved) {
pop(ra);
}
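
// Editorial note on the Push/Pop pairing in CallRecordWriteStub above: it
// routes {object, address} into the descriptor's parameter registers via the
// stack, which stays correct even when the source and destination registers
// alias each other. Sketch of the effect, assuming a descending sp:
//
//   push object            // sp[1] = object
//   push address           // sp[0] = address
//   pop  slot_parameter    // slot_parameter   = address
//   pop  object_parameter  // object_parameter = object
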
@@ -352,10 +367,8 @@ void MacroAssembler::RecordWrite(
}
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
- Register address,
- Register scratch,
- SaveFPRegsMode fp_mode,
- RememberedSetFinalAction and_then) {
+ Register address, Register scratch,
+ SaveFPRegsMode fp_mode) {
Label done;
if (emit_debug_code()) {
Label ok;
@@ -376,20 +389,13 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
// Call stub on end of buffer.
// Check for end of buffer.
And(t8, scratch, Operand(StoreBuffer::kStoreBufferMask));
- if (and_then == kFallThroughAtEnd) {
- Branch(&done, ne, t8, Operand(zero_reg));
- } else {
- DCHECK(and_then == kReturnAtEnd);
- Ret(ne, t8, Operand(zero_reg));
- }
+ Ret(ne, t8, Operand(zero_reg));
push(ra);
StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
CallStub(&store_buffer_overflow);
pop(ra);
bind(&done);
- if (and_then == kReturnAtEnd) {
- Ret();
- }
+ Ret();
}
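// Flow of the check above, roughly: the updated store-buffer top held in
// 'scratch' is masked with StoreBuffer::kStoreBufferMask; a non-zero result
// means the buffer still has room and the helper returns via Ret(ne, ...),
// while a zero result (top wrapped around to the buffer limit) falls
// through into the StoreBufferOverflowStub call that drains the buffer.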
@@ -406,7 +412,7 @@ void TurboAssembler::Addu(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
addu(rd, rs, scratch);
}
@@ -424,14 +430,14 @@ void TurboAssembler::Subu(Register rd, Register rs, const Operand& rt) {
// -imm and addu for cases where loading -imm generates one instruction.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, -rt.immediate());
addu(rd, rs, scratch);
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
subu(rd, rs, scratch);
}
@@ -450,7 +456,7 @@ void TurboAssembler::Mul(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
if (IsMipsArchVariant(kLoongson)) {
mult(rs, scratch);
@@ -469,13 +475,13 @@ void TurboAssembler::Mul(Register rd_hi, Register rd_lo, Register rs,
mflo(rd_lo);
mfhi(rd_hi);
} else {
- if (rd_lo.is(rs)) {
- DCHECK(!rd_hi.is(rs));
- DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
+ if (rd_lo == rs) {
+ DCHECK(rd_hi != rs);
+ DCHECK(rd_hi != rt.rm() && rd_lo != rt.rm());
muh(rd_hi, rs, rt.rm());
mul(rd_lo, rs, rt.rm());
} else {
- DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
+ DCHECK(rd_hi != rt.rm() && rd_lo != rt.rm());
mul(rd_lo, rs, rt.rm());
muh(rd_hi, rs, rt.rm());
}
@@ -484,20 +490,20 @@ void TurboAssembler::Mul(Register rd_hi, Register rd_lo, Register rs,
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
if (!IsMipsArchVariant(kMips32r6)) {
mult(rs, scratch);
mflo(rd_lo);
mfhi(rd_hi);
} else {
- if (rd_lo.is(rs)) {
- DCHECK(!rd_hi.is(rs));
- DCHECK(!rd_hi.is(scratch) && !rd_lo.is(scratch));
+ if (rd_lo == rs) {
+ DCHECK(rd_hi != rs);
+ DCHECK(rd_hi != scratch && rd_lo != scratch);
muh(rd_hi, rs, scratch);
mul(rd_lo, rs, scratch);
} else {
- DCHECK(!rd_hi.is(scratch) && !rd_lo.is(scratch));
+ DCHECK(rd_hi != scratch && rd_lo != scratch);
mul(rd_lo, rs, scratch);
muh(rd_hi, rs, scratch);
}
@@ -507,13 +513,13 @@ void TurboAssembler::Mul(Register rd_hi, Register rd_lo, Register rs,
void TurboAssembler::Mulu(Register rd_hi, Register rd_lo, Register rs,
const Operand& rt) {
- Register reg;
+ Register reg = no_reg;
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
if (rt.is_reg()) {
reg = rt.rm();
} else {
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
reg = scratch;
li(reg, rt);
}
@@ -523,13 +529,13 @@ void TurboAssembler::Mulu(Register rd_hi, Register rd_lo, Register rs,
mflo(rd_lo);
mfhi(rd_hi);
} else {
- if (rd_lo.is(rs)) {
- DCHECK(!rd_hi.is(rs));
- DCHECK(!rd_hi.is(reg) && !rd_lo.is(reg));
+ if (rd_lo == rs) {
+ DCHECK(rd_hi != rs);
+ DCHECK(rd_hi != reg && rd_lo != reg);
muhu(rd_hi, rs, reg);
mulu(rd_lo, rs, reg);
} else {
- DCHECK(!rd_hi.is(reg) && !rd_lo.is(reg));
+ DCHECK(rd_hi != reg && rd_lo != reg);
mulu(rd_lo, rs, reg);
muhu(rd_hi, rs, reg);
}
@@ -548,7 +554,7 @@ void TurboAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
if (!IsMipsArchVariant(kMips32r6)) {
mult(rs, scratch);
@@ -566,7 +572,7 @@ void TurboAssembler::Mult(Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
mult(rs, scratch);
}
@@ -584,7 +590,7 @@ void TurboAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
if (!IsMipsArchVariant(kMips32r6)) {
multu(rs, scratch);
@@ -602,7 +608,7 @@ void TurboAssembler::Multu(Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
multu(rs, scratch);
}
@@ -615,7 +621,7 @@ void TurboAssembler::Div(Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
div(rs, scratch);
}
@@ -636,7 +642,7 @@ void TurboAssembler::Div(Register rem, Register res, Register rs,
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
if (!IsMipsArchVariant(kMips32r6)) {
div(rs, scratch);
@@ -661,7 +667,7 @@ void TurboAssembler::Div(Register res, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
if (!IsMipsArchVariant(kMips32r6)) {
div(rs, scratch);
@@ -684,7 +690,7 @@ void TurboAssembler::Mod(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
if (!IsMipsArchVariant(kMips32r6)) {
div(rs, scratch);
@@ -707,7 +713,7 @@ void TurboAssembler::Modu(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
if (!IsMipsArchVariant(kMips32r6)) {
divu(rs, scratch);
@@ -725,7 +731,7 @@ void TurboAssembler::Divu(Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
divu(rs, scratch);
}
@@ -743,7 +749,7 @@ void TurboAssembler::Divu(Register res, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
if (!IsMipsArchVariant(kMips32r6)) {
divu(rs, scratch);
@@ -764,7 +770,7 @@ void TurboAssembler::And(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
and_(rd, rs, scratch);
}
@@ -781,7 +787,7 @@ void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
or_(rd, rs, scratch);
}
@@ -798,7 +804,7 @@ void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
xor_(rd, rs, scratch);
}
@@ -812,7 +818,7 @@ void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
nor(rd, rs, scratch);
}
@@ -831,8 +837,8 @@ void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) {
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
- Register scratch = rd.is(at) ? t8 : temps.Acquire();
- DCHECK(!rs.is(scratch));
+ Register scratch = rd == at ? t8 : temps.Acquire();
+ DCHECK(rs != scratch);
li(scratch, rt);
slt(rd, rs, scratch);
}
@@ -854,8 +860,8 @@ void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
- Register scratch = rd.is(at) ? t8 : temps.Acquire();
- DCHECK(!rs.is(scratch));
+ Register scratch = rd == at ? t8 : temps.Acquire();
+ DCHECK(rs != scratch);
li(scratch, rt);
sltu(rd, rs, scratch);
}
@@ -872,7 +878,7 @@ void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
} else {
if (rt.is_reg()) {
UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
subu(scratch, zero_reg, rt.rm());
sllv(scratch, rs, scratch);
srlv(rd, rs, rt.rm());
@@ -906,8 +912,8 @@ void TurboAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
if (IsMipsArchVariant(kMips32r6) && sa <= 4) {
lsa(rd, rt, rs, sa - 1);
} else {
- Register tmp = rd.is(rt) ? scratch : rd;
- DCHECK(!tmp.is(rt));
+ Register tmp = rd == rt ? scratch : rd;
+ DCHECK(tmp != rt);
sll(tmp, rs, sa);
Addu(rd, rt, tmp);
}
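// Lsa computes rd = rt + (rs << sa). The single r6 lsa instruction encodes
// the shift amount minus one in a two-bit field, hence the 'sa - 1' operand
// and the 'sa <= 4' guard; other variants fall back to sll plus Addu,
// reusing rd as the temporary unless rd aliases rt.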
@@ -1005,8 +1011,8 @@ void TurboAssembler::ByteSwapUnsigned(Register dest, Register src,
}
void TurboAssembler::Ulw(Register rd, const MemOperand& rs) {
- DCHECK(!rd.is(at));
- DCHECK(!rs.rm().is(at));
+ DCHECK(rd != at);
+ DCHECK(rs.rm() != at);
if (IsMipsArchVariant(kMips32r6)) {
lw(rd, rs);
} else {
@@ -1016,7 +1022,7 @@ void TurboAssembler::Ulw(Register rd, const MemOperand& rs) {
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 3 fits into int16_t.
AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3);
- if (!rd.is(source.rm())) {
+ if (rd != source.rm()) {
lwr(rd, MemOperand(source.rm(), source.offset() + kMipsLwrOffset));
lwl(rd, MemOperand(source.rm(), source.offset() + kMipsLwlOffset));
} else {
@@ -1030,9 +1036,9 @@ void TurboAssembler::Ulw(Register rd, const MemOperand& rs) {
}
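// On pre-r6 cores lwr/lwl emulate the unaligned load with two aligned
// accesses: each instruction fetches the word containing its effective
// address and merges only the bytes that overlap the destination, so the
// pair at offsets kMipsLwrOffset and kMipsLwlOffset reassembles a word that
// straddles a word boundary without an alignment fault. (When rd aliases
// the base register, the merge has to go through a temporary first, hence
// the separate branch above.)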
void TurboAssembler::Usw(Register rd, const MemOperand& rs) {
- DCHECK(!rd.is(at));
- DCHECK(!rs.rm().is(at));
- DCHECK(!rd.is(rs.rm()));
+ DCHECK(rd != at);
+ DCHECK(rs.rm() != at);
+ DCHECK(rd != rs.rm());
if (IsMipsArchVariant(kMips32r6)) {
sw(rd, rs);
} else {
@@ -1048,8 +1054,8 @@ void TurboAssembler::Usw(Register rd, const MemOperand& rs) {
}
void TurboAssembler::Ulh(Register rd, const MemOperand& rs) {
- DCHECK(!rd.is(at));
- DCHECK(!rs.rm().is(at));
+ DCHECK(rd != at);
+ DCHECK(rs.rm() != at);
if (IsMipsArchVariant(kMips32r6)) {
lh(rd, rs);
} else {
@@ -1060,7 +1066,7 @@ void TurboAssembler::Ulh(Register rd, const MemOperand& rs) {
AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- if (source.rm().is(scratch)) {
+ if (source.rm() == scratch) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
lb(rd, MemOperand(source.rm(), source.offset() + 1));
lbu(scratch, source);
@@ -1083,8 +1089,8 @@ void TurboAssembler::Ulh(Register rd, const MemOperand& rs) {
}
void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) {
- DCHECK(!rd.is(at));
- DCHECK(!rs.rm().is(at));
+ DCHECK(rd != at);
+ DCHECK(rs.rm() != at);
if (IsMipsArchVariant(kMips32r6)) {
lhu(rd, rs);
} else {
@@ -1095,7 +1101,7 @@ void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) {
AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- if (source.rm().is(scratch)) {
+ if (source.rm() == scratch) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
lbu(rd, MemOperand(source.rm(), source.offset() + 1));
lbu(scratch, source);
@@ -1118,10 +1124,10 @@ void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) {
}
void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
- DCHECK(!rd.is(at));
- DCHECK(!rs.rm().is(at));
- DCHECK(!rs.rm().is(scratch));
- DCHECK(!scratch.is(at));
+ DCHECK(rd != at);
+ DCHECK(rs.rm() != at);
+ DCHECK(rs.rm() != scratch);
+ DCHECK(scratch != at);
if (IsMipsArchVariant(kMips32r6)) {
sh(rd, rs);
} else {
@@ -1131,7 +1137,7 @@ void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
// Adjust offset for two accesses and check if offset + 1 fits into int16_t.
AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
- if (!scratch.is(rd)) {
+ if (scratch != rd) {
mov(scratch, rd);
}
@@ -1173,7 +1179,7 @@ void TurboAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
void TurboAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
- DCHECK(!scratch.is(at));
+ DCHECK(scratch != at);
if (IsMipsArchVariant(kMips32r6)) {
Ldc1(fd, rs);
} else {
@@ -1188,7 +1194,7 @@ void TurboAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
void TurboAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
- DCHECK(!scratch.is(at));
+ DCHECK(scratch != at);
if (IsMipsArchVariant(kMips32r6)) {
Sdc1(fd, rs);
} else {
@@ -1209,8 +1215,7 @@ void TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES);
lwc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset));
if (IsFp32Mode()) { // fp32 mode.
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
+ FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1);
lwc1(nextfpreg,
MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
} else {
@@ -1219,7 +1224,7 @@ void TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!src.rm().is(scratch));
+ DCHECK(src.rm() != scratch);
lw(scratch, MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
Mthc1(scratch, fd);
}
@@ -1233,15 +1238,14 @@ void TurboAssembler::Sdc1(FPURegister fd, const MemOperand& src) {
AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES);
swc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset));
if (IsFp32Mode()) { // fp32 mode.
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
+ FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1);
swc1(nextfpreg,
MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
} else {
DCHECK(IsFp64Mode() || IsFpxxMode());
// Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
- DCHECK(!src.rm().is(t8));
+ DCHECK(src.rm() != t8);
Mfhc1(t8, fd);
sw(t8, MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
}
@@ -1374,7 +1378,7 @@ void TurboAssembler::AddPair(Register dst_low, Register dst_high,
Register left_low, Register left_high,
Register right_low, Register right_high) {
Register kScratchReg = s3;
- if (left_low.is(right_low)) {
+ if (left_low == right_low) {
// Special case for left = right and the sum potentially overwriting both
// left and right.
Slt(kScratchReg, left_low, zero_reg);
@@ -1383,7 +1387,7 @@ void TurboAssembler::AddPair(Register dst_low, Register dst_high,
Addu(dst_low, left_low, right_low);
// If the sum overwrites right, left remains unchanged; otherwise right
// remains unchanged.
- Sltu(kScratchReg, dst_low, (dst_low.is(right_low)) ? left_low : right_low);
+ Sltu(kScratchReg, dst_low, (dst_low == right_low) ? left_low : right_low);
}
Addu(dst_high, left_high, right_high);
Addu(dst_high, dst_high, kScratchReg);
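// The carry out of the low-word addition is recovered arithmetically:
// after dst_low = left_low + right_low (mod 2^32), the sum wrapped iff
// dst_low is unsigned-less-than the surviving original operand, which is
// what the Sltu computes (e.g. 0xFFFFFFFF + 0x2 = 0x1, and 0x1 < 0x2 gives
// a carry of 1). The left_low == right_low special case reads the carry
// from the sign bit instead, since 2 * left_low overflows exactly when
// left_low has bit 31 set.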
@@ -1587,7 +1591,7 @@ void TurboAssembler::Ins(Register rt, Register rs, uint16_t pos,
if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
ins_(rt, rs, pos, size);
} else {
- DCHECK(!rt.is(t8) && !rs.is(t8));
+ DCHECK(rt != t8 && rs != t8);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Subu(scratch, zero_reg, Operand(1));
@@ -1708,8 +1712,8 @@ void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs,
cvt_d_l(fd, scratch);
} else {
// Convert rs to a FP value in fd.
- DCHECK(!fd.is(scratch));
- DCHECK(!rs.is(at));
+ DCHECK(fd != scratch);
+ DCHECK(rs != at);
Label msb_clear, conversion_done;
// For a value which is < 2^31, regard it as a signed positive word.
@@ -1750,8 +1754,8 @@ void TurboAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
mtc1(t8, fd);
}
-void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
- if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
+void TurboAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
+ if (IsMipsArchVariant(kLoongson) && fd == fs) {
Mfhc1(t8, fs);
trunc_w_d(fd, fs);
Mthc1(t8, fs);
@@ -1760,9 +1764,8 @@ void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
}
}
-
-void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
- if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
+void TurboAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
+ if (IsMipsArchVariant(kLoongson) && fd == fs) {
Mfhc1(t8, fs);
round_w_d(fd, fs);
Mthc1(t8, fs);
@@ -1771,9 +1774,8 @@ void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
}
}
-
-void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
- if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
+void TurboAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
+ if (IsMipsArchVariant(kLoongson) && fd == fs) {
Mfhc1(t8, fs);
floor_w_d(fd, fs);
Mthc1(t8, fs);
@@ -1782,9 +1784,8 @@ void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
}
}
-
-void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
- if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
+void TurboAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
+ if (IsMipsArchVariant(kLoongson) && fd == fs) {
Mfhc1(t8, fs);
ceil_w_d(fd, fs);
Mthc1(t8, fs);
@@ -1795,8 +1796,8 @@ void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
void TurboAssembler::Trunc_uw_d(FPURegister fd, Register rs,
FPURegister scratch) {
- DCHECK(!fd.is(scratch));
- DCHECK(!rs.is(at));
+ DCHECK(fd != scratch);
+ DCHECK(rs != at);
{
// Load 2^31 into scratch as its float representation.
@@ -1830,8 +1831,8 @@ void TurboAssembler::Trunc_uw_d(FPURegister fd, Register rs,
void TurboAssembler::Trunc_uw_s(FPURegister fd, Register rs,
FPURegister scratch) {
- DCHECK(!fd.is(scratch));
- DCHECK(!rs.is(at));
+ DCHECK(fd != scratch);
+ DCHECK(rs != at);
{
// Load 2^31 into scratch as its float representation.
@@ -1887,7 +1888,7 @@ void TurboAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
if (IsMipsArchVariant(kMips32r2)) {
madd_s(fd, fr, fs, ft);
} else {
- DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ DCHECK(fr != scratch && fs != scratch && ft != scratch);
mul_s(scratch, fs, ft);
add_s(fd, fr, scratch);
}
@@ -1898,7 +1899,7 @@ void TurboAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
if (IsMipsArchVariant(kMips32r2)) {
madd_d(fd, fr, fs, ft);
} else {
- DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ DCHECK(fr != scratch && fs != scratch && ft != scratch);
mul_d(scratch, fs, ft);
add_d(fd, fr, scratch);
}
@@ -1909,7 +1910,7 @@ void TurboAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
if (IsMipsArchVariant(kMips32r2)) {
msub_s(fd, fr, fs, ft);
} else {
- DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ DCHECK(fr != scratch && fs != scratch && ft != scratch);
mul_s(scratch, fs, ft);
sub_s(fd, scratch, fr);
}
@@ -1920,7 +1921,7 @@ void TurboAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
if (IsMipsArchVariant(kMips32r2)) {
msub_d(fd, fr, fs, ft);
} else {
- DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ DCHECK(fr != scratch && fs != scratch && ft != scratch);
mul_d(scratch, fs, ft);
sub_d(fd, scratch, fr);
}
@@ -1962,7 +1963,7 @@ void TurboAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
} else {
// Use kDoubleCompareReg for the comparison result. It has to be
// unavailable to the Lithium register allocator.
- DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
+ DCHECK(cmp1 != kDoubleCompareReg && cmp2 != kDoubleCompareReg);
if (long_branch) {
Label skip;
cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
@@ -2065,7 +2066,7 @@ void TurboAssembler::BranchShortF(SecondaryField sizeField, Label* target,
// Unsigned conditions are treated as their signed counterparts.
// Use kDoubleCompareReg for the comparison result; it is
// valid in fp64 (FR = 1) mode, which is implied for mips32r6.
- DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
+ DCHECK(cmp1 != kDoubleCompareReg && cmp2 != kDoubleCompareReg);
switch (cc) {
case lt:
cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
@@ -2210,7 +2211,7 @@ void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!src_low.is(scratch));
+ DCHECK(src_low != scratch);
mfhc1(scratch, dst);
mtc1(src_low, dst);
mthc1(scratch, dst);
@@ -2254,7 +2255,7 @@ void TurboAssembler::Move(FPURegister dst, double imm) {
} else {
Mthc1(zero_reg, dst);
}
- if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true;
+ if (dst == kDoubleRegZero) has_double_zero_reg_set_ = true;
}
}
@@ -2285,7 +2286,7 @@ void TurboAssembler::Movt(Register rd, Register rs, uint16_t cc) {
// Tests an FP condition code and then conditionally move rs to rd.
// We do not currently use any FPU cc bit other than bit 0.
DCHECK(cc == 0);
- DCHECK(!(rs.is(t8) || rd.is(t8)));
+ DCHECK(rs != t8 && rd != t8);
Label done;
Register scratch = t8;
// For testing purposes we need to fetch content of the FCSR register and
@@ -2310,7 +2311,7 @@ void TurboAssembler::Movf(Register rd, Register rs, uint16_t cc) {
// Tests an FP condition code and then conditionally move rs to rd.
// We do not currently use any FPU cc bit other than bit 0.
DCHECK(cc == 0);
- DCHECK(!(rs.is(t8) || rd.is(t8)));
+ DCHECK(rs != t8 && rd != t8);
Label done;
Register scratch = t8;
// For testing purposes we need to fetch content of the FCSR register and
@@ -2332,7 +2333,7 @@ void TurboAssembler::Movf(Register rd, Register rs, uint16_t cc) {
void TurboAssembler::Clz(Register rd, Register rs) {
if (IsMipsArchVariant(kLoongson)) {
- DCHECK(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
+ DCHECK(rd != t8 && rd != t9 && rs != t8 && rs != t9);
Register mask = t8;
Register scratch = t9;
Label loop, end;
@@ -2363,9 +2364,9 @@ void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
DoubleRegister double_scratch,
Register except_flag,
CheckForInexactConversion check_inexact) {
- DCHECK(!result.is(scratch));
- DCHECK(!double_input.is(double_scratch));
- DCHECK(!except_flag.is(scratch));
+ DCHECK(result != scratch);
+ DCHECK(double_input != double_scratch);
+ DCHECK(except_flag != scratch);
Label done;
@@ -2466,9 +2467,9 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
// Emulated conditional branches do not emit a nop in the branch delay slot.
//
// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
-#define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK( \
- (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
- (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
+#define BRANCH_ARGS_CHECK(cond, rs, rt) \
+ DCHECK((cond == cc_always && rs == zero_reg && rt.rm() == zero_reg) || \
+ (cond != cc_always && (rs != zero_reg || rt.rm() != zero_reg)))
void TurboAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
DCHECK(IsMipsArchVariant(kMips32r6) ? is_int26(offset) : is_int16(offset));
@@ -2575,7 +2576,7 @@ void TurboAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
static inline bool IsZero(const Operand& rt) {
if (rt.is_reg()) {
- return rt.rm().is(zero_reg);
+ return rt.rm() == zero_reg;
} else {
return rt.immediate() == 0;
}
@@ -2636,7 +2637,7 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
bc(offset);
break;
case eq:
- if (rs.code() == rt.rm().reg_code) {
+ if (rt.is_reg() && rs.code() == rt.rm().code()) {
// Pre R6 beq is used here to make the code patchable. Otherwise bc
// should be used, which has no condition field and is therefore not
// patchable.
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
@@ -2654,7 +2655,7 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
}
break;
case ne:
- if (rs.code() == rt.rm().reg_code) {
+ if (rt.is_reg() && rs.code() == rt.rm().code()) {
// Pre R6 bne is used here to make the code patchable. Otherwise we
// should not generate any instruction.
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
@@ -2675,9 +2676,9 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
// Signed comparison.
case greater:
// rs > rt
- if (rs.code() == rt.rm().reg_code) {
+ if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
- } else if (rs.is(zero_reg)) {
+ } else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
bltzc(scratch, offset);
@@ -2687,16 +2688,16 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
} else {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
bltc(scratch, rs, offset);
}
break;
case greater_equal:
// rs >= rt
- if (rs.code() == rt.rm().reg_code) {
+ if (rt.is_reg() && rs.code() == rt.rm().code()) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
bc(offset);
- } else if (rs.is(zero_reg)) {
+ } else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
blezc(scratch, offset);
@@ -2706,15 +2707,15 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
} else {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
bgec(rs, scratch, offset);
}
break;
case less:
// rs < rt
- if (rs.code() == rt.rm().reg_code) {
+ if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
- } else if (rs.is(zero_reg)) {
+ } else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
bgtzc(scratch, offset);
@@ -2724,16 +2725,16 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
} else {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
bltc(rs, scratch, offset);
}
break;
case less_equal:
// rs <= rt
- if (rs.code() == rt.rm().reg_code) {
+ if (rt.is_reg() && rs.code() == rt.rm().code()) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
bc(offset);
- } else if (rs.is(zero_reg)) {
+ } else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
bgezc(scratch, offset);
@@ -2743,7 +2744,7 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
} else {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
bgec(scratch, rs, offset);
}
break;
@@ -2751,9 +2752,9 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
// Unsigned comparison.
case Ugreater:
// rs > rt
- if (rs.code() == rt.rm().reg_code) {
+ if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
- } else if (rs.is(zero_reg)) {
+ } else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
return false;
bnezc(scratch, offset);
@@ -2763,16 +2764,16 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
} else {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
bltuc(scratch, rs, offset);
}
break;
case Ugreater_equal:
// rs >= rt
- if (rs.code() == rt.rm().reg_code) {
+ if (rt.is_reg() && rs.code() == rt.rm().code()) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
bc(offset);
- } else if (rs.is(zero_reg)) {
+ } else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
return false;
beqzc(scratch, offset);
@@ -2782,15 +2783,15 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
} else {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
bgeuc(rs, scratch, offset);
}
break;
case Uless:
// rs < rt
- if (rs.code() == rt.rm().reg_code) {
+ if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
- } else if (rs.is(zero_reg)) {
+ } else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
return false;
bnezc(scratch, offset);
@@ -2799,16 +2800,16 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
} else {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
bltuc(rs, scratch, offset);
}
break;
case Uless_equal:
// rs <= rt
- if (rs.code() == rt.rm().reg_code) {
+ if (rt.is_reg() && rs.code() == rt.rm().code()) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
bc(offset);
- } else if (rs.is(zero_reg)) {
+ } else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset26, scratch, rt))
return false;
bc(offset);
@@ -2818,7 +2819,7 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
} else {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
bgeuc(scratch, rs, offset);
}
break;
@@ -3116,9 +3117,9 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
// Signed comparison.
case greater:
// rs > rt
- if (rs.code() == rt.rm().reg_code) {
+ if (rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
- } else if (rs.is(zero_reg)) {
+ } else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
bltzalc(scratch, offset);
@@ -3134,10 +3135,10 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
break;
case greater_equal:
// rs >= rt
- if (rs.code() == rt.rm().reg_code) {
+ if (rs.code() == rt.rm().code()) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
balc(offset);
- } else if (rs.is(zero_reg)) {
+ } else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
blezalc(scratch, offset);
@@ -3153,9 +3154,9 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
break;
case less:
// rs < rt
- if (rs.code() == rt.rm().reg_code) {
+ if (rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
- } else if (rs.is(zero_reg)) {
+ } else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
bgtzalc(scratch, offset);
@@ -3171,10 +3172,10 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
break;
case less_equal:
// rs <= rt
- if (rs.code() == rt.rm().reg_code) {
+ if (rs.code() == rt.rm().code()) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
balc(offset);
- } else if (rs.is(zero_reg)) {
+ } else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
bgezalc(scratch, offset);
@@ -3389,7 +3390,7 @@ void TurboAssembler::Jump(Register target, Register base, int16_t offset,
if (offset != 0) {
Addu(target, base, offset);
} else { // Call through target
- if (!target.is(base)) mov(target, base);
+ if (target != base) mov(target, base);
}
if (cond == cc_always) {
jr(target);
@@ -3530,8 +3531,8 @@ void TurboAssembler::Call(Register target, int16_t offset, Condition cond,
}
#ifdef DEBUG
- CHECK_EQ(size + CallSize(target, offset, cond, rs, rt, bd),
- SizeOfCodeGeneratedSince(&start));
+ DCHECK_EQ(size + CallSize(target, offset, cond, rs, rt, bd),
+ SizeOfCodeGeneratedSince(&start));
#endif
}
@@ -3559,7 +3560,7 @@ void TurboAssembler::Call(Register target, Register base, int16_t offset,
if (offset != 0) {
Addu(target, base, offset);
} else { // Call through target
- if (!target.is(base)) mov(target, base);
+ if (target != base) mov(target, base);
}
if (cond == cc_always) {
jalr(target);
@@ -3573,8 +3574,8 @@ void TurboAssembler::Call(Register target, Register base, int16_t offset,
}
#ifdef DEBUG
- CHECK_EQ(size + CallSize(target, offset, cond, rs, rt, bd),
- SizeOfCodeGeneratedSince(&start));
+ DCHECK_EQ(size + CallSize(target, offset, cond, rs, rt, bd),
+ SizeOfCodeGeneratedSince(&start));
#endif
}
@@ -3768,7 +3769,7 @@ void TurboAssembler::Drop(int count, Condition cond, Register reg,
void MacroAssembler::Swap(Register reg1,
Register reg2,
Register scratch) {
- if (scratch.is(no_reg)) {
+ if (scratch == no_reg) {
Xor(reg1, reg1, Operand(reg2));
Xor(reg2, reg2, Operand(reg1));
Xor(reg1, reg1, Operand(reg2));
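// The scratch-free path is the classic XOR swap: with a = reg1 and
// b = reg2, the sequence a ^= b; b ^= a; a ^= b exchanges the two values,
// e.g. (a=5, b=3) goes (6,3) -> (6,5) -> (3,5). It relies on reg1 and reg2
// being distinct registers; if they aliased, the first Xor would zero both.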
@@ -3836,92 +3837,6 @@ void MacroAssembler::PopStackHandler() {
}
-void MacroAssembler::Allocate(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
- DCHECK(object_size <= kMaxRegularHeapObjectSize);
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- li(result, 0x7091);
- li(scratch1, 0x7191);
- li(scratch2, 0x7291);
- }
- jmp(gc_required);
- return;
- }
-
- DCHECK(!AreAliased(result, scratch1, scratch2, t9, at));
-
- // Make object size into bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- object_size *= kPointerSize;
- }
- DCHECK_EQ(0, object_size & kObjectAlignmentMask);
-
- // Check relative positions of allocation top and limit addresses.
- // ARM adds additional checks to make sure the ldm instruction can be
- // used. On MIPS we don't have ldm so we don't need additional checks either.
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
- ExternalReference allocation_limit =
- AllocationUtils::GetAllocationLimitReference(isolate(), flags);
-
- intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
- intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
- DCHECK((limit - top) == kPointerSize);
-
- // Set up allocation top address and allocation limit registers.
- Register top_address = scratch1;
- // This code stores a temporary value in t9.
- Register alloc_limit = t9;
- Register result_end = scratch2;
- li(top_address, Operand(allocation_top));
-
- if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into alloc_limit.
- lw(result, MemOperand(top_address));
- lw(alloc_limit, MemOperand(top_address, kPointerSize));
- } else {
- if (emit_debug_code()) {
- // Assert that result actually contains top on entry.
- lw(alloc_limit, MemOperand(top_address));
- Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
- }
- // Load allocation limit. Result already contains allocation top.
- lw(alloc_limit, MemOperand(top_address, limit - top));
- }
-
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- // Align the next allocation. Storing the filler map without checking top is
- // safe in new-space because the limit of the heap is aligned there.
- DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
- And(result_end, result, Operand(kDoubleAlignmentMask));
- Label aligned;
- Branch(&aligned, eq, result_end, Operand(zero_reg));
- if ((flags & PRETENURE) != 0) {
- Branch(gc_required, Ugreater_equal, result, Operand(alloc_limit));
- }
- li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
- sw(result_end, MemOperand(result));
- Addu(result, result, Operand(kDoubleSize / 2));
- bind(&aligned);
- }
-
- // Calculate new top and bail out if new space is exhausted. Use result
- // to calculate the new top.
- Addu(result_end, result, Operand(object_size));
- Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
-
- sw(result_end, MemOperand(top_address));
-
- // Tag object.
- Addu(result, result, Operand(kHeapObjectTag));
-}
-
void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
Label* not_unique_name) {
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
@@ -3937,77 +3852,6 @@ void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
bind(&succeed);
}
-void MacroAssembler::AllocateJSValue(Register result, Register constructor,
- Register value, Register scratch1,
- Register scratch2, Label* gc_required) {
- DCHECK(!result.is(constructor));
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!result.is(value));
-
- // Allocate JSValue in new space.
- Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Initialize the JSValue.
- LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
- sw(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
- LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
- sw(scratch1, FieldMemOperand(result, JSObject::kPropertiesOrHashOffset));
- sw(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
- sw(value, FieldMemOperand(result, JSValue::kValueOffset));
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
-}
-
-void MacroAssembler::CompareMapAndBranch(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* early_success,
- Condition cond,
- Label* branch_to) {
- lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
-}
-
-
-void MacroAssembler::CompareMapAndBranch(Register obj_map,
- Handle<Map> map,
- Label* early_success,
- Condition cond,
- Label* branch_to) {
- Branch(branch_to, cond, obj_map, Operand(map));
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
- Label success;
- CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
- bind(&success);
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Register scratch,
- Heap::RootListIndex index,
- Label* fail,
- SmiCheckType smi_check_type) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
- lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- UseScratchRegisterScope temps(this);
- Register scratch1 = temps.Acquire();
- LoadRoot(scratch1, index);
- Branch(fail, ne, scratch, Operand(scratch1));
-}
-
void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
sub_d(dst, src, kDoubleRegZero);
@@ -4076,8 +3920,8 @@ void TurboAssembler::MovToFloatResult(DoubleRegister src) {
void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
DoubleRegister src2) {
if (!IsMipsSoftFloatABI) {
- if (src2.is(f12)) {
- DCHECK(!src1.is(f14));
+ if (src2 == f12) {
+ DCHECK(src1 != f14);
Move(f14, src2);
Move(f12, src1);
} else {
@@ -4175,8 +4019,8 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// The code below is made a lot easier because the calling code already sets
// up actual and expected registers according to the contract if values are
// passed in registers.
- DCHECK(actual.is_immediate() || actual.reg().is(a0));
- DCHECK(expected.is_immediate() || expected.reg().is(a2));
+ DCHECK(actual.is_immediate() || actual.reg() == a0);
+ DCHECK(expected.is_immediate() || expected.reg() == a2);
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
@@ -4265,8 +4109,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
InvokeFlag flag) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- DCHECK(function.is(a1));
- DCHECK_IMPLIES(new_target.is_valid(), new_target.is(a3));
+ DCHECK(function == a1);
+ DCHECK_IMPLIES(new_target.is_valid(), new_target == a3);
// On function call, call into the debugger if necessary.
CheckDebugHook(function, new_target, expected, actual);
@@ -4304,7 +4148,7 @@ void MacroAssembler::InvokeFunction(Register function, Register new_target,
DCHECK(flag == JUMP_FUNCTION || has_frame());
// Contract with called JS functions requires that function is passed in a1.
- DCHECK(function.is(a1));
+ DCHECK(function == a1);
Register expected_reg = a2;
Register temp_reg = t0;
@@ -4326,7 +4170,7 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK(flag == JUMP_FUNCTION || has_frame());
// Contract with called JS functions requires that function is passed in a1.
- DCHECK(function.is(a1));
+ DCHECK(function == a1);
// Get the function and setup the context.
lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
@@ -4425,16 +4269,16 @@ void TurboAssembler::AddBranchOvf(Register dst, Register left,
} else {
if (IsMipsArchVariant(kMips32r6)) {
Register right_reg = t9;
- DCHECK(!left.is(right_reg));
+ DCHECK(left != right_reg);
li(right_reg, Operand(right));
AddBranchOvf(dst, left, right_reg, overflow_label, no_overflow_label);
} else {
Register overflow_dst = t9;
- DCHECK(!dst.is(scratch));
- DCHECK(!dst.is(overflow_dst));
- DCHECK(!scratch.is(overflow_dst));
- DCHECK(!left.is(overflow_dst));
- if (dst.is(left)) {
+ DCHECK(dst != scratch);
+ DCHECK(dst != overflow_dst);
+ DCHECK(scratch != overflow_dst);
+ DCHECK(left != overflow_dst);
+ if (dst == left) {
mov(scratch, left); // Preserve left.
Addu(dst, left, right.immediate()); // Left is overwritten.
xor_(scratch, dst, scratch); // Original left.
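// The overflow test used throughout these helpers is the branch-free
// sign-bit identity: for dst = left + right, signed overflow occurred iff
// both operands had the same sign and the result's sign differs, i.e.
// ((dst ^ left) & (dst ^ right)) has its sign bit set. The mov/xor shuffle
// above only exists to recover an operand that dst has just overwritten.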
@@ -4461,11 +4305,11 @@ void TurboAssembler::AddBranchOvf(Register dst, Register left, Register right,
if (IsMipsArchVariant(kMips32r6)) {
if (!overflow_label) {
DCHECK(no_overflow_label);
- DCHECK(!dst.is(scratch));
- Register left_reg = left.is(dst) ? scratch : left;
- Register right_reg = right.is(dst) ? t9 : right;
- DCHECK(!dst.is(left_reg));
- DCHECK(!dst.is(right_reg));
+ DCHECK(dst != scratch);
+ Register left_reg = left == dst ? scratch : left;
+ Register right_reg = right == dst ? t9 : right;
+ DCHECK(dst != left_reg);
+ DCHECK(dst != right_reg);
Move(left_reg, left);
Move(right_reg, right);
addu(dst, left, right);
@@ -4477,26 +4321,26 @@ void TurboAssembler::AddBranchOvf(Register dst, Register left, Register right,
}
} else {
Register overflow_dst = t9;
- DCHECK(!dst.is(scratch));
- DCHECK(!dst.is(overflow_dst));
- DCHECK(!scratch.is(overflow_dst));
- DCHECK(!left.is(overflow_dst));
- DCHECK(!right.is(overflow_dst));
- DCHECK(!left.is(scratch));
- DCHECK(!right.is(scratch));
-
- if (left.is(right) && dst.is(left)) {
+ DCHECK(dst != scratch);
+ DCHECK(dst != overflow_dst);
+ DCHECK(scratch != overflow_dst);
+ DCHECK(left != overflow_dst);
+ DCHECK(right != overflow_dst);
+ DCHECK(left != scratch);
+ DCHECK(right != scratch);
+
+ if (left == right && dst == left) {
mov(overflow_dst, right);
right = overflow_dst;
}
- if (dst.is(left)) {
+ if (dst == left) {
mov(scratch, left); // Preserve left.
addu(dst, left, right); // Left is overwritten.
xor_(scratch, dst, scratch); // Original left.
xor_(overflow_dst, dst, right);
and_(overflow_dst, overflow_dst, scratch);
- } else if (dst.is(right)) {
+ } else if (dst == right) {
mov(scratch, right); // Preserve right.
addu(dst, left, right); // Right is overwritten.
xor_(scratch, dst, scratch); // Original right.
@@ -4521,12 +4365,12 @@ void TurboAssembler::SubBranchOvf(Register dst, Register left,
scratch);
} else {
Register overflow_dst = t9;
- DCHECK(!dst.is(scratch));
- DCHECK(!dst.is(overflow_dst));
- DCHECK(!scratch.is(overflow_dst));
- DCHECK(!left.is(overflow_dst));
- DCHECK(!left.is(scratch));
- if (dst.is(left)) {
+ DCHECK(dst != scratch);
+ DCHECK(dst != overflow_dst);
+ DCHECK(scratch != overflow_dst);
+ DCHECK(left != overflow_dst);
+ DCHECK(left != scratch);
+ if (dst == left) {
mov(scratch, left); // Preserve left.
Subu(dst, left, right.immediate()); // Left is overwritten.
// Load right since xori takes uint16 as immediate.
@@ -4551,30 +4395,30 @@ void TurboAssembler::SubBranchOvf(Register dst, Register left, Register right,
Label* no_overflow_label, Register scratch) {
DCHECK(overflow_label || no_overflow_label);
Register overflow_dst = t9;
- DCHECK(!dst.is(scratch));
- DCHECK(!dst.is(overflow_dst));
- DCHECK(!scratch.is(overflow_dst));
- DCHECK(!overflow_dst.is(left));
- DCHECK(!overflow_dst.is(right));
- DCHECK(!scratch.is(left));
- DCHECK(!scratch.is(right));
+ DCHECK(dst != scratch);
+ DCHECK(dst != overflow_dst);
+ DCHECK(scratch != overflow_dst);
+ DCHECK(overflow_dst != left);
+ DCHECK(overflow_dst != right);
+ DCHECK(scratch != left);
+ DCHECK(scratch != right);
// This happens with some Crankshaft code. Since Subu works fine if
// left == right, let's not make that restriction here.
- if (left.is(right)) {
+ if (left == right) {
mov(dst, zero_reg);
if (no_overflow_label) {
Branch(no_overflow_label);
}
}
- if (dst.is(left)) {
+ if (dst == left) {
mov(scratch, left); // Preserve left.
subu(dst, left, right); // Left is overwritten.
xor_(overflow_dst, dst, scratch); // scratch is original left.
xor_(scratch, scratch, right); // scratch is original left.
and_(overflow_dst, scratch, overflow_dst);
- } else if (dst.is(right)) {
+ } else if (dst == right) {
mov(scratch, right); // Preserve right.
subu(dst, left, right); // Right is overwritten.
xor_(overflow_dst, dst, left);
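// For subtraction the analogous identity is
// ((left ^ right) & (left ^ dst)): its sign bit is set iff the operands
// had opposite signs and the result's sign differs from the minuend's,
// which is exactly the signed-overflow condition for dst = left - right.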
@@ -4612,11 +4456,11 @@ void TurboAssembler::MulBranchOvf(Register dst, Register left,
scratch);
} else {
Register overflow_dst = t9;
- DCHECK(!dst.is(scratch));
- DCHECK(!dst.is(overflow_dst));
- DCHECK(!scratch.is(overflow_dst));
- DCHECK(!left.is(overflow_dst));
- DCHECK(!left.is(scratch));
+ DCHECK(dst != scratch);
+ DCHECK(dst != overflow_dst);
+ DCHECK(scratch != overflow_dst);
+ DCHECK(left != overflow_dst);
+ DCHECK(left != scratch);
Mul(overflow_dst, dst, left, right.immediate());
sra(scratch, dst, 31);
@@ -4631,15 +4475,15 @@ void TurboAssembler::MulBranchOvf(Register dst, Register left, Register right,
Label* no_overflow_label, Register scratch) {
DCHECK(overflow_label || no_overflow_label);
Register overflow_dst = t9;
- DCHECK(!dst.is(scratch));
- DCHECK(!dst.is(overflow_dst));
- DCHECK(!scratch.is(overflow_dst));
- DCHECK(!overflow_dst.is(left));
- DCHECK(!overflow_dst.is(right));
- DCHECK(!scratch.is(left));
- DCHECK(!scratch.is(right));
-
- if (IsMipsArchVariant(kMips32r6) && dst.is(right)) {
+ DCHECK(dst != scratch);
+ DCHECK(dst != overflow_dst);
+ DCHECK(scratch != overflow_dst);
+ DCHECK(overflow_dst != left);
+ DCHECK(overflow_dst != right);
+ DCHECK(scratch != left);
+ DCHECK(scratch != right);
+
+ if (IsMipsArchVariant(kMips32r6) && dst == right) {
mov(scratch, right);
Mul(overflow_dst, dst, left, scratch);
sra(scratch, dst, 31);
@@ -4799,21 +4643,6 @@ void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
lw(dst, ContextMemOperand(dst, index));
}
-void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
- Register map,
- Register scratch) {
- // Load the initial map. The global functions all have initial maps.
- lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (emit_debug_code()) {
- Label ok, fail;
- CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
- Branch(&ok);
- bind(&fail);
- Abort(kGlobalFunctionsMustHaveInitialMap);
- bind(&ok);
- }
-}
-
void TurboAssembler::StubPrologue(StackFrame::Type type) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -4927,10 +4756,10 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
And(sp, sp, Operand(-frame_alignment)); // Align stack.
}
- int space = FPURegister::kMaxNumRegisters * kDoubleSize;
+ int space = FPURegister::kNumRegisters * kDoubleSize;
Subu(sp, sp, Operand(space));
// Remember: we only need to save every 2nd double FPU value.
- for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
+ for (int i = 0; i < FPURegister::kNumRegisters; i += 2) {
FPURegister reg = FPURegister::from_code(i);
Sdc1(reg, MemOperand(sp, i * kDoubleSize));
}
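// Only even-numbered FPU register codes start a double: each 64-bit value
// occupies an even/odd pair of 32-bit registers (or just the even register
// in FP64 mode), so stepping i += 2 over the register codes saves every
// double exactly once into its kDoubleSize slot.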
@@ -4962,7 +4791,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
if (save_doubles) {
// Remember: we only need to restore every 2nd double FPU value.
lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
- for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
+ for (int i = 0; i < FPURegister::kNumRegisters; i += 2) {
FPURegister reg = FPURegister::from_code(i);
Ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
}
@@ -5167,24 +4996,9 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
-void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
- Register first, Register second, Register scratch1, Register scratch2,
- Label* failure) {
- // Test that both first and second are sequential one-byte strings.
- // Assume that they are non-smis.
- lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
- lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
- lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
-
- JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
- scratch2, failure);
-}
-
-
void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
- if (src1.is(src2)) {
+ if (src1 == src2) {
Move_s(dst, src1);
return;
}
@@ -5206,13 +5020,13 @@ void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
Branch(&return_right);
bind(&return_right);
- if (!src2.is(dst)) {
+ if (src2 != dst) {
Move_s(dst, src2);
}
Branch(&done);
bind(&return_left);
- if (!src1.is(dst)) {
+ if (src1 != dst) {
Move_s(dst, src1);
}
@@ -5227,7 +5041,7 @@ void TurboAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
- if (src1.is(src2)) {
+ if (src1 == src2) {
Move_s(dst, src1);
return;
}
@@ -5249,13 +5063,13 @@ void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
Branch(&return_left);
bind(&return_right);
- if (!src2.is(dst)) {
+ if (src2 != dst) {
Move_s(dst, src2);
}
Branch(&done);
bind(&return_left);
- if (!src1.is(dst)) {
+ if (src1 != dst) {
Move_s(dst, src1);
}
@@ -5270,7 +5084,7 @@ void TurboAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
void TurboAssembler::Float64Max(DoubleRegister dst, DoubleRegister src1,
DoubleRegister src2, Label* out_of_line) {
- if (src1.is(src2)) {
+ if (src1 == src2) {
Move_d(dst, src1);
return;
}
@@ -5292,13 +5106,13 @@ void TurboAssembler::Float64Max(DoubleRegister dst, DoubleRegister src1,
Branch(&return_right);
bind(&return_right);
- if (!src2.is(dst)) {
+ if (src2 != dst) {
Move_d(dst, src2);
}
Branch(&done);
bind(&return_left);
- if (!src1.is(dst)) {
+ if (src1 != dst) {
Move_d(dst, src1);
}
@@ -5314,7 +5128,7 @@ void TurboAssembler::Float64MaxOutOfLine(DoubleRegister dst,
void TurboAssembler::Float64Min(DoubleRegister dst, DoubleRegister src1,
DoubleRegister src2, Label* out_of_line) {
- if (src1.is(src2)) {
+ if (src1 == src2) {
Move_d(dst, src1);
return;
}
@@ -5336,13 +5150,13 @@ void TurboAssembler::Float64Min(DoubleRegister dst, DoubleRegister src1,
Branch(&return_left);
bind(&return_right);
- if (!src2.is(dst)) {
+ if (src2 != dst) {
Move_d(dst, src2);
}
Branch(&done);
bind(&return_left);
- if (!src1.is(dst)) {
+ if (src1 != dst) {
Move_d(dst, src1);
}
@@ -5356,20 +5170,6 @@ void TurboAssembler::Float64MinOutOfLine(DoubleRegister dst,
add_d(dst, src1, src2);
}
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
- Register first, Register second, Register scratch1, Register scratch2,
- Label* failure) {
- const int kFlatOneByteStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatOneByteStringTag =
- kStringTag | kOneByteStringTag | kSeqStringTag;
- DCHECK(kFlatOneByteStringTag <= 0xffff); // Ensure this fits 16-bit immed.
- andi(scratch1, first, kFlatOneByteStringMask);
- Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
- andi(scratch2, second, kFlatOneByteStringMask);
- Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
-}
-
static const int kRegisterPassedArguments = 4;
int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
@@ -5483,7 +5283,7 @@ void TurboAssembler::CallCFunctionHelper(Register function_base,
// allow preemption, so the return address in the link register
// stays correct.
- if (!function_base.is(t9)) {
+ if (function_base != t9) {
mov(t9, function_base);
function_base = t9;
}
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index d7922a714e..86934ee5a6 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -13,20 +13,20 @@ namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
-constexpr Register kReturnRegister0 = {Register::kCode_v0};
-constexpr Register kReturnRegister1 = {Register::kCode_v1};
-constexpr Register kReturnRegister2 = {Register::kCode_a0};
-constexpr Register kJSFunctionRegister = {Register::kCode_a1};
-constexpr Register kContextRegister = {Register::kCpRegister};
-constexpr Register kAllocateSizeRegister = {Register::kCode_a0};
-constexpr Register kInterpreterAccumulatorRegister = {Register::kCode_v0};
-constexpr Register kInterpreterBytecodeOffsetRegister = {Register::kCode_t4};
-constexpr Register kInterpreterBytecodeArrayRegister = {Register::kCode_t5};
-constexpr Register kInterpreterDispatchTableRegister = {Register::kCode_t6};
-constexpr Register kJavaScriptCallArgCountRegister = {Register::kCode_a0};
-constexpr Register kJavaScriptCallNewTargetRegister = {Register::kCode_a3};
-constexpr Register kRuntimeCallFunctionRegister = {Register::kCode_a1};
-constexpr Register kRuntimeCallArgCountRegister = {Register::kCode_a0};
+constexpr Register kReturnRegister0 = v0;
+constexpr Register kReturnRegister1 = v1;
+constexpr Register kReturnRegister2 = a0;
+constexpr Register kJSFunctionRegister = a1;
+constexpr Register kContextRegister = s7;
+constexpr Register kAllocateSizeRegister = a0;
+constexpr Register kInterpreterAccumulatorRegister = v0;
+constexpr Register kInterpreterBytecodeOffsetRegister = t4;
+constexpr Register kInterpreterBytecodeArrayRegister = t5;
+constexpr Register kInterpreterDispatchTableRegister = t6;
+constexpr Register kJavaScriptCallArgCountRegister = a0;
+constexpr Register kJavaScriptCallNewTargetRegister = a3;
+constexpr Register kRuntimeCallFunctionRegister = a1;
+constexpr Register kRuntimeCallArgCountRegister = a0;
// Forward declaration.
class JumpTarget;
@@ -77,10 +77,6 @@ enum LiFlags {
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-enum PointersToHereCheck {
- kPointersToHereMaybeInteresting,
- kPointersToHereAreAlwaysInteresting
-};
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
Register GetRegisterThatIsNotOneOf(Register reg1,
@@ -128,15 +124,7 @@ inline MemOperand CFunctionArgumentOperand(int index) {
class TurboAssembler : public Assembler {
public:
TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
- CodeObjectRequired create_code_object)
- : Assembler(isolate, buffer, buffer_size),
- isolate_(isolate),
- has_double_zero_reg_set_(false) {
- if (create_code_object == CodeObjectRequired::kYes) {
- code_object_ =
- Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
- }
- }
+ CodeObjectRequired create_code_object);
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() const { return has_frame_; }
@@ -356,18 +344,36 @@ class TurboAssembler : public Assembler {
sw(src, MemOperand(sp, 0));
}
+ void SaveRegisters(RegList registers);
+ void RestoreRegisters(RegList registers);
+
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode);
+
// Push multiple registers on the stack.
// Registers are saved in numerical order, with higher numbered registers
// saved in higher memory addresses.
void MultiPush(RegList regs);
void MultiPushFPU(RegList regs);
- void PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
- Register exclusion2 = no_reg,
- Register exclusion3 = no_reg);
- void PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+  // Calculate how much stack space (in bytes) is required to store caller
+  // registers, excluding those specified in the arguments.
+ int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg) const;
+
+  // Push caller-saved registers on the stack, and return the number of bytes
+  // by which the stack pointer is adjusted.
+ int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
+  // Restore caller-saved registers from the stack, and return the number of
+  // bytes by which the stack pointer is adjusted.
+ int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
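The Push/PopCallerSaved rework above replaces fire-and-forget saves with an API that reports the stack adjustment. A minimal, self-contained sketch of the size computation, with toy register lists and constants standing in for the real V8 ones (this is an illustration, not the committed implementation):

#include <bitset>
#include <cstdint>

// Toy model: registers are bit positions in a 32-bit list; pointers are
// 4 bytes (mips32) and doubles 8. All names here are illustrative.
using RegList = uint32_t;
constexpr int kPointerSize = 4;
constexpr int kDoubleSize = 8;
constexpr RegList kToyCallerSavedGP = 0x0000FFFF;   // pretend 16 GP regs
constexpr RegList kToyCallerSavedFPU = 0x000000FF;  // pretend 8 FPU regs

int CountRegs(RegList list) {
  return static_cast<int>(std::bitset<32>(list).count());
}

int RequiredStackSizeForCallerSaved(bool save_fp, RegList exclusions) {
  // GP registers that must be preserved, minus the caller's exclusions.
  int bytes = CountRegs(kToyCallerSavedGP & ~exclusions) * kPointerSize;
  if (save_fp) bytes += CountRegs(kToyCallerSavedFPU) * kDoubleSize;
  return bytes;
}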
void pop(Register dst) {
lw(dst, MemOperand(sp, 0));
@@ -378,7 +384,7 @@ class TurboAssembler : public Assembler {
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2) {
- DCHECK(!src1.is(src2));
+ DCHECK(src1 != src2);
lw(src2, MemOperand(sp, 0 * kPointerSize));
lw(src1, MemOperand(sp, 1 * kPointerSize));
Addu(sp, sp, 2 * kPointerSize);
@@ -599,6 +605,11 @@ class TurboAssembler : public Assembler {
void Trunc_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch);
void Trunc_uw_s(FPURegister fd, Register rs, FPURegister scratch);
+ void Trunc_w_d(FPURegister fd, FPURegister fs);
+ void Round_w_d(FPURegister fd, FPURegister fs);
+ void Floor_w_d(FPURegister fd, FPURegister fs);
+ void Ceil_w_d(FPURegister fd, FPURegister fs);
+
// FP32 mode: Move the general purpose register into
// the high part of the double-register pair.
// FP64 mode: Move the general-purpose register into
@@ -650,7 +661,7 @@ class TurboAssembler : public Assembler {
// handled in out-of-line code. The specific behaviour depends on supported
// instructions.
//
- // These functions assume (and assert) that !src1.is(src2). It is permitted
+  // These functions assume (and assert) that src1 != src2. It is permitted
// for the result to alias either input register.
void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2,
Label* out_of_line);
@@ -677,19 +688,19 @@ class TurboAssembler : public Assembler {
inline void Move(Register dst, Smi* smi) { li(dst, Operand(smi)); }
inline void Move(Register dst, Register src) {
- if (!dst.is(src)) {
+ if (dst != src) {
mov(dst, src);
}
}
inline void Move_d(FPURegister dst, FPURegister src) {
- if (!dst.is(src)) {
+ if (dst != src) {
mov_d(dst, src);
}
}
inline void Move_s(FPURegister dst, FPURegister src) {
- if (!dst.is(src)) {
+ if (dst != src) {
mov_s(dst, src);
}
}
@@ -946,15 +957,12 @@ class MacroAssembler : public TurboAssembler {
void IncrementalMarkingRecordWriteHelper(Register object, Register value,
Register address);
- enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
-
// Record in the remembered set the fact that we have a pointer to new space
// at the address pointed to by the addr register. Only works if addr is not
// in new space.
void RememberedSetHelper(Register object, // Used for debug code.
Register addr, Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetFinalAction and_then);
+ SaveFPRegsMode save_fp);
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but it will be clobbered.
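For context on RememberedSetHelper: a remembered set records old-space slots that hold pointers into new space, so a scavenge can update those slots without walking the entire heap. A deliberately simplified model of the idea (V8's real structure is a per-page slot set, not a hash set; this sketch only shows what the write barrier records):

#include <unordered_set>

// Toy model of a remembered set: the write barrier records the address of
// every old-space slot that was written with a new-space pointer.
struct RememberedSetModel {
  std::unordered_set<void**> slots;

  void RecordWrite(void** slot, bool value_in_new_space,
                   bool slot_in_new_space) {
    // Only old-to-new pointers are interesting; new-to-new pointers are
    // found by the scavenger anyway.
    if (value_in_new_space && !slot_in_new_space) slots.insert(slot);
  }
};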
@@ -989,26 +997,7 @@ class MacroAssembler : public TurboAssembler {
Register object, int offset, Register value, Register scratch,
RAStatus ra_status, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting);
-
- // As above, but the offset has the tag presubtracted. For use with
- // MemOperand(reg, off).
- inline void RecordWriteContextSlot(
- Register context, int offset, Register value, Register scratch,
- RAStatus ra_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting) {
- RecordWriteField(context, offset + kHeapObjectTag, value, scratch,
- ra_status, save_fp, remembered_set_action, smi_check,
- pointers_to_here_check_for_value);
- }
-
- void RecordWriteForMap(Register object, Register map, Register dst,
- RAStatus ra_status, SaveFPRegsMode save_fp);
+ SmiCheck smi_check = INLINE_SMI_CHECK);
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
@@ -1017,28 +1006,7 @@ class MacroAssembler : public TurboAssembler {
Register object, Register address, Register value, RAStatus ra_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting);
-
- // ---------------------------------------------------------------------------
- // Allocation support.
-
- // Allocate an object in new space or old space. The object_size is
- // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
- // is passed. If the space is exhausted control continues at the gc_required
- // label. The allocated object is returned in result. If the flag
- // tag_allocated_object is true the result is tagged as as a heap object.
- // All registers are clobbered also when control continues at the gc_required
- // label.
- void Allocate(int object_size, Register result, Register scratch1,
- Register scratch2, Label* gc_required, AllocationFlags flags);
-
- // Allocate and initialize a JSValue wrapper with the specified {constructor}
- // and {value}.
- void AllocateJSValue(Register result, Register constructor, Register value,
- Register scratch1, Register scratch2,
- Label* gc_required);
+ SmiCheck smi_check = INLINE_SMI_CHECK);
void Pref(int32_t hint, const MemOperand& rs);
@@ -1047,11 +1015,6 @@ class MacroAssembler : public TurboAssembler {
void PushSafepointRegisters();
void PopSafepointRegisters();
- void Trunc_w_d(FPURegister fd, FPURegister fs);
- void Round_w_d(FPURegister fd, FPURegister fs);
- void Floor_w_d(FPURegister fd, FPURegister fs);
- void Ceil_w_d(FPURegister fd, FPURegister fs);
-
// Truncates a double using a specific rounding mode, and writes the value
// to the result register.
// The except_flag will contain any exceptions caused by the instruction.
@@ -1078,11 +1041,6 @@ class MacroAssembler : public TurboAssembler {
// Make sure the stack is aligned. Only emits code in debug mode.
void AssertStackIsAligned();
- // Load the global object from the current context.
- void LoadGlobalObject(Register dst) {
- LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
- }
-
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst) {
LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
@@ -1090,12 +1048,6 @@ class MacroAssembler : public TurboAssembler {
void LoadNativeContextSlot(int index, Register dst);
- // Load the initial map from the global function. The registers
- // function and map can be the same, function is then overwritten.
- void LoadGlobalFunctionInitialMap(Register function,
- Register map,
- Register scratch);
-
// -------------------------------------------------------------------------
// JavaScript invokes.
@@ -1145,47 +1097,6 @@ class MacroAssembler : public TurboAssembler {
Register map,
Register type_reg);
- void GetInstanceType(Register object_map, Register object_instance_type) {
- lbu(object_instance_type,
- FieldMemOperand(object_map, Map::kInstanceTypeOffset));
- }
-
- // Compare an object's map with the specified map and its transitioned
- // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
- // "branch_to" if the result of the comparison is "cond". If multiple map
- // compares are required, the compare sequences branches to early_success.
- void CompareMapAndBranch(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* early_success,
- Condition cond,
- Label* branch_to);
-
- // As above, but the map of the object is already loaded into the register
- // which is preserved by the code generated.
- void CompareMapAndBranch(Register obj_map,
- Handle<Map> map,
- Label* early_success,
- Condition cond,
- Label* branch_to);
-
- // Check if the map of an object is equal to a specified map and branch to
- // label if not. Skip the smi check if not required (object is known to be a
- // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
- // against maps that are ElementsKind transition maps of the specificed map.
- void CheckMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type);
-
-
- void CheckMap(Register obj,
- Register scratch,
- Heap::RootListIndex index,
- Label* fail,
- SmiCheckType smi_check_type);
-
// Get value of the weak cell.
void GetWeakValue(Register value, Handle<WeakCell> cell);
@@ -1296,22 +1207,8 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// -------------------------------------------------------------------------
// String utilities.
- // Checks if both instance types are sequential ASCII strings and jumps to
- // label if either is not.
- void JumpIfBothInstanceTypesAreNotSequentialOneByte(
- Register first_object_instance_type, Register second_object_instance_type,
- Register scratch1, Register scratch2, Label* failure);
-
void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
- // Checks if both objects are sequential one-byte strings and jumps to label
- // if either is not. Assumes that neither object is a smi.
- void JumpIfNonSmisNotBothSequentialOneByteStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure);
-
void LoadInstanceDescriptors(Register map, Register descriptors);
void LoadAccessor(Register dst, Register holder, int accessor_index,
AccessorComponent accessor);
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index bef8579dde..7b2e38cbd5 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -9,7 +9,7 @@
#if V8_TARGET_ARCH_MIPS
-#include "src/assembler.h"
+#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/codegen.h"
#include "src/disasm.h"
@@ -912,6 +912,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
}
if (IsMipsArchVariant(kMips32r6)) {
FCSR_ = kFCSRNaN2008FlagMask;
+ MSACSR_ = 0;
} else {
DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kMips32r2));
FCSR_ = 0;
@@ -1258,11 +1259,17 @@ void Simulator::set_fcsr_rounding_mode(FPURoundingMode mode) {
FCSR_ |= mode & kFPURoundingModeMask;
}
+void Simulator::set_msacsr_rounding_mode(FPURoundingMode mode) {
+ MSACSR_ |= mode & kFPURoundingModeMask;
+}
unsigned int Simulator::get_fcsr_rounding_mode() {
return FCSR_ & kFPURoundingModeMask;
}
+unsigned int Simulator::get_msacsr_rounding_mode() {
+ return MSACSR_ & kFPURoundingModeMask;
+}
void Simulator::set_fpu_register_word_invalid_result(float original,
float rounded) {
@@ -1542,6 +1549,7 @@ void Simulator::round_according_to_fcsr(double toRound, double& rounded,
// If the number is halfway between two integers,
// round to the even one.
rounded_int--;
+ rounded -= 1.;
}
break;
case kRoundToZero:
@@ -1583,6 +1591,7 @@ void Simulator::round_according_to_fcsr(float toRound, float& rounded,
// If the number is halfway between two integers,
// round to the even one.
rounded_int--;
+ rounded -= 1.f;
}
break;
case kRoundToZero:
@@ -1600,6 +1609,47 @@ void Simulator::round_according_to_fcsr(float toRound, float& rounded,
}
}
+template <typename T_fp, typename T_int>
+void Simulator::round_according_to_msacsr(T_fp toRound, T_fp& rounded,
+ T_int& rounded_int) {
+ // 0 RN (round to nearest): Round a result to the nearest
+ // representable value; if the result is exactly halfway between
+ // two representable values, round to zero. Behave like round_w_d.
+
+ // 1 RZ (round toward zero): Round a result to the closest
+ // representable value whose absolute value is less than or
+ // equal to the infinitely accurate result. Behave like trunc_w_d.
+
+ // 2 RP (round up, or toward infinity): Round a result to the
+ // next representable value up. Behave like ceil_w_d.
+
+ // 3 RD (round down, or toward −infinity): Round a result to
+ // the next representable value down. Behave like floor_w_d.
+ switch (get_msacsr_rounding_mode()) {
+ case kRoundToNearest:
+ rounded = std::floor(toRound + 0.5);
+ rounded_int = static_cast<T_int>(rounded);
+ if ((rounded_int & 1) != 0 && rounded_int - toRound == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ rounded_int--;
+ rounded -= 1;
+ }
+ break;
+ case kRoundToZero:
+ rounded = trunc(toRound);
+ rounded_int = static_cast<T_int>(rounded);
+ break;
+ case kRoundToPlusInf:
+ rounded = std::ceil(toRound);
+ rounded_int = static_cast<T_int>(rounded);
+ break;
+ case kRoundToMinusInf:
+ rounded = std::floor(toRound);
+ rounded_int = static_cast<T_int>(rounded);
+ break;
+ }
+}
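The paired `rounded_int--` / `rounded -= 1` adjustments implement ties-to-even: `std::floor(x + 0.5)` alone rounds 2.5 up to 3, so when the result is odd and sits exactly half a unit above the input, both the integer and the floating-point result are pulled back down. A standalone check of the same logic, outside the simulator:

#include <cassert>
#include <cmath>
#include <cstdint>

int32_t RoundToNearestEven(double x) {
  double rounded = std::floor(x + 0.5);
  int32_t rounded_int = static_cast<int32_t>(rounded);
  if ((rounded_int & 1) != 0 && rounded_int - x == 0.5) {
    rounded_int--;  // Exactly halfway: prefer the even neighbour.
  }
  return rounded_int;
}

int main() {
  assert(RoundToNearestEven(2.5) == 2);  // tie, rounds down to even
  assert(RoundToNearestEven(3.5) == 4);  // tie, rounds up to even
  assert(RoundToNearestEven(2.4) == 2);  // not a tie
}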
void Simulator::round64_according_to_fcsr(double toRound, double& rounded,
int64_t& rounded_int, double fs) {
@@ -1624,6 +1674,7 @@ void Simulator::round64_according_to_fcsr(double toRound, double& rounded,
// If the number is halfway between two integers,
// round to the even one.
rounded_int--;
+ rounded -= 1.;
}
break;
case kRoundToZero:
@@ -1665,6 +1716,7 @@ void Simulator::round64_according_to_fcsr(float toRound, float& rounded,
// If the number is halfway between two integers,
// round to the even one.
rounded_int--;
+ rounded -= 1.f;
}
break;
case kRoundToZero:
@@ -2235,10 +2287,6 @@ typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0, int32_t arg1,
int32_t arg6, int32_t arg7,
int32_t arg8);
-typedef ObjectTriple (*SimulatorRuntimeTripleCall)(int32_t arg0, int32_t arg1,
- int32_t arg2, int32_t arg3,
- int32_t arg4);
-
// These prototypes handle the four types of FP calls.
typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
@@ -2456,28 +2504,6 @@ void Simulator::SoftwareInterrupt() {
SimulatorRuntimeProfilingGetterCall target =
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
target(arg0, arg1, Redirection::ReverseRedirection(arg2));
- } else if (redirection->type() == ExternalReference::BUILTIN_CALL_TRIPLE) {
- // builtin call returning ObjectTriple.
- SimulatorRuntimeTripleCall target =
- reinterpret_cast<SimulatorRuntimeTripleCall>(external);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF(
- "Call to host triple returning runtime function %p "
- "args %08x, %08x, %08x, %08x, %08x\n",
- static_cast<void*>(FUNCTION_ADDR(target)), arg1, arg2, arg3, arg4,
- arg5);
- }
- // arg0 is a hidden argument pointing to the return location, so don't
- // pass it to the target function.
- ObjectTriple result = target(arg1, arg2, arg3, arg4, arg5);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned { %p, %p, %p }\n", static_cast<void*>(result.x),
- static_cast<void*>(result.y), static_cast<void*>(result.z));
- }
- // Return is passed back in address pointed to by hidden first argument.
- ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(arg0);
- *sim_result = result;
- set_register(v0, arg0);
} else {
DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
redirection->type() == ExternalReference::BUILTIN_CALL_PAIR);
@@ -4628,80 +4654,99 @@ void Simulator::DecodeTypeMsaI10() {
void Simulator::DecodeTypeMsaELM() {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
- uint32_t opcode = instr_.InstructionBits() & kMsaELMMask;
+ uint32_t opcode = instr_.InstructionBits() & kMsaLongerELMMask;
int32_t n = instr_.MsaElmNValue();
int32_t alu_out;
switch (opcode) {
- case COPY_S:
- case COPY_U: {
- msa_reg_t ws;
- switch (DecodeMsaDataFormat()) {
- case MSA_BYTE: {
- DCHECK(n < kMSALanesByte);
- get_msa_register(instr_.WsValue(), ws.b);
- alu_out = static_cast<int32_t>(ws.b[n]);
- SetResult(wd_reg(), (opcode == COPY_U) ? alu_out & 0xFFu : alu_out);
- break;
- }
- case MSA_HALF: {
- DCHECK(n < kMSALanesHalf);
- get_msa_register(instr_.WsValue(), ws.h);
- alu_out = static_cast<int32_t>(ws.h[n]);
- SetResult(wd_reg(), (opcode == COPY_U) ? alu_out & 0xFFFFu : alu_out);
- break;
- }
- case MSA_WORD: {
- DCHECK(n < kMSALanesWord);
- get_msa_register(instr_.WsValue(), ws.w);
- alu_out = static_cast<int32_t>(ws.w[n]);
- SetResult(wd_reg(), alu_out);
- break;
- }
- default:
- UNREACHABLE();
- }
- } break;
- case INSERT: {
- msa_reg_t wd;
- switch (DecodeMsaDataFormat()) {
- case MSA_BYTE: {
- DCHECK(n < kMSALanesByte);
- int32_t rs = get_register(instr_.WsValue());
- get_msa_register(instr_.WdValue(), wd.b);
- wd.b[n] = rs & 0xFFu;
- set_msa_register(instr_.WdValue(), wd.b);
- TraceMSARegWr(wd.b);
- break;
- }
- case MSA_HALF: {
- DCHECK(n < kMSALanesHalf);
- int32_t rs = get_register(instr_.WsValue());
- get_msa_register(instr_.WdValue(), wd.h);
- wd.h[n] = rs & 0xFFFFu;
- set_msa_register(instr_.WdValue(), wd.h);
- TraceMSARegWr(wd.h);
- break;
- }
- case MSA_WORD: {
- DCHECK(n < kMSALanesWord);
- int32_t rs = get_register(instr_.WsValue());
- get_msa_register(instr_.WdValue(), wd.w);
- wd.w[n] = rs;
- set_msa_register(instr_.WdValue(), wd.w);
- TraceMSARegWr(wd.w);
+ case CTCMSA:
+ DCHECK(sa() == kMSACSRRegister);
+ MSACSR_ = bit_cast<uint32_t>(registers_[rd_reg()]);
+ TraceRegWr(static_cast<int32_t>(MSACSR_));
+ break;
+ case CFCMSA:
+ DCHECK(rd_reg() == kMSACSRRegister);
+ SetResult(sa(), bit_cast<int32_t>(MSACSR_));
+ break;
+ case MOVE_V:
+ UNIMPLEMENTED();
+ break;
+ default:
+ opcode &= kMsaELMMask;
+ switch (opcode) {
+ case COPY_S:
+ case COPY_U: {
+ msa_reg_t ws;
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE: {
+ DCHECK(n < kMSALanesByte);
+ get_msa_register(instr_.WsValue(), ws.b);
+ alu_out = static_cast<int32_t>(ws.b[n]);
+ SetResult(wd_reg(),
+ (opcode == COPY_U) ? alu_out & 0xFFu : alu_out);
+ break;
+ }
+ case MSA_HALF: {
+ DCHECK(n < kMSALanesHalf);
+ get_msa_register(instr_.WsValue(), ws.h);
+ alu_out = static_cast<int32_t>(ws.h[n]);
+ SetResult(wd_reg(),
+ (opcode == COPY_U) ? alu_out & 0xFFFFu : alu_out);
+ break;
+ }
+ case MSA_WORD: {
+ DCHECK(n < kMSALanesWord);
+ get_msa_register(instr_.WsValue(), ws.w);
+ alu_out = static_cast<int32_t>(ws.w[n]);
+ SetResult(wd_reg(), alu_out);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } break;
+ case INSERT: {
+ msa_reg_t wd;
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE: {
+ DCHECK(n < kMSALanesByte);
+ int32_t rs = get_register(instr_.WsValue());
+ get_msa_register(instr_.WdValue(), wd.b);
+ wd.b[n] = rs & 0xFFu;
+ set_msa_register(instr_.WdValue(), wd.b);
+ TraceMSARegWr(wd.b);
+ break;
+ }
+ case MSA_HALF: {
+ DCHECK(n < kMSALanesHalf);
+ int32_t rs = get_register(instr_.WsValue());
+ get_msa_register(instr_.WdValue(), wd.h);
+ wd.h[n] = rs & 0xFFFFu;
+ set_msa_register(instr_.WdValue(), wd.h);
+ TraceMSARegWr(wd.h);
+ break;
+ }
+ case MSA_WORD: {
+ DCHECK(n < kMSALanesWord);
+ int32_t rs = get_register(instr_.WsValue());
+ get_msa_register(instr_.WdValue(), wd.w);
+ wd.w[n] = rs;
+ set_msa_register(instr_.WdValue(), wd.w);
+ TraceMSARegWr(wd.w);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } break;
+ case SLDI:
+ case SPLATI:
+ case INSVE:
+ UNIMPLEMENTED();
break;
- }
default:
UNREACHABLE();
}
- } break;
- case SLDI:
- case SPLATI:
- case INSVE:
- UNIMPLEMENTED();
break;
- default:
- UNREACHABLE();
}
}
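In the restructured decoder above, the wider kMsaLongerELMMask is tried first so that CTCMSA, CFCMSA and MOVE_V are matched before the opcode is narrowed with kMsaELMMask for the ELM group proper. Within that group, COPY_S and COPY_U differ only in how a sub-word lane is widened to 32 bits: sign extension versus the zero-extending masks used above. A quick illustration using nothing beyond standard C++ integer conversions:

#include <cassert>
#include <cstdint>

int main() {
  int8_t lane = -1;                                    // byte lane holds 0xFF
  int32_t copy_s = static_cast<int32_t>(lane);         // sign-extend: -1
  int32_t copy_u = static_cast<int32_t>(lane) & 0xFF;  // zero-extend: 255
  assert(copy_s == -1);
  assert(copy_u == 255);
}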
@@ -4897,53 +4942,213 @@ void Simulator::DecodeTypeMsaMI10() {
#undef MSA_MI10_STORE
}
-void Simulator::DecodeTypeMsa3R() {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
- uint32_t opcode = instr_.InstructionBits() & kMsa3RMask;
+template <typename T>
+T Simulator::Msa3RInstrHelper(uint32_t opcode, T wd, T ws, T wt) {
+ typedef typename std::make_unsigned<T>::type uT;
+ T res;
+ T wt_modulo = wt % (sizeof(T) * 8);
switch (opcode) {
case SLL_MSA:
+ res = static_cast<T>(ws << wt_modulo);
+ break;
case SRA_MSA:
+ res = static_cast<T>(ArithmeticShiftRight(ws, wt_modulo));
+ break;
case SRL_MSA:
+ res = static_cast<T>(static_cast<uT>(ws) >> wt_modulo);
+ break;
case BCLR:
+ res = static_cast<T>(static_cast<T>(~(1ull << wt_modulo)) & ws);
+ break;
case BSET:
+ res = static_cast<T>(static_cast<T>(1ull << wt_modulo) | ws);
+ break;
case BNEG:
- case BINSL:
- case BINSR:
+ res = static_cast<T>(static_cast<T>(1ull << wt_modulo) ^ ws);
+ break;
+ case BINSL: {
+ int elem_size = 8 * sizeof(T);
+ int bits = wt_modulo + 1;
+ if (bits == elem_size) {
+ res = static_cast<T>(ws);
+ } else {
+ uint64_t mask = ((1ull << bits) - 1) << (elem_size - bits);
+ res = static_cast<T>((static_cast<T>(mask) & ws) |
+ (static_cast<T>(~mask) & wd));
+ }
+ } break;
+ case BINSR: {
+ int elem_size = 8 * sizeof(T);
+ int bits = wt_modulo + 1;
+ if (bits == elem_size) {
+ res = static_cast<T>(ws);
+ } else {
+ uint64_t mask = (1ull << bits) - 1;
+ res = static_cast<T>((static_cast<T>(mask) & ws) |
+ (static_cast<T>(~mask) & wd));
+ }
+ } break;
case ADDV:
+ res = ws + wt;
+ break;
case SUBV:
+ res = ws - wt;
+ break;
case MAX_S:
+ res = Max(ws, wt);
+ break;
case MAX_U:
+ res = static_cast<T>(Max(static_cast<uT>(ws), static_cast<uT>(wt)));
+ break;
case MIN_S:
+ res = Min(ws, wt);
+ break;
case MIN_U:
+ res = static_cast<T>(Min(static_cast<uT>(ws), static_cast<uT>(wt)));
+ break;
case MAX_A:
+      // We use negative abs in order to avoid problems
+      // with the MIN_INT corner case.
+ res = Nabs(ws) < Nabs(wt) ? ws : wt;
+ break;
case MIN_A:
+      // We use negative abs in order to avoid problems
+      // with the MIN_INT corner case.
+ res = Nabs(ws) > Nabs(wt) ? ws : wt;
+ break;
case CEQ:
+ res = static_cast<T>(!Compare(ws, wt) ? -1ull : 0ull);
+ break;
case CLT_S:
+ res = static_cast<T>((Compare(ws, wt) == -1) ? -1ull : 0ull);
+ break;
case CLT_U:
+ res = static_cast<T>(
+ (Compare(static_cast<uT>(ws), static_cast<uT>(wt)) == -1) ? -1ull
+ : 0ull);
+ break;
case CLE_S:
+ res = static_cast<T>((Compare(ws, wt) != 1) ? -1ull : 0ull);
+ break;
case CLE_U:
+ res = static_cast<T>(
+ (Compare(static_cast<uT>(ws), static_cast<uT>(wt)) != 1) ? -1ull
+ : 0ull);
+ break;
case ADD_A:
- case ADDS_A:
+ res = static_cast<T>(Abs(ws) + Abs(wt));
+ break;
+ case ADDS_A: {
+ T ws_nabs = Nabs(ws);
+ T wt_nabs = Nabs(wt);
+ if (ws_nabs < -std::numeric_limits<T>::max() - wt_nabs) {
+ res = std::numeric_limits<T>::max();
+ } else {
+ res = -(ws_nabs + wt_nabs);
+ }
+ } break;
case ADDS_S:
- case ADDS_U:
+ res = SaturateAdd(ws, wt);
+ break;
+ case ADDS_U: {
+ uT ws_u = static_cast<uT>(ws);
+ uT wt_u = static_cast<uT>(wt);
+ res = static_cast<T>(SaturateAdd(ws_u, wt_u));
+ } break;
case AVE_S:
- case AVE_U:
+ res = static_cast<T>((wt & ws) + ((wt ^ ws) >> 1));
+ break;
+ case AVE_U: {
+ uT ws_u = static_cast<uT>(ws);
+ uT wt_u = static_cast<uT>(wt);
+ res = static_cast<T>((wt_u & ws_u) + ((wt_u ^ ws_u) >> 1));
+ } break;
case AVER_S:
- case AVER_U:
+ res = static_cast<T>((wt | ws) - ((wt ^ ws) >> 1));
+ break;
+ case AVER_U: {
+ uT ws_u = static_cast<uT>(ws);
+ uT wt_u = static_cast<uT>(wt);
+ res = static_cast<T>((wt_u | ws_u) - ((wt_u ^ ws_u) >> 1));
+ } break;
case SUBS_S:
- case SUBS_U:
- case SUBSUS_U:
- case SUBSUU_S:
+ res = SaturateSub(ws, wt);
+ break;
+ case SUBS_U: {
+ uT ws_u = static_cast<uT>(ws);
+ uT wt_u = static_cast<uT>(wt);
+ res = static_cast<T>(SaturateSub(ws_u, wt_u));
+ } break;
+ case SUBSUS_U: {
+ uT wsu = static_cast<uT>(ws);
+ if (wt > 0) {
+ uT wtu = static_cast<uT>(wt);
+ if (wtu > wsu) {
+ res = 0;
+ } else {
+ res = static_cast<T>(wsu - wtu);
+ }
+ } else {
+ if (wsu > std::numeric_limits<uT>::max() + wt) {
+ res = static_cast<T>(std::numeric_limits<uT>::max());
+ } else {
+ res = static_cast<T>(wsu - wt);
+ }
+ }
+ } break;
+ case SUBSUU_S: {
+ uT wsu = static_cast<uT>(ws);
+ uT wtu = static_cast<uT>(wt);
+ uT wdu;
+ if (wsu > wtu) {
+ wdu = wsu - wtu;
+ if (wdu > std::numeric_limits<T>::max()) {
+ res = std::numeric_limits<T>::max();
+ } else {
+ res = static_cast<T>(wdu);
+ }
+ } else {
+ wdu = wtu - wsu;
+ CHECK(-std::numeric_limits<T>::max() ==
+ std::numeric_limits<T>::min() + 1);
+ if (wdu <= std::numeric_limits<T>::max()) {
+ res = -static_cast<T>(wdu);
+ } else {
+ res = std::numeric_limits<T>::min();
+ }
+ }
+ } break;
case ASUB_S:
- case ASUB_U:
+ res = static_cast<T>(Abs(ws - wt));
+ break;
+ case ASUB_U: {
+ uT wsu = static_cast<uT>(ws);
+ uT wtu = static_cast<uT>(wt);
+ res = static_cast<T>(wsu > wtu ? wsu - wtu : wtu - wsu);
+ } break;
case MULV:
+ res = ws * wt;
+ break;
case MADDV:
+ res = wd + ws * wt;
+ break;
case MSUBV:
+ res = wd - ws * wt;
+ break;
case DIV_S_MSA:
+ res = wt != 0 ? ws / wt : static_cast<T>(Unpredictable);
+ break;
case DIV_U:
+ res = wt != 0 ? static_cast<T>(static_cast<uT>(ws) / static_cast<uT>(wt))
+ : static_cast<T>(Unpredictable);
+ break;
case MOD_S:
+ res = wt != 0 ? ws % wt : static_cast<T>(Unpredictable);
+ break;
case MOD_U:
+ res = wt != 0 ? static_cast<T>(static_cast<uT>(ws) % static_cast<uT>(wt))
+ : static_cast<T>(Unpredictable);
+ break;
case DOTP_S:
case DOTP_U:
case DPADD_S:
@@ -4959,8 +5164,17 @@ void Simulator::DecodeTypeMsa3R() {
case ILVEV:
case ILVOD:
case VSHF:
- case SRAR:
- case SRLR:
+ UNIMPLEMENTED();
+ break;
+ case SRAR: {
+ int bit = wt_modulo == 0 ? 0 : (ws >> (wt_modulo - 1)) & 1;
+ res = static_cast<T>(ArithmeticShiftRight(ws, wt_modulo) + bit);
+ } break;
+ case SRLR: {
+ uT wsu = static_cast<uT>(ws);
+ int bit = wt_modulo == 0 ? 0 : (wsu >> (wt_modulo - 1)) & 1;
+ res = static_cast<T>((wsu >> wt_modulo) + bit);
+ } break;
case HADD_S:
case HADD_U:
case HSUB_S:
@@ -4970,144 +5184,180 @@ void Simulator::DecodeTypeMsa3R() {
default:
UNREACHABLE();
}
-}
-
-void Simulator::DecodeTypeMsa3RF() {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
- uint32_t opcode = instr_.InstructionBits() & kMsa3RFMask;
- switch (opcode) {
- case FCAF:
- case FCUN:
- case FCEQ:
- case FCUEQ:
- case FCLT:
- case FCULT:
- case FCLE:
- case FCULE:
- case FSAF:
- case FSUN:
- case FSEQ:
- case FSUEQ:
- case FSLT:
- case FSULT:
- case FSLE:
- case FSULE:
- case FADD:
- case FSUB:
- case FMUL:
- case FDIV:
- case FMADD:
- case FMSUB:
- case FEXP2:
- case FEXDO:
- case FTQ:
- case FMIN:
- case FMIN_A:
- case FMAX:
- case FMAX_A:
- case FCOR:
- case FCUNE:
- case FCNE:
- case MUL_Q:
- case MADD_Q:
- case MSUB_Q:
- case FSOR:
- case FSUNE:
- case FSNE:
- case MULR_Q:
- case MADDR_Q:
- case MSUBR_Q:
- UNIMPLEMENTED();
- break;
- default:
- UNREACHABLE();
+ return res;
}
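The AVE and AVER cases above use the classic overflow-free averaging identities: `(a & b) + ((a ^ b) >> 1)` is the floor of the average (the AND keeps the bits both operands share, the shifted XOR contributes half of the bits they disagree on), and `(a | b) - ((a ^ b) >> 1)` is the ceiling. No intermediate value can exceed the operand width, which is why the helper needs no widening. The signed variants additionally rely on an arithmetic right shift; the unsigned case is easy to self-check:

#include <cassert>
#include <cstdint>

uint32_t AverageFloor(uint32_t a, uint32_t b) {
  return (a & b) + ((a ^ b) >> 1);
}
uint32_t AverageCeil(uint32_t a, uint32_t b) {
  return (a | b) - ((a ^ b) >> 1);
}

int main() {
  // Works even where (a + b) would wrap around 2^32.
  assert(AverageFloor(0xFFFFFFFFu, 0xFFFFFFFDu) == 0xFFFFFFFEu);
  assert(AverageFloor(7, 4) == 5);  // floor(5.5)
  assert(AverageCeil(7, 4) == 6);   // ceil(5.5)
}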
-}
-void Simulator::DecodeTypeMsaVec() {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
- uint32_t opcode = instr_.InstructionBits() & kMsaVECMask;
- msa_reg_t wd, ws, wt;
+ void Simulator::DecodeTypeMsa3R() {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsa3RMask;
+ msa_reg_t ws, wd, wt;
- get_msa_register(instr_.WsValue(), ws.w);
- get_msa_register(instr_.WtValue(), wt.w);
- if (opcode == BMNZ_V || opcode == BMZ_V || opcode == BSEL_V) {
- get_msa_register(instr_.WdValue(), wd.w);
- }
+#define MSA_3R_DF(elem, num_of_lanes) \
+ get_msa_register(instr_.WdValue(), wd.elem); \
+ get_msa_register(instr_.WsValue(), ws.elem); \
+ get_msa_register(instr_.WtValue(), wt.elem); \
+ for (int i = 0; i < num_of_lanes; i++) { \
+ wd.elem[i] = Msa3RInstrHelper(opcode, wd.elem[i], ws.elem[i], wt.elem[i]); \
+ } \
+ set_msa_register(instr_.WdValue(), wd.elem); \
+ TraceMSARegWr(wd.elem);
- for (int i = 0; i < kMSALanesWord; i++) {
- switch (opcode) {
- case AND_V:
- wd.w[i] = ws.w[i] & wt.w[i];
- break;
- case OR_V:
- wd.w[i] = ws.w[i] | wt.w[i];
- break;
- case NOR_V:
- wd.w[i] = ~(ws.w[i] | wt.w[i]);
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE:
+ MSA_3R_DF(b, kMSALanesByte);
break;
- case XOR_V:
- wd.w[i] = ws.w[i] ^ wt.w[i];
+ case MSA_HALF:
+ MSA_3R_DF(h, kMSALanesHalf);
break;
- case BMNZ_V:
- wd.w[i] = (wt.w[i] & ws.w[i]) | (~wt.w[i] & wd.w[i]);
+ case MSA_WORD:
+ MSA_3R_DF(w, kMSALanesWord);
break;
- case BMZ_V:
- wd.w[i] = (~wt.w[i] & ws.w[i]) | (wt.w[i] & wd.w[i]);
+ case MSA_DWORD:
+ MSA_3R_DF(d, kMSALanesDword);
break;
- case BSEL_V:
- wd.w[i] = (~wd.w[i] & ws.w[i]) | (wd.w[i] & wt.w[i]);
+ default:
+ UNREACHABLE();
+ }
+#undef MSA_3R_DF
+ }
+
+ void Simulator::DecodeTypeMsa3RF() {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsa3RFMask;
+ switch (opcode) {
+ case FCAF:
+ case FCUN:
+ case FCEQ:
+ case FCUEQ:
+ case FCLT:
+ case FCULT:
+ case FCLE:
+ case FCULE:
+ case FSAF:
+ case FSUN:
+ case FSEQ:
+ case FSUEQ:
+ case FSLT:
+ case FSULT:
+ case FSLE:
+ case FSULE:
+ case FADD:
+ case FSUB:
+ case FMUL:
+ case FDIV:
+ case FMADD:
+ case FMSUB:
+ case FEXP2:
+ case FEXDO:
+ case FTQ:
+ case FMIN:
+ case FMIN_A:
+ case FMAX:
+ case FMAX_A:
+ case FCOR:
+ case FCUNE:
+ case FCNE:
+ case MUL_Q:
+ case MADD_Q:
+ case MSUB_Q:
+ case FSOR:
+ case FSUNE:
+ case FSNE:
+ case MULR_Q:
+ case MADDR_Q:
+ case MSUBR_Q:
+ UNIMPLEMENTED();
break;
default:
UNREACHABLE();
}
}
- set_msa_register(instr_.WdValue(), wd.w);
- TraceMSARegWr(wd.d);
-}
-void Simulator::DecodeTypeMsa2R() {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
- uint32_t opcode = instr_.InstructionBits() & kMsa2RMask;
- msa_reg_t wd, ws;
- switch (opcode) {
- case FILL:
- switch (DecodeMsaDataFormat()) {
- case MSA_BYTE: {
- int32_t rs = get_register(instr_.WsValue());
- for (int i = 0; i < kMSALanesByte; i++) {
- wd.b[i] = rs & 0xFFu;
- }
- set_msa_register(instr_.WdValue(), wd.b);
- TraceMSARegWr(wd.b);
+ void Simulator::DecodeTypeMsaVec() {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsaVECMask;
+ msa_reg_t wd, ws, wt;
+
+ get_msa_register(instr_.WsValue(), ws.w);
+ get_msa_register(instr_.WtValue(), wt.w);
+ if (opcode == BMNZ_V || opcode == BMZ_V || opcode == BSEL_V) {
+ get_msa_register(instr_.WdValue(), wd.w);
+ }
+
+ for (int i = 0; i < kMSALanesWord; i++) {
+ switch (opcode) {
+ case AND_V:
+ wd.w[i] = ws.w[i] & wt.w[i];
break;
- }
- case MSA_HALF: {
- int32_t rs = get_register(instr_.WsValue());
- for (int i = 0; i < kMSALanesHalf; i++) {
- wd.h[i] = rs & 0xFFFFu;
- }
- set_msa_register(instr_.WdValue(), wd.h);
- TraceMSARegWr(wd.h);
+ case OR_V:
+ wd.w[i] = ws.w[i] | wt.w[i];
break;
- }
- case MSA_WORD: {
- int32_t rs = get_register(instr_.WsValue());
- for (int i = 0; i < kMSALanesWord; i++) {
- wd.w[i] = rs;
- }
- set_msa_register(instr_.WdValue(), wd.w);
- TraceMSARegWr(wd.w);
+ case NOR_V:
+ wd.w[i] = ~(ws.w[i] | wt.w[i]);
+ break;
+ case XOR_V:
+ wd.w[i] = ws.w[i] ^ wt.w[i];
+ break;
+ case BMNZ_V:
+ wd.w[i] = (wt.w[i] & ws.w[i]) | (~wt.w[i] & wd.w[i]);
+ break;
+ case BMZ_V:
+ wd.w[i] = (~wt.w[i] & ws.w[i]) | (wt.w[i] & wd.w[i]);
+ break;
+ case BSEL_V:
+ wd.w[i] = (~wd.w[i] & ws.w[i]) | (wd.w[i] & wt.w[i]);
break;
- }
default:
UNREACHABLE();
}
- break;
- case PCNT:
+ }
+ set_msa_register(instr_.WdValue(), wd.w);
+ TraceMSARegWr(wd.d);
+ }
+
+ void Simulator::DecodeTypeMsa2R() {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsa2RMask;
+ msa_reg_t wd, ws;
+ switch (opcode) {
+ case FILL:
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE: {
+ int32_t rs = get_register(instr_.WsValue());
+ for (int i = 0; i < kMSALanesByte; i++) {
+ wd.b[i] = rs & 0xFFu;
+ }
+ set_msa_register(instr_.WdValue(), wd.b);
+ TraceMSARegWr(wd.b);
+ break;
+ }
+ case MSA_HALF: {
+ int32_t rs = get_register(instr_.WsValue());
+ for (int i = 0; i < kMSALanesHalf; i++) {
+ wd.h[i] = rs & 0xFFFFu;
+ }
+ set_msa_register(instr_.WdValue(), wd.h);
+ TraceMSARegWr(wd.h);
+ break;
+ }
+ case MSA_WORD: {
+ int32_t rs = get_register(instr_.WsValue());
+ for (int i = 0; i < kMSALanesWord; i++) {
+ wd.w[i] = rs;
+ }
+ set_msa_register(instr_.WdValue(), wd.w);
+ TraceMSARegWr(wd.w);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case PCNT:
#define PCNT_DF(elem, num_of_lanes) \
get_msa_register(instr_.WsValue(), ws.elem); \
for (int i = 0; i < num_of_lanes; i++) { \
@@ -5201,32 +5451,327 @@ void Simulator::DecodeTypeMsa2R() {
}
}
-void Simulator::DecodeTypeMsa2RF() {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
- uint32_t opcode = instr_.InstructionBits() & kMsa2RFMask;
+#define BIT(n) (0x1LL << n)
+#define QUIET_BIT_S(nan) (bit_cast<int32_t>(nan) & BIT(22))
+#define QUIET_BIT_D(nan) (bit_cast<int64_t>(nan) & BIT(51))
+static inline bool isSnan(float fp) { return !QUIET_BIT_S(fp); }
+static inline bool isSnan(double fp) { return !QUIET_BIT_D(fp); }
+#undef QUIET_BIT_S
+#undef QUIET_BIT_D
+
+template <typename T_int, typename T_fp, typename T_src, typename T_dst>
+T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
+ Simulator* sim) {
+ typedef typename std::make_unsigned<T_int>::type T_uint;
switch (opcode) {
- case FCLASS:
- case FTRUNC_S:
- case FTRUNC_U:
- case FSQRT:
- case FRSQRT:
- case FRCP:
- case FRINT:
- case FLOG2:
- case FEXUPL:
- case FEXUPR:
- case FFQL:
- case FFQR:
- case FTINT_S:
- case FTINT_U:
+ case FCLASS: {
+#define SNAN_BIT BIT(0)
+#define QNAN_BIT BIT(1)
+#define NEG_INFINITY_BIT BIT(2)
+#define NEG_NORMAL_BIT BIT(3)
+#define NEG_SUBNORMAL_BIT BIT(4)
+#define NEG_ZERO_BIT BIT(5)
+#define POS_INFINITY_BIT BIT(6)
+#define POS_NORMAL_BIT BIT(7)
+#define POS_SUBNORMAL_BIT BIT(8)
+#define POS_ZERO_BIT BIT(9)
+ T_fp element = *reinterpret_cast<T_fp*>(&src);
+ switch (std::fpclassify(element)) {
+ case FP_INFINITE:
+ if (std::signbit(element)) {
+ dst = NEG_INFINITY_BIT;
+ } else {
+ dst = POS_INFINITY_BIT;
+ }
+ break;
+ case FP_NAN:
+ if (isSnan(element)) {
+ dst = SNAN_BIT;
+ } else {
+ dst = QNAN_BIT;
+ }
+ break;
+ case FP_NORMAL:
+ if (std::signbit(element)) {
+ dst = NEG_NORMAL_BIT;
+ } else {
+ dst = POS_NORMAL_BIT;
+ }
+ break;
+ case FP_SUBNORMAL:
+ if (std::signbit(element)) {
+ dst = NEG_SUBNORMAL_BIT;
+ } else {
+ dst = POS_SUBNORMAL_BIT;
+ }
+ break;
+ case FP_ZERO:
+ if (std::signbit(element)) {
+ dst = NEG_ZERO_BIT;
+ } else {
+ dst = POS_ZERO_BIT;
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+#undef BIT
+#undef SNAN_BIT
+#undef QNAN_BIT
+#undef NEG_INFINITY_BIT
+#undef NEG_NORMAL_BIT
+#undef NEG_SUBNORMAL_BIT
+#undef NEG_ZERO_BIT
+#undef POS_INFINITY_BIT
+#undef POS_NORMAL_BIT
+#undef POS_SUBNORMAL_BIT
+#undef POS_ZERO_BIT
+ case FTRUNC_S: {
+ T_fp element = bit_cast<T_fp>(src);
+ const T_int max_int = std::numeric_limits<T_int>::max();
+ const T_int min_int = std::numeric_limits<T_int>::min();
+ if (std::isnan(element)) {
+ dst = 0;
+ } else if (element > max_int || element < min_int) {
+ dst = element > max_int ? max_int : min_int;
+ } else {
+ dst = static_cast<T_int>(std::trunc(element));
+ }
+ break;
+ }
+ case FTRUNC_U: {
+ T_fp element = bit_cast<T_fp>(src);
+ const T_uint max_int = std::numeric_limits<T_uint>::max();
+ if (std::isnan(element)) {
+ dst = 0;
+ } else if (element > max_int || element < 0) {
+ dst = element > max_int ? max_int : 0;
+ } else {
+ dst = static_cast<T_uint>(std::trunc(element));
+ }
+ break;
+ }
+ case FSQRT: {
+ T_fp element = bit_cast<T_fp>(src);
+ if (element < 0 || std::isnan(element)) {
+ dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ } else {
+ dst = bit_cast<T_int>(std::sqrt(element));
+ }
+ break;
+ }
+ case FRSQRT: {
+ T_fp element = bit_cast<T_fp>(src);
+ if (element < 0 || std::isnan(element)) {
+ dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ } else {
+ dst = bit_cast<T_int>(1 / std::sqrt(element));
+ }
+ break;
+ }
+ case FRCP: {
+ T_fp element = bit_cast<T_fp>(src);
+ if (std::isnan(element)) {
+ dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ } else {
+ dst = bit_cast<T_int>(1 / element);
+ }
+ break;
+ }
+ case FRINT: {
+ T_fp element = bit_cast<T_fp>(src);
+ if (std::isnan(element)) {
+ dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ } else {
+ T_int dummy;
+ sim->round_according_to_msacsr<T_fp, T_int>(element, element, dummy);
+ dst = bit_cast<T_int>(element);
+ }
+ break;
+ }
+ case FLOG2: {
+ T_fp element = bit_cast<T_fp>(src);
+ switch (std::fpclassify(element)) {
+ case FP_NORMAL:
+ case FP_SUBNORMAL:
+ dst = bit_cast<T_int>(std::logb(element));
+ break;
+ case FP_ZERO:
+ dst = bit_cast<T_int>(-std::numeric_limits<T_fp>::infinity());
+ break;
+ case FP_NAN:
+ dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ break;
+ case FP_INFINITE:
+ if (element < 0) {
+ dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ } else {
+ dst = bit_cast<T_int>(std::numeric_limits<T_fp>::infinity());
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+ case FTINT_S: {
+ T_fp element = bit_cast<T_fp>(src);
+ const T_int max_int = std::numeric_limits<T_int>::max();
+ const T_int min_int = std::numeric_limits<T_int>::min();
+ if (std::isnan(element)) {
+ dst = 0;
+ } else if (element < min_int || element > max_int) {
+ dst = element > max_int ? max_int : min_int;
+ } else {
+ sim->round_according_to_msacsr<T_fp, T_int>(element, element, dst);
+ }
+ break;
+ }
+ case FTINT_U: {
+ T_fp element = bit_cast<T_fp>(src);
+ const T_uint max_uint = std::numeric_limits<T_uint>::max();
+ if (std::isnan(element)) {
+ dst = 0;
+ } else if (element < 0 || element > max_uint) {
+ dst = element > max_uint ? max_uint : 0;
+ } else {
+ T_uint res;
+ sim->round_according_to_msacsr<T_fp, T_uint>(element, element, res);
+ dst = *reinterpret_cast<T_int*>(&res);
+ }
+ break;
+ }
case FFINT_S:
+ dst = bit_cast<T_int>(static_cast<T_fp>(src));
+ break;
case FFINT_U:
- UNIMPLEMENTED();
+ typedef typename std::make_unsigned<T_src>::type uT_src;
+ dst = bit_cast<T_int>(static_cast<T_fp>(bit_cast<uT_src>(src)));
break;
default:
UNREACHABLE();
}
+ return 0;
+}
+
+template <typename T_int, typename T_fp, typename T_reg, typename T_i>
+T_int Msa2RFInstrHelper2(uint32_t opcode, T_reg ws, T_i i) {
+ switch (opcode) {
+#define EXTRACT_FLOAT16_SIGN(fp16) (fp16 >> 15)
+#define EXTRACT_FLOAT16_EXP(fp16) (fp16 >> 10 & 0x1f)
+#define EXTRACT_FLOAT16_FRAC(fp16) (fp16 & 0x3ff)
+#define PACK_FLOAT32(sign, exp, frac) \
+ static_cast<uint32_t>(((sign) << 31) + ((exp) << 23) + (frac))
+#define FEXUP_DF(src_index) \
+ uint_fast16_t element = ws.uh[src_index]; \
+ uint_fast32_t aSign, aFrac; \
+ int_fast32_t aExp; \
+ aSign = EXTRACT_FLOAT16_SIGN(element); \
+ aExp = EXTRACT_FLOAT16_EXP(element); \
+ aFrac = EXTRACT_FLOAT16_FRAC(element); \
+ if (V8_LIKELY(aExp && aExp != 0x1f)) { \
+ return PACK_FLOAT32(aSign, aExp + 0x70, aFrac << 13); \
+ } else if (aExp == 0x1f) { \
+ if (aFrac) { \
+ return bit_cast<int32_t>(std::numeric_limits<float>::quiet_NaN()); \
+ } else { \
+ return bit_cast<uint32_t>(std::numeric_limits<float>::infinity()) | \
+ static_cast<uint32_t>(aSign) << 31; \
+ } \
+ } else { \
+ if (aFrac == 0) { \
+ return PACK_FLOAT32(aSign, 0, 0); \
+ } else { \
+ int_fast16_t shiftCount = \
+ base::bits::CountLeadingZeros32(static_cast<uint32_t>(aFrac)) - 21; \
+ aFrac <<= shiftCount; \
+ aExp = -shiftCount; \
+ return PACK_FLOAT32(aSign, aExp + 0x70, aFrac << 13); \
+ } \
+ }
+ case FEXUPL:
+ if (std::is_same<int32_t, T_int>::value) {
+ FEXUP_DF(i + kMSALanesWord)
+ } else {
+ return bit_cast<int64_t>(
+ static_cast<double>(bit_cast<float>(ws.w[i + kMSALanesDword])));
+ }
+ case FEXUPR:
+ if (std::is_same<int32_t, T_int>::value) {
+ FEXUP_DF(i)
+ } else {
+ return bit_cast<int64_t>(static_cast<double>(bit_cast<float>(ws.w[i])));
+ }
+ case FFQL: {
+ if (std::is_same<int32_t, T_int>::value) {
+ return bit_cast<int32_t>(static_cast<float>(ws.h[i + kMSALanesWord]) /
+ (1U << 15));
+ } else {
+ return bit_cast<int64_t>(static_cast<double>(ws.w[i + kMSALanesDword]) /
+ (1U << 31));
+ }
+ break;
+ }
+ case FFQR: {
+ if (std::is_same<int32_t, T_int>::value) {
+ return bit_cast<int32_t>(static_cast<float>(ws.h[i]) / (1U << 15));
+ } else {
+ return bit_cast<int64_t>(static_cast<double>(ws.w[i]) / (1U << 31));
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+#undef EXTRACT_FLOAT16_SIGN
+#undef EXTRACT_FLOAT16_EXP
+#undef EXTRACT_FLOAT16_FRAC
+#undef PACK_FLOAT32
+#undef FEXUP_DF
+}
+
+void Simulator::DecodeTypeMsa2RF() {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsa2RFMask;
+ msa_reg_t wd, ws;
+ get_msa_register(ws_reg(), &ws);
+ if (opcode == FEXUPL || opcode == FEXUPR || opcode == FFQL ||
+ opcode == FFQR) {
+ switch (DecodeMsaDataFormat()) {
+ case MSA_WORD:
+ for (int i = 0; i < kMSALanesWord; i++) {
+ wd.w[i] = Msa2RFInstrHelper2<int32_t, float>(opcode, ws, i);
+ }
+ break;
+ case MSA_DWORD:
+ for (int i = 0; i < kMSALanesDword; i++) {
+ wd.d[i] = Msa2RFInstrHelper2<int64_t, double>(opcode, ws, i);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ switch (DecodeMsaDataFormat()) {
+ case MSA_WORD:
+ for (int i = 0; i < kMSALanesWord; i++) {
+ Msa2RFInstrHelper<int32_t, float>(opcode, ws.w[i], wd.w[i], this);
+ }
+ break;
+ case MSA_DWORD:
+ for (int i = 0; i < kMSALanesDword; i++) {
+ Msa2RFInstrHelper<int64_t, double>(opcode, ws.d[i], wd.d[i], this);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ set_msa_register(wd_reg(), &wd);
+ TraceMSARegWr(&wd);
}
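The conversion cases above follow MSA's saturating rules: FTRUNC truncates while FTINT rounds via the MSACSR mode, and in both families NaN produces 0 and out-of-range inputs clamp to the destination's limits instead of invoking undefined behaviour. A standalone model of the FTRUNC_S clamping (the explicit 2^31 constants sidestep the float rounding of INT32_MAX; treat this as a sketch of the rule, not a quote of the helper):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <limits>

int32_t SaturatingTrunc(float element) {
  const int32_t max_int = std::numeric_limits<int32_t>::max();
  const int32_t min_int = std::numeric_limits<int32_t>::min();
  if (std::isnan(element)) return 0;             // MSA: NaN converts to 0.
  if (element >= 2147483648.0f) return max_int;  // clamp above INT32_MAX
  if (element < -2147483648.0f) return min_int;  // clamp below INT32_MIN
  return static_cast<int32_t>(std::trunc(element));
}

int main() {
  assert(SaturatingTrunc(1.9f) == 1);
  assert(SaturatingTrunc(-1.9f) == -1);
  assert(SaturatingTrunc(1e10f) == std::numeric_limits<int32_t>::max());
  assert(SaturatingTrunc(std::numeric_limits<float>::quiet_NaN()) == 0);
}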
void Simulator::DecodeTypeRegister() {
@@ -5264,6 +5809,9 @@ void Simulator::DecodeTypeRegister() {
case kMsaMinor2RF:
DecodeTypeMsa2RF();
break;
+ case kMsaMinorELM:
+ DecodeTypeMsaELM();
+ break;
default:
UNREACHABLE();
}
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index 37619b4295..fbc4ad19fb 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -257,7 +257,9 @@ class Simulator {
void set_fcsr_bit(uint32_t cc, bool value);
bool test_fcsr_bit(uint32_t cc);
void set_fcsr_rounding_mode(FPURoundingMode mode);
+ void set_msacsr_rounding_mode(FPURoundingMode mode);
unsigned int get_fcsr_rounding_mode();
+ unsigned int get_msacsr_rounding_mode();
bool set_fcsr_round_error(double original, double rounded);
bool set_fcsr_round_error(float original, float rounded);
bool set_fcsr_round64_error(double original, double rounded);
@@ -266,6 +268,8 @@ class Simulator {
int32_t& rounded_int, double fs);
void round_according_to_fcsr(float toRound, float& rounded,
int32_t& rounded_int, float fs);
+ template <typename Tfp, typename Tint>
+ void round_according_to_msacsr(Tfp toRound, Tfp& rounded, Tint& rounded_int);
void round64_according_to_fcsr(double toRound, double& rounded,
int64_t& rounded_int, double fs);
void round64_according_to_fcsr(float toRound, float& rounded,
@@ -436,6 +440,8 @@ class Simulator {
T MsaI5InstrHelper(uint32_t opcode, T ws, int32_t i5);
template <typename T>
T MsaBitInstrHelper(uint32_t opcode, T wd, T ws, int32_t m);
+ template <typename T>
+ T Msa3RInstrHelper(uint32_t opcode, T wd, T ws, T wt);
inline int32_t rs_reg() const { return instr_.RsValue(); }
inline int32_t rs() const { return get_register(rs_reg()); }
@@ -573,6 +579,8 @@ class Simulator {
int64_t FPUregisters_[kNumFPURegisters * 2];
// FPU control register.
uint32_t FCSR_;
+ // MSA control register.
+ uint32_t MSACSR_;
// Simulator support.
// Allocate 1MB for stack.
diff --git a/deps/v8/src/mips64/assembler-mips64-inl.h b/deps/v8/src/mips64/assembler-mips64-inl.h
index 18b4dc3267..dea9906e49 100644
--- a/deps/v8/src/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/mips64/assembler-mips64-inl.h
@@ -54,36 +54,15 @@ bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(MIPS_SIMD); }
// -----------------------------------------------------------------------------
// Operand and MemOperand.
-Operand::Operand(int64_t immediate, RelocInfo::Mode rmode) {
- rm_ = no_reg;
- value_.immediate = immediate;
- rmode_ = rmode;
-}
-
-
-Operand::Operand(const ExternalReference& f) {
- rm_ = no_reg;
- value_.immediate = reinterpret_cast<int64_t>(f.address());
- rmode_ = RelocInfo::EXTERNAL_REFERENCE;
-}
-
-
-Operand::Operand(Smi* value) {
- rm_ = no_reg;
- value_.immediate = reinterpret_cast<intptr_t>(value);
- rmode_ = RelocInfo::NONE32;
-}
-
-
-Operand::Operand(Register rm) {
- rm_ = rm;
-}
-
-
bool Operand::is_reg() const {
return rm_.is_valid();
}
+int64_t Operand::immediate() const {
+ DCHECK(!is_reg());
+ DCHECK(!IsHeapObjectRequest());
+ return value_.immediate;
+}
// -----------------------------------------------------------------------------
// RelocInfo.
@@ -153,6 +132,12 @@ Address Assembler::target_address_from_return_address(Address pc) {
return pc - kCallTargetAddressOffset;
}
+void Assembler::deserialization_set_special_target_at(
+ Isolate* isolate, Address instruction_payload, Code* code, Address target) {
+ set_target_address_at(
+ isolate, instruction_payload - kInstructionsFor64BitConstant * kInstrSize,
+ code, target);
+}
void Assembler::set_target_internal_reference_encoded_at(Address pc,
Address target) {
@@ -366,6 +351,7 @@ void Assembler::emit(uint64_t data) {
EmitHelper(data);
}
+EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
} // namespace internal
} // namespace v8
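EnsureSpace's only job is to grow the assembler buffer before an instruction is emitted; moving the constructor out of line here just requires CheckBuffer to be visible. It is used RAII-style at the top of emit functions. A self-contained toy version of the pattern (a real assembler writes into a fixed raw buffer and must grow it explicitly, which is what CheckBuffer models; the names below are illustrative):

#include <cstdint>
#include <vector>

class ToyAssembler {
 public:
  // Guarantee room for at least one more instruction, growing if needed.
  void CheckBuffer() {
    if (buffer_.capacity() == buffer_.size()) {
      buffer_.reserve(buffer_.size() * 2 + 16);
    }
  }
  void Emit(uint32_t instr) { buffer_.push_back(instr); }

 private:
  std::vector<uint32_t> buffer_;
};

class EnsureSpace {
 public:
  explicit EnsureSpace(ToyAssembler* assembler) { assembler->CheckBuffer(); }
};

void EmitNop(ToyAssembler* masm) {
  EnsureSpace ensure_space(masm);  // grow before emitting, RAII-style
  masm->Emit(0x00000000);          // MIPS nop is sll zero_reg, zero_reg, 0
}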
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index 9f342d3471..46d4fca740 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -193,10 +193,9 @@ void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
// Implementation of Operand and MemOperand.
// See assembler-mips-inl.h for inlined constructors.
-Operand::Operand(Handle<HeapObject> handle) {
- rm_ = no_reg;
+Operand::Operand(Handle<HeapObject> handle)
+ : rm_(no_reg), rmode_(RelocInfo::EMBEDDED_OBJECT) {
value_.immediate = reinterpret_cast<intptr_t>(handle.address());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
}
Operand Operand::EmbeddedNumber(double value) {
@@ -250,31 +249,31 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
// daddiu(sp, sp, 8) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
-const Instr kPopInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
- (Register::kCode_sp << kRtShift) |
+const Instr kPopInstruction = DADDIU | (sp.code() << kRsShift) |
+ (sp.code() << kRtShift) |
(kPointerSize & kImm16Mask); // NOLINT
// daddiu(sp, sp, -8) part of Push(r) operation as pre-decrement of sp.
-const Instr kPushInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
- (Register::kCode_sp << kRtShift) |
+const Instr kPushInstruction = DADDIU | (sp.code() << kRsShift) |
+ (sp.code() << kRtShift) |
(-kPointerSize & kImm16Mask); // NOLINT
// Sd(r, MemOperand(sp, 0))
const Instr kPushRegPattern =
- SD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT
+ SD | (sp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
// Ld(r, MemOperand(sp, 0))
const Instr kPopRegPattern =
- LD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT
+ LD | (sp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
const Instr kLwRegFpOffsetPattern =
- LW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask); // NOLINT
+ LW | (fp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
const Instr kSwRegFpOffsetPattern =
- SW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask); // NOLINT
+ SW | (fp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
-const Instr kLwRegFpNegOffsetPattern = LW | (Register::kCode_fp << kRsShift) |
- (kNegOffset & kImm16Mask); // NOLINT
+const Instr kLwRegFpNegOffsetPattern =
+ LW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask); // NOLINT
-const Instr kSwRegFpNegOffsetPattern = SW | (Register::kCode_fp << kRsShift) |
- (kNegOffset & kImm16Mask); // NOLINT
+const Instr kSwRegFpNegOffsetPattern =
+ SW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask); // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xffe00000;
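These patterns pack register numbers into fixed fields of the 32-bit instruction word; the change itself is cosmetic, swapping Register::kCode_sp for sp.code(). As a worked example under the usual MIPS field layout (rs at bit 21, rt at bit 16, sp = register 29, a 16-bit immediate; treat the exact constants as assumptions of this sketch rather than quotes of the headers):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kRsShift = 21, kRtShift = 16, kImm16Mask = 0xFFFF;
  const uint32_t sp_code = 29;        // $sp on MIPS
  const uint32_t DADDIU = 25u << 26;  // opcode 0b011001 in bits 31..26
  // daddiu sp, sp, -8: the pre-decrement half of a 64-bit Push.
  uint32_t push = DADDIU | (sp_code << kRsShift) | (sp_code << kRtShift) |
                  (static_cast<uint32_t>(-8) & kImm16Mask);
  assert((push & kImm16Mask) == 0xFFF8);  // -8 in 16-bit two's complement
}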
@@ -337,23 +336,17 @@ void Assembler::CodeTargetAlign() {
Register Assembler::GetRtReg(Instr instr) {
- Register rt;
- rt.reg_code = (instr & kRtFieldMask) >> kRtShift;
- return rt;
+ return Register::from_code((instr & kRtFieldMask) >> kRtShift);
}
Register Assembler::GetRsReg(Instr instr) {
- Register rs;
- rs.reg_code = (instr & kRsFieldMask) >> kRsShift;
- return rs;
+ return Register::from_code((instr & kRsFieldMask) >> kRsShift);
}
Register Assembler::GetRdReg(Instr instr) {
- Register rd;
- rd.reg_code = (instr & kRdFieldMask) >> kRdShift;
- return rd;
+ return Register::from_code((instr & kRdFieldMask) >> kRdShift);
}
@@ -1454,15 +1447,15 @@ void Assembler::bgez(Register rs, int16_t offset) {
void Assembler::bgezc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rt != zero_reg);
GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(!(rs.is(zero_reg)));
- DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rs != zero_reg);
+ DCHECK(rt != zero_reg);
DCHECK(rs.code() != rt.code());
GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1470,16 +1463,16 @@ void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
void Assembler::bgec(Register rs, Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(!(rs.is(zero_reg)));
- DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rs != zero_reg);
+ DCHECK(rt != zero_reg);
DCHECK(rs.code() != rt.code());
GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bgezal(Register rs, int16_t offset) {
- DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
- DCHECK(!(rs.is(ra)));
+ DCHECK(kArchVariant != kMips64r6 || rs == zero_reg);
+ DCHECK(rs != ra);
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
@@ -1495,7 +1488,7 @@ void Assembler::bgtz(Register rs, int16_t offset) {
void Assembler::bgtzc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rt != zero_reg);
GenInstrImmediate(BGTZL, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1510,7 +1503,7 @@ void Assembler::blez(Register rs, int16_t offset) {
void Assembler::blezc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rt != zero_reg);
GenInstrImmediate(BLEZL, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1518,15 +1511,15 @@ void Assembler::blezc(Register rt, int16_t offset) {
void Assembler::bltzc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(!rt.is(zero_reg));
+ DCHECK(rt != zero_reg);
GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(!(rs.is(zero_reg)));
- DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rs != zero_reg);
+ DCHECK(rt != zero_reg);
DCHECK(rs.code() != rt.code());
GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1534,8 +1527,8 @@ void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
void Assembler::bltc(Register rs, Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(!rs.is(zero_reg));
- DCHECK(!rt.is(zero_reg));
+ DCHECK(rs != zero_reg);
+ DCHECK(rt != zero_reg);
DCHECK(rs.code() != rt.code());
GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1549,8 +1542,8 @@ void Assembler::bltz(Register rs, int16_t offset) {
void Assembler::bltzal(Register rs, int16_t offset) {
- DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
- DCHECK(!(rs.is(ra)));
+ DCHECK(kArchVariant != kMips64r6 || rs == zero_reg);
+ DCHECK(rs != ra);
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
@@ -1586,8 +1579,8 @@ void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
void Assembler::blezalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(!(rt.is(zero_reg)));
- DCHECK(!(rt.is(ra)));
+ DCHECK(rt != zero_reg);
+ DCHECK(rt != ra);
GenInstrImmediate(BLEZ, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1595,16 +1588,16 @@ void Assembler::blezalc(Register rt, int16_t offset) {
void Assembler::bgezalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(!(rt.is(zero_reg)));
- DCHECK(!(rt.is(ra)));
+ DCHECK(rt != zero_reg);
+ DCHECK(rt != ra);
GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bgezall(Register rs, int16_t offset) {
DCHECK(kArchVariant != kMips64r6);
- DCHECK(!(rs.is(zero_reg)));
- DCHECK(!(rs.is(ra)));
+ DCHECK(rs != zero_reg);
+ DCHECK(rs != ra);
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
@@ -1613,16 +1606,16 @@ void Assembler::bgezall(Register rs, int16_t offset) {
void Assembler::bltzalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(!(rt.is(zero_reg)));
- DCHECK(!(rt.is(ra)));
+ DCHECK(rt != zero_reg);
+ DCHECK(rt != ra);
GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bgtzalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(!(rt.is(zero_reg)));
- DCHECK(!(rt.is(ra)));
+ DCHECK(rt != zero_reg);
+ DCHECK(rt != ra);
GenInstrImmediate(BGTZ, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1630,8 +1623,8 @@ void Assembler::bgtzalc(Register rt, int16_t offset) {
void Assembler::beqzalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(!(rt.is(zero_reg)));
- DCHECK(!(rt.is(ra)));
+ DCHECK(rt != zero_reg);
+ DCHECK(rt != ra);
GenInstrImmediate(ADDI, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1639,8 +1632,8 @@ void Assembler::beqzalc(Register rt, int16_t offset) {
void Assembler::bnezalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(!(rt.is(zero_reg)));
- DCHECK(!(rt.is(ra)));
+ DCHECK(rt != zero_reg);
+ DCHECK(rt != ra);
GenInstrImmediate(DADDI, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1659,7 +1652,7 @@ void Assembler::beqc(Register rs, Register rt, int16_t offset) {
void Assembler::beqzc(Register rs, int32_t offset) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(!(rs.is(zero_reg)));
+ DCHECK(rs != zero_reg);
GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1677,7 +1670,7 @@ void Assembler::bnec(Register rs, Register rt, int16_t offset) {
void Assembler::bnezc(Register rs, int32_t offset) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(!(rs.is(zero_reg)));
+ DCHECK(rs != zero_reg);
GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1976,7 +1969,7 @@ void Assembler::sll(Register rd,
// Don't allow nop instructions in the form sll zero_reg, zero_reg to be
// generated using the sll instruction. They must be generated using
// nop(int/NopMarkerTypes).
- DCHECK(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
+ DCHECK(coming_from_nop || (rd != zero_reg && rt != zero_reg));
GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);
}
@@ -2141,8 +2134,8 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
return;
}
- DCHECK(!src.rm().is(
- at)); // Must not overwrite the register 'base' while loading 'offset'.
+ DCHECK(src.rm() !=
+ at); // Must not overwrite the register 'base' while loading 'offset'.
#ifdef DEBUG
// Remember the "(mis)alignment" of 'offset', it will be checked at the end.
@@ -2373,7 +2366,7 @@ void Assembler::aui(Register rt, Register rs, int32_t j) {
void Assembler::daui(Register rt, Register rs, int32_t j) {
DCHECK(is_uint16(j));
- DCHECK(!rs.is(zero_reg));
+ DCHECK(rs != zero_reg);
GenInstrImmediate(DAUI, rs, rt, j);
}
@@ -2606,15 +2599,13 @@ void Assembler::movn(Register rd, Register rs, Register rt) {
void Assembler::movt(Register rd, Register rs, uint16_t cc) {
- Register rt;
- rt.reg_code = (cc & 0x0007) << 2 | 1;
+ Register rt = Register::from_code((cc & 0x0007) << 2 | 1);
GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
void Assembler::movf(Register rd, Register rs, uint16_t cc) {
- Register rt;
- rt.reg_code = (cc & 0x0007) << 2 | 0;
+ Register rt = Register::from_code((cc & 0x0007) << 2 | 0);
GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
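The rewritten movt/movf also make the MOVCI encoding easier to audit: the FPU condition code cc (0..7) occupies bits 4..2 of the rt field, and bit 0 selects the sense (1 = move-on-true, 0 = move-on-false). A hypothetical helper showing just that arithmetic:

    // Mirrors (cc & 0x0007) << 2 | tf from the two functions above.
    int MovciRtCode(int cc, bool on_true) {
      return ((cc & 0x0007) << 2) | (on_true ? 1 : 0);
    }
    // MovciRtCode(0, true)  == 1   (movt with cc = 0)
    // MovciRtCode(3, false) == 12  (movf with cc = 3)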
@@ -2960,32 +2951,28 @@ void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK(kArchVariant == kMips64r2);
- FPURegister ft;
- ft.reg_code = (cc & 0x0007) << 2 | 1;
+ FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1);
GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}
void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK(kArchVariant == kMips64r2);
- FPURegister ft;
- ft.reg_code = (cc & 0x0007) << 2 | 1;
+ FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1);
GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}
void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK(kArchVariant == kMips64r2);
- FPURegister ft;
- ft.reg_code = (cc & 0x0007) << 2 | 0;
+ FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0);
GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}
void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK(kArchVariant == kMips64r2);
- FPURegister ft;
- ft.reg_code = (cc & 0x0007) << 2 | 0;
+ FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0);
GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}
@@ -3461,7 +3448,7 @@ MSA_BRANCH_LIST(MSA_BRANCH)
} else { \
UseScratchRegisterScope temps(this); \
Register scratch = temps.Acquire(); \
- DCHECK(!rs.rm().is(scratch)); \
+ DCHECK(rs.rm() != scratch); \
daddiu(scratch, source.rm(), source.offset()); \
GenInstrMsaMI10(opcode, 0, scratch, wd); \
} \
@@ -4234,8 +4221,8 @@ void Assembler::set_target_value_at(Isolate* isolate, Address pc,
// Check we have the result from a li macro-instruction.
Instr instr0 = instr_at(pc);
Instr instr3 = instr_at(pc + kInstrSize * 3);
- CHECK((GetOpcodeField(instr0) == LUI && GetOpcodeField(instr1) == ORI &&
- GetOpcodeField(instr3) == ORI));
+ DCHECK((GetOpcodeField(instr0) == LUI && GetOpcodeField(instr1) == ORI &&
+ GetOpcodeField(instr3) == ORI));
#endif
// Must use 4 instructions to ensure patchable code.
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index 7670446ab2..ce47cb761e 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -199,9 +199,15 @@ const int kSafepointRegisterStackIndexMap[kNumRegs] = {kUndefIndex, // zero_reg
// -----------------------------------------------------------------------------
// Implementation of Register and FPURegister.
-struct Register {
- static constexpr int kCpRegister = 23; // cp (s7) is the 23rd register.
+enum RegisterCode {
+#define REGISTER_CODE(R) kRegCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kRegAfterLast
+};
+class Register : public RegisterBase<Register, kRegAfterLast> {
+ public:
#if defined(V8_TARGET_LITTLE_ENDIAN)
static constexpr int kMantissaOffset = 0;
static constexpr int kExponentOffset = 4;
@@ -212,45 +218,20 @@ struct Register {
#error Unknown endianness
#endif
- enum Code {
-#define REGISTER_CODE(R) kCode_##R,
- GENERAL_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kAfterLast,
- kCode_no_reg = -1
- };
-
- static constexpr int kNumRegisters = Code::kAfterLast;
-
- static Register from_code(int code) {
- DCHECK(code >= 0);
- DCHECK(code < kNumRegisters);
- Register r = { code };
- return r;
- }
-
- bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
- bool is(Register reg) const { return reg_code == reg.reg_code; }
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
- int bit() const {
- DCHECK(is_valid());
- return 1 << reg_code;
- }
-
- // Unfortunately we can't make this private in a struct.
- int reg_code;
+ private:
+ friend class RegisterBase;
+ explicit constexpr Register(int code) : RegisterBase(code) {}
};
// s7: context register
// s3: lithium scratch
// s4: lithium scratch2
-#define DECLARE_REGISTER(R) constexpr Register R = {Register::kCode_##R};
+#define DECLARE_REGISTER(R) \
+ constexpr Register R = Register::from_code<kRegCode_##R>();
GENERAL_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
-constexpr Register no_reg = {Register::kCode_no_reg};
+
+constexpr Register no_reg = Register::no_reg();
int ToNumber(Register reg);
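All of the struct-to-class conversions in this header lean on a shared CRTP base that this hunk only references. As a rough sketch -- assuming the base provides exactly what these call sites need (a compile-time and a runtime from_code, no_reg, code/bit accessors, and ==/!= comparison) -- it looks something like:

    // Minimal sketch, not the real base class from this commit.
    template <typename SubType, int kAfterLast>
    class RegisterBase {
     public:
      static constexpr int kCode_no_reg = -1;
      static constexpr int kNumRegisters = kAfterLast;

      static constexpr SubType no_reg() { return SubType(kCode_no_reg); }

      template <int code>
      static constexpr SubType from_code() {
        static_assert(code >= 0 && code < kNumRegisters, "must be valid reg");
        return SubType(code);
      }
      static SubType from_code(int code) { return SubType(code); }

      constexpr bool is_valid() const { return reg_code_ != kCode_no_reg; }
      constexpr int code() const { return reg_code_; }
      constexpr int bit() const { return 1 << reg_code_; }
      constexpr bool operator==(SubType other) const {
        return reg_code_ == other.code();
      }
      constexpr bool operator!=(SubType other) const {
        return !(*this == other);
      }

     protected:
      explicit constexpr RegisterBase(int code) : reg_code_(code) {}
      int reg_code_;
    };

This is why each subclass only declares its private constructor and friends the base: everything else, including the DCHECK(rt != zero_reg)-style comparisons in the .cc diff above, comes from the base.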
@@ -259,103 +240,49 @@ Register ToRegister(int num);
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
-// Coprocessor register.
-struct FPURegister {
- enum Code {
-#define REGISTER_CODE(R) kCode_##R,
- DOUBLE_REGISTERS(REGISTER_CODE)
+enum DoubleRegisterCode {
+#define REGISTER_CODE(R) kDoubleCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
- kAfterLast,
- kCode_no_reg = kInvalidFPURegister
- };
-
- static constexpr int kMaxNumRegisters = Code::kAfterLast;
-
- inline static int NumRegisters();
+ kDoubleAfterLast
+};
+// Coprocessor register.
+class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> {
+ public:
// TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
// to the number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
// the number of Double regs (64-bit regs, or FPU-reg-pairs).
- bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
- bool is(FPURegister reg) const { return reg_code == reg.reg_code; }
FPURegister low() const {
// TODO(plind): Create DCHECK for FR=0 mode. This usage is suspect for FR=1.
// Find low reg of a Double-reg pair, which is the reg itself.
- DCHECK(reg_code % 2 == 0); // Specified Double reg must be even.
- FPURegister reg;
- reg.reg_code = reg_code;
- DCHECK(reg.is_valid());
- return reg;
+ DCHECK(code() % 2 == 0); // Specified Double reg must be even.
+ return FPURegister::from_code(code());
}
FPURegister high() const {
// TODO(plind): Create DCHECK for FR=0 mode. This usage is illegal in FR=1.
// Find high reg of a Double-reg pair, which is reg + 1.
- DCHECK(reg_code % 2 == 0); // Specified Double reg must be even.
- FPURegister reg;
- reg.reg_code = reg_code + 1;
- DCHECK(reg.is_valid());
- return reg;
- }
-
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
-
- int bit() const {
- DCHECK(is_valid());
- return 1 << reg_code;
+ DCHECK(code() % 2 == 0); // Specified Double reg must be even.
+ return FPURegister::from_code(code() + 1);
}
- static FPURegister from_code(int code) {
- FPURegister r = {code};
- return r;
- }
- void setcode(int f) {
- reg_code = f;
- DCHECK(is_valid());
- }
- // Unfortunately we can't make this private in a struct.
- int reg_code;
+ private:
+ friend class RegisterBase;
+ explicit constexpr FPURegister(int code) : RegisterBase(code) {}
};
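A note on the low()/high() pair kept above: in FR=0 mode a 64-bit double lives in an even/odd pair of 32-bit FPU registers, which is why both accessors DCHECK an even code. Usage sketch, with f2/f3 as declared further down in this header:

    DoubleRegister d = f2;  // even code -> a valid pair base {f2, f3}
    d.low();                // f2: the low half of the pair is the reg itself
    d.high();               // f3: from_code(code() + 1)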
-// MIPS SIMD (MSA) register
-struct MSARegister {
- enum Code {
-#define REGISTER_CODE(R) kCode_##R,
- SIMD128_REGISTERS(REGISTER_CODE)
+enum MSARegisterCode {
+#define REGISTER_CODE(R) kMsaCode_##R,
+ SIMD128_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
- kAfterLast,
- kCode_no_reg = kInvalidMSARegister
- };
-
- static const int kMaxNumRegisters = Code::kAfterLast;
-
- inline static int NumRegisters();
-
- bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
- bool is(MSARegister reg) const { return reg_code == reg.reg_code; }
-
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
- int bit() const {
- DCHECK(is_valid());
- return 1 << reg_code;
- }
+ kMsaAfterLast
+};
- static MSARegister from_code(int code) {
- MSARegister r = {code};
- return r;
- }
- void setcode(int f) {
- reg_code = f;
- DCHECK(is_valid());
- }
- // Unfortunately we can't make this private in a struct.
- int reg_code;
+// MIPS SIMD (MSA) register
+class MSARegister : public RegisterBase<MSARegister, kMsaAfterLast> {
+ friend class RegisterBase;
+ explicit constexpr MSARegister(int code) : RegisterBase(code) {}
};
// A few double registers are reserved: one as a scratch register and one to
@@ -376,78 +303,22 @@ typedef FPURegister FloatRegister;
typedef FPURegister DoubleRegister;
-constexpr DoubleRegister no_freg = {kInvalidFPURegister};
-
-constexpr DoubleRegister f0 = {0}; // Return value in hard float mode.
-constexpr DoubleRegister f1 = {1};
-constexpr DoubleRegister f2 = {2};
-constexpr DoubleRegister f3 = {3};
-constexpr DoubleRegister f4 = {4};
-constexpr DoubleRegister f5 = {5};
-constexpr DoubleRegister f6 = {6};
-constexpr DoubleRegister f7 = {7};
-constexpr DoubleRegister f8 = {8};
-constexpr DoubleRegister f9 = {9};
-constexpr DoubleRegister f10 = {10};
-constexpr DoubleRegister f11 = {11};
-constexpr DoubleRegister f12 = {12}; // Arg 0 in hard float mode.
-constexpr DoubleRegister f13 = {13};
-constexpr DoubleRegister f14 = {14}; // Arg 1 in hard float mode.
-constexpr DoubleRegister f15 = {15};
-constexpr DoubleRegister f16 = {16};
-constexpr DoubleRegister f17 = {17};
-constexpr DoubleRegister f18 = {18};
-constexpr DoubleRegister f19 = {19};
-constexpr DoubleRegister f20 = {20};
-constexpr DoubleRegister f21 = {21};
-constexpr DoubleRegister f22 = {22};
-constexpr DoubleRegister f23 = {23};
-constexpr DoubleRegister f24 = {24};
-constexpr DoubleRegister f25 = {25};
-constexpr DoubleRegister f26 = {26};
-constexpr DoubleRegister f27 = {27};
-constexpr DoubleRegister f28 = {28};
-constexpr DoubleRegister f29 = {29};
-constexpr DoubleRegister f30 = {30};
-constexpr DoubleRegister f31 = {31};
+#define DECLARE_DOUBLE_REGISTER(R) \
+ constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
+DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
+#undef DECLARE_DOUBLE_REGISTER
+
+constexpr DoubleRegister no_freg = DoubleRegister::no_reg();
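The X-macro expands to exactly the 32 hand-written initializers it deletes; the first two instantiations, spelled out:

    constexpr DoubleRegister f0 = DoubleRegister::from_code<kDoubleCode_f0>();
    constexpr DoubleRegister f1 = DoubleRegister::from_code<kDoubleCode_f1>();
    // ...and so on through f31. The per-register comments (return value in f0,
    // arg 0 in f12, arg 1 in f14 in hard float mode) are the only
    // information the macro form drops.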
// SIMD registers.
typedef MSARegister Simd128Register;
-const Simd128Register no_msareg = {kInvalidMSARegister};
-
-constexpr Simd128Register w0 = {0};
-constexpr Simd128Register w1 = {1};
-constexpr Simd128Register w2 = {2};
-constexpr Simd128Register w3 = {3};
-constexpr Simd128Register w4 = {4};
-constexpr Simd128Register w5 = {5};
-constexpr Simd128Register w6 = {6};
-constexpr Simd128Register w7 = {7};
-constexpr Simd128Register w8 = {8};
-constexpr Simd128Register w9 = {9};
-constexpr Simd128Register w10 = {10};
-constexpr Simd128Register w11 = {11};
-constexpr Simd128Register w12 = {12};
-constexpr Simd128Register w13 = {13};
-constexpr Simd128Register w14 = {14};
-constexpr Simd128Register w15 = {15};
-constexpr Simd128Register w16 = {16};
-constexpr Simd128Register w17 = {17};
-constexpr Simd128Register w18 = {18};
-constexpr Simd128Register w19 = {19};
-constexpr Simd128Register w20 = {20};
-constexpr Simd128Register w21 = {21};
-constexpr Simd128Register w22 = {22};
-constexpr Simd128Register w23 = {23};
-constexpr Simd128Register w24 = {24};
-constexpr Simd128Register w25 = {25};
-constexpr Simd128Register w26 = {26};
-constexpr Simd128Register w27 = {27};
-constexpr Simd128Register w28 = {28};
-constexpr Simd128Register w29 = {29};
-constexpr Simd128Register w30 = {30};
-constexpr Simd128Register w31 = {31};
+#define DECLARE_SIMD128_REGISTER(R) \
+ constexpr Simd128Register R = Simd128Register::from_code<kMsaCode_##R>();
+SIMD128_REGISTERS(DECLARE_SIMD128_REGISTER)
+#undef DECLARE_SIMD128_REGISTER
+
+const Simd128Register no_msareg = Simd128Register::no_reg();
// Register aliases.
// cp is assumed to be a callee saved register.
@@ -523,28 +394,33 @@ class Operand BASE_EMBEDDED {
public:
// Immediate.
INLINE(explicit Operand(int64_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE64));
- INLINE(explicit Operand(const ExternalReference& f));
+ RelocInfo::Mode rmode = RelocInfo::NONE64))
+ : rm_(no_reg), rmode_(rmode) {
+ value_.immediate = immediate;
+ }
+ INLINE(explicit Operand(const ExternalReference& f))
+ : rm_(no_reg), rmode_(RelocInfo::EXTERNAL_REFERENCE) {
+ value_.immediate = reinterpret_cast<int64_t>(f.address());
+ }
INLINE(explicit Operand(const char* s));
INLINE(explicit Operand(Object** opp));
INLINE(explicit Operand(Context** cpp));
explicit Operand(Handle<HeapObject> handle);
- INLINE(explicit Operand(Smi* value));
+ INLINE(explicit Operand(Smi* value))
+ : rm_(no_reg), rmode_(RelocInfo::NONE32) {
+ value_.immediate = reinterpret_cast<intptr_t>(value);
+ }
static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
static Operand EmbeddedCode(CodeStub* stub);
// Register.
- INLINE(explicit Operand(Register rm));
+ INLINE(explicit Operand(Register rm)) : rm_(rm) {}
// Return true if this is a register operand.
INLINE(bool is_reg() const);
- inline int64_t immediate() const {
- DCHECK(!is_reg());
- DCHECK(!IsHeapObjectRequest());
- return value_.immediate;
- }
+ inline int64_t immediate() const;
bool IsImmediate() const { return !rm_.is_valid(); }
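All the newly inlined constructors funnel into the same two-field layout: rm_ holds a register (or no_reg) and value_.immediate holds everything else, so is_reg() and IsImmediate() reduce to a validity check on rm_. Usage sketch:

    Operand imm(int64_t{42});  // rm_ = no_reg -> IsImmediate() is true
    Operand reg(a0);           // rm_ = a0     -> is_reg() is true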
@@ -734,12 +610,7 @@ class Assembler : public AssemblerBase {
// has already deserialized the lui/ori instructions etc.
inline static void deserialization_set_special_target_at(
Isolate* isolate, Address instruction_payload, Code* code,
- Address target) {
- set_target_address_at(
- isolate,
- instruction_payload - kInstructionsFor64BitConstant * kInstrSize, code,
- target);
- }
+ Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
@@ -2429,9 +2300,7 @@ class Assembler : public AssemblerBase {
class EnsureSpace BASE_EMBEDDED {
public:
- explicit EnsureSpace(Assembler* assembler) {
- assembler->CheckBuffer();
- }
+ explicit inline EnsureSpace(Assembler* assembler);
};
class UseScratchRegisterScope {
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index 764d6e9281..1025bcd928 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -10,6 +10,7 @@
#include "src/codegen.h"
#include "src/frame-constants.h"
#include "src/frames.h"
+#include "src/heap/heap-inl.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
@@ -43,7 +44,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
int double_offset = offset();
// Account for saved regs if input is sp.
- if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
+ if (input_reg == sp) double_offset += 3 * kPointerSize;
Register scratch =
GetRegisterThatIsNotOneOf(input_reg, result_reg);
@@ -220,7 +221,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
void MathPowStub::Generate(MacroAssembler* masm) {
const Register exponent = MathPowTaggedDescriptor::exponent();
- DCHECK(exponent.is(a2));
+ DCHECK(exponent == a2);
const DoubleRegister double_base = f2;
const DoubleRegister double_exponent = f4;
const DoubleRegister double_result = f0;
@@ -423,28 +424,9 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ AssertStackIsAligned();
- int frame_alignment = MacroAssembler::ActivationFrameAlignment();
- int frame_alignment_mask = frame_alignment - 1;
- int result_stack_size;
- if (result_size() <= 2) {
- // a0 = argc, a1 = argv, a2 = isolate
- __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
- __ mov(a1, s1);
- result_stack_size = 0;
- } else {
- DCHECK_EQ(3, result_size());
- // Allocate additional space for the result.
- result_stack_size =
- ((result_size() * kPointerSize) + frame_alignment_mask) &
- ~frame_alignment_mask;
- __ Dsubu(sp, sp, Operand(result_stack_size));
-
- // a0 = hidden result argument, a1 = argc, a2 = argv, a3 = isolate.
- __ li(a3, Operand(ExternalReference::isolate_address(isolate())));
- __ mov(a2, s1);
- __ mov(a1, a0);
- __ mov(a0, sp);
- }
+ // a0 = argc, a1 = argv, a2 = isolate
+ __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
+ __ mov(a1, s1);
// To let the GC traverse the return address of the exit frames, we need to
// know where the return address is. The CEntryStub is unmovable, so
@@ -467,7 +449,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ bind(&find_ra);
// This spot was reserved in EnterExitFrame.
- __ Sd(ra, MemOperand(sp, result_stack_size));
+ __ Sd(ra, MemOperand(sp));
// Stack space reservation moved to the branch delay slot below.
// Stack is still aligned.
@@ -480,14 +462,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
DCHECK_EQ(kNumInstructionsToJump,
masm->InstructionsGeneratedSince(&find_ra));
}
- if (result_size() > 2) {
- DCHECK_EQ(3, result_size());
- // Read result values stored on stack.
- __ Ld(a0, MemOperand(v0, 2 * kPointerSize));
- __ Ld(v1, MemOperand(v0, 1 * kPointerSize));
- __ Ld(v0, MemOperand(v0, 0 * kPointerSize));
- }
- // Result returned in v0, v1:v0 or a0:v1:v0 - do not destroy these registers!
+
+ // Result returned in v0 or v1:v0 - do not destroy these registers!
// Check result for exception sentinel.
Label exception_returned;
@@ -513,14 +489,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// v0:v1: result
// sp: stack pointer
// fp: frame pointer
- Register argc;
- if (argv_in_register()) {
- // We don't want to pop arguments so set argc to no_reg.
- argc = no_reg;
- } else {
- // s0: still holds argc (callee-saved).
- argc = s0;
- }
+ Register argc = argv_in_register()
+ // We don't want to pop arguments so set argc to no_reg.
+ ? no_reg
+ // s0: still holds argc (callee-saved).
+ : s0;
__ LeaveExitFrame(save_doubles(), argc, true, EMIT_RETURN);
// Handling of exception.
@@ -906,7 +879,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ Dlsa(tmp, properties, index, kPointerSizeLog2);
__ Ld(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
- DCHECK(!tmp.is(entity_name));
+ DCHECK(tmp != entity_name);
__ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
__ Branch(done, eq, entity_name, Operand(tmp));
@@ -1045,6 +1018,49 @@ void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
stub2.GetCode();
}
+RecordWriteStub::Mode RecordWriteStub::GetMode(Code* stub) {
+ Instr first_instruction = Assembler::instr_at(stub->instruction_start());
+ Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
+ 2 * Assembler::kInstrSize);
+
+ if (Assembler::IsBeq(first_instruction)) {
+ return INCREMENTAL;
+ }
+
+ DCHECK(Assembler::IsBne(first_instruction));
+
+ if (Assembler::IsBeq(second_instruction)) {
+ return INCREMENTAL_COMPACTION;
+ }
+
+ DCHECK(Assembler::IsBne(second_instruction));
+
+ return STORE_BUFFER_ONLY;
+}
+
+void RecordWriteStub::Patch(Code* stub, Mode mode) {
+ MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
+ stub->instruction_size(), CodeObjectRequired::kNo);
+ switch (mode) {
+ case STORE_BUFFER_ONLY:
+ DCHECK(GetMode(stub) == INCREMENTAL ||
+ GetMode(stub) == INCREMENTAL_COMPACTION);
+ PatchBranchIntoNop(&masm, 0);
+ PatchBranchIntoNop(&masm, 2 * Assembler::kInstrSize);
+ break;
+ case INCREMENTAL:
+ DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
+ PatchNopIntoBranch(&masm, 0);
+ break;
+ case INCREMENTAL_COMPACTION:
+ DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
+ PatchNopIntoBranch(&masm, 2 * Assembler::kInstrSize);
+ break;
+ }
+ DCHECK(GetMode(stub) == mode);
+ Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(),
+ 4 * Assembler::kInstrSize);
+}
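GetMode() and Patch() -- moved here out of the header, see code-stubs-mips64.h below -- are two views of one convention: the stub's mode lives in its first and third instruction slots, each of which is either a real branch or its bne placeholder:

    // instr[0]   instr[2]       mode
    // beq        (don't care)   INCREMENTAL
    // bne        beq            INCREMENTAL_COMPACTION
    // bne        bne            STORE_BUFFER_ONLY
    // Patch() rewrites exactly these two slots and flushes the icache, so the
    // two functions must stay in agreement.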
// Takes the input in 3 registers: address_, value_ and object_. A pointer to
// the value has just been written into the object, now this stub makes sure
@@ -1066,11 +1082,7 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
__ nop();
if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object(),
- address(),
- value(),
- save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
}
__ Ret();
@@ -1108,11 +1120,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
InformIncrementalMarker(masm);
regs_.Restore(masm);
- __ RememberedSetHelper(object(),
- address(),
- value(),
- save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
__ bind(&dont_need_remembered_set);
}
@@ -1129,10 +1137,9 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
- Register address =
- a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
- DCHECK(!address.is(regs_.object()));
- DCHECK(!address.is(a0));
+ Register address = a0 == regs_.address() ? regs_.scratch0() : regs_.address();
+ DCHECK(address != regs_.object());
+ DCHECK(address != a0);
__ Move(address, regs_.address());
__ Move(a0, regs_.object());
__ Move(a1, address);
@@ -1145,6 +1152,9 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}
+void RecordWriteStub::Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+}
void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
@@ -1161,11 +1171,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(),
- address(),
- value(),
- save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
} else {
__ Ret();
}
@@ -1206,11 +1212,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(),
- address(),
- value(),
- save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
} else {
__ Ret();
}
@@ -1600,7 +1602,7 @@ static void CallApiFunctionAndReturn(
const int kLevelOffset = AddressOffset(
ExternalReference::handle_scope_level_address(isolate), next_address);
- DCHECK(function_address.is(a1) || function_address.is(a2));
+ DCHECK(function_address == a1 || function_address == a2);
Label profiler_disabled;
Label end_profiler_check;
@@ -1801,7 +1803,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
- DCHECK(!api_function_address.is(a0) && !scratch.is(a0));
+ DCHECK(api_function_address != a0 && scratch != a0);
// a0 = FunctionCallbackInfo&
// Arguments is after the return address.
__ Daddu(a0, sp, Operand(1 * kPointerSize));
diff --git a/deps/v8/src/mips64/code-stubs-mips64.h b/deps/v8/src/mips64/code-stubs-mips64.h
index 403f3f4691..ca82b96c25 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.h
+++ b/deps/v8/src/mips64/code-stubs-mips64.h
@@ -104,49 +104,9 @@ class RecordWriteStub: public PlatformCodeStub {
DCHECK(Assembler::IsBeq(masm->instr_at(pos)));
}
- static Mode GetMode(Code* stub) {
- Instr first_instruction = Assembler::instr_at(stub->instruction_start());
- Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
- 2 * Assembler::kInstrSize);
+ static Mode GetMode(Code* stub);
- if (Assembler::IsBeq(first_instruction)) {
- return INCREMENTAL;
- }
-
- DCHECK(Assembler::IsBne(first_instruction));
-
- if (Assembler::IsBeq(second_instruction)) {
- return INCREMENTAL_COMPACTION;
- }
-
- DCHECK(Assembler::IsBne(second_instruction));
-
- return STORE_BUFFER_ONLY;
- }
-
- static void Patch(Code* stub, Mode mode) {
- MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
- stub->instruction_size(), CodeObjectRequired::kNo);
- switch (mode) {
- case STORE_BUFFER_ONLY:
- DCHECK(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
- PatchBranchIntoNop(&masm, 0);
- PatchBranchIntoNop(&masm, 2 * Assembler::kInstrSize);
- break;
- case INCREMENTAL:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, 0);
- break;
- case INCREMENTAL_COMPACTION:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, 2 * Assembler::kInstrSize);
- break;
- }
- DCHECK(GetMode(stub) == mode);
- Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(),
- 4 * Assembler::kInstrSize);
- }
+ static void Patch(Code* stub, Mode mode);
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
@@ -156,12 +116,11 @@ class RecordWriteStub: public PlatformCodeStub {
// the caller.
class RegisterAllocation {
public:
- RegisterAllocation(Register object,
- Register address,
- Register scratch0)
+ RegisterAllocation(Register object, Register address, Register scratch0)
: object_(object),
address_(address),
- scratch0_(scratch0) {
+ scratch0_(scratch0),
+ scratch1_(no_reg) {
DCHECK(!AreAliased(scratch0, object, address, no_reg));
scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
}
@@ -224,9 +183,7 @@ class RecordWriteStub: public PlatformCodeStub {
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
- void Activate(Code* code) override {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
- }
+ void Activate(Code* code) override;
Register object() const {
return Register::from_code(ObjectBits::decode(minor_key_));
diff --git a/deps/v8/src/mips64/constants-mips64.h b/deps/v8/src/mips64/constants-mips64.h
index 5563d39055..139f7514d8 100644
--- a/deps/v8/src/mips64/constants-mips64.h
+++ b/deps/v8/src/mips64/constants-mips64.h
@@ -350,6 +350,7 @@ const int kMsaI5Mask = ((7U << 23) | ((1 << 6) - 1));
const int kMsaMI10Mask = (15U << 2);
const int kMsaBITMask = ((7U << 23) | ((1 << 6) - 1));
const int kMsaELMMask = (15U << 22);
+const int kMsaLongerELMMask = kMsaELMMask | (63U << 16);
const int kMsa3RMask = ((7U << 23) | ((1 << 6) - 1));
const int kMsa3RFMask = ((15U << 22) | ((1 << 6) - 1));
const int kMsaVECMask = (23U << 21);
@@ -1658,7 +1659,8 @@ class InstructionGetters : public T {
}
inline int32_t MsaElmDf() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK(this->InstructionType() == InstructionBase::kRegisterType ||
+ this->InstructionType() == InstructionBase::kImmediateType);
int32_t df_n = this->Bits(21, 16);
if (((df_n >> 4) & 3U) == 0) {
return 0;
@@ -1674,7 +1676,8 @@ class InstructionGetters : public T {
}
inline int32_t MsaElmNValue() const {
- DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ DCHECK(this->InstructionType() == InstructionBase::kRegisterType ||
+ this->InstructionType() == InstructionBase::kImmediateType);
return this->Bits(16 + 4 - this->MsaElmDf(), 16);
}
@@ -1866,6 +1869,15 @@ InstructionBase::Type InstructionBase::InstructionType() const {
case kMsaMinor2R:
case kMsaMinor2RF:
return kRegisterType;
+ case kMsaMinorELM:
+ switch (InstructionBits() & kMsaLongerELMMask) {
+ case CFCMSA:
+ case CTCMSA:
+ case MOVE_V:
+ return kRegisterType;
+ default:
+ return kImmediateType;
+ }
default:
return kImmediateType;
}
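The new kMsaMinorELM case pairs with the kMsaLongerELMMask constant added in constants-mips64.h above: the plain kMsaELMMask only pins the opcode/minor-opcode bits, while the longer mask also fixes bits 21..16 (the 63U << 16 term), which is what lets the decoder single out the three register-type ELM operations:

    // InstructionBits() & kMsaLongerELMMask:
    //   CFCMSA, CTCMSA, MOVE_V  -> kRegisterType (bits 21..16 are fixed)
    //   any other ELM encoding  -> kImmediateType (df/n lives in bits 21..16)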
diff --git a/deps/v8/src/mips64/deoptimizer-mips64.cc b/deps/v8/src/mips64/deoptimizer-mips64.cc
index ae8f0c1c2d..642eabdfc8 100644
--- a/deps/v8/src/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/mips64/deoptimizer-mips64.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/assembler-inl.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/register-configuration.h"
@@ -25,8 +26,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
RegList restored_regs = kJSCallerSaved | kCalleeSaved;
RegList saved_regs = restored_regs | sp.bit() | ra.bit();
- const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kMaxNumRegisters;
- const int kFloatRegsSize = kFloatSize * FloatRegister::kMaxNumRegisters;
+ const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
+ const int kFloatRegsSize = kFloatSize * FloatRegister::kNumRegisters;
// Save all double FPU registers before messing with them.
__ Dsubu(sp, sp, Operand(kDoubleRegsSize));
@@ -207,16 +208,12 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Ldc1(fpu_reg, MemOperand(a1, src_offset));
}
- // Push state, pc, and continuation from the last output frame.
- __ Ld(a6, MemOperand(a2, FrameDescription::state_offset()));
- __ push(a6);
-
+ // Push pc and continuation from the last output frame.
__ Ld(a6, MemOperand(a2, FrameDescription::pc_offset()));
__ push(a6);
__ Ld(a6, MemOperand(a2, FrameDescription::continuation_offset()));
__ push(a6);
-
// Technically restoring 'at' should work unless zero_reg is also restored,
// but it's safer to check for this.
DCHECK(!(at.bit() & restored_regs));
@@ -312,6 +309,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
}
}
+bool Deoptimizer::PadTopOfStackRegister() { return false; }
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
diff --git a/deps/v8/src/mips64/disasm-mips64.cc b/deps/v8/src/mips64/disasm-mips64.cc
index e596ab2cb2..2c35653e88 100644
--- a/deps/v8/src/mips64/disasm-mips64.cc
+++ b/deps/v8/src/mips64/disasm-mips64.cc
@@ -1886,6 +1886,9 @@ int Decoder::DecodeTypeRegister(Instruction* instr) {
case kMsaMinor2RF:
DecodeTypeMsa2RF(instr);
break;
+ case kMsaMinorELM:
+ DecodeTypeMsaELM(instr);
+ break;
default:
UNREACHABLE();
}
diff --git a/deps/v8/src/mips64/frame-constants-mips64.cc b/deps/v8/src/mips64/frame-constants-mips64.cc
index 421fa5a952..0d5348e526 100644
--- a/deps/v8/src/mips64/frame-constants-mips64.cc
+++ b/deps/v8/src/mips64/frame-constants-mips64.cc
@@ -18,6 +18,10 @@ Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); }
+int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
+ return register_count;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index a685f8a6d1..679a10ad68 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -22,9 +22,14 @@ void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
void RecordWriteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // TODO(albertnetymk): Use default for now; should call
- // RestrictAllocatableRegisters like src/x64/interface-descriptors-x64.cc
- DefaultInitializePlatformSpecific(data, kParameterCount);
+ const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0};
+
+ data->RestrictAllocatableRegisters(default_stub_registers,
+ arraysize(default_stub_registers));
+
+ CHECK_LE(static_cast<size_t>(kParameterCount),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
}
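This ties into CallRecordWriteStub in macro-assembler-mips64.cc below: restricting the descriptor keeps callable.descriptor().allocatable_registers() down to these five GPRs, so the SaveRegisters()/RestoreRegisters() round trip there pushes and pops only what the stub can actually clobber. Schematically (a reading of the code, not an API guarantee):

    // allocatable_registers() == a0.bit() | a1.bit() | a2.bit() |
    //                            a3.bit() | kReturnRegister0.bit()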
const Register FastNewFunctionContextDescriptor::FunctionRegister() {
@@ -83,27 +88,6 @@ void TypeofDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-
-void FastCloneRegExpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a3, a2, a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a3, a2, a1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a3, a2, a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1};
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index 3bea6eb3cc..06b2c262eb 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -9,10 +9,12 @@
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
+#include "src/callable.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
+#include "src/mips64/assembler-mips64-inl.h"
#include "src/mips64/macro-assembler-mips64.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
@@ -24,45 +26,93 @@ MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: TurboAssembler(isolate, buffer, size, create_code_object) {}
-void TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
- Register exclusion1, Register exclusion2,
- Register exclusion3) {
+TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
+ CodeObjectRequired create_code_object)
+ : Assembler(isolate, buffer, buffer_size),
+ isolate_(isolate),
+ has_double_zero_reg_set_(false) {
+ if (create_code_object == CodeObjectRequired::kYes) {
+ code_object_ =
+ Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
+ }
+}
+
+int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1,
+ Register exclusion2,
+ Register exclusion3) const {
+ int bytes = 0;
RegList exclusions = 0;
- if (!exclusion1.is(no_reg)) {
+ if (exclusion1 != no_reg) {
exclusions |= exclusion1.bit();
- if (!exclusion2.is(no_reg)) {
+ if (exclusion2 != no_reg) {
exclusions |= exclusion2.bit();
- if (!exclusion3.is(no_reg)) {
+ if (exclusion3 != no_reg) {
exclusions |= exclusion3.bit();
}
}
}
- MultiPush(kJSCallerSaved & ~exclusions);
+ RegList list = kJSCallerSaved & ~exclusions;
+ bytes += NumRegs(list) * kPointerSize;
if (fp_mode == kSaveFPRegs) {
- MultiPushFPU(kCallerSavedFPU);
+ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
+
+ return bytes;
}
-void TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
+ int bytes = 0;
+ RegList exclusions = 0;
+ if (exclusion1 != no_reg) {
+ exclusions |= exclusion1.bit();
+ if (exclusion2 != no_reg) {
+ exclusions |= exclusion2.bit();
+ if (exclusion3 != no_reg) {
+ exclusions |= exclusion3.bit();
+ }
+ }
+ }
+
+ RegList list = kJSCallerSaved & ~exclusions;
+ MultiPush(list);
+ bytes += NumRegs(list) * kPointerSize;
+
+ if (fp_mode == kSaveFPRegs) {
+ MultiPushFPU(kCallerSavedFPU);
+ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
+ }
+
+ return bytes;
+}
+
+int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+ Register exclusion2, Register exclusion3) {
+ int bytes = 0;
if (fp_mode == kSaveFPRegs) {
MultiPopFPU(kCallerSavedFPU);
+ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
RegList exclusions = 0;
- if (!exclusion1.is(no_reg)) {
+ if (exclusion1 != no_reg) {
exclusions |= exclusion1.bit();
- if (!exclusion2.is(no_reg)) {
+ if (exclusion2 != no_reg) {
exclusions |= exclusion2.bit();
- if (!exclusion3.is(no_reg)) {
+ if (exclusion3 != no_reg) {
exclusions |= exclusion3.bit();
}
}
}
- MultiPop(kJSCallerSaved & ~exclusions);
+ RegList list = kJSCallerSaved & ~exclusions;
+ MultiPop(list);
+ bytes += NumRegs(list) * kPointerSize;
+
+ return bytes;
}
void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
@@ -138,16 +188,12 @@ void MacroAssembler::InNewSpace(Register object,
// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
// The register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
-void MacroAssembler::RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register dst,
- RAStatus ra_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check,
- PointersToHereCheck pointers_to_here_check_for_value) {
+void MacroAssembler::RecordWriteField(Register object, int offset,
+ Register value, Register dst,
+ RAStatus ra_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
DCHECK(!AreAliased(value, dst, t8, object));
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
@@ -171,14 +217,8 @@ void MacroAssembler::RecordWriteField(
bind(&ok);
}
- RecordWrite(object,
- dst,
- value,
- ra_status,
- save_fp,
- remembered_set_action,
- OMIT_SMI_CHECK,
- pointers_to_here_check_for_value);
+ RecordWrite(object, dst, value, ra_status, save_fp, remembered_set_action,
+ OMIT_SMI_CHECK);
bind(&done);
@@ -190,99 +230,74 @@ void MacroAssembler::RecordWriteField(
}
}
-
-// Clobbers object, dst, map, and ra, if (ra_status == kRAHasBeenSaved)
-void MacroAssembler::RecordWriteForMap(Register object,
- Register map,
- Register dst,
- RAStatus ra_status,
- SaveFPRegsMode fp_mode) {
- if (emit_debug_code()) {
- DCHECK(!dst.is(at));
- Ld(dst, FieldMemOperand(map, HeapObject::kMapOffset));
- Check(eq,
- kWrongAddressOrValuePassedToRecordWrite,
- dst,
- Operand(isolate()->factory()->meta_map()));
- }
-
- if (!FLAG_incremental_marking) {
- return;
+void TurboAssembler::SaveRegisters(RegList registers) {
+ DCHECK(NumRegs(registers) > 0);
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
}
+ MultiPush(regs);
+}
- if (emit_debug_code()) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- Ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- Check(eq, kWrongAddressOrValuePassedToRecordWrite, map, Operand(scratch));
+void TurboAssembler::RestoreRegisters(RegList registers) {
+ DCHECK(NumRegs(registers) > 0);
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
}
+ MultiPop(regs);
+}
- Label done;
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
+ // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
+ // i.e. we always emit the remembered set and save FP registers in
+ // RecordWriteStub. If a large performance regression is observed, we should
+ // use these values to avoid unnecessary work.
- // A single check of the map's pages interesting flag suffices, since it is
- // only set during incremental collection, and then it's also guaranteed that
- // the from object's page's interesting flag is also set. This optimization
- // relies on the fact that maps can never be in new space.
- CheckPageFlag(map,
- map, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- eq,
- &done);
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
+ RegList registers = callable.descriptor().allocatable_registers();
- Daddu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
- if (emit_debug_code()) {
- Label ok;
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- And(scratch, dst, Operand(kPointerSize - 1));
- Branch(&ok, eq, scratch, Operand(zero_reg));
- stop("Unaligned cell in write barrier");
- bind(&ok);
- }
+ SaveRegisters(registers);
+ Register object_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kObject));
+ Register slot_parameter(
+ callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
+ Register isolate_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kIsolate));
+ Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kRememberedSet));
+ Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kFPMode));
- // Record the actual write.
- if (ra_status == kRAHasNotBeenSaved) {
- push(ra);
- }
- RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
- fp_mode);
- CallStub(&stub);
- if (ra_status == kRAHasNotBeenSaved) {
- pop(ra);
- }
+ Push(object);
+ Push(address);
- bind(&done);
+ Pop(slot_parameter);
+ Pop(object_parameter);
- {
- // Count number of write barriers in generated code.
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- isolate()->counters()->write_barriers_static()->Increment();
- IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1,
- scratch, dst);
- }
+ li(isolate_parameter, Operand(ExternalReference::isolate_address(isolate())));
+ Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
+ Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
+ Call(callable.code(), RelocInfo::CODE_TARGET);
- // Clobber clobbered registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- li(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
- li(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
- }
+ RestoreRegisters(registers);
}
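The Push/Pop pair in the middle of CallRecordWriteStub is a deliberate register shuffle: the descriptor-chosen object_parameter/slot_parameter may alias the incoming object/address in either order, so the values take a round trip through the stack rather than risking a clobbering move:

    Push(object);           // stack: object
    Push(address);          // stack: address, object
    Pop(slot_parameter);    // slot_parameter   <- address
    Pop(object_parameter);  // object_parameter <- object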
-
// Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
// The register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
-void MacroAssembler::RecordWrite(
- Register object,
- Register address,
- Register value,
- RAStatus ra_status,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check,
- PointersToHereCheck pointers_to_here_check_for_value) {
+void MacroAssembler::RecordWrite(Register object, Register address,
+ Register value, RAStatus ra_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
DCHECK(!AreAliased(object, address, value, t8));
DCHECK(!AreAliased(object, address, value, t9));
@@ -308,13 +323,9 @@ void MacroAssembler::RecordWrite(
JumpIfSmi(value, &done);
}
- if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- eq,
- &done);
- }
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
CheckPageFlag(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask,
@@ -325,9 +336,13 @@ void MacroAssembler::RecordWrite(
if (ra_status == kRAHasNotBeenSaved) {
push(ra);
}
+#ifdef V8_CSA_WRITE_BARRIER
+ CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
+#else
RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
fp_mode);
CallStub(&stub);
+#endif
if (ra_status == kRAHasNotBeenSaved) {
pop(ra);
}
@@ -352,10 +367,8 @@ void MacroAssembler::RecordWrite(
}
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
- Register address,
- Register scratch,
- SaveFPRegsMode fp_mode,
- RememberedSetFinalAction and_then) {
+ Register address, Register scratch,
+ SaveFPRegsMode fp_mode) {
Label done;
if (emit_debug_code()) {
Label ok;
@@ -376,21 +389,14 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
// Call stub on end of buffer.
// Check for end of buffer.
And(t8, scratch, Operand(StoreBuffer::kStoreBufferMask));
- DCHECK(!scratch.is(t8));
- if (and_then == kFallThroughAtEnd) {
- Branch(&done, ne, t8, Operand(zero_reg));
- } else {
- DCHECK(and_then == kReturnAtEnd);
- Ret(ne, t8, Operand(zero_reg));
- }
+ DCHECK(scratch != t8);
+ Ret(ne, t8, Operand(zero_reg));
push(ra);
StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
CallStub(&store_buffer_overflow);
pop(ra);
bind(&done);
- if (and_then == kReturnAtEnd) {
- Ret();
- }
+ Ret();
}
@@ -407,7 +413,7 @@ void TurboAssembler::Addu(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
addu(rd, rs, scratch);
}
@@ -424,7 +430,7 @@ void TurboAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
daddu(rd, rs, scratch);
}
@@ -443,7 +449,7 @@ void TurboAssembler::Subu(Register rd, Register rs, const Operand& rt) {
} else {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
if (-rt.immediate() >> 16 == 0 && !MustUseReg(rt.rmode())) {
// Use load -imm and addu when loading -imm generates one instruction.
li(scratch, -rt.immediate());
@@ -465,7 +471,7 @@ void TurboAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
static_cast<int32_t>(
-rt.immediate())); // No dsubiu instr, use daddiu(x, y, -imm).
} else {
- DCHECK(!rs.is(at));
+ DCHECK(rs != at);
int li_count = InstrCountForLi64Bit(rt.immediate());
int li_neg_count = InstrCountForLi64Bit(-rt.immediate());
if (li_neg_count < li_count && !MustUseReg(rt.rmode())) {
@@ -492,7 +498,7 @@ void TurboAssembler::Mul(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
mul(rd, rs, scratch);
}
@@ -510,7 +516,7 @@ void TurboAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
if (kArchVariant != kMips64r6) {
mult(rs, scratch);
@@ -533,7 +539,7 @@ void TurboAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
if (kArchVariant != kMips64r6) {
multu(rs, scratch);
@@ -556,7 +562,7 @@ void TurboAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
if (kArchVariant == kMips64r6) {
dmul(rd, rs, scratch);
@@ -579,7 +585,7 @@ void TurboAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
if (kArchVariant == kMips64r6) {
dmuh(rd, rs, scratch);
@@ -597,7 +603,7 @@ void TurboAssembler::Mult(Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
mult(rs, scratch);
}
@@ -610,7 +616,7 @@ void TurboAssembler::Dmult(Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
dmult(rs, scratch);
}
@@ -623,7 +629,7 @@ void TurboAssembler::Multu(Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
multu(rs, scratch);
}
@@ -636,7 +642,7 @@ void TurboAssembler::Dmultu(Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
dmultu(rs, scratch);
}
@@ -649,7 +655,7 @@ void TurboAssembler::Div(Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
div(rs, scratch);
}
@@ -667,7 +673,7 @@ void TurboAssembler::Div(Register res, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
if (kArchVariant != kMips64r6) {
div(rs, scratch);
@@ -690,7 +696,7 @@ void TurboAssembler::Mod(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
if (kArchVariant != kMips64r6) {
div(rs, scratch);
@@ -713,7 +719,7 @@ void TurboAssembler::Modu(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
if (kArchVariant != kMips64r6) {
divu(rs, scratch);
@@ -731,7 +737,7 @@ void TurboAssembler::Ddiv(Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
ddiv(rs, scratch);
}
@@ -746,7 +752,7 @@ void TurboAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
ddiv(rs, scratch);
mflo(rd);
@@ -758,7 +764,7 @@ void TurboAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
ddiv(rd, rs, scratch);
}
@@ -772,7 +778,7 @@ void TurboAssembler::Divu(Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
divu(rs, scratch);
}
@@ -790,7 +796,7 @@ void TurboAssembler::Divu(Register res, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
if (kArchVariant != kMips64r6) {
divu(rs, scratch);
@@ -808,7 +814,7 @@ void TurboAssembler::Ddivu(Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
ddivu(rs, scratch);
}
@@ -826,7 +832,7 @@ void TurboAssembler::Ddivu(Register res, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
if (kArchVariant != kMips64r6) {
ddivu(rs, scratch);
@@ -846,7 +852,7 @@ void TurboAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
ddiv(rs, scratch);
mfhi(rd);
@@ -858,7 +864,7 @@ void TurboAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
dmod(rd, rs, scratch);
}
@@ -874,7 +880,7 @@ void TurboAssembler::Dmodu(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
ddivu(rs, scratch);
mfhi(rd);
@@ -886,7 +892,7 @@ void TurboAssembler::Dmodu(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
dmodu(rd, rs, scratch);
}
@@ -903,7 +909,7 @@ void TurboAssembler::And(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
and_(rd, rs, scratch);
}
@@ -920,7 +926,7 @@ void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
or_(rd, rs, scratch);
}
@@ -937,7 +943,7 @@ void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
xor_(rd, rs, scratch);
}
@@ -951,7 +957,7 @@ void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
nor(rd, rs, scratch);
}
@@ -971,7 +977,7 @@ void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
slt(rd, rs, scratch);
}
@@ -994,7 +1000,7 @@ void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
li(scratch, rt);
sltu(rd, rs, scratch);
}
@@ -1038,8 +1044,8 @@ void TurboAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
if (kArchVariant == kMips64r6 && sa <= 4) {
lsa(rd, rt, rs, sa - 1);
} else {
- Register tmp = rd.is(rt) ? scratch : rd;
- DCHECK(!tmp.is(rt));
+ Register tmp = rd == rt ? scratch : rd;
+ DCHECK(tmp != rt);
sll(tmp, rs, sa);
Addu(rd, rt, tmp);
}
@@ -1051,8 +1057,8 @@ void TurboAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
if (kArchVariant == kMips64r6 && sa <= 4) {
dlsa(rd, rt, rs, sa - 1);
} else {
- Register tmp = rd.is(rt) ? scratch : rd;
- DCHECK(!tmp.is(rt));
+ Register tmp = rd == rt ? scratch : rd;
+ DCHECK(tmp != rt);
dsll(tmp, rs, sa);
Daddu(rd, rt, tmp);
}
@@ -1128,8 +1134,8 @@ void TurboAssembler::ByteSwapUnsigned(Register dest, Register src,
}
void TurboAssembler::Ulw(Register rd, const MemOperand& rs) {
- DCHECK(!rd.is(at));
- DCHECK(!rs.rm().is(at));
+ DCHECK(rd != at);
+ DCHECK(rs.rm() != at);
if (kArchVariant == kMips64r6) {
Lw(rd, rs);
} else {
@@ -1138,7 +1144,7 @@ void TurboAssembler::Ulw(Register rd, const MemOperand& rs) {
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 3 fits into int16_t.
AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3);
- if (!rd.is(source.rm())) {
+ if (rd != source.rm()) {
lwr(rd, MemOperand(source.rm(), source.offset() + kMipsLwrOffset));
lwl(rd, MemOperand(source.rm(), source.offset() + kMipsLwlOffset));
} else {
@@ -1162,9 +1168,9 @@ void TurboAssembler::Ulwu(Register rd, const MemOperand& rs) {
}
void TurboAssembler::Usw(Register rd, const MemOperand& rs) {
- DCHECK(!rd.is(at));
- DCHECK(!rs.rm().is(at));
- DCHECK(!rd.is(rs.rm()));
+ DCHECK(rd != at);
+ DCHECK(rs.rm() != at);
+ DCHECK(rd != rs.rm());
if (kArchVariant == kMips64r6) {
Sw(rd, rs);
} else {
@@ -1179,8 +1185,8 @@ void TurboAssembler::Usw(Register rd, const MemOperand& rs) {
}
void TurboAssembler::Ulh(Register rd, const MemOperand& rs) {
- DCHECK(!rd.is(at));
- DCHECK(!rs.rm().is(at));
+ DCHECK(rd != at);
+ DCHECK(rs.rm() != at);
if (kArchVariant == kMips64r6) {
Lh(rd, rs);
} else {
@@ -1190,7 +1196,7 @@ void TurboAssembler::Ulh(Register rd, const MemOperand& rs) {
AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- if (source.rm().is(scratch)) {
+ if (source.rm() == scratch) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
Lb(rd, MemOperand(source.rm(), source.offset() + 1));
Lbu(scratch, source);
@@ -1213,8 +1219,8 @@ void TurboAssembler::Ulh(Register rd, const MemOperand& rs) {
}
void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) {
- DCHECK(!rd.is(at));
- DCHECK(!rs.rm().is(at));
+ DCHECK(rd != at);
+ DCHECK(rs.rm() != at);
if (kArchVariant == kMips64r6) {
Lhu(rd, rs);
} else {
@@ -1224,7 +1230,7 @@ void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) {
AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- if (source.rm().is(scratch)) {
+ if (source.rm() == scratch) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
Lbu(rd, MemOperand(source.rm(), source.offset() + 1));
Lbu(scratch, source);
@@ -1247,10 +1253,10 @@ void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) {
}
void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
- DCHECK(!rd.is(at));
- DCHECK(!rs.rm().is(at));
- DCHECK(!rs.rm().is(scratch));
- DCHECK(!scratch.is(at));
+ DCHECK(rd != at);
+ DCHECK(rs.rm() != at);
+ DCHECK(rs.rm() != scratch);
+ DCHECK(scratch != at);
if (kArchVariant == kMips64r6) {
Sh(rd, rs);
} else {
@@ -1259,7 +1265,7 @@ void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
// Adjust offset for two accesses and check if offset + 1 fits into int16_t.
AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
- if (!scratch.is(rd)) {
+ if (scratch != rd) {
mov(scratch, rd);
}
@@ -1276,8 +1282,8 @@ void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
}
void TurboAssembler::Uld(Register rd, const MemOperand& rs) {
- DCHECK(!rd.is(at));
- DCHECK(!rs.rm().is(at));
+ DCHECK(rd != at);
+ DCHECK(rs.rm() != at);
if (kArchVariant == kMips64r6) {
Ld(rd, rs);
} else {
@@ -1286,7 +1292,7 @@ void TurboAssembler::Uld(Register rd, const MemOperand& rs) {
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 7 fits into int16_t.
AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 7);
- if (!rd.is(source.rm())) {
+ if (rd != source.rm()) {
ldr(rd, MemOperand(source.rm(), source.offset() + kMipsLdrOffset));
ldl(rd, MemOperand(source.rm(), source.offset() + kMipsLdlOffset));
} else {
@@ -1312,8 +1318,8 @@ void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
}
void TurboAssembler::Usd(Register rd, const MemOperand& rs) {
- DCHECK(!rd.is(at));
- DCHECK(!rs.rm().is(at));
+ DCHECK(rd != at);
+ DCHECK(rs.rm() != at);
if (kArchVariant == kMips64r6) {
Sd(rd, rs);
} else {
@@ -1360,7 +1366,7 @@ void TurboAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
void TurboAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
- DCHECK(!scratch.is(at));
+ DCHECK(scratch != at);
if (kArchVariant == kMips64r6) {
Ldc1(fd, rs);
} else {
@@ -1372,7 +1378,7 @@ void TurboAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
void TurboAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
- DCHECK(!scratch.is(at));
+ DCHECK(scratch != at);
if (kArchVariant == kMips64r6) {
Sdc1(fd, rs);
} else {
@@ -2084,8 +2090,8 @@ void TurboAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
// Convert rs to an FP value in fd.
- DCHECK(!rs.is(t9));
- DCHECK(!rs.is(at));
+ DCHECK(rs != t9);
+ DCHECK(rs != at);
// Zero extend int32 in rs.
Dext(t9, rs, 0, 32);
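// Host-side model of the sequence above (illustrative, not V8 code): after
// zero-extension, a signed 64-bit to double conversion is exact for every
// uint32 value, so no unsigned-convert instruction is needed.
#include <cstdint>
double CvtDUwModel(uint32_t u) {
  int64_t wide = static_cast<int64_t>(u);  // Dext(t9, rs, 0, 32)
  return static_cast<double>(wide);        // move to FPU + cvt.d.l
}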
@@ -2102,8 +2108,8 @@ void TurboAssembler::Cvt_d_ul(FPURegister fd, FPURegister fs) {
void TurboAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
// Convert rs to an FP value in fd.
- DCHECK(!rs.is(t9));
- DCHECK(!rs.is(at));
+ DCHECK(rs != t9);
+ DCHECK(rs != at);
Label msb_clear, conversion_done;
@@ -2134,8 +2140,8 @@ void TurboAssembler::Cvt_s_uw(FPURegister fd, FPURegister fs) {
void TurboAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
// Convert rs to an FP value in fd.
- DCHECK(!rs.is(t9));
- DCHECK(!rs.is(at));
+ DCHECK(rs != t9);
+ DCHECK(rs != at);
// Zero extend int32 in rs.
Dext(t9, rs, 0, 32);
@@ -2152,8 +2158,8 @@ void TurboAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) {
void TurboAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
// Convert rs to an FP value in fd.
- DCHECK(!rs.is(t9));
- DCHECK(!rs.is(at));
+ DCHECK(rs != t9);
+ DCHECK(rs != at);
Label positive, conversion_done;
@@ -2259,8 +2265,8 @@ void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
void TurboAssembler::Trunc_uw_d(FPURegister fd, Register rs,
FPURegister scratch) {
- DCHECK(!fd.is(scratch));
- DCHECK(!rs.is(at));
+ DCHECK(fd != scratch);
+ DCHECK(rs != at);
{
// Load 2^31 into scratch as its float representation.
@@ -2294,8 +2300,8 @@ void TurboAssembler::Trunc_uw_d(FPURegister fd, Register rs,
void TurboAssembler::Trunc_uw_s(FPURegister fd, Register rs,
FPURegister scratch) {
- DCHECK(!fd.is(scratch));
- DCHECK(!rs.is(at));
+ DCHECK(fd != scratch);
+ DCHECK(rs != at);
{
// Load 2^31 into scratch as its float representation.
@@ -2328,7 +2334,7 @@ void TurboAssembler::Trunc_uw_s(FPURegister fd, Register rs,
void TurboAssembler::Trunc_ul_d(FPURegister fd, Register rs,
FPURegister scratch, Register result) {
- DCHECK(!fd.is(scratch));
+ DCHECK(fd != scratch);
DCHECK(!AreAliased(rs, result, at));
Label simple_convert, done, fail;
@@ -2379,7 +2385,7 @@ void TurboAssembler::Trunc_ul_d(FPURegister fd, Register rs,
void TurboAssembler::Trunc_ul_s(FPURegister fd, Register rs,
FPURegister scratch, Register result) {
- DCHECK(!fd.is(scratch));
+ DCHECK(fd != scratch);
DCHECK(!AreAliased(rs, result, at));
Label simple_convert, done, fail;
@@ -2434,28 +2440,28 @@ void TurboAssembler::Trunc_ul_s(FPURegister fd, Register rs,
void MacroAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
- DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ DCHECK(fr != scratch && fs != scratch && ft != scratch);
mul_s(scratch, fs, ft);
add_s(fd, fr, scratch);
}
void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
- DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ DCHECK(fr != scratch && fs != scratch && ft != scratch);
mul_d(scratch, fs, ft);
add_d(fd, fr, scratch);
}
void MacroAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
- DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ DCHECK(fr != scratch && fs != scratch && ft != scratch);
mul_s(scratch, fs, ft);
sub_s(fd, scratch, fr);
}
void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
- DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ DCHECK(fr != scratch && fs != scratch && ft != scratch);
mul_d(scratch, fs, ft);
sub_d(fd, scratch, fr);
}
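// Note the operand order: each helper multiplies into scratch and then
// adds/subtracts, so Msub computes fs * ft - fr rather than fr - fs * ft.
// Scalar model (illustrative, not V8 code):
double MaddModel(double fr, double fs, double ft) { return fr + fs * ft; }
double MsubModel(double fr, double fs, double ft) { return fs * ft - fr; }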
@@ -2497,7 +2503,7 @@ void TurboAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
// Use kDoubleCompareReg for the comparison result. It has to be
// unavailable to the Lithium register allocator.
- DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
+ DCHECK(cmp1 != kDoubleCompareReg && cmp2 != kDoubleCompareReg);
if (long_branch) {
Label skip;
cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
@@ -2599,7 +2605,7 @@ void TurboAssembler::BranchShortF(SecondaryField sizeField, Label* target,
// Unsigned conditions are treated as their signed counterpart.
// Use kDoubleCompareReg for the comparison result; it is valid in fp64
// (FR = 1) mode.
- DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
+ DCHECK(cmp1 != kDoubleCompareReg && cmp2 != kDoubleCompareReg);
switch (cc) {
case lt:
cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
@@ -2740,7 +2746,7 @@ void TurboAssembler::BranchShortMSA(MSABranchDF df, Label* target,
void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(!src_low.is(scratch));
+ DCHECK(src_low != scratch);
mfhc1(scratch, dst);
mtc1(src_low, dst);
mthc1(scratch, dst);
@@ -2783,7 +2789,7 @@ void TurboAssembler::Move(FPURegister dst, double imm) {
} else {
mthc1(zero_reg, dst);
}
- if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true;
+ if (dst == kDoubleRegZero) has_double_zero_reg_set_ = true;
}
}
@@ -2826,9 +2832,9 @@ void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
DoubleRegister double_scratch,
Register except_flag,
CheckForInexactConversion check_inexact) {
- DCHECK(!result.is(scratch));
- DCHECK(!double_input.is(double_scratch));
- DCHECK(!except_flag.is(scratch));
+ DCHECK(result != scratch);
+ DCHECK(double_input != double_scratch);
+ DCHECK(except_flag != scratch);
Label done;
@@ -2929,9 +2935,9 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
// Emulated conditional branches do not emit a nop in the branch delay slot.
//
// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
-#define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK( \
- (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
- (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
+#define BRANCH_ARGS_CHECK(cond, rs, rt) \
+ DCHECK((cond == cc_always && rs == zero_reg && rt.rm() == zero_reg) || \
+ (cond != cc_always && (rs != zero_reg || rt.rm() != zero_reg)))
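// Usage sketch of the invariant (assumed call sites, not from this diff):
//   Branch(&skip);                                   // cc_always with the
//                                                    // (zero_reg, zero_reg) default
//   Branch(&skip, eq, a0, Operand(zero_reg));        // real condition: OK
//   Branch(&skip, eq, zero_reg, Operand(zero_reg));  // trips the DCHECK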
void TurboAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
DCHECK(kArchVariant == kMips64r6 ? is_int26(offset) : is_int16(offset));
@@ -3038,7 +3044,7 @@ void TurboAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
static inline bool IsZero(const Operand& rt) {
if (rt.is_reg()) {
- return rt.rm().is(zero_reg);
+ return rt.rm() == zero_reg;
} else {
return rt.immediate() == 0;
}
@@ -3099,7 +3105,7 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
bc(offset);
break;
case eq:
- if (rs.code() == rt.rm().reg_code) {
+ if (rt.is_reg() && rs.code() == rt.rm().code()) {
// Pre R6 beq is used here to make the code patchable. Otherwise bc
// should be used which has no condition field so is not patchable.
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
@@ -3117,7 +3123,7 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
}
break;
case ne:
- if (rs.code() == rt.rm().reg_code) {
+ if (rt.is_reg() && rs.code() == rt.rm().code()) {
// Pre R6 bne is used here to make the code patchable. Otherwise we
// should not generate any instruction.
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
@@ -3138,9 +3144,9 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
// Signed comparison.
case greater:
// rs > rt
- if (rs.code() == rt.rm().reg_code) {
+ if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
- } else if (rs.is(zero_reg)) {
+ } else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
bltzc(scratch, offset);
@@ -3150,16 +3156,16 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
} else {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
bltc(scratch, rs, offset);
}
break;
case greater_equal:
// rs >= rt
- if (rs.code() == rt.rm().reg_code) {
+ if (rt.is_reg() && rs.code() == rt.rm().code()) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
bc(offset);
- } else if (rs.is(zero_reg)) {
+ } else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
blezc(scratch, offset);
@@ -3169,15 +3175,15 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
} else {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
bgec(rs, scratch, offset);
}
break;
case less:
// rs < rt
- if (rs.code() == rt.rm().reg_code) {
+ if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
- } else if (rs.is(zero_reg)) {
+ } else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
bgtzc(scratch, offset);
@@ -3187,16 +3193,16 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
} else {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
bltc(rs, scratch, offset);
}
break;
case less_equal:
// rs <= rt
- if (rs.code() == rt.rm().reg_code) {
+ if (rt.is_reg() && rs.code() == rt.rm().code()) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
bc(offset);
- } else if (rs.is(zero_reg)) {
+ } else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
bgezc(scratch, offset);
@@ -3206,7 +3212,7 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
} else {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
bgec(scratch, rs, offset);
}
break;
@@ -3214,9 +3220,9 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
// Unsigned comparison.
case Ugreater:
// rs > rt
- if (rs.code() == rt.rm().reg_code) {
+ if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
- } else if (rs.is(zero_reg)) {
+ } else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
return false;
bnezc(scratch, offset);
@@ -3226,16 +3232,16 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
} else {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
bltuc(scratch, rs, offset);
}
break;
case Ugreater_equal:
// rs >= rt
- if (rs.code() == rt.rm().reg_code) {
+ if (rt.is_reg() && rs.code() == rt.rm().code()) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
bc(offset);
- } else if (rs.is(zero_reg)) {
+ } else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
return false;
beqzc(scratch, offset);
@@ -3245,15 +3251,15 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
} else {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
bgeuc(rs, scratch, offset);
}
break;
case Uless:
// rs < rt
- if (rs.code() == rt.rm().reg_code) {
+ if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
- } else if (rs.is(zero_reg)) {
+ } else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
return false;
bnezc(scratch, offset);
@@ -3262,16 +3268,16 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
} else {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
bltuc(rs, scratch, offset);
}
break;
case Uless_equal:
// rs <= rt
- if (rs.code() == rt.rm().reg_code) {
+ if (rt.is_reg() && rs.code() == rt.rm().code()) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
bc(offset);
- } else if (rs.is(zero_reg)) {
+ } else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset26, scratch, rt))
return false;
bc(offset);
@@ -3281,7 +3287,7 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
} else {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
- DCHECK(!rs.is(scratch));
+ DCHECK(rs != scratch);
bgeuc(scratch, rs, offset);
}
break;
@@ -3581,9 +3587,9 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
// Signed comparison.
case greater:
// rs > rt
- if (rs.code() == rt.rm().reg_code) {
+ if (rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
- } else if (rs.is(zero_reg)) {
+ } else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
bltzalc(scratch, offset);
@@ -3599,10 +3605,10 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
break;
case greater_equal:
// rs >= rt
- if (rs.code() == rt.rm().reg_code) {
+ if (rs.code() == rt.rm().code()) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
balc(offset);
- } else if (rs.is(zero_reg)) {
+ } else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
blezalc(scratch, offset);
@@ -3618,9 +3624,9 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
break;
case less:
// rs < rt
- if (rs.code() == rt.rm().reg_code) {
+ if (rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
- } else if (rs.is(zero_reg)) {
+ } else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
bgtzalc(scratch, offset);
@@ -3636,10 +3642,10 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
break;
case less_equal:
// rs <= rt
- if (rs.code() == rt.rm().reg_code) {
+ if (rs.code() == rt.rm().code()) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
balc(offset);
- } else if (rs.is(zero_reg)) {
+ } else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
bgezalc(scratch, offset);
@@ -3907,8 +3913,8 @@ void TurboAssembler::Call(Register target, Condition cond, Register rs,
}
#ifdef DEBUG
- CHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
- SizeOfCodeGeneratedSince(&start));
+ DCHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
+ SizeOfCodeGeneratedSince(&start));
#endif
}
@@ -4038,7 +4044,7 @@ void TurboAssembler::Drop(int count, Condition cond, Register reg,
void MacroAssembler::Swap(Register reg1,
Register reg2,
Register scratch) {
- if (scratch.is(no_reg)) {
+ if (scratch == no_reg) {
Xor(reg1, reg1, Operand(reg2));
Xor(reg2, reg2, Operand(reg1));
Xor(reg1, reg1, Operand(reg2));
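// The scratch-free path is the classic three-XOR swap; a plain C++ sketch
// (assumes the two operands are distinct objects, as reg1 and reg2 are here):
#include <cstdint>
void XorSwap(uint64_t& a, uint64_t& b) {
  a ^= b;  // a = a0 ^ b0
  b ^= a;  // b = b0 ^ a0 ^ b0 = a0
  a ^= b;  // a = a0 ^ b0 ^ a0 = b0
}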
@@ -4107,87 +4113,6 @@ void MacroAssembler::PopStackHandler() {
}
-void MacroAssembler::Allocate(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
- DCHECK(object_size <= kMaxRegularHeapObjectSize);
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- li(result, 0x7091);
- li(scratch1, 0x7191);
- li(scratch2, 0x7291);
- }
- jmp(gc_required);
- return;
- }
-
- DCHECK(!AreAliased(result, scratch1, scratch2, t9, at));
-
- // Make object size into bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- object_size *= kPointerSize;
- }
- DCHECK(0 == (object_size & kObjectAlignmentMask));
-
- // Check relative positions of allocation top and limit addresses.
- // ARM adds additional checks to make sure the ldm instruction can be
- // used. On MIPS we don't have ldm so we don't need additional checks either.
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
- ExternalReference allocation_limit =
- AllocationUtils::GetAllocationLimitReference(isolate(), flags);
-
- intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
- intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
- DCHECK((limit - top) == kPointerSize);
-
- // Set up allocation top address and allocation limit registers.
- Register top_address = scratch1;
- // This code stores a temporary value in t9.
- Register alloc_limit = t9;
- Register result_end = scratch2;
- li(top_address, Operand(allocation_top));
-
- if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into alloc_limit.
- Ld(result, MemOperand(top_address));
- Ld(alloc_limit, MemOperand(top_address, kPointerSize));
- } else {
- if (emit_debug_code()) {
- // Assert that result actually contains top on entry.
- Ld(alloc_limit, MemOperand(top_address));
- Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
- }
- // Load allocation limit. Result already contains allocation top.
- Ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
- }
-
- // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
- // the same alignment on ARM64.
- STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
-
- if (emit_debug_code()) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- And(scratch, result, Operand(kDoubleAlignmentMask));
- Check(eq, kAllocationIsNotDoubleAligned, scratch, Operand(zero_reg));
- }
-
- // Calculate new top and bail out if new space is exhausted. Use result
- // to calculate the new top.
- Daddu(result_end, result, Operand(object_size));
- Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
-
- Sd(result_end, MemOperand(top_address));
-
- // Tag object.
- Daddu(result, result, Operand(kHeapObjectTag));
-}
-
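// The deleted block above was a bump-pointer allocator; its control flow as
// a host-side sketch (illustrative, not V8 code):
#include <cstddef>
#include <cstdint>
void* BumpAllocate(uintptr_t& top, uintptr_t limit, size_t object_size) {
  uintptr_t result_end = top + object_size;  // Daddu(result_end, result, size)
  if (result_end > limit) return nullptr;    // Branch(gc_required, Ugreater, ...)
  void* result = reinterpret_cast<void*>(top);
  top = result_end;                          // Sd(result_end, top_address)
  return result;  // V8 then tags the pointer by adding kHeapObjectTag
}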
void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
Label* not_unique_name) {
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
@@ -4203,78 +4128,6 @@ void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
bind(&succeed);
}
-void MacroAssembler::AllocateJSValue(Register result, Register constructor,
- Register value, Register scratch1,
- Register scratch2, Label* gc_required) {
- DCHECK(!result.is(constructor));
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!result.is(value));
-
- // Allocate JSValue in new space.
- Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Initialize the JSValue.
- LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
- Sd(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
- LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
- Sd(scratch1, FieldMemOperand(result, JSObject::kPropertiesOrHashOffset));
- Sd(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
- Sd(value, FieldMemOperand(result, JSValue::kValueOffset));
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
-}
-
-void MacroAssembler::CompareMapAndBranch(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* early_success,
- Condition cond,
- Label* branch_to) {
- Ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
-}
-
-
-void MacroAssembler::CompareMapAndBranch(Register obj_map,
- Handle<Map> map,
- Label* early_success,
- Condition cond,
- Label* branch_to) {
- Branch(branch_to, cond, obj_map, Operand(map));
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
- Label success;
- CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
- bind(&success);
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Register scratch,
- Heap::RootListIndex index,
- Label* fail,
- SmiCheckType smi_check_type) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
- UseScratchRegisterScope temps(this);
- Register scratch1 = temps.Acquire();
- Ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- LoadRoot(scratch1, index);
- Branch(fail, ne, scratch, Operand(scratch1));
-}
-
-
void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
li(value, Operand(cell));
Ld(value, FieldMemOperand(value, WeakCell::kValueOffset));
@@ -4343,8 +4196,8 @@ void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
DoubleRegister src2) {
if (!IsMipsSoftFloatABI) {
const DoubleRegister fparg2 = f13;
- if (src2.is(f12)) {
- DCHECK(!src1.is(fparg2));
+ if (src2 == f12) {
+ DCHECK(src1 != fparg2);
Move(fparg2, src2);
Move(f12, src1);
} else {
@@ -4442,8 +4295,8 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// The code below is made a lot easier because the calling code already sets
// up actual and expected registers according to the contract if values are
// passed in registers.
- DCHECK(actual.is_immediate() || actual.reg().is(a0));
- DCHECK(expected.is_immediate() || expected.reg().is(a2));
+ DCHECK(actual.is_immediate() || actual.reg() == a0);
+ DCHECK(expected.is_immediate() || expected.reg() == a2);
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
@@ -4532,8 +4385,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
InvokeFlag flag) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- DCHECK(function.is(a1));
- DCHECK_IMPLIES(new_target.is_valid(), new_target.is(a3));
+ DCHECK(function == a1);
+ DCHECK_IMPLIES(new_target.is_valid(), new_target == a3);
// On function call, call into the debugger if necessary.
CheckDebugHook(function, new_target, expected, actual);
@@ -4573,7 +4426,7 @@ void MacroAssembler::InvokeFunction(Register function, Register new_target,
DCHECK(flag == JUMP_FUNCTION || has_frame());
// Contract with called JS functions requires that the function is passed in a1.
- DCHECK(function.is(a1));
+ DCHECK(function == a1);
Register expected_reg = a2;
Register temp_reg = t0;
Ld(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
@@ -4595,7 +4448,7 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK(flag == JUMP_FUNCTION || has_frame());
// Contract with called JS functions requires that the function is passed in a1.
- DCHECK(function.is(a1));
+ DCHECK(function == a1);
// Get the function and setup the context.
Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
@@ -4694,12 +4547,12 @@ void TurboAssembler::DaddBranchOvf(Register dst, Register left,
scratch);
} else {
Register overflow_dst = t9;
- DCHECK(!dst.is(scratch));
- DCHECK(!dst.is(overflow_dst));
- DCHECK(!scratch.is(overflow_dst));
- DCHECK(!left.is(overflow_dst));
+ DCHECK(dst != scratch);
+ DCHECK(dst != overflow_dst);
+ DCHECK(scratch != overflow_dst);
+ DCHECK(left != overflow_dst);
li(overflow_dst, right); // Load right.
- if (dst.is(left)) {
+ if (dst == left) {
mov(scratch, left); // Preserve left.
Daddu(dst, left, overflow_dst); // Left is overwritten.
xor_(scratch, dst, scratch); // Original left.
@@ -4719,26 +4572,26 @@ void TurboAssembler::DaddBranchOvf(Register dst, Register left, Register right,
Label* overflow_label,
Label* no_overflow_label, Register scratch) {
Register overflow_dst = t9;
- DCHECK(!dst.is(scratch));
- DCHECK(!dst.is(overflow_dst));
- DCHECK(!scratch.is(overflow_dst));
- DCHECK(!left.is(overflow_dst));
- DCHECK(!right.is(overflow_dst));
- DCHECK(!left.is(scratch));
- DCHECK(!right.is(scratch));
-
- if (left.is(right) && dst.is(left)) {
+ DCHECK(dst != scratch);
+ DCHECK(dst != overflow_dst);
+ DCHECK(scratch != overflow_dst);
+ DCHECK(left != overflow_dst);
+ DCHECK(right != overflow_dst);
+ DCHECK(left != scratch);
+ DCHECK(right != scratch);
+
+ if (left == right && dst == left) {
mov(overflow_dst, right);
right = overflow_dst;
}
- if (dst.is(left)) {
+ if (dst == left) {
mov(scratch, left); // Preserve left.
daddu(dst, left, right); // Left is overwritten.
xor_(scratch, dst, scratch); // Original left.
xor_(overflow_dst, dst, right);
and_(overflow_dst, overflow_dst, scratch);
- } else if (dst.is(right)) {
+ } else if (dst == right) {
mov(scratch, right); // Preserve right.
daddu(dst, left, right); // Right is overwritten.
xor_(scratch, dst, scratch); // Original right.
@@ -4762,13 +4615,13 @@ void TurboAssembler::DsubBranchOvf(Register dst, Register left,
scratch);
} else {
Register overflow_dst = t9;
- DCHECK(!dst.is(scratch));
- DCHECK(!dst.is(overflow_dst));
- DCHECK(!scratch.is(overflow_dst));
- DCHECK(!left.is(overflow_dst));
- DCHECK(!left.is(scratch));
+ DCHECK(dst != scratch);
+ DCHECK(dst != overflow_dst);
+ DCHECK(scratch != overflow_dst);
+ DCHECK(left != overflow_dst);
+ DCHECK(left != scratch);
li(overflow_dst, right); // Load right.
- if (dst.is(left)) {
+ if (dst == left) {
mov(scratch, left); // Preserve left.
Dsubu(dst, left, overflow_dst); // Left is overwritten.
xor_(overflow_dst, scratch, overflow_dst); // scratch is original left.
@@ -4789,30 +4642,30 @@ void TurboAssembler::DsubBranchOvf(Register dst, Register left, Register right,
Label* no_overflow_label, Register scratch) {
DCHECK(overflow_label || no_overflow_label);
Register overflow_dst = t9;
- DCHECK(!dst.is(scratch));
- DCHECK(!dst.is(overflow_dst));
- DCHECK(!scratch.is(overflow_dst));
- DCHECK(!overflow_dst.is(left));
- DCHECK(!overflow_dst.is(right));
- DCHECK(!scratch.is(left));
- DCHECK(!scratch.is(right));
+ DCHECK(dst != scratch);
+ DCHECK(dst != overflow_dst);
+ DCHECK(scratch != overflow_dst);
+ DCHECK(overflow_dst != left);
+ DCHECK(overflow_dst != right);
+ DCHECK(scratch != left);
+ DCHECK(scratch != right);
// This happens with some Crankshaft code. Since Dsubu works fine if
// left == right, let's not make that restriction here.
- if (left.is(right)) {
+ if (left == right) {
mov(dst, zero_reg);
if (no_overflow_label) {
Branch(no_overflow_label);
}
}
- if (dst.is(left)) {
+ if (dst == left) {
mov(scratch, left); // Preserve left.
dsubu(dst, left, right); // Left is overwritten.
xor_(overflow_dst, dst, scratch); // scratch is original left.
xor_(scratch, scratch, right); // scratch is original left.
and_(overflow_dst, scratch, overflow_dst);
- } else if (dst.is(right)) {
+ } else if (dst == right) {
mov(scratch, right); // Preserve right.
dsubu(dst, left, right); // Right is overwritten.
xor_(overflow_dst, dst, left);
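// The xor/and sequences compute signed-overflow flags without branches; the
// same sign-bit algebra in plain C++ (illustrative, not V8 code):
#include <cstdint>
bool DaddOverflows(int64_t l, int64_t r) {
  uint64_t u = static_cast<uint64_t>(l) + static_cast<uint64_t>(r);
  int64_t d = static_cast<int64_t>(u);
  return ((d ^ l) & (d ^ r)) < 0;  // result sign differs from both inputs
}
bool DsubOverflows(int64_t l, int64_t r) {
  uint64_t u = static_cast<uint64_t>(l) - static_cast<uint64_t>(r);
  int64_t d = static_cast<int64_t>(u);
  return ((d ^ l) & (l ^ r)) < 0;  // inputs differ; result took wrong sign
}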
@@ -4850,13 +4703,13 @@ void TurboAssembler::MulBranchOvf(Register dst, Register left,
scratch);
} else {
Register overflow_dst = t9;
- DCHECK(!dst.is(scratch));
- DCHECK(!dst.is(overflow_dst));
- DCHECK(!scratch.is(overflow_dst));
- DCHECK(!left.is(overflow_dst));
- DCHECK(!left.is(scratch));
+ DCHECK(dst != scratch);
+ DCHECK(dst != overflow_dst);
+ DCHECK(scratch != overflow_dst);
+ DCHECK(left != overflow_dst);
+ DCHECK(left != scratch);
- if (dst.is(left)) {
+ if (dst == left) {
Mul(scratch, left, static_cast<int32_t>(right.immediate()));
Mulh(overflow_dst, left, static_cast<int32_t>(right.immediate()));
mov(dst, scratch);
@@ -4877,15 +4730,15 @@ void TurboAssembler::MulBranchOvf(Register dst, Register left, Register right,
Label* no_overflow_label, Register scratch) {
DCHECK(overflow_label || no_overflow_label);
Register overflow_dst = t9;
- DCHECK(!dst.is(scratch));
- DCHECK(!dst.is(overflow_dst));
- DCHECK(!scratch.is(overflow_dst));
- DCHECK(!overflow_dst.is(left));
- DCHECK(!overflow_dst.is(right));
- DCHECK(!scratch.is(left));
- DCHECK(!scratch.is(right));
-
- if (dst.is(left) || dst.is(right)) {
+ DCHECK(dst != scratch);
+ DCHECK(dst != overflow_dst);
+ DCHECK(scratch != overflow_dst);
+ DCHECK(overflow_dst != left);
+ DCHECK(overflow_dst != right);
+ DCHECK(scratch != left);
+ DCHECK(scratch != right);
+
+ if (dst == left || dst == right) {
Mul(scratch, left, right);
Mulh(overflow_dst, left, right);
mov(dst, scratch);
@@ -5047,21 +4900,6 @@ void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
}
-void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
- Register map,
- Register scratch) {
- // Load the initial map. The global functions all have initial maps.
- Ld(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (emit_debug_code()) {
- Label ok, fail;
- CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
- Branch(&ok);
- bind(&fail);
- Abort(kGlobalFunctionsMustHaveInitialMap);
- bind(&ok);
- }
-}
-
void TurboAssembler::StubPrologue(StackFrame::Type type) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -5170,7 +5008,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
if (save_doubles) {
// The stack is already aligned to 0 modulo 8 for stores with sdc1.
- int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
+ int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2;
int space = kNumOfSavedRegisters * kDoubleSize;
Dsubu(sp, sp, Operand(space));
// Remember: we only need to save every 2nd double FPU value.
@@ -5205,7 +5043,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
// Optionally restore all double registers.
if (save_doubles) {
// Remember: we only need to restore every 2nd double FPU value.
- int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
+ int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2;
Dsubu(t8, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp +
kNumOfSavedRegisters * kDoubleSize));
for (int i = 0; i < kNumOfSavedRegisters; i++) {
@@ -5301,7 +5139,7 @@ void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) {
void MacroAssembler::UntagAndJumpIfSmi(Register dst,
Register src,
Label* smi_case) {
- // DCHECK(!dst.is(src));
+ // DCHECK(dst != src);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
JumpIfSmi(src, smi_case, scratch, USE_DELAY_SLOT);
@@ -5430,24 +5268,9 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
-void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
- Register first, Register second, Register scratch1, Register scratch2,
- Label* failure) {
- // Test that both first and second are sequential one-byte strings.
- // Assume that they are non-smis.
- Ld(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
- Ld(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
- Lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- Lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
-
- JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
- scratch2, failure);
-}
-
-
void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
- if (src1.is(src2)) {
+ if (src1 == src2) {
Move_s(dst, src1);
return;
}
@@ -5470,13 +5293,13 @@ void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
Branch(&return_right);
bind(&return_right);
- if (!src2.is(dst)) {
+ if (src2 != dst) {
Move_s(dst, src2);
}
Branch(&done);
bind(&return_left);
- if (!src1.is(dst)) {
+ if (src1 != dst) {
Move_s(dst, src1);
}
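// Scalar model of the in-line Float32Max path (a sketch, not V8 code): NaN
// inputs are what the out-of-line path handles, and for two equal zeros +0
// must win over -0.
#include <cmath>
#include <limits>
float Float32MaxModel(float a, float b) {
  if (std::isnan(a) || std::isnan(b))
    return std::numeric_limits<float>::quiet_NaN();  // out_of_line result
  if (a == 0.f && b == 0.f) return std::signbit(a) ? b : a;  // prefer +0
  return a > b ? a : b;
}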
@@ -5491,7 +5314,7 @@ void TurboAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
- if (src1.is(src2)) {
+ if (src1 == src2) {
Move_s(dst, src1);
return;
}
@@ -5514,13 +5337,13 @@ void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
Branch(&return_left);
bind(&return_right);
- if (!src2.is(dst)) {
+ if (src2 != dst) {
Move_s(dst, src2);
}
Branch(&done);
bind(&return_left);
- if (!src1.is(dst)) {
+ if (src1 != dst) {
Move_s(dst, src1);
}
@@ -5535,7 +5358,7 @@ void TurboAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
- if (src1.is(src2)) {
+ if (src1 == src2) {
Move_d(dst, src1);
return;
}
@@ -5557,13 +5380,13 @@ void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1,
Branch(&return_right);
bind(&return_right);
- if (!src2.is(dst)) {
+ if (src2 != dst) {
Move_d(dst, src2);
}
Branch(&done);
bind(&return_left);
- if (!src1.is(dst)) {
+ if (src1 != dst) {
Move_d(dst, src1);
}
@@ -5578,7 +5401,7 @@ void TurboAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1,
void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
- if (src1.is(src2)) {
+ if (src1 == src2) {
Move_d(dst, src1);
return;
}
@@ -5600,13 +5423,13 @@ void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1,
Branch(&return_left);
bind(&return_right);
- if (!src2.is(dst)) {
+ if (src2 != dst) {
Move_d(dst, src2);
}
Branch(&done);
bind(&return_left);
- if (!src1.is(dst)) {
+ if (src1 != dst) {
Move_d(dst, src1);
}
@@ -5619,20 +5442,6 @@ void TurboAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1,
add_d(dst, src1, src2);
}
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
- Register first, Register second, Register scratch1, Register scratch2,
- Label* failure) {
- const int kFlatOneByteStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatOneByteStringTag =
- kStringTag | kOneByteStringTag | kSeqStringTag;
- DCHECK(kFlatOneByteStringTag <= 0xffff); // Ensure this fits 16-bit immed.
- andi(scratch1, first, kFlatOneByteStringMask);
- Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
- andi(scratch2, second, kFlatOneByteStringMask);
- Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
-}
-
static const int kRegisterPassedArguments = 8;
int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
@@ -5738,7 +5547,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// allow preemption, so the return address in the link register
// stays correct.
- if (!function.is(t9)) {
+ if (function != t9) {
mov(t9, function);
function = t9;
}
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index 92bed703e1..1f1bb4bdb0 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -13,20 +13,20 @@ namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
-const Register kReturnRegister0 = {Register::kCode_v0};
-const Register kReturnRegister1 = {Register::kCode_v1};
-const Register kReturnRegister2 = {Register::kCode_a0};
-const Register kJSFunctionRegister = {Register::kCode_a1};
-const Register kContextRegister = {Register::kCpRegister};
-const Register kAllocateSizeRegister = {Register::kCode_a0};
-const Register kInterpreterAccumulatorRegister = {Register::kCode_v0};
-const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_t0};
-const Register kInterpreterBytecodeArrayRegister = {Register::kCode_t1};
-const Register kInterpreterDispatchTableRegister = {Register::kCode_t2};
-const Register kJavaScriptCallArgCountRegister = {Register::kCode_a0};
-const Register kJavaScriptCallNewTargetRegister = {Register::kCode_a3};
-const Register kRuntimeCallFunctionRegister = {Register::kCode_a1};
-const Register kRuntimeCallArgCountRegister = {Register::kCode_a0};
+constexpr Register kReturnRegister0 = v0;
+constexpr Register kReturnRegister1 = v1;
+constexpr Register kReturnRegister2 = a0;
+constexpr Register kJSFunctionRegister = a1;
+constexpr Register kContextRegister = s7;
+constexpr Register kAllocateSizeRegister = a0;
+constexpr Register kInterpreterAccumulatorRegister = v0;
+constexpr Register kInterpreterBytecodeOffsetRegister = t0;
+constexpr Register kInterpreterBytecodeArrayRegister = t1;
+constexpr Register kInterpreterDispatchTableRegister = t2;
+constexpr Register kJavaScriptCallArgCountRegister = a0;
+constexpr Register kJavaScriptCallNewTargetRegister = a3;
+constexpr Register kRuntimeCallFunctionRegister = a1;
+constexpr Register kRuntimeCallArgCountRegister = a0;
// Forward declaration.
class JumpTarget;
@@ -84,10 +84,6 @@ enum LiFlags {
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-enum PointersToHereCheck {
- kPointersToHereMaybeInteresting,
- kPointersToHereAreAlwaysInteresting
-};
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
Register GetRegisterThatIsNotOneOf(Register reg1,
@@ -157,15 +153,7 @@ inline MemOperand CFunctionArgumentOperand(int index) {
class TurboAssembler : public Assembler {
public:
TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
- CodeObjectRequired create_code_object)
- : Assembler(isolate, buffer, buffer_size),
- isolate_(isolate),
- has_double_zero_reg_set_(false) {
- if (create_code_object == CodeObjectRequired::kYes) {
- code_object_ =
- Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
- }
- }
+ CodeObjectRequired create_code_object);
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() const { return has_frame_; }
@@ -398,18 +386,36 @@ class TurboAssembler : public Assembler {
Sd(src, MemOperand(sp, 0));
}
+ void SaveRegisters(RegList registers);
+ void RestoreRegisters(RegList registers);
+
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode);
+
// Push multiple registers on the stack.
// Registers are saved in numerical order, with higher numbered registers
// saved in higher memory addresses.
void MultiPush(RegList regs);
void MultiPushFPU(RegList regs);
- void PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
- Register exclusion2 = no_reg,
- Register exclusion3 = no_reg);
- void PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ // Calculate how much stack space (in bytes) is required to store caller
+ // registers excluding those specified in the arguments.
+ int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg) const;
+
+ // Push caller saved registers on the stack, and return the number of bytes
+ // the stack pointer is adjusted by.
+ int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
+ // Restore caller saved registers from the stack, and return the number of
+ // bytes the stack pointer is adjusted by.
+ int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
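// Usage sketch for the new return values (assumed call site, not from this
// diff): the byte count keeps stack-offset bookkeeping exact across a call.
//   int bytes = PushCallerSaved(kSaveFPRegs, a0);  // a0 stays untouched
//   // ... code that may clobber caller-saved registers ...
//   bytes -= PopCallerSaved(kSaveFPRegs, a0);
//   DCHECK_EQ(0, bytes);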
void pop(Register dst) {
Ld(dst, MemOperand(sp, 0));
@@ -419,7 +425,7 @@ class TurboAssembler : public Assembler {
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2) {
- DCHECK(!src1.is(src2));
+ DCHECK(src1 != src2);
Ld(src2, MemOperand(sp, 0 * kPointerSize));
Ld(src1, MemOperand(sp, 1 * kPointerSize));
Daddu(sp, sp, 2 * kPointerSize);
@@ -666,7 +672,7 @@ class TurboAssembler : public Assembler {
// handled in out-of-line code. The specific behaviour depends on supported
// instructions.
//
- // These functions assume (and assert) that !src1.is(src2). It is permitted
+ // These functions assume (and assert) that src1 != src2. It is permitted
// for the result to alias either input register.
void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2,
Label* out_of_line);
@@ -691,7 +697,7 @@ class TurboAssembler : public Assembler {
inline void Move(Register dst, Smi* smi) { li(dst, Operand(smi)); }
inline void Move(Register dst, Register src) {
- if (!dst.is(src)) {
+ if (dst != src) {
mov(dst, src);
}
}
@@ -727,13 +733,13 @@ class TurboAssembler : public Assembler {
}
inline void Move_d(FPURegister dst, FPURegister src) {
- if (!dst.is(src)) {
+ if (dst != src) {
mov_d(dst, src);
}
}
inline void Move_s(FPURegister dst, FPURegister src) {
- if (!dst.is(src)) {
+ if (dst != src) {
mov_s(dst, src);
}
}
@@ -983,20 +989,12 @@ class MacroAssembler : public TurboAssembler {
Register value,
Register address);
- enum RememberedSetFinalAction {
- kReturnAtEnd,
- kFallThroughAtEnd
- };
-
-
// Record in the remembered set the fact that we have a pointer to new space
// at the address pointed to by the addr register. Only works if addr is not
// in new space.
void RememberedSetHelper(Register object, // Used for debug code.
- Register addr,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetFinalAction and_then);
+ Register addr, Register scratch,
+ SaveFPRegsMode save_fp);
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but it will be clobbered.
@@ -1038,47 +1036,10 @@ class MacroAssembler : public TurboAssembler {
// The offset is the offset from the start of the object, not the offset from
// the tagged HeapObject pointer. For use with FieldOperand(reg, off).
void RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register scratch,
- RAStatus ra_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting);
-
- // As above, but the offset has the tag presubtracted. For use with
- // MemOperand(reg, off).
- inline void RecordWriteContextSlot(
- Register context,
- int offset,
- Register value,
- Register scratch,
- RAStatus ra_status,
- SaveFPRegsMode save_fp,
+ Register object, int offset, Register value, Register scratch,
+ RAStatus ra_status, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting) {
- RecordWriteField(context,
- offset + kHeapObjectTag,
- value,
- scratch,
- ra_status,
- save_fp,
- remembered_set_action,
- smi_check,
- pointers_to_here_check_for_value);
- }
-
- void RecordWriteForMap(
- Register object,
- Register map,
- Register dst,
- RAStatus ra_status,
- SaveFPRegsMode save_fp);
+ SmiCheck smi_check = INLINE_SMI_CHECK);
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
@@ -1087,28 +1048,7 @@ class MacroAssembler : public TurboAssembler {
Register object, Register address, Register value, RAStatus ra_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting);
-
- // ---------------------------------------------------------------------------
- // Allocation support.
-
- // Allocate an object in new space or old space. The object_size is
- // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
- // is passed. If the space is exhausted control continues at the gc_required
- // label. The allocated object is returned in result. If the flag
- // tag_allocated_object is true the result is tagged as a heap object.
- // All registers are clobbered also when control continues at the gc_required
- // label.
- void Allocate(int object_size, Register result, Register scratch1,
- Register scratch2, Label* gc_required, AllocationFlags flags);
-
- // Allocate and initialize a JSValue wrapper with the specified {constructor}
- // and {value}.
- void AllocateJSValue(Register result, Register constructor, Register value,
- Register scratch1, Register scratch2,
- Label* gc_required);
+ SmiCheck smi_check = INLINE_SMI_CHECK);
void Pref(int32_t hint, const MemOperand& rs);
@@ -1174,11 +1114,6 @@ class MacroAssembler : public TurboAssembler {
// Make sure the stack is aligned. Only emits code in debug mode.
void AssertStackIsAligned();
- // Load the global object from the current context.
- void LoadGlobalObject(Register dst) {
- LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
- }
-
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst) {
LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
@@ -1241,47 +1176,6 @@ class MacroAssembler : public TurboAssembler {
Register map,
Register type_reg);
- void GetInstanceType(Register object_map, Register object_instance_type) {
- Lbu(object_instance_type,
- FieldMemOperand(object_map, Map::kInstanceTypeOffset));
- }
-
- // Compare an object's map with the specified map and its transitioned
- // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
- // "branch_to" if the result of the comparison is "cond". If multiple map
- // compares are required, the compare sequences branches to early_success.
- void CompareMapAndBranch(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* early_success,
- Condition cond,
- Label* branch_to);
-
- // As above, but the map of the object is already loaded into the register
- // which is preserved by the code generated.
- void CompareMapAndBranch(Register obj_map,
- Handle<Map> map,
- Label* early_success,
- Condition cond,
- Label* branch_to);
-
- // Check if the map of an object is equal to a specified map and branch to
- // label if not. Skip the smi check if not required (object is known to be a
- // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
- // against maps that are ElementsKind transition maps of the specified map.
- void CheckMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type);
-
-
- void CheckMap(Register obj,
- Register scratch,
- Heap::RootListIndex index,
- Label* fail,
- SmiCheckType smi_check_type);
-
// Get value of the weak cell.
void GetWeakValue(Register value, Handle<WeakCell> cell);
@@ -1412,22 +1306,8 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// -------------------------------------------------------------------------
// String utilities.
- // Checks if both instance types are sequential one-byte strings and jumps to
- // label if either is not.
- void JumpIfBothInstanceTypesAreNotSequentialOneByte(
- Register first_object_instance_type, Register second_object_instance_type,
- Register scratch1, Register scratch2, Label* failure);
-
void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
- // Checks if both objects are sequential one-byte strings and jumps to label
- // if either is not. Assumes that neither object is a smi.
- void JumpIfNonSmisNotBothSequentialOneByteStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure);
-
void LoadInstanceDescriptors(Register map, Register descriptors);
void LoadAccessor(Register dst, Register holder, int accessor_index,
AccessorComponent accessor);
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index dc3d67cc1a..65ed498e5a 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -9,7 +9,7 @@
#if V8_TARGET_ARCH_MIPS64
-#include "src/assembler.h"
+#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/codegen.h"
#include "src/disasm.h"
@@ -845,6 +845,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
if (kArchVariant == kMips64r6) {
FCSR_ = kFCSRNaN2008FlagMask;
+ MSACSR_ = 0;
} else {
FCSR_ = 0;
}
@@ -1176,11 +1177,17 @@ void Simulator::set_fcsr_rounding_mode(FPURoundingMode mode) {
FCSR_ |= mode & kFPURoundingModeMask;
}
+void Simulator::set_msacsr_rounding_mode(FPURoundingMode mode) {
+ MSACSR_ |= mode & kFPURoundingModeMask;
+}
unsigned int Simulator::get_fcsr_rounding_mode() {
return FCSR_ & kFPURoundingModeMask;
}
+unsigned int Simulator::get_msacsr_rounding_mode() {
+ return MSACSR_ & kFPURoundingModeMask;
+}
// Sets the rounding error codes in FCSR based on the result of the rounding.
// Returns true if the operation was invalid.
@@ -1460,6 +1467,7 @@ void Simulator::round_according_to_fcsr(double toRound, double& rounded,
// If the number is halfway between two integers,
// round to the even one.
rounded_int--;
+ rounded -= 1.;
}
break;
case kRoundToZero:
@@ -1501,6 +1509,7 @@ void Simulator::round64_according_to_fcsr(double toRound, double& rounded,
// If the number is halfway between two integers,
// round to the even one.
rounded_int--;
+ rounded -= 1.;
}
break;
case kRoundToZero:
@@ -1543,6 +1552,7 @@ void Simulator::round_according_to_fcsr(float toRound, float& rounded,
// If the number is halfway between two integers,
// round to the even one.
rounded_int--;
+ rounded -= 1.f;
}
break;
case kRoundToZero:
@@ -1584,6 +1594,7 @@ void Simulator::round64_according_to_fcsr(float toRound, float& rounded,
// If the number is halfway between two integers,
// round to the even one.
rounded_int--;
+ rounded -= 1.f;
}
break;
case kRoundToZero:
@@ -1601,6 +1612,47 @@ void Simulator::round64_according_to_fcsr(float toRound, float& rounded,
}
}
+template <typename T_fp, typename T_int>
+void Simulator::round_according_to_msacsr(T_fp toRound, T_fp& rounded,
+ T_int& rounded_int) {
+ // 0 RN (round to nearest): Round a result to the nearest
+ // representable value; if the result is exactly halfway between
+ // two representable values, round to the even one. Behave like round_w_d.
+
+ // 1 RZ (round toward zero): Round a result to the closest
+ // representable value whose absolute value is less than or
+ // equal to the infinitely accurate result. Behave like trunc_w_d.
+
+ // 2 RP (round up, or toward +infinity): Round a result to the
+ // next representable value up. Behave like ceil_w_d.
+
+ // 3 RM (round down, or toward −infinity): Round a result to
+ // the next representable value down. Behave like floor_w_d.
+ switch (get_msacsr_rounding_mode()) {
+ case kRoundToNearest:
+ rounded = std::floor(toRound + 0.5);
+ rounded_int = static_cast<T_int>(rounded);
+ if ((rounded_int & 1) != 0 && rounded_int - toRound == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ rounded_int--;
+ rounded -= 1.;
+ }
+ break;
+ case kRoundToZero:
+ rounded = trunc(toRound);
+ rounded_int = static_cast<T_int>(rounded);
+ break;
+ case kRoundToPlusInf:
+ rounded = std::ceil(toRound);
+ rounded_int = static_cast<T_int>(rounded);
+ break;
+ case kRoundToMinusInf:
+ rounded = std::floor(toRound);
+ rounded_int = static_cast<T_int>(rounded);
+ break;
+ }
+}
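// The kRoundToNearest branch builds ties-to-even from floor(x + 0.5), which
// by itself rounds ties upward: an odd result sitting exactly 0.5 above the
// input is pulled back down. Host-side check for values in int32 range
// (illustrative, not V8 code):
#include <cmath>
#include <cstdint>
int32_t RoundNearestEven(double x) {
  double rounded = std::floor(x + 0.5);
  int32_t i = static_cast<int32_t>(rounded);
  if ((i & 1) != 0 && i - x == 0.5) --i;  // halfway case: prefer the even value
  return i;
}
// RoundNearestEven(2.5) == 2 and RoundNearestEven(3.5) == 4, matching
// std::nearbyint under FE_TONEAREST.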
// Raw access to the PC register.
void Simulator::set_pc(int64_t value) {
@@ -2190,10 +2242,6 @@ typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0, int64_t arg1,
int64_t arg6, int64_t arg7,
int64_t arg8);
-typedef ObjectTriple (*SimulatorRuntimeTripleCall)(int64_t arg0, int64_t arg1,
- int64_t arg2, int64_t arg3,
- int64_t arg4);
-
// These prototypes handle the four types of FP calls.
typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
@@ -2396,29 +2444,6 @@ void Simulator::SoftwareInterrupt() {
SimulatorRuntimeProfilingGetterCall target =
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
target(arg0, arg1, Redirection::ReverseRedirection(arg2));
- } else if (redirection->type() == ExternalReference::BUILTIN_CALL_TRIPLE) {
- // builtin call returning ObjectTriple.
- SimulatorRuntimeTripleCall target =
- reinterpret_cast<SimulatorRuntimeTripleCall>(external);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF(
- "Call to host triple returning runtime function %p "
- "args %016" PRIx64 ", %016" PRIx64 ", %016" PRIx64 ", %016" PRIx64
- ", %016" PRIx64 "\n",
- static_cast<void*>(FUNCTION_ADDR(target)), arg1, arg2, arg3, arg4,
- arg5);
- }
- // arg0 is a hidden argument pointing to the return location, so don't
- // pass it to the target function.
- ObjectTriple result = target(arg1, arg2, arg3, arg4, arg5);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned { %p, %p, %p }\n", static_cast<void*>(result.x),
- static_cast<void*>(result.y), static_cast<void*>(result.z));
- }
- // Return is passed back in address pointed to by hidden first argument.
- ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(arg0);
- *sim_result = result;
- set_register(v0, arg0);
} else {
DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
redirection->type() == ExternalReference::BUILTIN_CALL_PAIR);
@@ -4840,93 +4865,113 @@ void Simulator::DecodeTypeMsaI10() {
void Simulator::DecodeTypeMsaELM() {
DCHECK(kArchVariant == kMips64r6);
DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
- uint32_t opcode = instr_.InstructionBits() & kMsaELMMask;
+ uint32_t opcode = instr_.InstructionBits() & kMsaLongerELMMask;
int32_t n = instr_.MsaElmNValue();
int64_t alu_out;
switch (opcode) {
- case COPY_S:
- case COPY_U: {
- msa_reg_t ws;
- switch (DecodeMsaDataFormat()) {
- case MSA_BYTE:
- DCHECK(n < kMSALanesByte);
- get_msa_register(instr_.WsValue(), ws.b);
- alu_out = static_cast<int32_t>(ws.b[n]);
- SetResult(wd_reg(), (opcode == COPY_U) ? alu_out & 0xFFu : alu_out);
- break;
- case MSA_HALF:
- DCHECK(n < kMSALanesHalf);
- get_msa_register(instr_.WsValue(), ws.h);
- alu_out = static_cast<int32_t>(ws.h[n]);
- SetResult(wd_reg(), (opcode == COPY_U) ? alu_out & 0xFFFFu : alu_out);
- break;
- case MSA_WORD:
- DCHECK(n < kMSALanesWord);
- get_msa_register(instr_.WsValue(), ws.w);
- alu_out = static_cast<int32_t>(ws.w[n]);
- SetResult(wd_reg(),
- (opcode == COPY_U) ? alu_out & 0xFFFFFFFFu : alu_out);
- break;
- case MSA_DWORD:
- DCHECK(n < kMSALanesDword);
- get_msa_register(instr_.WsValue(), ws.d);
- alu_out = static_cast<int64_t>(ws.d[n]);
- SetResult(wd_reg(), alu_out);
- break;
- default:
- UNREACHABLE();
- }
- } break;
- case INSERT: {
- msa_reg_t wd;
- switch (DecodeMsaDataFormat()) {
- case MSA_BYTE: {
- DCHECK(n < kMSALanesByte);
- int64_t rs = get_register(instr_.WsValue());
- get_msa_register(instr_.WdValue(), wd.b);
- wd.b[n] = rs & 0xFFu;
- set_msa_register(instr_.WdValue(), wd.b);
- TraceMSARegWr(wd.b);
- break;
- }
- case MSA_HALF: {
- DCHECK(n < kMSALanesHalf);
- int64_t rs = get_register(instr_.WsValue());
- get_msa_register(instr_.WdValue(), wd.h);
- wd.h[n] = rs & 0xFFFFu;
- set_msa_register(instr_.WdValue(), wd.h);
- TraceMSARegWr(wd.h);
- break;
- }
- case MSA_WORD: {
- DCHECK(n < kMSALanesWord);
- int64_t rs = get_register(instr_.WsValue());
- get_msa_register(instr_.WdValue(), wd.w);
- wd.w[n] = rs & 0xFFFFFFFFu;
- set_msa_register(instr_.WdValue(), wd.w);
- TraceMSARegWr(wd.w);
- break;
- }
- case MSA_DWORD: {
- DCHECK(n < kMSALanesDword);
- int64_t rs = get_register(instr_.WsValue());
- get_msa_register(instr_.WdValue(), wd.d);
- wd.d[n] = rs;
- set_msa_register(instr_.WdValue(), wd.d);
- TraceMSARegWr(wd.d);
+ case CTCMSA:
+ DCHECK(sa() == kMSACSRRegister);
+ MSACSR_ = bit_cast<uint32_t>(
+ static_cast<int32_t>(registers_[rd_reg()] & kMaxUInt32));
+ TraceRegWr(static_cast<int32_t>(MSACSR_));
+ break;
+ case CFCMSA:
+ DCHECK(rd_reg() == kMSACSRRegister);
+ SetResult(sa(), static_cast<int64_t>(bit_cast<int32_t>(MSACSR_)));
+ break;
+ case MOVE_V:
+ UNIMPLEMENTED();
+ break;
+ default:
+ opcode &= kMsaELMMask;
+ switch (opcode) {
+ case COPY_S:
+ case COPY_U: {
+ msa_reg_t ws;
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE:
+ DCHECK(n < kMSALanesByte);
+ get_msa_register(instr_.WsValue(), ws.b);
+ alu_out = static_cast<int32_t>(ws.b[n]);
+ SetResult(wd_reg(),
+ (opcode == COPY_U) ? alu_out & 0xFFu : alu_out);
+ break;
+ case MSA_HALF:
+ DCHECK(n < kMSALanesHalf);
+ get_msa_register(instr_.WsValue(), ws.h);
+ alu_out = static_cast<int32_t>(ws.h[n]);
+ SetResult(wd_reg(),
+ (opcode == COPY_U) ? alu_out & 0xFFFFu : alu_out);
+ break;
+ case MSA_WORD:
+ DCHECK(n < kMSALanesWord);
+ get_msa_register(instr_.WsValue(), ws.w);
+ alu_out = static_cast<int32_t>(ws.w[n]);
+ SetResult(wd_reg(),
+ (opcode == COPY_U) ? alu_out & 0xFFFFFFFFu : alu_out);
+ break;
+ case MSA_DWORD:
+ DCHECK(n < kMSALanesDword);
+ get_msa_register(instr_.WsValue(), ws.d);
+ alu_out = static_cast<int64_t>(ws.d[n]);
+ SetResult(wd_reg(), alu_out);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } break;
+ case INSERT: {
+ msa_reg_t wd;
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE: {
+ DCHECK(n < kMSALanesByte);
+ int64_t rs = get_register(instr_.WsValue());
+ get_msa_register(instr_.WdValue(), wd.b);
+ wd.b[n] = rs & 0xFFu;
+ set_msa_register(instr_.WdValue(), wd.b);
+ TraceMSARegWr(wd.b);
+ break;
+ }
+ case MSA_HALF: {
+ DCHECK(n < kMSALanesHalf);
+ int64_t rs = get_register(instr_.WsValue());
+ get_msa_register(instr_.WdValue(), wd.h);
+ wd.h[n] = rs & 0xFFFFu;
+ set_msa_register(instr_.WdValue(), wd.h);
+ TraceMSARegWr(wd.h);
+ break;
+ }
+ case MSA_WORD: {
+ DCHECK(n < kMSALanesWord);
+ int64_t rs = get_register(instr_.WsValue());
+ get_msa_register(instr_.WdValue(), wd.w);
+ wd.w[n] = rs & 0xFFFFFFFFu;
+ set_msa_register(instr_.WdValue(), wd.w);
+ TraceMSARegWr(wd.w);
+ break;
+ }
+ case MSA_DWORD: {
+ DCHECK(n < kMSALanesDword);
+ int64_t rs = get_register(instr_.WsValue());
+ get_msa_register(instr_.WdValue(), wd.d);
+ wd.d[n] = rs;
+ set_msa_register(instr_.WdValue(), wd.d);
+ TraceMSARegWr(wd.d);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } break;
+ case SLDI:
+ case SPLATI:
+ case INSVE:
+ UNIMPLEMENTED();
break;
- }
default:
UNREACHABLE();
}
- } break;
- case SLDI:
- case SPLATI:
- case INSVE:
- UNIMPLEMENTED();
break;
- default:
- UNREACHABLE();
}
}
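
To illustrate the COPY_S/COPY_U distinction handled above: both read one lane out of the vector register, but COPY_S sign-extends it into the destination GPR while COPY_U masks it to the lane width. A reduced sketch for byte lanes (hypothetical helper name):

#include <cstdint>

int64_t CopyByteLane(int8_t lane, bool unsigned_copy) {
  int64_t alu_out = static_cast<int32_t>(lane);       // sign-extend first
  return unsigned_copy ? (alu_out & 0xFF) : alu_out;  // COPY_U zero-extends
}
// CopyByteLane(-1, false) == -1; CopyByteLane(-1, true) == 0xFF
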
@@ -5122,53 +5167,213 @@ void Simulator::DecodeTypeMsaMI10() {
#undef MSA_MI10_STORE
}
-void Simulator::DecodeTypeMsa3R() {
- DCHECK(kArchVariant == kMips64r6);
- DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
- uint32_t opcode = instr_.InstructionBits() & kMsa3RMask;
+template <typename T>
+T Simulator::Msa3RInstrHelper(uint32_t opcode, T wd, T ws, T wt) {
+ typedef typename std::make_unsigned<T>::type uT;
+ T res;
+ int wt_modulo = wt % (sizeof(T) * 8);
switch (opcode) {
case SLL_MSA:
+ res = static_cast<T>(ws << wt_modulo);
+ break;
case SRA_MSA:
+ res = static_cast<T>(ArithmeticShiftRight(ws, wt_modulo));
+ break;
case SRL_MSA:
+ res = static_cast<T>(static_cast<uT>(ws) >> wt_modulo);
+ break;
case BCLR:
+ res = static_cast<T>(static_cast<T>(~(1ull << wt_modulo)) & ws);
+ break;
case BSET:
+ res = static_cast<T>(static_cast<T>(1ull << wt_modulo) | ws);
+ break;
case BNEG:
- case BINSL:
- case BINSR:
+ res = static_cast<T>(static_cast<T>(1ull << wt_modulo) ^ ws);
+ break;
+ case BINSL: {
+ int elem_size = 8 * sizeof(T);
+ int bits = wt_modulo + 1;
+ if (bits == elem_size) {
+ res = static_cast<T>(ws);
+ } else {
+ uint64_t mask = ((1ull << bits) - 1) << (elem_size - bits);
+ res = static_cast<T>((static_cast<T>(mask) & ws) |
+ (static_cast<T>(~mask) & wd));
+ }
+ } break;
+ case BINSR: {
+ int elem_size = 8 * sizeof(T);
+ int bits = wt_modulo + 1;
+ if (bits == elem_size) {
+ res = static_cast<T>(ws);
+ } else {
+ uint64_t mask = (1ull << bits) - 1;
+ res = static_cast<T>((static_cast<T>(mask) & ws) |
+ (static_cast<T>(~mask) & wd));
+ }
+ } break;
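
BINSL and BINSR above insert the top or bottom wt_modulo + 1 bits of ws into wd, keeping the remaining bits of wd. A standalone sketch of the BINSR masking (assumed helper name, 32-bit lanes):

#include <cstdint>

uint32_t BitInsertRight(uint32_t wd, uint32_t ws, int bits) {
  if (bits == 32) return ws;          // full-width insert, no mask needed
  uint32_t mask = (1u << bits) - 1;   // low `bits` bits set
  return (ws & mask) | (wd & ~mask);
}
// BitInsertRight(0xFFFF0000u, 0x0000ABCDu, 16) == 0xFFFFABCDu
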
case ADDV:
+ res = ws + wt;
+ break;
case SUBV:
+ res = ws - wt;
+ break;
case MAX_S:
+ res = Max(ws, wt);
+ break;
case MAX_U:
+ res = static_cast<T>(Max(static_cast<uT>(ws), static_cast<uT>(wt)));
+ break;
case MIN_S:
+ res = Min(ws, wt);
+ break;
case MIN_U:
+ res = static_cast<T>(Min(static_cast<uT>(ws), static_cast<uT>(wt)));
+ break;
case MAX_A:
+ // We use negated absolute values to avoid overflow on the
+ // MIN_INT corner case.
+ res = Nabs(ws) < Nabs(wt) ? ws : wt;
+ break;
case MIN_A:
+ // We use negated absolute values to avoid overflow on the
+ // MIN_INT corner case.
+ res = Nabs(ws) > Nabs(wt) ? ws : wt;
+ break;
case CEQ:
+ res = static_cast<T>(!Compare(ws, wt) ? -1ull : 0ull);
+ break;
case CLT_S:
+ res = static_cast<T>((Compare(ws, wt) == -1) ? -1ull : 0ull);
+ break;
case CLT_U:
+ res = static_cast<T>(
+ (Compare(static_cast<uT>(ws), static_cast<uT>(wt)) == -1) ? -1ull
+ : 0ull);
+ break;
case CLE_S:
+ res = static_cast<T>((Compare(ws, wt) != 1) ? -1ull : 0ull);
+ break;
case CLE_U:
+ res = static_cast<T>(
+ (Compare(static_cast<uT>(ws), static_cast<uT>(wt)) != 1) ? -1ull
+ : 0ull);
+ break;
case ADD_A:
- case ADDS_A:
+ res = static_cast<T>(Abs(ws) + Abs(wt));
+ break;
+ case ADDS_A: {
+ T ws_nabs = Nabs(ws);
+ T wt_nabs = Nabs(wt);
+ if (ws_nabs < -std::numeric_limits<T>::max() - wt_nabs) {
+ res = std::numeric_limits<T>::max();
+ } else {
+ res = -(ws_nabs + wt_nabs);
+ }
+ } break;
case ADDS_S:
- case ADDS_U:
+ res = SaturateAdd(ws, wt);
+ break;
+ case ADDS_U: {
+ uT ws_u = static_cast<uT>(ws);
+ uT wt_u = static_cast<uT>(wt);
+ res = static_cast<T>(SaturateAdd(ws_u, wt_u));
+ } break;
case AVE_S:
- case AVE_U:
+ res = static_cast<T>((wt & ws) + ((wt ^ ws) >> 1));
+ break;
+ case AVE_U: {
+ uT ws_u = static_cast<uT>(ws);
+ uT wt_u = static_cast<uT>(wt);
+ res = static_cast<T>((wt_u & ws_u) + ((wt_u ^ ws_u) >> 1));
+ } break;
case AVER_S:
- case AVER_U:
+ res = static_cast<T>((wt | ws) - ((wt ^ ws) >> 1));
+ break;
+ case AVER_U: {
+ uT ws_u = static_cast<uT>(ws);
+ uT wt_u = static_cast<uT>(wt);
+ res = static_cast<T>((wt_u | ws_u) - ((wt_u ^ ws_u) >> 1));
+ } break;
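
The AVE/AVER cases rely on classic overflow-free averaging identities: since a + b == (a & b) * 2 + (a ^ b), the floor average is (a & b) + ((a ^ b) >> 1), and the rounded-up average is (a | b) - ((a ^ b) >> 1). Neither form ever needs the extra carry bit a plain (a + b) / 2 would:

#include <cstdint>

uint8_t AveFloor(uint8_t a, uint8_t b) { return (a & b) + ((a ^ b) >> 1); }
uint8_t AveCeil(uint8_t a, uint8_t b) { return (a | b) - ((a ^ b) >> 1); }
// AveFloor(255, 254) == 254, AveCeil(255, 254) == 255 -- no 9-bit overflow.
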
case SUBS_S:
- case SUBS_U:
- case SUBSUS_U:
- case SUBSUU_S:
+ res = SaturateSub(ws, wt);
+ break;
+ case SUBS_U: {
+ uT ws_u = static_cast<uT>(ws);
+ uT wt_u = static_cast<uT>(wt);
+ res = static_cast<T>(SaturateSub(ws_u, wt_u));
+ } break;
+ case SUBSUS_U: {
+ uT wsu = static_cast<uT>(ws);
+ if (wt > 0) {
+ uT wtu = static_cast<uT>(wt);
+ if (wtu > wsu) {
+ res = 0;
+ } else {
+ res = static_cast<T>(wsu - wtu);
+ }
+ } else {
+ if (wsu > std::numeric_limits<uT>::max() + wt) {
+ res = static_cast<T>(std::numeric_limits<uT>::max());
+ } else {
+ res = static_cast<T>(wsu - wt);
+ }
+ }
+ } break;
+ case SUBSUU_S: {
+ uT wsu = static_cast<uT>(ws);
+ uT wtu = static_cast<uT>(wt);
+ uT wdu;
+ if (wsu > wtu) {
+ wdu = wsu - wtu;
+ if (wdu > std::numeric_limits<T>::max()) {
+ res = std::numeric_limits<T>::max();
+ } else {
+ res = static_cast<T>(wdu);
+ }
+ } else {
+ wdu = wtu - wsu;
+ CHECK(-std::numeric_limits<T>::max() ==
+ std::numeric_limits<T>::min() + 1);
+ if (wdu <= std::numeric_limits<T>::max()) {
+ res = -static_cast<T>(wdu);
+ } else {
+ res = std::numeric_limits<T>::min();
+ }
+ }
+ } break;
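
SUBSUU_S above subtracts two unsigned lanes and clamps the difference into the signed range; the branchy unsigned formulation avoids widening. An equivalent sketch that widens instead (illustrative only, byte lanes):

#include <cstdint>
#include <limits>

int8_t SubSuuS(uint8_t ws, uint8_t wt) {
  int diff = static_cast<int>(ws) - static_cast<int>(wt);  // no wraparound
  if (diff > std::numeric_limits<int8_t>::max()) return INT8_MAX;
  if (diff < std::numeric_limits<int8_t>::min()) return INT8_MIN;
  return static_cast<int8_t>(diff);
}
// SubSuuS(255, 0) == 127 (clamped); SubSuuS(0, 255) == -128 (clamped)
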
case ASUB_S:
- case ASUB_U:
+ res = static_cast<T>(Abs(ws - wt));
+ break;
+ case ASUB_U: {
+ uT wsu = static_cast<uT>(ws);
+ uT wtu = static_cast<uT>(wt);
+ res = static_cast<T>(wsu > wtu ? wsu - wtu : wtu - wsu);
+ } break;
case MULV:
+ res = ws * wt;
+ break;
case MADDV:
+ res = wd + ws * wt;
+ break;
case MSUBV:
+ res = wd - ws * wt;
+ break;
case DIV_S_MSA:
+ res = wt != 0 ? ws / wt : static_cast<T>(Unpredictable);
+ break;
case DIV_U:
+ res = wt != 0 ? static_cast<T>(static_cast<uT>(ws) / static_cast<uT>(wt))
+ : static_cast<T>(Unpredictable);
+ break;
case MOD_S:
+ res = wt != 0 ? ws % wt : static_cast<T>(Unpredictable);
+ break;
case MOD_U:
+ res = wt != 0 ? static_cast<T>(static_cast<uT>(ws) % static_cast<uT>(wt))
+ : static_cast<T>(Unpredictable);
+ break;
case DOTP_S:
case DOTP_U:
case DPADD_S:
@@ -5184,8 +5389,17 @@ void Simulator::DecodeTypeMsa3R() {
case ILVEV:
case ILVOD:
case VSHF:
- case SRAR:
- case SRLR:
+ UNIMPLEMENTED();
+ break;
+ case SRAR: {
+ int bit = wt_modulo == 0 ? 0 : (ws >> (wt_modulo - 1)) & 1;
+ res = static_cast<T>(ArithmeticShiftRight(ws, wt_modulo) + bit);
+ } break;
+ case SRLR: {
+ uT wsu = static_cast<uT>(ws);
+ int bit = wt_modulo == 0 ? 0 : (wsu >> (wt_modulo - 1)) & 1;
+ res = static_cast<T>((wsu >> wt_modulo) + bit);
+ } break;
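
SRAR/SRLR are rounding shifts: the last bit shifted out is added back in, so halves round up instead of truncating. Reduced to a standalone helper:

#include <cstdint>

uint32_t ShiftRightRounded(uint32_t ws, int sh) {
  int bit = sh == 0 ? 0 : (ws >> (sh - 1)) & 1;  // last bit shifted out
  return (ws >> sh) + bit;
}
// ShiftRightRounded(7, 1) == 4, ShiftRightRounded(5, 2) == 1 (5/4 rounds down)
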
case HADD_S:
case HADD_U:
case HSUB_S:
@@ -5195,6 +5409,42 @@ void Simulator::DecodeTypeMsa3R() {
default:
UNREACHABLE();
}
+ return res;
+}
+
+void Simulator::DecodeTypeMsa3R() {
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsa3RMask;
+ msa_reg_t ws, wd, wt;
+
+#define MSA_3R_DF(elem, num_of_lanes) \
+ get_msa_register(instr_.WdValue(), wd.elem); \
+ get_msa_register(instr_.WsValue(), ws.elem); \
+ get_msa_register(instr_.WtValue(), wt.elem); \
+ for (int i = 0; i < num_of_lanes; i++) { \
+ wd.elem[i] = Msa3RInstrHelper(opcode, wd.elem[i], ws.elem[i], wt.elem[i]); \
+ } \
+ set_msa_register(instr_.WdValue(), wd.elem); \
+ TraceMSARegWr(wd.elem);
+
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE:
+ MSA_3R_DF(b, kMSALanesByte);
+ break;
+ case MSA_HALF:
+ MSA_3R_DF(h, kMSALanesHalf);
+ break;
+ case MSA_WORD:
+ MSA_3R_DF(w, kMSALanesWord);
+ break;
+ case MSA_DWORD:
+ MSA_3R_DF(d, kMSALanesDword);
+ break;
+ default:
+ UNREACHABLE();
+ }
+#undef MSA_3R_DF
}
void Simulator::DecodeTypeMsa3RF() {
@@ -5433,32 +5683,327 @@ void Simulator::DecodeTypeMsa2R() {
}
}
-void Simulator::DecodeTypeMsa2RF() {
- DCHECK(kArchVariant == kMips64r6);
- DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
- uint32_t opcode = instr_.InstructionBits() & kMsa2RFMask;
+#define BIT(n) (0x1LL << n)
+#define QUIET_BIT_S(nan) (bit_cast<int32_t>(nan) & BIT(22))
+#define QUIET_BIT_D(nan) (bit_cast<int64_t>(nan) & BIT(51))
+static inline bool isSnan(float fp) { return !QUIET_BIT_S(fp); }
+static inline bool isSnan(double fp) { return !QUIET_BIT_D(fp); }
+#undef QUIET_BIT_S
+#undef QUIET_BIT_D
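
The isSnan helpers above test the IEEE 754 quiet bit: the top fraction bit (bit 22 in binary32, bit 51 in binary64) is set for quiet NaNs and clear for signaling ones. A memcpy-based sketch, only meaningful when the input is already a NaN:

#include <cstdint>
#include <cstring>

bool IsSignalingNaN32(float fp) {
  uint32_t bits;
  std::memcpy(&bits, &fp, sizeof(bits));
  return (bits & (1u << 22)) == 0;  // quiet bit clear => signaling NaN
}
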
+
+template <typename T_int, typename T_fp, typename T_src, typename T_dst>
+T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
+ Simulator* sim) {
+ typedef typename std::make_unsigned<T_int>::type T_uint;
switch (opcode) {
- case FCLASS:
- case FTRUNC_S:
- case FTRUNC_U:
- case FSQRT:
- case FRSQRT:
- case FRCP:
- case FRINT:
- case FLOG2:
- case FEXUPL:
- case FEXUPR:
- case FFQL:
- case FFQR:
- case FTINT_S:
- case FTINT_U:
+ case FCLASS: {
+#define SNAN_BIT BIT(0)
+#define QNAN_BIT BIT(1)
+#define NEG_INFINITY_BIT BIT(2)
+#define NEG_NORMAL_BIT BIT(3)
+#define NEG_SUBNORMAL_BIT BIT(4)
+#define NEG_ZERO_BIT BIT(5)
+#define POS_INFINITY_BIT BIT(6)
+#define POS_NORMAL_BIT BIT(7)
+#define POS_SUBNORMAL_BIT BIT(8)
+#define POS_ZERO_BIT BIT(9)
+ T_fp element = *reinterpret_cast<T_fp*>(&src);
+ switch (std::fpclassify(element)) {
+ case FP_INFINITE:
+ if (std::signbit(element)) {
+ dst = NEG_INFINITY_BIT;
+ } else {
+ dst = POS_INFINITY_BIT;
+ }
+ break;
+ case FP_NAN:
+ if (isSnan(element)) {
+ dst = SNAN_BIT;
+ } else {
+ dst = QNAN_BIT;
+ }
+ break;
+ case FP_NORMAL:
+ if (std::signbit(element)) {
+ dst = NEG_NORMAL_BIT;
+ } else {
+ dst = POS_NORMAL_BIT;
+ }
+ break;
+ case FP_SUBNORMAL:
+ if (std::signbit(element)) {
+ dst = NEG_SUBNORMAL_BIT;
+ } else {
+ dst = POS_SUBNORMAL_BIT;
+ }
+ break;
+ case FP_ZERO:
+ if (std::signbit(element)) {
+ dst = NEG_ZERO_BIT;
+ } else {
+ dst = POS_ZERO_BIT;
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+#undef BIT
+#undef SNAN_BIT
+#undef QNAN_BIT
+#undef NEG_INFINITY_BIT
+#undef NEG_NORMAL_BIT
+#undef NEG_SUBNORMAL_BIT
+#undef NEG_ZERO_BIT
+#undef POS_INFINITY_BIT
+#undef POS_NORMAL_BIT
+#undef POS_SUBNORMAL_BIT
+#undef POS_ZERO_BIT
+ case FTRUNC_S: {
+ T_fp element = bit_cast<T_fp>(src);
+ const T_int max_int = std::numeric_limits<T_int>::max();
+ const T_int min_int = std::numeric_limits<T_int>::min();
+ if (std::isnan(element)) {
+ dst = 0;
+ } else if (element > max_int || element < min_int) {
+ dst = element > max_int ? max_int : min_int;
+ } else {
+ dst = static_cast<T_int>(std::trunc(element));
+ }
+ break;
+ }
+ case FTRUNC_U: {
+ T_fp element = bit_cast<T_fp>(src);
+ const T_uint max_int = std::numeric_limits<T_uint>::max();
+ if (std::isnan(element)) {
+ dst = 0;
+ } else if (element > max_int || element < 0) {
+ dst = element > max_int ? max_int : 0;
+ } else {
+ dst = static_cast<T_uint>(std::trunc(element));
+ }
+ break;
+ }
+ case FSQRT: {
+ T_fp element = bit_cast<T_fp>(src);
+ if (element < 0 || std::isnan(element)) {
+ dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ } else {
+ dst = bit_cast<T_int>(std::sqrt(element));
+ }
+ break;
+ }
+ case FRSQRT: {
+ T_fp element = bit_cast<T_fp>(src);
+ if (element < 0 || std::isnan(element)) {
+ dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ } else {
+ dst = bit_cast<T_int>(1 / std::sqrt(element));
+ }
+ break;
+ }
+ case FRCP: {
+ T_fp element = bit_cast<T_fp>(src);
+ if (std::isnan(element)) {
+ dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ } else {
+ dst = bit_cast<T_int>(1 / element);
+ }
+ break;
+ }
+ case FRINT: {
+ T_fp element = bit_cast<T_fp>(src);
+ if (std::isnan(element)) {
+ dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ } else {
+ T_int dummy;
+ sim->round_according_to_msacsr<T_fp, T_int>(element, element, dummy);
+ dst = bit_cast<T_int>(element);
+ }
+ break;
+ }
+ case FLOG2: {
+ T_fp element = bit_cast<T_fp>(src);
+ switch (std::fpclassify(element)) {
+ case FP_NORMAL:
+ case FP_SUBNORMAL:
+ dst = bit_cast<T_int>(std::logb(element));
+ break;
+ case FP_ZERO:
+ dst = bit_cast<T_int>(-std::numeric_limits<T_fp>::infinity());
+ break;
+ case FP_NAN:
+ dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ break;
+ case FP_INFINITE:
+ if (element < 0) {
+ dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ } else {
+ dst = bit_cast<T_int>(std::numeric_limits<T_fp>::infinity());
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+ case FTINT_S: {
+ T_fp element = bit_cast<T_fp>(src);
+ const T_int max_int = std::numeric_limits<T_int>::max();
+ const T_int min_int = std::numeric_limits<T_int>::min();
+ if (std::isnan(element)) {
+ dst = 0;
+ } else if (element < min_int || element > max_int) {
+ dst = element > max_int ? max_int : min_int;
+ } else {
+ sim->round_according_to_msacsr<T_fp, T_int>(element, element, dst);
+ }
+ break;
+ }
+ case FTINT_U: {
+ T_fp element = bit_cast<T_fp>(src);
+ const T_uint max_uint = std::numeric_limits<T_uint>::max();
+ if (std::isnan(element)) {
+ dst = 0;
+ } else if (element < 0 || element > max_uint) {
+ dst = element > max_uint ? max_uint : 0;
+ } else {
+ T_uint res;
+ sim->round_according_to_msacsr<T_fp, T_uint>(element, element, res);
+ dst = *reinterpret_cast<T_int*>(&res);
+ }
+ break;
+ }
case FFINT_S:
+ dst = bit_cast<T_int>(static_cast<T_fp>(src));
+ break;
case FFINT_U:
- UNIMPLEMENTED();
+ typedef typename std::make_unsigned<T_src>::type uT_src;
+ dst = bit_cast<T_int>(static_cast<T_fp>(bit_cast<uT_src>(src)));
break;
default:
UNREACHABLE();
}
+ return 0;
+}
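
FTRUNC_S above is a saturating float-to-int conversion: NaN maps to 0, values beyond the integer range clamp to the extremes, and everything else truncates toward zero. A self-contained sketch for binary32 to int32, with boundary constants chosen to be exactly representable as floats:

#include <cmath>
#include <cstdint>
#include <limits>

int32_t TruncSat(float x) {
  if (std::isnan(x)) return 0;
  if (x >= 2147483648.0f) return std::numeric_limits<int32_t>::max();
  if (x < -2147483648.0f) return std::numeric_limits<int32_t>::min();
  return static_cast<int32_t>(std::trunc(x));  // in range: truncate
}
// TruncSat(1e20f) == INT32_MAX, TruncSat(-1.9f) == -1, TruncSat(NAN) == 0
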
+
+template <typename T_int, typename T_fp, typename T_reg, typename T_i>
+T_int Msa2RFInstrHelper2(uint32_t opcode, T_reg ws, T_i i) {
+ switch (opcode) {
+#define EXTRACT_FLOAT16_SIGN(fp16) (fp16 >> 15)
+#define EXTRACT_FLOAT16_EXP(fp16) (fp16 >> 10 & 0x1f)
+#define EXTRACT_FLOAT16_FRAC(fp16) (fp16 & 0x3ff)
+#define PACK_FLOAT32(sign, exp, frac) \
+ static_cast<uint32_t>(((sign) << 31) + ((exp) << 23) + (frac))
+#define FEXUP_DF(src_index) \
+ uint_fast16_t element = ws.uh[src_index]; \
+ uint_fast32_t aSign, aFrac; \
+ int_fast32_t aExp; \
+ aSign = EXTRACT_FLOAT16_SIGN(element); \
+ aExp = EXTRACT_FLOAT16_EXP(element); \
+ aFrac = EXTRACT_FLOAT16_FRAC(element); \
+ if (V8_LIKELY(aExp && aExp != 0x1f)) { \
+ return PACK_FLOAT32(aSign, aExp + 0x70, aFrac << 13); \
+ } else if (aExp == 0x1f) { \
+ if (aFrac) { \
+ return bit_cast<int32_t>(std::numeric_limits<float>::quiet_NaN()); \
+ } else { \
+ return bit_cast<uint32_t>(std::numeric_limits<float>::infinity()) | \
+ static_cast<uint32_t>(aSign) << 31; \
+ } \
+ } else { \
+ if (aFrac == 0) { \
+ return PACK_FLOAT32(aSign, 0, 0); \
+ } else { \
+ int_fast16_t shiftCount = \
+ base::bits::CountLeadingZeros32(static_cast<uint32_t>(aFrac)) - 21; \
+ aFrac <<= shiftCount; \
+ aExp = -shiftCount; \
+ return PACK_FLOAT32(aSign, aExp + 0x70, aFrac << 13); \
+ } \
+ }
+ case FEXUPL:
+ if (std::is_same<int32_t, T_int>::value) {
+ FEXUP_DF(i + kMSALanesWord)
+ } else {
+ return bit_cast<int64_t>(
+ static_cast<double>(bit_cast<float>(ws.w[i + kMSALanesDword])));
+ }
+ case FEXUPR:
+ if (std::is_same<int32_t, T_int>::value) {
+ FEXUP_DF(i)
+ } else {
+ return bit_cast<int64_t>(static_cast<double>(bit_cast<float>(ws.w[i])));
+ }
+ case FFQL: {
+ if (std::is_same<int32_t, T_int>::value) {
+ return bit_cast<int32_t>(static_cast<float>(ws.h[i + kMSALanesWord]) /
+ (1U << 15));
+ } else {
+ return bit_cast<int64_t>(static_cast<double>(ws.w[i + kMSALanesDword]) /
+ (1U << 31));
+ }
+ break;
+ }
+ case FFQR: {
+ if (std::is_same<int32_t, T_int>::value) {
+ return bit_cast<int32_t>(static_cast<float>(ws.h[i]) / (1U << 15));
+ } else {
+ return bit_cast<int64_t>(static_cast<double>(ws.w[i]) / (1U << 31));
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+#undef EXTRACT_FLOAT16_SIGN
+#undef EXTRACT_FLOAT16_EXP
+#undef EXTRACT_FLOAT16_FRAC
+#undef PACK_FLOAT32
+#undef FEXUP_DF
+}
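
FEXUP_DF widens IEEE binary16 to binary32: rebias the exponent by 0x70 (127 - 15) and shift the 10-bit fraction up to 23 bits (<< 13), with infinities, NaNs, and subnormals special-cased. The normal-number path alone, as a standalone function:

#include <cstdint>

uint32_t HalfToFloatBitsNormal(uint16_t h) {
  uint32_t sign = h >> 15;
  uint32_t exp = (h >> 10) & 0x1f;  // assumed normal: 1 <= exp <= 30
  uint32_t frac = h & 0x3ff;
  return (sign << 31) | ((exp + 0x70) << 23) | (frac << 13);
}
// HalfToFloatBitsNormal(0x3C00) == 0x3F800000, i.e. 1.0f
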
+
+void Simulator::DecodeTypeMsa2RF() {
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsa2RFMask;
+ msa_reg_t wd, ws;
+ get_msa_register(ws_reg(), &ws);
+ if (opcode == FEXUPL || opcode == FEXUPR || opcode == FFQL ||
+ opcode == FFQR) {
+ switch (DecodeMsaDataFormat()) {
+ case MSA_WORD:
+ for (int i = 0; i < kMSALanesWord; i++) {
+ wd.w[i] = Msa2RFInstrHelper2<int32_t, float>(opcode, ws, i);
+ }
+ break;
+ case MSA_DWORD:
+ for (int i = 0; i < kMSALanesDword; i++) {
+ wd.d[i] = Msa2RFInstrHelper2<int64_t, double>(opcode, ws, i);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ switch (DecodeMsaDataFormat()) {
+ case MSA_WORD:
+ for (int i = 0; i < kMSALanesWord; i++) {
+ Msa2RFInstrHelper<int32_t, float>(opcode, ws.w[i], wd.w[i], this);
+ }
+ break;
+ case MSA_DWORD:
+ for (int i = 0; i < kMSALanesDword; i++) {
+ Msa2RFInstrHelper<int64_t, double>(opcode, ws.d[i], wd.d[i], this);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ set_msa_register(wd_reg(), &wd);
+ TraceMSARegWr(&wd);
}
void Simulator::DecodeTypeRegister() {
@@ -5496,6 +6041,9 @@ void Simulator::DecodeTypeRegister() {
case kMsaMinor2RF:
DecodeTypeMsa2RF();
break;
+ case kMsaMinorELM:
+ DecodeTypeMsaELM();
+ break;
default:
UNREACHABLE();
}
diff --git a/deps/v8/src/mips64/simulator-mips64.h b/deps/v8/src/mips64/simulator-mips64.h
index 0bb9d860be..4ef22cbcfe 100644
--- a/deps/v8/src/mips64/simulator-mips64.h
+++ b/deps/v8/src/mips64/simulator-mips64.h
@@ -277,8 +277,13 @@ class Simulator {
int32_t& rounded_int, float fs);
void round64_according_to_fcsr(float toRound, float& rounded,
int64_t& rounded_int, float fs);
+ template <typename T_fp, typename T_int>
+ void round_according_to_msacsr(T_fp toRound, T_fp& rounded,
+ T_int& rounded_int);
void set_fcsr_rounding_mode(FPURoundingMode mode);
+ void set_msacsr_rounding_mode(FPURoundingMode mode);
unsigned int get_fcsr_rounding_mode();
+ unsigned int get_msacsr_rounding_mode();
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int64_t value);
int64_t get_pc() const;
@@ -452,6 +457,8 @@ class Simulator {
T MsaI5InstrHelper(uint32_t opcode, T ws, int32_t i5);
template <typename T>
T MsaBitInstrHelper(uint32_t opcode, T wd, T ws, int32_t m);
+ template <typename T>
+ T Msa3RInstrHelper(uint32_t opcode, T wd, T ws, T wt);
// Executing is handled based on the instruction type.
void DecodeTypeRegister();
@@ -600,6 +607,8 @@ class Simulator {
int64_t FPUregisters_[kNumFPURegisters * 2];
// FPU control register.
uint32_t FCSR_;
+ // MSA control register.
+ uint32_t MSACSR_;
// Simulator support.
// Allocate 1MB for stack.
diff --git a/deps/v8/src/objects-body-descriptors-inl.h b/deps/v8/src/objects-body-descriptors-inl.h
index fba2ff70cc..f15659c7b3 100644
--- a/deps/v8/src/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects-body-descriptors-inl.h
@@ -108,13 +108,8 @@ class JSObject::FastBodyDescriptor final : public BodyDescriptorBase {
}
};
-// Iterates the function object according to the visiting policy.
-template <JSFunction::BodyVisitingPolicy body_visiting_policy>
-class JSFunction::BodyDescriptorImpl final : public BodyDescriptorBase {
+class JSFunction::BodyDescriptor final : public BodyDescriptorBase {
public:
- STATIC_ASSERT(kNonWeakFieldsEndOffset == kNextFunctionLinkOffset);
- STATIC_ASSERT(kNextFunctionLinkOffset + kPointerSize == kSize);
-
static bool IsValidSlot(HeapObject* obj, int offset) {
if (offset < kSize) return true;
return IsValidSlotImpl(obj, offset);
@@ -123,10 +118,7 @@ class JSFunction::BodyDescriptorImpl final : public BodyDescriptorBase {
template <typename ObjectVisitor>
static inline void IterateBody(HeapObject* obj, int object_size,
ObjectVisitor* v) {
- IteratePointers(obj, kPropertiesOrHashOffset, kNonWeakFieldsEndOffset, v);
- if (body_visiting_policy == kIgnoreWeakness) {
- IteratePointers(obj, kNextFunctionLinkOffset, kSize, v);
- }
+ IteratePointers(obj, kPropertiesOrHashOffset, kSize, v);
IterateBodyImpl(obj, kSize, object_size, v);
}
@@ -222,6 +214,19 @@ class BytecodeArray::BodyDescriptor final : public BodyDescriptorBase {
}
};
+class BigInt::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(HeapObject* obj, int offset) { return false; }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {}
+
+ static inline int SizeOf(Map* map, HeapObject* obj) {
+ return BigInt::SizeFor(BigInt::cast(obj)->length());
+ }
+};
+
class FixedDoubleArray::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(HeapObject* obj, int offset) { return false; }
@@ -368,6 +373,9 @@ class Code::BodyDescriptor final : public BodyDescriptorBase {
v->VisitNextCodeLink(Code::cast(obj),
HeapObject::RawField(obj, kNextCodeLinkOffset));
+ // GC does not visit data/code in the header and in the body directly.
+ STATIC_ASSERT(Code::kNextCodeLinkOffset + kPointerSize == kDataStart);
+
RelocIterator it(Code::cast(obj), mode_mask);
Isolate* isolate = obj->GetIsolate();
for (; !it.done(); it.next()) {
@@ -455,7 +463,6 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
case JS_ERROR_TYPE:
case JS_ARGUMENTS_TYPE:
case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
- case JS_PROMISE_CAPABILITY_TYPE:
case JS_PROMISE_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
@@ -531,6 +538,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
case FILLER_TYPE:
case BYTE_ARRAY_TYPE:
case FREE_SPACE_TYPE:
+ case BIGINT_TYPE:
return ReturnType();
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 392e8f1628..e403fe9b25 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -13,6 +13,7 @@
#include "src/layout-descriptor.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
+#include "src/objects/bigint-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/literal-objects.h"
#include "src/objects/module.h"
@@ -72,6 +73,9 @@ void HeapObject::HeapObjectVerify() {
case MUTABLE_HEAP_NUMBER_TYPE:
HeapNumber::cast(this)->HeapNumberVerify();
break;
+ case BIGINT_TYPE:
+ BigInt::cast(this)->BigIntVerify();
+ break;
case HASH_TABLE_TYPE:
case FIXED_ARRAY_TYPE:
FixedArray::cast(this)->FixedArrayVerify();
@@ -199,9 +203,6 @@ void HeapObject::HeapObjectVerify() {
case JS_WEAK_SET_TYPE:
JSWeakSet::cast(this)->JSWeakSetVerify();
break;
- case JS_PROMISE_CAPABILITY_TYPE:
- JSPromiseCapability::cast(this)->JSPromiseCapabilityVerify();
- break;
case JS_PROMISE_TYPE:
JSPromise::cast(this)->JSPromiseVerify();
break;
@@ -370,6 +371,15 @@ void JSObject::JSObjectVerify() {
field_type));
}
}
+
+ if (map()->EnumLength() != kInvalidEnumCacheSentinel) {
+ EnumCache* enum_cache = descriptors->GetEnumCache();
+ FixedArray* keys = enum_cache->keys();
+ FixedArray* indices = enum_cache->indices();
+ CHECK_LE(map()->EnumLength(), keys->length());
+ CHECK_IMPLIES(indices != isolate->heap()->empty_fixed_array(),
+ keys->length() == indices->length());
+ }
}
// If a GC was caused while constructing this object, the elements
@@ -414,13 +424,17 @@ void Map::MapVerify() {
CHECK_IMPLIES(has_named_interceptor(), may_have_interesting_symbols());
CHECK_IMPLIES(is_dictionary_map(), may_have_interesting_symbols());
CHECK_IMPLIES(is_access_check_needed(), may_have_interesting_symbols());
+ CHECK_IMPLIES(IsJSObjectMap() && !CanHaveFastTransitionableElementsKind(),
+ IsDictionaryElementsKind(elements_kind()) ||
+ IsTerminalElementsKind(elements_kind()));
}
void Map::DictionaryMapVerify() {
MapVerify();
CHECK(is_dictionary_map());
- CHECK(instance_descriptors()->IsEmpty());
+ CHECK_EQ(kInvalidEnumCacheSentinel, EnumLength());
+ CHECK_EQ(GetHeap()->empty_descriptor_array(), instance_descriptors());
CHECK_EQ(0, unused_property_fields());
CHECK_EQ(Map::GetVisitorId(this), visitor_id());
}
@@ -435,9 +449,22 @@ void FixedArray::FixedArrayVerify() {
Object* e = get(i);
VerifyPointer(e);
}
+ Heap* heap = GetHeap();
+ if (this == heap->empty_descriptor_array()) {
+ DescriptorArray* descriptors = DescriptorArray::cast(this);
+ CHECK_EQ(2, length());
+ CHECK_EQ(0, descriptors->number_of_descriptors());
+ CHECK_EQ(heap->empty_enum_cache(), descriptors->GetEnumCache());
+ }
}
void PropertyArray::PropertyArrayVerify() {
+ if (length() == 0) {
+ CHECK_EQ(this, this->GetHeap()->empty_property_array());
+ return;
+ }
+ // There are no empty PropertyArrays.
+ CHECK_LT(0, length());
for (int i = 0; i < length(); i++) {
Object* e = get(i);
VerifyPointer(e);
@@ -688,11 +715,7 @@ void JSBoundFunction::JSBoundFunctionVerify() {
void JSFunction::JSFunctionVerify() {
CHECK(IsJSFunction());
VerifyObjectField(kPrototypeOrInitialMapOffset);
- VerifyObjectField(kNextFunctionLinkOffset);
CHECK(code()->IsCode());
- CHECK(next_function_link() == NULL ||
- next_function_link()->IsUndefined(GetIsolate()) ||
- next_function_link()->IsJSFunction());
CHECK(map()->is_callable());
}
@@ -715,7 +738,8 @@ void SharedFunctionInfo::SharedFunctionInfoVerify() {
Isolate* isolate = GetIsolate();
CHECK(function_data()->IsUndefined(isolate) || IsApiFunction() ||
- HasBytecodeArray() || HasAsmWasmData());
+ HasBytecodeArray() || HasAsmWasmData() ||
+ HasLazyDeserializationBuiltinId());
CHECK(function_identifier()->IsUndefined(isolate) || HasBuiltinFunctionId() ||
HasInferredName());
@@ -832,7 +856,7 @@ void Code::CodeVerify() {
}
}
CHECK(raw_type_feedback_info() == Smi::kZero ||
- raw_type_feedback_info()->IsSmi() == IsCodeStubOrIC());
+ raw_type_feedback_info()->IsSmi() == is_stub());
}
@@ -988,9 +1012,8 @@ void JSWeakSet::JSWeakSetVerify() {
CHECK(table()->IsHashTable() || table()->IsUndefined(GetIsolate()));
}
-void JSPromiseCapability::JSPromiseCapabilityVerify() {
- CHECK(IsJSPromiseCapability());
- JSObjectVerify();
+void PromiseCapability::PromiseCapabilityVerify() {
+ CHECK(IsPromiseCapability());
VerifyPointer(promise());
VerifyPointer(resolve());
VerifyPointer(reject());
@@ -1199,6 +1222,8 @@ void AsyncGeneratorRequest::AsyncGeneratorRequestVerify() {
next()->ObjectVerify();
}
+void BigInt::BigIntVerify() { CHECK(IsBigInt()); }
+
void JSModuleNamespace::JSModuleNamespaceVerify() {
CHECK(IsJSModuleNamespace());
VerifyPointer(module());
@@ -1266,8 +1291,14 @@ void PrototypeInfo::PrototypeInfoVerify() {
void Tuple2::Tuple2Verify() {
CHECK(IsTuple2());
- VerifyObjectField(kValue1Offset);
- VerifyObjectField(kValue2Offset);
+ Heap* heap = GetHeap();
+ if (this == heap->empty_enum_cache()) {
+ CHECK_EQ(heap->empty_fixed_array(), EnumCache::cast(this)->keys());
+ CHECK_EQ(heap->empty_fixed_array(), EnumCache::cast(this)->indices());
+ } else {
+ VerifyObjectField(kValue1Offset);
+ VerifyObjectField(kValue2Offset);
+ }
}
void Tuple3::Tuple3Verify() {
@@ -1560,7 +1591,7 @@ bool TransitionArray::IsSortedNoDuplicates(int valid_entries) {
attributes = details.attributes();
} else {
// Duplicate entries are not allowed for non-property transitions.
- CHECK_NE(prev_key, key);
+ DCHECK_NE(prev_key, key);
}
int cmp = CompareKeys(prev_key, prev_hash, prev_kind, prev_attributes, key,
@@ -1632,67 +1663,10 @@ void Code::VerifyEmbeddedObjects(VerifyMode mode) {
bool skip_weak_cell = (mode == kNoContextSpecificPointers) ? false : true;
for (RelocIterator it(this, mask); !it.done(); it.next()) {
Object* target = it.rinfo()->target_object();
- CHECK(!CanLeak(target, heap, skip_weak_cell));
- }
-}
-
-
-// Verify that the debugger can redirect old code to the new code.
-void Code::VerifyRecompiledCode(Code* old_code, Code* new_code) {
- if (old_code->kind() != FUNCTION) return;
- if (new_code->kind() != FUNCTION) return;
- Isolate* isolate = old_code->GetIsolate();
- // Do not verify during bootstrapping. We may replace code using %SetCode.
- if (isolate->bootstrapper()->IsActive()) return;
-
- static const int mask = RelocInfo::kCodeTargetMask;
- RelocIterator old_it(old_code, mask);
- RelocIterator new_it(new_code, mask);
- Code* stack_check = isolate->builtins()->builtin(Builtins::kStackCheck);
-
- while (!old_it.done()) {
- RelocInfo* rinfo = old_it.rinfo();
- Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- CHECK(!target->is_handler() && !target->is_inline_cache_stub());
- if (target == stack_check) break;
- old_it.next();
+ DCHECK(!CanLeak(target, heap, skip_weak_cell));
}
-
- while (!new_it.done()) {
- RelocInfo* rinfo = new_it.rinfo();
- Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- CHECK(!target->is_handler() && !target->is_inline_cache_stub());
- if (target == stack_check) break;
- new_it.next();
- }
-
- // Either both are done because there is no stack check.
- // Or we are past the prologue for both.
- CHECK_EQ(new_it.done(), old_it.done());
-
- // After the prologue, each call in the old code has a corresponding call
- // in the new code.
- while (!old_it.done() && !new_it.done()) {
- Code* old_target =
- Code::GetCodeFromTargetAddress(old_it.rinfo()->target_address());
- Code* new_target =
- Code::GetCodeFromTargetAddress(new_it.rinfo()->target_address());
- CHECK_EQ(old_target->kind(), new_target->kind());
- // Check call target for equality unless it's an IC or an interrupt check.
- // In both cases they may be patched to be something else.
- if (!old_target->is_handler() && !old_target->is_inline_cache_stub() &&
- new_target != isolate->builtins()->builtin(Builtins::kInterruptCheck)) {
- CHECK_EQ(old_target, new_target);
- }
- old_it.next();
- new_it.next();
- }
-
- // Both are done at the same time.
- CHECK_EQ(new_it.done(), old_it.done());
}
-
#endif // DEBUG
} // namespace internal
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 82b7eb05a6..f29c4d8c49 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -14,6 +14,7 @@
#include "src/base/atomicops.h"
#include "src/base/bits.h"
+#include "src/base/tsan.h"
#include "src/builtins/builtins.h"
#include "src/contexts-inl.h"
#include "src/conversions-inl.h"
@@ -32,12 +33,14 @@
#include "src/lookup.h"
#include "src/objects.h"
#include "src/objects/arguments-inl.h"
+#include "src/objects/bigint-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/hash-table.h"
#include "src/objects/literal-objects.h"
#include "src/objects/module-inl.h"
#include "src/objects/regexp-match-info.h"
#include "src/objects/scope-info.h"
+#include "src/objects/template-objects.h"
#include "src/property.h"
#include "src/prototype.h"
#include "src/transitions-inl.h"
@@ -96,7 +99,6 @@ TYPE_CHECKER(JSFunction, JS_FUNCTION_TYPE)
TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)
TYPE_CHECKER(JSMap, JS_MAP_TYPE)
TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
-TYPE_CHECKER(JSPromiseCapability, JS_PROMISE_CAPABILITY_TYPE)
TYPE_CHECKER(JSPromise, JS_PROMISE_TYPE)
TYPE_CHECKER(JSRegExp, JS_REGEXP_TYPE)
TYPE_CHECKER(JSSet, JS_SET_TYPE)
@@ -114,6 +116,8 @@ TYPE_CHECKER(PropertyCell, PROPERTY_CELL_TYPE)
TYPE_CHECKER(SmallOrderedHashMap, SMALL_ORDERED_HASH_MAP_TYPE)
TYPE_CHECKER(SmallOrderedHashSet, SMALL_ORDERED_HASH_SET_TYPE)
TYPE_CHECKER(SourcePositionTableWithFrameCache, TUPLE2_TYPE)
+TYPE_CHECKER(TemplateMap, HASH_TABLE_TYPE)
+TYPE_CHECKER(TemplateObjectDescription, TUPLE3_TYPE)
TYPE_CHECKER(TransitionArray, TRANSITION_ARRAY_TYPE)
TYPE_CHECKER(TypeFeedbackInfo, TUPLE3_TYPE)
TYPE_CHECKER(WasmInstanceObject, WASM_INSTANCE_TYPE)
@@ -314,6 +318,10 @@ bool HeapObject::IsJSCollection() const { return IsJSMap() || IsJSSet(); }
bool HeapObject::IsDescriptorArray() const { return IsFixedArray(); }
+bool HeapObject::IsPropertyDescriptorObject() const { return IsFixedArray(); }
+
+bool HeapObject::IsEnumCache() const { return IsTuple2(); }
+
bool HeapObject::IsFrameArray() const { return IsFixedArray(); }
bool HeapObject::IsArrayList() const { return IsFixedArray(); }
@@ -443,8 +451,6 @@ bool HeapObject::IsNormalizedMapCache() const {
bool HeapObject::IsCompilationCacheTable() const { return IsHashTable(); }
-bool HeapObject::IsCodeCacheHashTable() const { return IsHashTable(); }
-
bool HeapObject::IsMapCache() const { return IsHashTable(); }
bool HeapObject::IsObjectHashTable() const { return IsHashTable(); }
@@ -540,6 +546,7 @@ CAST_ACCESSOR(AllocationMemento)
CAST_ACCESSOR(AllocationSite)
CAST_ACCESSOR(ArrayList)
CAST_ACCESSOR(AsyncGeneratorRequest)
+CAST_ACCESSOR(BigInt)
CAST_ACCESSOR(BoilerplateDescription)
CAST_ACCESSOR(ByteArray)
CAST_ACCESSOR(BytecodeArray)
@@ -551,6 +558,7 @@ CAST_ACCESSOR(ContextExtension)
CAST_ACCESSOR(DeoptimizationInputData)
CAST_ACCESSOR(DependentCode)
CAST_ACCESSOR(DescriptorArray)
+CAST_ACCESSOR(EnumCache)
CAST_ACCESSOR(FixedArray)
CAST_ACCESSOR(FixedArrayBase)
CAST_ACCESSOR(FixedDoubleArray)
@@ -579,7 +587,6 @@ CAST_ACCESSOR(JSMapIterator)
CAST_ACCESSOR(JSMessageObject)
CAST_ACCESSOR(JSObject)
CAST_ACCESSOR(JSPromise)
-CAST_ACCESSOR(JSPromiseCapability)
CAST_ACCESSOR(JSProxy)
CAST_ACCESSOR(JSReceiver)
CAST_ACCESSOR(JSRegExp)
@@ -601,6 +608,7 @@ CAST_ACCESSOR(ObjectTemplateInfo)
CAST_ACCESSOR(Oddball)
CAST_ACCESSOR(OrderedHashMap)
CAST_ACCESSOR(OrderedHashSet)
+CAST_ACCESSOR(PromiseCapability)
CAST_ACCESSOR(PromiseReactionJobInfo)
CAST_ACCESSOR(PromiseResolveThenableJobInfo)
CAST_ACCESSOR(PropertyArray)
@@ -619,6 +627,8 @@ CAST_ACCESSOR(StringTable)
CAST_ACCESSOR(Struct)
CAST_ACCESSOR(TemplateInfo)
CAST_ACCESSOR(TemplateList)
+CAST_ACCESSOR(TemplateMap)
+CAST_ACCESSOR(TemplateObjectDescription)
CAST_ACCESSOR(Tuple2)
CAST_ACCESSOR(Tuple3)
CAST_ACCESSOR(TypeFeedbackInfo)
@@ -2024,23 +2034,16 @@ Object** FixedArray::RawFieldOfElementAt(int index) {
return HeapObject::RawField(this, OffsetOfElementAt(index));
}
-bool DescriptorArray::IsEmpty() {
- DCHECK(length() >= kFirstIndex ||
- this == GetHeap()->empty_descriptor_array());
- return length() < kFirstIndex;
-}
-
+ACCESSORS(EnumCache, keys, FixedArray, kKeysOffset)
+ACCESSORS(EnumCache, indices, FixedArray, kIndicesOffset)
int DescriptorArray::number_of_descriptors() {
- DCHECK(length() >= kFirstIndex || IsEmpty());
- int len = length();
- return len == 0 ? 0 : Smi::ToInt(get(kDescriptorLengthIndex));
+ return Smi::ToInt(get(kDescriptorLengthIndex));
}
int DescriptorArray::number_of_descriptors_storage() {
- int len = length();
- return len == 0 ? 0 : (len - kFirstIndex) / kEntrySize;
+ return (length() - kFirstIndex) / kEntrySize;
}
@@ -2050,8 +2053,7 @@ int DescriptorArray::NumberOfSlackDescriptors() {
void DescriptorArray::SetNumberOfDescriptors(int number_of_descriptors) {
- WRITE_FIELD(
- this, kDescriptorLengthOffset, Smi::FromInt(number_of_descriptors));
+ set(kDescriptorLengthIndex, Smi::FromInt(number_of_descriptors));
}
@@ -2059,40 +2061,14 @@ inline int DescriptorArray::number_of_entries() {
return number_of_descriptors();
}
-
-bool DescriptorArray::HasEnumCache() {
- return !IsEmpty() && !get(kEnumCacheBridgeIndex)->IsSmi();
-}
-
-
void DescriptorArray::CopyEnumCacheFrom(DescriptorArray* array) {
- set(kEnumCacheBridgeIndex, array->get(kEnumCacheBridgeIndex));
-}
-
-
-FixedArray* DescriptorArray::GetEnumCache() {
- DCHECK(HasEnumCache());
- FixedArray* bridge = FixedArray::cast(get(kEnumCacheBridgeIndex));
- return FixedArray::cast(bridge->get(kEnumCacheBridgeCacheIndex));
+ set(kEnumCacheIndex, array->get(kEnumCacheIndex));
}
-
-bool DescriptorArray::HasEnumIndicesCache() {
- if (IsEmpty()) return false;
- Object* object = get(kEnumCacheBridgeIndex);
- if (object->IsSmi()) return false;
- FixedArray* bridge = FixedArray::cast(object);
- return !bridge->get(kEnumCacheBridgeIndicesCacheIndex)->IsSmi();
-}
-
-
-FixedArray* DescriptorArray::GetEnumIndicesCache() {
- DCHECK(HasEnumIndicesCache());
- FixedArray* bridge = FixedArray::cast(get(kEnumCacheBridgeIndex));
- return FixedArray::cast(bridge->get(kEnumCacheBridgeIndicesCacheIndex));
+EnumCache* DescriptorArray::GetEnumCache() {
+ return EnumCache::cast(get(kEnumCacheIndex));
}
-
// Perform a binary search in a fixed array.
template <SearchMode search_mode, typename T>
int BinarySearch(T* array, Name* name, int valid_entries,
@@ -2243,7 +2219,6 @@ int Map::EnumLength() const { return EnumLengthBits::decode(bit_field3()); }
void Map::SetEnumLength(int length) {
if (length != kInvalidEnumCacheSentinel) {
DCHECK(length >= 0);
- DCHECK(length == 0 || instance_descriptors()->HasEnumCache());
DCHECK(length <= NumberOfOwnDescriptors());
}
set_bit_field3(EnumLengthBits::update(bit_field3(), length));
@@ -2679,33 +2654,31 @@ SYNCHRONIZED_SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
int PropertyArray::length() const {
Object* value_obj = READ_FIELD(this, kLengthAndHashOffset);
int value = Smi::ToInt(value_obj);
- return value & kLengthMask;
+ return LengthField::decode(value);
}
void PropertyArray::initialize_length(int len) {
SLOW_DCHECK(len >= 0);
- SLOW_DCHECK(len < kMaxLength);
+ SLOW_DCHECK(len < LengthField::kMax);
WRITE_FIELD(this, kLengthAndHashOffset, Smi::FromInt(len));
}
int PropertyArray::synchronized_length() const {
Object* value_obj = ACQUIRE_READ_FIELD(this, kLengthAndHashOffset);
int value = Smi::ToInt(value_obj);
- return value & kLengthMask;
+ return LengthField::decode(value);
}
int PropertyArray::Hash() const {
Object* value_obj = READ_FIELD(this, kLengthAndHashOffset);
int value = Smi::ToInt(value_obj);
- int hash = value & kHashMask;
- return hash;
+ return HashField::decode(value);
}
-void PropertyArray::SetHash(int masked_hash) {
- DCHECK_EQ(masked_hash & JSReceiver::kHashMask, masked_hash);
+void PropertyArray::SetHash(int hash) {
Object* value_obj = READ_FIELD(this, kLengthAndHashOffset);
int value = Smi::ToInt(value_obj);
- value = (value & kLengthMask) | masked_hash;
+ value = HashField::update(value, hash);
WRITE_FIELD(this, kLengthAndHashOffset, Smi::FromInt(value));
}
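
PropertyArray now packs length and hash into one Smi field and manipulates them with BitField-style decode/update instead of raw mask-and-or code. A sketch of that pattern; the widths below are hypothetical, the real LengthField/HashField layout is defined elsewhere in V8:

#include <cstdint>

template <int kShift, int kBits>
struct Field {
  static constexpr uint32_t kMask = ((1u << kBits) - 1) << kShift;
  static uint32_t decode(uint32_t v) { return (v & kMask) >> kShift; }
  static uint32_t update(uint32_t v, uint32_t x) {
    return (v & ~kMask) | ((x << kShift) & kMask);
  }
};
using LengthF = Field<0, 10>;  // hypothetical: bits 0..9 hold the length
using HashF = Field<10, 20>;   // hypothetical: bits 10..29 hold the hash
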
@@ -3027,20 +3000,33 @@ double Float64ArrayTraits::defaultValue() {
return std::numeric_limits<double>::quiet_NaN();
}
-
template <class Traits>
typename Traits::ElementType FixedTypedArray<Traits>::get_scalar(int index) {
DCHECK((index >= 0) && (index < this->length()));
- ElementType* ptr = reinterpret_cast<ElementType*>(DataPtr());
- return ptr[index];
+ // The JavaScript memory model allows for racy reads and writes to a
+ // SharedArrayBuffer's backing store, which will always be a FixedTypedArray.
+ // ThreadSanitizer will catch these racy accesses and warn about them, so we
+ // disable TSAN for these reads and writes using annotations.
+ //
+ // We don't use relaxed atomics here, as it is not a requirement of the
+ // JavaScript memory model to have tear-free reads of overlapping accesses,
+ // and using relaxed atomics may introduce overhead.
+ auto* ptr = reinterpret_cast<ElementType*>(DataPtr());
+ TSAN_ANNOTATE_IGNORE_READS_BEGIN;
+ auto result = ptr[index];
+ TSAN_ANNOTATE_IGNORE_READS_END;
+ return result;
}
template <class Traits>
void FixedTypedArray<Traits>::set(int index, ElementType value) {
CHECK((index >= 0) && (index < this->length()));
- ElementType* ptr = reinterpret_cast<ElementType*>(DataPtr());
+ // See the comment in FixedTypedArray<Traits>::get_scalar.
+ auto* ptr = reinterpret_cast<ElementType*>(DataPtr());
+ TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
ptr[index] = value;
+ TSAN_ANNOTATE_IGNORE_WRITES_END;
}
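
For contrast with the annotation approach above, the relaxed-atomic alternative the comment rejects would look roughly like this: tear-free and TSAN-clean, but potentially slower on some targets.

#include <atomic>
#include <cstdint>

int32_t RelaxedLoad(const std::atomic<int32_t>* p) {
  return p->load(std::memory_order_relaxed);
}
void RelaxedStore(std::atomic<int32_t>* p, int32_t v) {
  p->store(v, std::memory_order_relaxed);
}
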
template <class Traits>
@@ -3281,6 +3267,9 @@ int HeapObject::SizeFromMap(Map* map) const {
return FeedbackVector::SizeFor(
reinterpret_cast<const FeedbackVector*>(this)->length());
}
+ if (instance_type == BIGINT_TYPE) {
+ return BigInt::SizeFor(reinterpret_cast<const BigInt*>(this)->length());
+ }
DCHECK(instance_type == CODE_TYPE);
return reinterpret_cast<const Code*>(this)->CodeSize();
}
@@ -3498,11 +3487,6 @@ bool Map::is_dictionary_map() const {
return DictionaryMap::decode(bit_field3());
}
-Code::Flags Code::flags() const {
- return static_cast<Flags>(READ_INT_FIELD(this, kFlagsOffset));
-}
-
-
void Map::set_owns_descriptors(bool owns_descriptors) {
set_bit_field3(OwnsDescriptors::update(bit_field3(), owns_descriptors));
}
@@ -3695,27 +3679,20 @@ void DependentCode::copy(int from, int to) {
set(kCodesStartIndex + to, get(kCodesStartIndex + from));
}
-
-void Code::set_flags(Code::Flags flags) {
- STATIC_ASSERT(Code::NUMBER_OF_KINDS <= KindField::kMax + 1);
- WRITE_INT_FIELD(this, kFlagsOffset, flags);
+Code::Kind Code::kind() const {
+ return KindField::decode(READ_UINT32_FIELD(this, kFlagsOffset));
}
-Code::Kind Code::kind() const { return ExtractKindFromFlags(flags()); }
-
-bool Code::IsCodeStubOrIC() const {
- switch (kind()) {
- case STUB:
- case HANDLER:
-#define CASE_KIND(kind) case kind:
- IC_KIND_LIST(CASE_KIND)
-#undef CASE_KIND
- return true;
- default:
- return false;
- }
+void Code::initialize_flags(Kind kind) {
+ WRITE_UINT32_FIELD(this, kFlagsOffset, KindField::encode(kind));
}
+void Code::set_kind(Kind kind) {
+ STATIC_ASSERT(Code::NUMBER_OF_KINDS <= KindField::kMax + 1);
+ uint32_t previous = READ_UINT32_FIELD(this, kFlagsOffset);
+ uint32_t updated_value = KindField::update(previous, kind);
+ WRITE_UINT32_FIELD(this, kFlagsOffset, updated_value);
+}
// For initialization.
void Code::set_raw_kind_specific_flags1(int value) {
@@ -3729,17 +3706,23 @@ void Code::set_raw_kind_specific_flags2(int value) {
inline bool Code::is_interpreter_trampoline_builtin() const {
Builtins* builtins = GetIsolate()->builtins();
- return this == builtins->builtin(Builtins::kInterpreterEntryTrampoline) ||
- this ==
- builtins->builtin(Builtins::kInterpreterEnterBytecodeAdvance) ||
- this == builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
+ bool is_interpreter_trampoline =
+ (this == builtins->builtin(Builtins::kInterpreterEntryTrampoline) ||
+ this == builtins->builtin(Builtins::kInterpreterEnterBytecodeAdvance) ||
+ this == builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch));
+ DCHECK_IMPLIES(is_interpreter_trampoline, !Builtins::IsLazy(builtin_index()));
+ return is_interpreter_trampoline;
}
inline bool Code::checks_optimization_marker() const {
Builtins* builtins = GetIsolate()->builtins();
- return this == builtins->builtin(Builtins::kCompileLazy) ||
- this == builtins->builtin(Builtins::kInterpreterEntryTrampoline) ||
- this == builtins->builtin(Builtins::kCheckOptimizationMarker);
+ bool checks_marker =
+ (this == builtins->builtin(Builtins::kCompileLazy) ||
+ this == builtins->builtin(Builtins::kInterpreterEntryTrampoline) ||
+ this == builtins->builtin(Builtins::kCheckOptimizationMarker));
+ DCHECK_IMPLIES(checks_marker, !Builtins::IsLazy(builtin_index()));
+ return checks_marker ||
+ (kind() == OPTIMIZED_FUNCTION && marked_for_deoptimization());
}
inline bool Code::has_unwinding_info() const {
@@ -3834,43 +3817,19 @@ inline HandlerTable::CatchPrediction Code::GetBuiltinCatchPrediction() {
return HandlerTable::UNCAUGHT;
}
-bool Code::has_reloc_info_for_serialization() const {
- DCHECK_EQ(FUNCTION, kind());
- unsigned flags = READ_UINT32_FIELD(this, kFullCodeFlags);
- return FullCodeFlagsHasRelocInfoForSerialization::decode(flags);
-}
-
-
-void Code::set_has_reloc_info_for_serialization(bool value) {
- DCHECK_EQ(FUNCTION, kind());
- unsigned flags = READ_UINT32_FIELD(this, kFullCodeFlags);
- flags = FullCodeFlagsHasRelocInfoForSerialization::update(flags, value);
- WRITE_UINT32_FIELD(this, kFullCodeFlags, flags);
-}
-
-int Code::allow_osr_at_loop_nesting_level() const {
- DCHECK_EQ(FUNCTION, kind());
- int fields = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
- return AllowOSRAtLoopNestingLevelField::decode(fields);
-}
-
-
-void Code::set_allow_osr_at_loop_nesting_level(int level) {
- DCHECK_EQ(FUNCTION, kind());
- DCHECK(level >= 0 && level <= AbstractCode::kMaxLoopNestingMarker);
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
- int updated = AllowOSRAtLoopNestingLevelField::update(previous, level);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
-}
-
int Code::builtin_index() const {
- return READ_INT_FIELD(this, kBuiltinIndexOffset);
+ int index = READ_INT_FIELD(this, kBuiltinIndexOffset);
+ DCHECK(index == -1 || Builtins::IsBuiltinId(index));
+ return index;
}
void Code::set_builtin_index(int index) {
+ DCHECK(index == -1 || Builtins::IsBuiltinId(index));
WRITE_INT_FIELD(this, kBuiltinIndexOffset, index);
}
+bool Code::is_builtin() const { return builtin_index() != -1; }
+
unsigned Code::stack_slots() const {
DCHECK(is_turbofanned());
return StackSlotsField::decode(
@@ -3902,27 +3861,6 @@ void Code::set_safepoint_table_offset(unsigned offset) {
WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
}
-unsigned Code::back_edge_table_offset() const {
- DCHECK_EQ(FUNCTION, kind());
- return BackEdgeTableOffsetField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags2Offset)) << kPointerSizeLog2;
-}
-
-
-void Code::set_back_edge_table_offset(unsigned offset) {
- DCHECK_EQ(FUNCTION, kind());
- DCHECK(IsAligned(offset, static_cast<unsigned>(kPointerSize)));
- offset = offset >> kPointerSizeLog2;
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
- int updated = BackEdgeTableOffsetField::update(previous, offset);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
-}
-
-bool Code::back_edges_patched_for_osr() const {
- DCHECK_EQ(FUNCTION, kind());
- return allow_osr_at_loop_nesting_level() > 0;
-}
-
bool Code::marked_for_deoptimization() const {
DCHECK(kind() == OPTIMIZED_FUNCTION);
@@ -3953,17 +3891,6 @@ void Code::set_deopt_already_counted(bool flag) {
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
-bool Code::is_inline_cache_stub() const {
- Kind kind = this->kind();
- switch (kind) {
-#define CASE(name) case name: return true;
- IC_KIND_LIST(CASE)
-#undef CASE
- default: return false;
- }
-}
-
-bool Code::is_handler() const { return kind() == HANDLER; }
bool Code::is_stub() const { return kind() == STUB; }
bool Code::is_optimized_code() const { return kind() == OPTIMIZED_FUNCTION; }
bool Code::is_wasm_code() const { return kind() == WASM_FUNCTION; }
@@ -3979,28 +3906,6 @@ Address Code::constant_pool() {
return constant_pool;
}
-Code::Flags Code::ComputeFlags(Kind kind, ExtraICState extra_ic_state) {
- // Compute the bit mask.
- unsigned int bits =
- KindField::encode(kind) | ExtraICStateField::encode(extra_ic_state);
- return static_cast<Flags>(bits);
-}
-
-Code::Flags Code::ComputeHandlerFlags(Kind handler_kind) {
- return ComputeFlags(Code::HANDLER, handler_kind);
-}
-
-
-Code::Kind Code::ExtractKindFromFlags(Flags flags) {
- return KindField::decode(flags);
-}
-
-
-ExtraICState Code::ExtractExtraICStateFromFlags(Flags flags) {
- return ExtraICStateField::decode(flags);
-}
-
-
Code* Code::GetCodeFromTargetAddress(Address address) {
HeapObject* code = HeapObject::FromAddress(address - Code::kHeaderSize);
// GetCodeFromTargetAddress might be called when marking objects during mark
@@ -4122,8 +4027,6 @@ bool AbstractCode::contains(byte* inner_pointer) {
AbstractCode::Kind AbstractCode::kind() {
if (IsCode()) {
- STATIC_ASSERT(AbstractCode::FUNCTION ==
- static_cast<AbstractCode::Kind>(Code::FUNCTION));
return static_cast<AbstractCode::Kind>(GetCode()->kind());
} else {
return INTERPRETED_FUNCTION;
@@ -4238,7 +4141,7 @@ void Map::AppendDescriptor(Descriptor* desc) {
// it should never try to (otherwise, layout descriptor must be updated too).
#ifdef DEBUG
PropertyDetails details = desc->GetDetails();
- CHECK(details.location() != kField || !details.representation().IsDouble());
+ DCHECK(details.location() != kField || !details.representation().IsDouble());
#endif
}
@@ -4283,7 +4186,6 @@ void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
set_constructor_or_backpointer(value, mode);
}
-ACCESSORS(Map, code_cache, FixedArray, kCodeCacheOffset)
ACCESSORS(Map, dependent_code, DependentCode, kDependentCodeOffset)
ACCESSORS(Map, weak_cell_cache, Object, kWeakCellCacheOffset)
ACCESSORS(Map, constructor_or_backpointer, Object,
@@ -4332,7 +4234,6 @@ ACCESSORS(JSBoundFunction, bound_arguments, FixedArray, kBoundArgumentsOffset)
ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
ACCESSORS(JSFunction, feedback_vector_cell, Cell, kFeedbackVectorOffset)
-ACCESSORS(JSFunction, next_function_link, Object, kNextFunctionLinkOffset)
ACCESSORS(JSGlobalObject, native_context, Context, kNativeContextOffset)
ACCESSORS(JSGlobalObject, global_proxy, JSObject, kGlobalProxyOffset)
@@ -4434,6 +4335,11 @@ bool ConstantElementsPair::is_empty() const {
return constant_values()->length() == 0;
}
+SMI_ACCESSORS(TemplateObjectDescription, hash, kHashOffset)
+ACCESSORS(TemplateObjectDescription, raw_strings, FixedArray, kRawStringsOffset)
+ACCESSORS(TemplateObjectDescription, cooked_strings, FixedArray,
+ kCookedStringsOffset)
+
ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
@@ -4598,13 +4504,21 @@ FeedbackVector* JSFunction::feedback_vector() const {
return FeedbackVector::cast(feedback_vector_cell()->value());
}
+// Code objects that are marked for deoptimization are not considered to be
+// optimized. This is because the JSFunction might already have been
+// deoptimized, but its code() still needs to be unlinked, which will happen
+// on its next activation.
+// TODO(jupvfranco): rename this function. Maybe RunOptimizedCode,
+// or IsValidOptimizedCode.
bool JSFunction::IsOptimized() {
- return code()->kind() == Code::OPTIMIZED_FUNCTION;
+ return code()->kind() == Code::OPTIMIZED_FUNCTION &&
+ !code()->marked_for_deoptimization();
}
bool JSFunction::HasOptimizedCode() {
return IsOptimized() ||
- (has_feedback_vector() && feedback_vector()->has_optimized_code());
+ (has_feedback_vector() && feedback_vector()->has_optimized_code() &&
+ !feedback_vector()->optimized_code()->marked_for_deoptimization());
}
bool JSFunction::HasOptimizationMarker() {
@@ -4707,22 +4621,6 @@ void JSFunction::SetOptimizationMarker(OptimizationMarker marker) {
feedback_vector()->SetOptimizationMarker(marker);
}
-void JSFunction::ReplaceCode(Code* code) {
- bool was_optimized = this->code()->kind() == Code::OPTIMIZED_FUNCTION;
- bool is_optimized = code->kind() == Code::OPTIMIZED_FUNCTION;
-
- set_code(code);
-
- // Add/remove the function from the list of optimized functions for this
- // context based on the state change.
- if (!was_optimized && is_optimized) {
- context()->native_context()->AddOptimizedFunction(this);
- } else if (was_optimized && !is_optimized) {
- // TODO(titzer): linear in the number of optimized functions; fix!
- context()->native_context()->RemoveOptimizedFunction(this);
- }
-}
-
bool JSFunction::has_feedback_vector() const {
return !feedback_vector_cell()->value()->IsUndefined(GetIsolate());
}
@@ -4907,7 +4805,6 @@ SMI_ACCESSORS(JSMessageObject, end_position, kEndPositionOffset)
SMI_ACCESSORS(JSMessageObject, error_level, kErrorLevelOffset)
INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
-INT_ACCESSORS(Code, prologue_offset, kPrologueOffset)
INT_ACCESSORS(Code, constant_pool_offset, kConstantPoolOffset)
#define CODE_ACCESSORS(name, type, offset) \
ACCESSORS_CHECKED2(Code, name, type, offset, true, \
@@ -4940,19 +4837,6 @@ void Code::clear_padding() {
memset(data_end, 0, CodeSize() - (data_end - address()));
}
-Object* Code::type_feedback_info() const {
- DCHECK(kind() == FUNCTION);
- return raw_type_feedback_info();
-}
-
-
-void Code::set_type_feedback_info(Object* value, WriteBarrierMode mode) {
- DCHECK(kind() == FUNCTION);
- set_raw_type_feedback_info(value, mode);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kTypeFeedbackInfoOffset,
- value, mode);
-}
-
ByteArray* Code::SourcePositionTable() const {
Object* maybe_table = source_position_table();
if (maybe_table->IsByteArray()) return ByteArray::cast(maybe_table);
@@ -4962,14 +4846,14 @@ ByteArray* Code::SourcePositionTable() const {
}
uint32_t Code::stub_key() const {
- DCHECK(IsCodeStubOrIC());
+ DCHECK(is_stub());
Smi* smi_key = Smi::cast(raw_type_feedback_info());
return static_cast<uint32_t>(smi_key->value());
}
void Code::set_stub_key(uint32_t key) {
- DCHECK(IsCodeStubOrIC());
+ DCHECK(is_stub());
set_raw_type_feedback_info(Smi::FromInt(key));
}
@@ -5022,9 +4906,6 @@ int Code::SizeIncludingMetadata() const {
size += relocation_info()->Size();
size += deoptimization_data()->Size();
size += handler_table()->Size();
- if (kind() == FUNCTION) {
- size += SourcePositionTable()->Size();
- }
return size;
}
@@ -5246,9 +5127,9 @@ MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
ACCESSORS(JSTypedArray, raw_length, Object, kLengthOffset)
#endif
-ACCESSORS(JSPromiseCapability, promise, Object, kPromiseOffset)
-ACCESSORS(JSPromiseCapability, resolve, Object, kResolveOffset)
-ACCESSORS(JSPromiseCapability, reject, Object, kRejectOffset)
+ACCESSORS(PromiseCapability, promise, Object, kPromiseOffset)
+ACCESSORS(PromiseCapability, resolve, Object, kResolveOffset)
+ACCESSORS(PromiseCapability, reject, Object, kRejectOffset)
ACCESSORS(JSPromise, result, Object, kResultOffset)
ACCESSORS(JSPromise, deferred_promise, Object, kDeferredPromiseOffset)
@@ -5840,6 +5721,10 @@ void GlobalDictionary::SetEntry(int entry, Object* key, Object* value,
DetailsAtPut(entry, details);
}
+void GlobalDictionary::ValueAtPut(int entry, Object* value) {
+ set(EntryToIndex(entry), value);
+}
+
bool NumberDictionaryShape::IsMatch(uint32_t key, Object* other) {
DCHECK(other->IsNumber());
return key == static_cast<uint32_t>(other->Number());
@@ -5981,15 +5866,6 @@ Handle<Object> WeakHashTableShape<entrysize>::AsHandle(Isolate* isolate,
}
-void Map::ClearCodeCache(Heap* heap) {
- // No write barrier is needed since empty_fixed_array is not in new space.
- // Please note this function is used during marking:
- // - MarkCompactCollector::MarkUnmarkedObject
- // - IncrementalMarking::Step
- WRITE_FIELD(this, kCodeCacheOffset, heap->empty_fixed_array());
-}
-
-
int Map::SlackForArraySize(int old_size, int size_limit) {
const int max_slack = size_limit - old_size;
CHECK_LE(0, max_slack);
@@ -6210,6 +6086,24 @@ ACCESSORS(JSAsyncFromSyncIterator, sync_iterator, JSReceiver,
ACCESSORS(JSStringIterator, string, String, kStringOffset)
SMI_ACCESSORS(JSStringIterator, index, kNextIndexOffset)
+bool ScopeInfo::IsAsmModule() { return AsmModuleField::decode(Flags()); }
+
+bool ScopeInfo::HasSimpleParameters() {
+ return HasSimpleParametersField::decode(Flags());
+}
+
+#define FIELD_ACCESSORS(name) \
+ void ScopeInfo::Set##name(int value) { set(k##name, Smi::FromInt(value)); } \
+ int ScopeInfo::name() { \
+ if (length() > 0) { \
+ return Smi::ToInt(get(k##name)); \
+ } else { \
+ return 0; \
+ } \
+ }
+FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(FIELD_ACCESSORS)
+#undef FIELD_ACCESSORS
+
} // namespace internal
} // namespace v8
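The FIELD_ACCESSORS macro above stamps out one setter/getter pair per entry of FOR_EACH_SCOPE_INFO_NUMERIC_FIELD. Expanded for a hypothetical field Foo (not a real entry of that list), a single instantiation is equivalent to:

    void ScopeInfo::SetFoo(int value) { set(kFoo, Smi::FromInt(value)); }
    int ScopeInfo::Foo() {
      if (length() > 0) {
        return Smi::ToInt(get(kFoo));
      } else {
        return 0;
      }
    }

The length() > 0 guard lets the getters run safely against an empty ScopeInfo, which has no field slots and reports 0 for every numeric field.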
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 190117b020..33928a5aa7 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -77,6 +77,10 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
HeapNumber::cast(this)->HeapNumberPrint(os);
os << ">\n";
break;
+ case BIGINT_TYPE:
+ BigInt::cast(this)->BigIntPrint(os);
+ os << "\n";
+ break;
case FIXED_DOUBLE_ARRAY_TYPE:
FixedDoubleArray::cast(this)->FixedDoubleArrayPrint(os);
break;
@@ -128,7 +132,6 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_ARGUMENTS_TYPE:
case JS_ERROR_TYPE:
- case JS_PROMISE_CAPABILITY_TYPE:
case WASM_INSTANCE_TYPE: // TODO(titzer): debug printing for wasm objects
case WASM_MEMORY_TYPE:
case WASM_MODULE_TYPE:
@@ -353,7 +356,8 @@ void DoPrintElements(std::ostream& os, Object* object) { // NOLINT
}
}
-void PrintFixedArrayElements(std::ostream& os, FixedArray* array) {
+template <typename T>
+void PrintFixedArrayElements(std::ostream& os, T* array) {
// Print in array notation for non-sparse arrays.
Object* previous_value = array->length() > 0 ? array->get(0) : nullptr;
Object* value = nullptr;
@@ -391,14 +395,14 @@ void PrintSloppyArgumentElements(std::ostream& os, ElementsKind kind,
SloppyArgumentsElements* elements) {
Isolate* isolate = elements->GetIsolate();
FixedArray* arguments_store = elements->arguments();
- os << "\n 0: context= " << Brief(elements->context())
- << "\n 1: arguments_store= " << Brief(arguments_store)
+ os << "\n 0: context = " << Brief(elements->context())
+ << "\n 1: arguments_store = " << Brief(arguments_store)
<< "\n parameter to context slot map:";
for (uint32_t i = 0; i < elements->parameter_map_length(); i++) {
uint32_t raw_index = i + SloppyArgumentsElements::kParameterMapStart;
Object* mapped_entry = elements->get_mapped_entry(i);
os << "\n " << raw_index << ": param(" << i
- << ")= " << Brief(mapped_entry);
+ << ") = " << Brief(mapped_entry);
if (mapped_entry->IsTheHole(isolate)) {
os << " in the arguments_store[" << i << "]";
} else {
@@ -415,7 +419,6 @@ void PrintSloppyArgumentElements(std::ostream& os, ElementsKind kind,
DCHECK_EQ(kind, SLOW_SLOPPY_ARGUMENTS_ELEMENTS);
PrintDictionaryElements(os, arguments_store);
}
- os << "\n }";
}
} // namespace
@@ -423,7 +426,7 @@ void PrintSloppyArgumentElements(std::ostream& os, ElementsKind kind,
void JSObject::PrintElements(std::ostream& os) { // NOLINT
// Don't call GetElementsKind, its validation code can cause the printer to
// fail when debugging.
- os << " - elements= " << Brief(elements()) << " {";
+ os << " - elements = " << Brief(elements()) << " {";
if (elements()->length() == 0) {
os << " }\n";
return;
@@ -492,7 +495,12 @@ static void JSObjectPrintHeader(std::ostream& os, JSObject* obj,
static void JSObjectPrintBody(std::ostream& os, JSObject* obj, // NOLINT
bool print_elements = true) {
- os << "\n - properties = " << Brief(obj->raw_properties_or_hash()) << " {";
+ os << "\n - properties = ";
+ Object* properties_or_hash = obj->raw_properties_or_hash();
+ if (!properties_or_hash->IsSmi()) {
+ os << Brief(properties_or_hash);
+ }
+ os << " {";
if (obj->PrintProperties(os)) os << "\n ";
os << "}\n";
if (print_elements && obj->elements()->length() > 0) {
@@ -606,7 +614,6 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
}
os << "\n - prototype: " << Brief(prototype());
os << "\n - constructor: " << Brief(GetConstructor());
- os << "\n - code cache: " << Brief(code_cache());
os << "\n - dependent code: " << Brief(dependent_code());
os << "\n - construction counter: " << construction_counter();
os << "\n";
@@ -628,16 +635,12 @@ void FixedArray::FixedArrayPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-// TODO(gsathya): Templatize PrintFixedArrayElements to print this as
-// well.
void PropertyArray::PropertyArrayPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "PropertyArray");
os << "\n - map = " << Brief(map());
os << "\n - length: " << length();
- for (int i = 0; i < length(); i++) {
- os << "\n" << i << " : " << std::setw(8) << Brief(get(i));
- }
-
+ os << "\n - hash: " << Hash();
+ PrintFixedArrayElements(os, this);
os << "\n";
}
@@ -788,6 +791,11 @@ void FeedbackVector::FeedbackVectorPrint(std::ostream& os) { // NOLINT
os << Code::ICState2String(nexus.StateFromFeedback());
break;
}
+ case FeedbackSlotKind::kForIn: {
+ ForInICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
+ }
case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
StoreDataPropertyInLiteralICNexus nexus(this, slot);
os << Code::ICState2String(nexus.StateFromFeedback());
@@ -795,7 +803,6 @@ void FeedbackVector::FeedbackVectorPrint(std::ostream& os) { // NOLINT
}
case FeedbackSlotKind::kCreateClosure:
case FeedbackSlotKind::kLiteral:
- case FeedbackSlotKind::kGeneral:
case FeedbackSlotKind::kTypeProfile:
break;
case FeedbackSlotKind::kInvalid:
@@ -1262,6 +1269,13 @@ void AccessorInfo::AccessorInfoPrint(std::ostream& os) { // NOLINT
os << "\n";
}
+void PromiseCapability::PromiseCapabilityPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "PromiseCapability");
+ os << "\n - promise: " << Brief(promise());
+ os << "\n - resolve: " << Brief(resolve());
+ os << "\n - reject: " << Brief(reject());
+ os << "\n";
+}
void PromiseResolveThenableJobInfo::PromiseResolveThenableJobInfoPrint(
std::ostream& os) { // NOLINT
@@ -1729,11 +1743,8 @@ void TransitionsAccessor::PrintTransitions(std::ostream& os) { // NOLINT
case kWeakCell:
cell = GetTargetCell<kWeakCell>();
break;
- case kTuple3Handler:
- cell = GetTargetCell<kTuple3Handler>();
- break;
- case kFixedArrayHandler:
- cell = GetTargetCell<kFixedArrayHandler>();
+ case kHandler:
+ cell = GetTargetCell<kHandler>();
break;
case kFullTransitionArray:
return transitions()->Print(os);
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 28c1cd681f..b61d1eca32 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -8,6 +8,7 @@
#include <iomanip>
#include <memory>
#include <sstream>
+#include <vector>
#include "src/objects-inl.h"
@@ -47,14 +48,12 @@
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/keys.h"
-#include "src/list.h"
#include "src/log.h"
#include "src/lookup.h"
#include "src/macro-assembler.h"
#include "src/map-updater.h"
#include "src/messages.h"
#include "src/objects-body-descriptors-inl.h"
-#include "src/objects/code-cache-inl.h"
#include "src/objects/compilation-cache-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/frame-array-inl.h"
@@ -70,8 +69,8 @@
#include "src/string-builder.h"
#include "src/string-search.h"
#include "src/string-stream.h"
+#include "src/unicode-cache-inl.h"
#include "src/utils-inl.h"
-#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects.h"
#include "src/zone/zone.h"
@@ -255,6 +254,9 @@ MaybeHandle<String> Object::ConvertToString(Isolate* isolate,
THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kSymbolToString),
String);
}
+ if (input->IsBigInt()) {
+ return BigInt::ToString(Handle<BigInt>::cast(input));
+ }
ASSIGN_RETURN_ON_EXCEPTION(
isolate, input, JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(input),
ToPrimitiveHint::kString),
@@ -872,26 +874,40 @@ MaybeHandle<Object> Object::GetMethod(Handle<JSReceiver> receiver,
}
namespace {
+
MaybeHandle<FixedArray> CreateListFromArrayLikeFastPath(
Isolate* isolate, Handle<Object> object, ElementTypes element_types) {
- if (element_types != ElementTypes::kAll || !object->IsJSArray()) {
- return MaybeHandle<FixedArray>();
- }
- Handle<JSArray> array = Handle<JSArray>::cast(object);
- uint32_t length;
- if (!array->HasArrayPrototype(isolate) ||
- !array->length()->ToUint32(&length) || !array->HasFastElements() ||
- !JSObject::PrototypeHasNoElements(isolate, *array)) {
- return MaybeHandle<FixedArray>();
+ if (element_types == ElementTypes::kAll) {
+ if (object->IsJSArray()) {
+ Handle<JSArray> array = Handle<JSArray>::cast(object);
+ uint32_t length;
+ if (!array->HasArrayPrototype(isolate) ||
+ !array->length()->ToUint32(&length) || !array->HasFastElements() ||
+ !JSObject::PrototypeHasNoElements(isolate, *array)) {
+ return MaybeHandle<FixedArray>();
+ }
+ return array->GetElementsAccessor()->CreateListFromArrayLike(
+ isolate, array, length);
+ } else if (object->IsJSTypedArray()) {
+ Handle<JSTypedArray> array = Handle<JSTypedArray>::cast(object);
+ uint32_t length = array->length_value();
+ if (array->WasNeutered() ||
+ length > static_cast<uint32_t>(FixedArray::kMaxLength)) {
+ return MaybeHandle<FixedArray>();
+ }
+ return array->GetElementsAccessor()->CreateListFromArrayLike(
+ isolate, array, length);
+ }
}
- return array->GetElementsAccessor()->CreateListFromArray(isolate, array);
+ return MaybeHandle<FixedArray>();
}
+
} // namespace
// static
MaybeHandle<FixedArray> Object::CreateListFromArrayLike(
Isolate* isolate, Handle<Object> object, ElementTypes element_types) {
- // Fast-path for JS_ARRAY_TYPE.
+ // Fast-path for JSArray and JSTypedArray.
MaybeHandle<FixedArray> fast_result =
CreateListFromArrayLikeFastPath(isolate, object, element_types);
if (!fast_result.is_null()) return fast_result;
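Object::CreateListFromArrayLike implements the spec operation of the same name (used by, e.g., Function.prototype.apply and Reflect.apply), so the rewritten fast path above now covers typed-array argument lists as well as plain JSArrays. A condensed sketch of the added typed-array guard, using the same accessors as the hunk above:

    bool TypedArrayFastPathOk(JSTypedArray* array) {
      uint32_t length = array->length_value();
      return !array->WasNeutered() &&
             length <= static_cast<uint32_t>(FixedArray::kMaxLength);
    }

Neutered (detached) buffers and lengths beyond FixedArray::kMaxLength still take the generic slow path below.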
@@ -1106,7 +1122,7 @@ MaybeHandle<Object> JSProxy::GetProperty(Isolate* isolate,
Execution::Call(isolate, trap, handler, arraysize(args), args), Object);
MaybeHandle<Object> result =
- JSProxy::CheckGetTrapResult(isolate, name, target, trap_result);
+ JSProxy::CheckGetSetTrapResult(isolate, name, target, trap_result, kGet);
if (result.is_null()) {
return result;
}
@@ -1116,10 +1132,11 @@ MaybeHandle<Object> JSProxy::GetProperty(Isolate* isolate,
}
// static
-MaybeHandle<Object> JSProxy::CheckGetTrapResult(Isolate* isolate,
- Handle<Name> name,
- Handle<JSReceiver> target,
- Handle<Object> trap_result) {
+MaybeHandle<Object> JSProxy::CheckGetSetTrapResult(Isolate* isolate,
+ Handle<Name> name,
+ Handle<JSReceiver> target,
+ Handle<Object> trap_result,
+ AccessKind access_kind) {
// 9. Let targetDesc be ? target.[[GetOwnProperty]](P).
PropertyDescriptor target_desc;
Maybe<bool> target_found =
@@ -1136,24 +1153,43 @@ MaybeHandle<Object> JSProxy::CheckGetTrapResult(Isolate* isolate,
!target_desc.writable() &&
!trap_result->SameValue(*target_desc.value());
if (inconsistent) {
- THROW_NEW_ERROR(
- isolate, NewTypeError(MessageTemplate::kProxyGetNonConfigurableData,
- name, target_desc.value(), trap_result),
- Object);
+ if (access_kind == kGet) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kProxyGetNonConfigurableData, name,
+ target_desc.value(), trap_result),
+ Object);
+ } else {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxySetFrozenData, name));
+ return MaybeHandle<Object>();
+ }
}
// 10.b. If IsAccessorDescriptor(targetDesc) and targetDesc.[[Configurable]]
// is false and targetDesc.[[Get]] is undefined, then
// 10.b.i. If trapResult is not undefined, throw a TypeError exception.
- inconsistent = PropertyDescriptor::IsAccessorDescriptor(&target_desc) &&
- !target_desc.configurable() &&
- target_desc.get()->IsUndefined(isolate) &&
- !trap_result->IsUndefined(isolate);
+ if (access_kind == kGet) {
+ inconsistent = PropertyDescriptor::IsAccessorDescriptor(&target_desc) &&
+ !target_desc.configurable() &&
+ target_desc.get()->IsUndefined(isolate) &&
+ !trap_result->IsUndefined(isolate);
+ } else {
+ inconsistent = PropertyDescriptor::IsAccessorDescriptor(&target_desc) &&
+ !target_desc.configurable() &&
+ target_desc.set()->IsUndefined(isolate);
+ }
if (inconsistent) {
- THROW_NEW_ERROR(
- isolate,
- NewTypeError(MessageTemplate::kProxyGetNonConfigurableAccessor, name,
- trap_result),
- Object);
+ if (access_kind == kGet) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kProxyGetNonConfigurableAccessor,
+ name, trap_result),
+ Object);
+ } else {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxySetFrozenAccessor, name));
+ return MaybeHandle<Object>();
+ }
}
}
return isolate->factory()->undefined_value();
@@ -1374,8 +1410,6 @@ int JSObject::GetHeaderSize(InstanceType type) {
return JSWeakMap::kSize;
case JS_WEAK_SET_TYPE:
return JSWeakSet::kSize;
- case JS_PROMISE_CAPABILITY_TYPE:
- return JSPromiseCapability::kSize;
case JS_PROMISE_TYPE:
return JSPromise::kSize;
case JS_REGEXP_TYPE:
@@ -2274,8 +2308,6 @@ Map* Object::GetPrototypeChainRootMap(Isolate* isolate) const {
return native_context->number_function()->initial_map();
}
- // The object is either a number, a string, a symbol, a boolean, a real JS
- // object, or a Harmony proxy.
const HeapObject* heap_object = HeapObject::cast(this);
return heap_object->map()->GetPrototypeChainRootMap(isolate);
}
@@ -2297,12 +2329,11 @@ Map* Map::GetPrototypeChainRootMap(Isolate* isolate) const {
namespace {
-// Returns a non-SMI for JSObjects, but returns the hash code for simple
+// Returns a non-SMI for JSReceivers, but returns the hash code for simple
// objects. This avoids a double lookup in the cases where we know we will
-// add the hash to the JSObject if it does not already exist.
+// add the hash to the JSReceiver if it does not already exist.
Object* GetSimpleHash(Object* object) {
- // The object is either a Smi, a HeapNumber, a name, an odd-ball, a real JS
- // object, or a Harmony proxy.
+ DisallowHeapAllocation no_gc;
if (object->IsSmi()) {
uint32_t hash = ComputeIntegerHash(Smi::ToInt(object));
return Smi::FromInt(hash & Smi::kMaxValue);
@@ -2325,18 +2356,21 @@ Object* GetSimpleHash(Object* object) {
uint32_t hash = Oddball::cast(object)->to_string()->Hash();
return Smi::FromInt(hash);
}
+ if (object->IsBigInt()) {
+ uint32_t hash = BigInt::cast(object)->Hash();
+ return Smi::FromInt(hash & Smi::kMaxValue);
+ }
DCHECK(object->IsJSReceiver());
- // Simply return the receiver as it is guaranteed to not be a SMI.
return object;
}
} // namespace
Object* Object::GetHash() {
+ DisallowHeapAllocation no_gc;
Object* hash = GetSimpleHash(this);
if (hash->IsSmi()) return hash;
- DisallowHeapAllocation no_gc;
DCHECK(IsJSReceiver());
JSReceiver* receiver = JSReceiver::cast(this);
Isolate* isolate = receiver->GetIsolate();
@@ -2345,10 +2379,12 @@ Object* Object::GetHash() {
// static
Smi* Object::GetOrCreateHash(Isolate* isolate, Object* key) {
+ DisallowHeapAllocation no_gc;
return key->GetOrCreateHash(isolate);
}
Smi* Object::GetOrCreateHash(Isolate* isolate) {
+ DisallowHeapAllocation no_gc;
Object* hash = GetSimpleHash(this);
if (hash->IsSmi()) return Smi::cast(hash);
@@ -2375,6 +2411,9 @@ bool Object::SameValue(Object* other) {
if (IsString() && other->IsString()) {
return String::cast(this)->Equals(String::cast(other));
}
+ if (IsBigInt() && other->IsBigInt()) {
+ return BigInt::Equal(BigInt::cast(this), BigInt::cast(other));
+ }
return false;
}
@@ -3110,7 +3149,6 @@ VisitorId Map::GetVisitorId(Map* map) {
ARRAY_ITERATOR_TYPE_LIST(ARRAY_ITERATOR_CASE)
#undef ARRAY_ITERATOR_CASE
- case JS_PROMISE_CAPABILITY_TYPE:
case JS_PROMISE_TYPE:
case WASM_INSTANCE_TYPE:
case WASM_MEMORY_TYPE:
@@ -3131,6 +3169,9 @@ VisitorId Map::GetVisitorId(Map* map) {
case MUTABLE_HEAP_NUMBER_TYPE:
return kVisitDataObject;
+ case BIGINT_TYPE:
+ return kVisitBigInt;
+
case FIXED_UINT8_ARRAY_TYPE:
case FIXED_INT8_ARRAY_TYPE:
case FIXED_UINT16_ARRAY_TYPE:
@@ -3383,6 +3424,12 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
os << '>';
break;
}
+ case BIGINT_TYPE: {
+ os << "<BigInt ";
+ BigInt::cast(this)->BigIntShortPrint(os);
+ os << ">";
+ break;
+ }
case JS_PROXY_TYPE:
os << "<JSProxy>";
break;
@@ -3617,18 +3664,12 @@ MaybeHandle<Map> Map::CopyWithField(Handle<Map> map, Handle<Name> name,
int index = map->NextFreePropertyIndex();
if (map->instance_type() == JS_CONTEXT_EXTENSION_OBJECT_TYPE) {
+ constness = kMutable;
representation = Representation::Tagged();
type = FieldType::Any(isolate);
- } else if (IsTransitionableFastElementsKind(map->elements_kind()) &&
- IsInplaceGeneralizableField(constness, representation, *type)) {
- // We don't support propagation of field generalization through elements
- // kind transitions because they are inserted into the transition tree
- // before field transitions. In order to avoid complexity of handling
- // such a case we ensure that all maps with transitionable elements kinds
- // do not have fields that can be generalized in-place (without creation
- // of a new map).
- DCHECK(representation.IsHeapObject());
- type = FieldType::Any(isolate);
+ } else {
+ Map::GeneralizeIfCanHaveTransitionableFastElementsKind(
+ isolate, map->instance_type(), &constness, &representation, &type);
}
Handle<Object> wrapped_type(WrapFieldType(type));
@@ -4004,9 +4045,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
}
}
- if (external > 0) {
- object->SetProperties(*array);
- }
+ object->SetProperties(*array);
// Create filler object past the new instance size.
int new_instance_size = new_map->instance_size();
@@ -4210,6 +4249,10 @@ int Map::NumberOfFields() const {
return result;
}
+bool Map::HasOutOfObjectProperties() const {
+ return GetInObjectProperties() < NumberOfFields();
+}
+
void DescriptorArray::GeneralizeAllFields() {
int length = number_of_descriptors();
for (int i = 0; i < length; i++) {
@@ -5045,9 +5088,7 @@ void Map::EnsureDescriptorSlack(Handle<Map> map, int slack) {
// on a cache always being available once it is set. If the map has more
// enumerated descriptors than available in the original cache, the cache
// will be lazily replaced by the extended cache when needed.
- if (descriptors->HasEnumCache()) {
- new_descriptors->CopyEnumCacheFrom(*descriptors);
- }
+ new_descriptors->CopyEnumCacheFrom(*descriptors);
Isolate* isolate = map->GetIsolate();
// Replace descriptors by new_descriptors in all maps that share it. The old
@@ -5532,29 +5573,11 @@ Maybe<bool> JSProxy::SetProperty(Handle<JSProxy> proxy, Handle<Name> name,
trap_name, name));
}
- // Enforce the invariant.
- PropertyDescriptor target_desc;
- Maybe<bool> owned =
- JSReceiver::GetOwnPropertyDescriptor(isolate, target, name, &target_desc);
- MAYBE_RETURN(owned, Nothing<bool>());
- if (owned.FromJust()) {
- bool inconsistent = PropertyDescriptor::IsDataDescriptor(&target_desc) &&
- !target_desc.configurable() &&
- !target_desc.writable() &&
- !value->SameValue(*target_desc.value());
- if (inconsistent) {
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kProxySetFrozenData, name));
- return Nothing<bool>();
- }
- inconsistent = PropertyDescriptor::IsAccessorDescriptor(&target_desc) &&
- !target_desc.configurable() &&
- target_desc.set()->IsUndefined(isolate);
- if (inconsistent) {
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kProxySetFrozenAccessor, name));
- return Nothing<bool>();
- }
+ MaybeHandle<Object> result =
+ JSProxy::CheckGetSetTrapResult(isolate, name, target, value, kSet);
+
+ if (result.is_null()) {
+ return Nothing<bool>();
}
return Just(true);
}
@@ -5662,13 +5685,53 @@ MaybeHandle<Context> JSBoundFunction::GetFunctionRealm(
MaybeHandle<String> JSBoundFunction::GetName(Isolate* isolate,
Handle<JSBoundFunction> function) {
Handle<String> prefix = isolate->factory()->bound__string();
- if (!function->bound_target_function()->IsJSFunction()) return prefix;
+ Handle<String> target_name = prefix;
+ Factory* factory = isolate->factory();
+  // Concatenate the "bound " prefix up to the last non-bound target.
+ while (function->bound_target_function()->IsJSBoundFunction()) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, target_name,
+ factory->NewConsString(prefix, target_name),
+ String);
+ function = handle(JSBoundFunction::cast(function->bound_target_function()),
+ isolate);
+ }
+ if (function->bound_target_function()->IsJSFunction()) {
+ Handle<JSFunction> target(
+ JSFunction::cast(function->bound_target_function()), isolate);
+ Handle<Object> name = JSFunction::GetName(isolate, target);
+ if (!name->IsString()) return target_name;
+ return factory->NewConsString(target_name, Handle<String>::cast(name));
+ }
+ // This will omit the proper target name for bound JSProxies.
+ return target_name;
+}
+
+// static
+Maybe<int> JSBoundFunction::GetLength(Isolate* isolate,
+ Handle<JSBoundFunction> function) {
+ int nof_bound_arguments = function->bound_arguments()->length();
+ while (function->bound_target_function()->IsJSBoundFunction()) {
+ function = handle(JSBoundFunction::cast(function->bound_target_function()),
+ isolate);
+    // Make sure we never overflow {nof_bound_arguments}; the number of
+    // arguments of a function is strictly limited by the max length of a
+    // JSArray, so Smi::kMaxValue is a reasonably good overestimate.
+ int length = function->bound_arguments()->length();
+ if (V8_LIKELY(Smi::kMaxValue - nof_bound_arguments > length)) {
+ nof_bound_arguments += length;
+ } else {
+ nof_bound_arguments = Smi::kMaxValue;
+ }
+ }
+  // All non-JSFunction targets get a direct property and don't use this
+ // accessor.
Handle<JSFunction> target(JSFunction::cast(function->bound_target_function()),
isolate);
- Handle<Object> target_name = JSFunction::GetName(isolate, target);
- if (!target_name->IsString()) return prefix;
- Factory* factory = isolate->factory();
- return factory->NewConsString(prefix, Handle<String>::cast(target_name));
+ Maybe<int> target_length = JSFunction::GetLength(isolate, target);
+ if (target_length.IsNothing()) return target_length;
+
+ int length = Max(0, target_length.FromJust() - nof_bound_arguments);
+ return Just(length);
}
// static
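The loop in GetLength above accumulates bound-argument counts with an explicit clamp so the running total cannot overflow. The clamp is ordinary saturating addition; a standalone sketch with a hypothetical helper name, where max == Smi::kMaxValue reproduces the V8_LIKELY branch:

    // Add `extra` to `total`, clamping at `max` instead of overflowing.
    // Assumes 0 <= total <= max and 0 <= extra.
    int SaturatingAdd(int total, int extra, int max) {
      return (max - total > extra) ? total + extra : max;
    }

The final result then follows the usual bound-function rule: the target's length minus all accumulated bound arguments, floored at zero.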
@@ -5681,8 +5744,8 @@ Handle<Object> JSFunction::GetName(Isolate* isolate,
}
// static
-MaybeHandle<Smi> JSFunction::GetLength(Isolate* isolate,
- Handle<JSFunction> function) {
+Maybe<int> JSFunction::GetLength(Isolate* isolate,
+ Handle<JSFunction> function) {
int length = 0;
if (function->shared()->is_compiled()) {
length = function->shared()->GetLength();
@@ -5692,10 +5755,10 @@ MaybeHandle<Smi> JSFunction::GetLength(Isolate* isolate,
if (Compiler::Compile(function, Compiler::KEEP_EXCEPTION)) {
length = function->shared()->GetLength();
}
- if (isolate->has_pending_exception()) return MaybeHandle<Smi>();
+ if (isolate->has_pending_exception()) return Nothing<int>();
}
DCHECK_GE(length, 0);
- return handle(Smi::FromInt(length), isolate);
+ return Just(length);
}
// static
@@ -5759,7 +5822,51 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
}
map = Map::ReconfigureElementsKind(map, to_kind);
}
- JSObject::MigrateToMap(object, map);
+ int number_of_fields = map->NumberOfFields();
+ int inobject = map->GetInObjectProperties();
+ int unused = map->unused_property_fields();
+ int total_size = number_of_fields + unused;
+ int external = total_size - inobject;
+  // Allocate mutable double boxes if necessary. They are always needed if we
+  // have external properties, and also needed if we only have inobject
+  // properties but don't unbox double fields.
+ if (!FLAG_unbox_double_fields || external > 0) {
+ Isolate* isolate = object->GetIsolate();
+
+ Handle<DescriptorArray> descriptors(map->instance_descriptors());
+ Handle<FixedArray> storage;
+ if (!FLAG_unbox_double_fields) {
+ storage = isolate->factory()->NewFixedArray(inobject);
+ }
+
+ Handle<PropertyArray> array =
+ isolate->factory()->NewPropertyArray(external);
+
+ for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ Representation representation = details.representation();
+ if (!representation.IsDouble()) continue;
+ FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+ if (map->IsUnboxedDoubleField(index)) continue;
+ Handle<HeapNumber> box = isolate->factory()->NewMutableHeapNumber();
+ if (index.is_inobject()) {
+ storage->set(index.property_index(), *box);
+ } else {
+ array->set(index.outobject_array_index(), *box);
+ }
+ }
+
+ object->SetProperties(*array);
+
+ if (!FLAG_unbox_double_fields) {
+ for (int i = 0; i < inobject; i++) {
+ FieldIndex index = FieldIndex::ForPropertyIndex(*map, i);
+ Object* value = storage->get(i);
+ object->RawFastPropertyAtPut(index, value);
+ }
+ }
+ }
+ object->synchronized_set_map(*map);
}
@@ -6017,14 +6124,6 @@ void NormalizedMapCache::Clear() {
}
-void HeapObject::UpdateMapCodeCache(Handle<HeapObject> object,
- Handle<Name> name,
- Handle<Code> code) {
- Handle<Map> map(object->map());
- Map::UpdateCodeCache(map, name, code);
-}
-
-
void JSObject::NormalizeProperties(Handle<JSObject> object,
PropertyNormalizationMode mode,
int expected_additional_properties,
@@ -6124,6 +6223,9 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
Handle<PropertyArray> fields =
factory->NewPropertyArray(number_of_allocated_fields);
+ bool is_transitionable_elements_kind =
+ IsTransitionableFastElementsKind(old_map->elements_kind());
+
// Fill in the instance descriptor and the fields.
int current_offset = 0;
for (int i = 0; i < instance_descriptor_length; i++) {
@@ -6151,8 +6253,14 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
d = Descriptor::DataConstant(key, handle(value, isolate),
details.attributes());
} else {
+        // Only make the field constant when the elements kind is not
+        // transitionable.
+ PropertyConstness constness =
+ FLAG_track_constant_fields && !is_transitionable_elements_kind
+ ? kConst
+ : kMutable;
d = Descriptor::DataField(
- key, current_offset, details.attributes(), kDefaultFieldConstness,
+ key, current_offset, details.attributes(), constness,
// TODO(verwaest): value->OptimalRepresentation();
Representation::Tagged(), FieldType::Any(isolate));
}
@@ -6266,26 +6374,30 @@ Handle<SeededNumberDictionary> JSObject::NormalizeElements(
namespace {
-Object* SetHashAndUpdateProperties(HeapObject* properties, int masked_hash) {
- DCHECK_NE(PropertyArray::kNoHashSentinel, masked_hash);
- DCHECK_EQ(masked_hash & JSReceiver::kHashMask, masked_hash);
+Object* SetHashAndUpdateProperties(HeapObject* properties, int hash) {
+ DCHECK_NE(PropertyArray::kNoHashSentinel, hash);
+ DCHECK(PropertyArray::HashField::is_valid(hash));
- if (properties == properties->GetHeap()->empty_fixed_array() ||
- properties == properties->GetHeap()->empty_property_dictionary()) {
- return Smi::FromInt(masked_hash);
+ Heap* heap = properties->GetHeap();
+ if (properties == heap->empty_fixed_array() ||
+ properties == heap->empty_property_array() ||
+ properties == heap->empty_property_dictionary()) {
+ return Smi::FromInt(hash);
}
if (properties->IsPropertyArray()) {
- PropertyArray::cast(properties)->SetHash(masked_hash);
+ PropertyArray::cast(properties)->SetHash(hash);
+ DCHECK_LT(0, PropertyArray::cast(properties)->length());
return properties;
}
DCHECK(properties->IsDictionary());
- NameDictionary::cast(properties)->SetHash(masked_hash);
+ NameDictionary::cast(properties)->SetHash(hash);
return properties;
}
int GetIdentityHashHelper(Isolate* isolate, JSReceiver* object) {
+ DisallowHeapAllocation no_gc;
Object* properties = object->raw_properties_or_hash();
if (properties->IsSmi()) {
return Smi::ToInt(properties);
@@ -6311,17 +6423,22 @@ int GetIdentityHashHelper(Isolate* isolate, JSReceiver* object) {
}
} // namespace
-void JSReceiver::SetIdentityHash(int masked_hash) {
- DCHECK_NE(PropertyArray::kNoHashSentinel, masked_hash);
- DCHECK_EQ(masked_hash & JSReceiver::kHashMask, masked_hash);
+void JSReceiver::SetIdentityHash(int hash) {
+ DisallowHeapAllocation no_gc;
+ DCHECK_NE(PropertyArray::kNoHashSentinel, hash);
+ DCHECK(PropertyArray::HashField::is_valid(hash));
HeapObject* existing_properties = HeapObject::cast(raw_properties_or_hash());
Object* new_properties =
- SetHashAndUpdateProperties(existing_properties, masked_hash);
+ SetHashAndUpdateProperties(existing_properties, hash);
set_raw_properties_or_hash(new_properties);
}
void JSReceiver::SetProperties(HeapObject* properties) {
+ DCHECK_IMPLIES(properties->IsPropertyArray() &&
+ PropertyArray::cast(properties)->length() == 0,
+ properties == properties->GetHeap()->empty_property_array());
+ DisallowHeapAllocation no_gc;
Isolate* isolate = properties->GetIsolate();
int hash = GetIdentityHashHelper(isolate, this);
Object* new_properties = properties;
@@ -6337,6 +6454,7 @@ void JSReceiver::SetProperties(HeapObject* properties) {
template <typename ProxyType>
Smi* GetOrCreateIdentityHashHelper(Isolate* isolate, ProxyType* proxy) {
+ DisallowHeapAllocation no_gc;
Object* maybe_hash = proxy->hash();
if (maybe_hash->IsSmi()) return Smi::cast(maybe_hash);
@@ -6346,6 +6464,7 @@ Smi* GetOrCreateIdentityHashHelper(Isolate* isolate, ProxyType* proxy) {
}
Object* JSObject::GetIdentityHash(Isolate* isolate) {
+ DisallowHeapAllocation no_gc;
if (IsJSGlobalProxy()) {
return JSGlobalProxy::cast(this)->hash();
}
@@ -6359,6 +6478,7 @@ Object* JSObject::GetIdentityHash(Isolate* isolate) {
}
Smi* JSObject::GetOrCreateIdentityHash(Isolate* isolate) {
+ DisallowHeapAllocation no_gc;
if (IsJSGlobalProxy()) {
return GetOrCreateIdentityHashHelper(isolate, JSGlobalProxy::cast(this));
}
@@ -6368,16 +6488,11 @@ Smi* JSObject::GetOrCreateIdentityHash(Isolate* isolate) {
return Smi::cast(hash_obj);
}
- int masked_hash;
- // TODO(gsathya): Remove the loop and pass kHashMask directly to
- // GenerateIdentityHash.
- do {
- int hash = isolate->GenerateIdentityHash(Smi::kMaxValue);
- masked_hash = hash & JSReceiver::kHashMask;
- } while (masked_hash == PropertyArray::kNoHashSentinel);
+ int hash = isolate->GenerateIdentityHash(PropertyArray::HashField::kMax);
+ DCHECK_NE(PropertyArray::kNoHashSentinel, hash);
- SetIdentityHash(masked_hash);
- return Smi::FromInt(masked_hash);
+ SetIdentityHash(hash);
+ return Smi::FromInt(hash);
}
Object* JSProxy::GetIdentityHash() { return hash(); }
@@ -6713,6 +6828,17 @@ Maybe<bool> JSReceiver::OrdinaryDefineOwnProperty(Isolate* isolate,
it.Next();
}
+ // Handle interceptor
+ if (it.state() == LookupIterator::INTERCEPTOR) {
+ if (it.HolderIsReceiverOrHiddenPrototype()) {
+ Maybe<bool> result = DefinePropertyWithInterceptorInternal(
+ &it, it.GetInterceptor(), should_throw, *desc);
+ if (result.IsNothing() || result.FromJust()) {
+ return result;
+ }
+ }
+ }
+
return OrdinaryDefineOwnProperty(&it, desc, should_throw);
}
@@ -6728,20 +6854,6 @@ Maybe<bool> JSReceiver::OrdinaryDefineOwnProperty(LookupIterator* it,
PropertyDescriptor current;
MAYBE_RETURN(GetOwnPropertyDescriptor(it, &current), Nothing<bool>());
- it->Restart();
- // Handle interceptor
- for (; it->IsFound(); it->Next()) {
- if (it->state() == LookupIterator::INTERCEPTOR) {
- if (it->HolderIsReceiverOrHiddenPrototype()) {
- Maybe<bool> result = DefinePropertyWithInterceptorInternal(
- it, it->GetInterceptor(), should_throw, *desc);
- if (result.IsNothing() || result.FromJust()) {
- return result;
- }
- }
- }
- }
-
// TODO(jkummerow/verwaest): It would be nice if we didn't have to reset
// the iterator every time. Currently, the reasons why we need it are:
// - handle interceptors correctly
@@ -8906,13 +9018,11 @@ Handle<Map> Map::Normalize(Handle<Map> fast_map, PropertyNormalizationMode mode,
kTransitionsOrPrototypeInfoOffset + kPointerSize);
DCHECK(memcmp(HeapObject::RawField(*fresh, kDescriptorsOffset),
HeapObject::RawField(*new_map, kDescriptorsOffset),
- kCodeCacheOffset - kDescriptorsOffset) == 0);
+ kDependentCodeOffset - kDescriptorsOffset) == 0);
} else {
DCHECK(memcmp(fresh->address(), new_map->address(),
- Map::kCodeCacheOffset) == 0);
+ Map::kDependentCodeOffset) == 0);
}
- STATIC_ASSERT(Map::kDependentCodeOffset ==
- Map::kCodeCacheOffset + kPointerSize);
STATIC_ASSERT(Map::kWeakCellCacheOffset ==
Map::kDependentCodeOffset + kPointerSize);
int offset = Map::kWeakCellCacheOffset + kPointerSize;
@@ -9047,7 +9157,6 @@ Handle<Map> Map::CopyDropDescriptors(Handle<Map> map) {
result->SetInObjectProperties(map->GetInObjectProperties());
result->set_unused_property_fields(map->unused_property_fields());
}
- result->ClearCodeCache(map->GetHeap());
map->NotifyLeafMapLayoutChange();
return result;
}
@@ -9295,6 +9404,13 @@ void Map::InstallDescriptors(Handle<Map> parent, Handle<Map> child,
Handle<Map> Map::CopyAsElementsKind(Handle<Map> map, ElementsKind kind,
TransitionFlag flag) {
+ // Only certain objects are allowed to have non-terminal fast transitional
+ // elements kinds.
+ DCHECK(map->IsJSObjectMap());
+ DCHECK_IMPLIES(
+ !map->CanHaveFastTransitionableElementsKind(),
+ IsDictionaryElementsKind(kind) || IsTerminalElementsKind(kind));
+
Map* maybe_elements_transition_map = NULL;
if (flag == INSERT_TRANSITION) {
// Ensure we are requested to add elements kind transition "near the root".
@@ -9882,211 +9998,6 @@ Handle<Map> Map::CopyReplaceDescriptor(Handle<Map> map,
simple_flag);
}
-// Helper class to manage a Map's code cache. The layout depends on the number
-// of entries; this is worthwhile because most code caches are very small,
-// but some are huge (thousands of entries).
-// For zero entries, the EmptyFixedArray is used.
-// For one entry, we use a 2-element FixedArray containing [name, code].
-// For 2..100 entries, we use a FixedArray with linear lookups, the layout is:
-// [0] - number of slots that are currently in use
-// [1] - first name
-// [2] - first code
-// [3] - second name
-// [4] - second code
-// etc.
-// For more than 128 entries, we use a CodeCacheHashTable.
-class CodeCache : public AllStatic {
- public:
- // Returns the new cache, to be stored on the map.
- static Handle<FixedArray> Put(Isolate* isolate, Handle<FixedArray> cache,
- Handle<Name> name, Handle<Code> code) {
- int length = cache->length();
- if (length == 0) return PutFirstElement(isolate, name, code);
- if (length == kEntrySize) {
- return PutSecondElement(isolate, cache, name, code);
- }
- if (length <= kLinearMaxSize) {
- Handle<FixedArray> result = PutLinearElement(isolate, cache, name, code);
- if (!result.is_null()) return result;
- // Fall through if linear storage is getting too large.
- }
- return PutHashTableElement(isolate, cache, name, code);
- }
-
- static Code* Lookup(FixedArray* cache, Name* name, Code::Flags flags) {
- int length = cache->length();
- if (length == 0) return nullptr;
- if (length == kEntrySize) return OneElementLookup(cache, name, flags);
- if (!cache->IsCodeCacheHashTable()) {
- return LinearLookup(cache, name, flags);
- } else {
- return CodeCacheHashTable::cast(cache)->Lookup(name, flags);
- }
- }
-
- private:
- static const int kNameIndex = 0;
- static const int kCodeIndex = 1;
- static const int kEntrySize = 2;
-
- static const int kLinearUsageIndex = 0;
- static const int kLinearReservedSlots = 1;
- static const int kLinearInitialCapacity = 2;
- static const int kLinearMaxSize = 257; // == LinearSizeFor(128);
-
- static const int kHashTableInitialCapacity = 200; // Number of entries.
-
- static int LinearSizeFor(int entries) {
- return kLinearReservedSlots + kEntrySize * entries;
- }
-
- static int LinearNewSize(int old_size) {
- int old_entries = (old_size - kLinearReservedSlots) / kEntrySize;
- return LinearSizeFor(old_entries * 2);
- }
-
- static Code* OneElementLookup(FixedArray* cache, Name* name,
- Code::Flags flags) {
- DCHECK_EQ(cache->length(), kEntrySize);
- if (cache->get(kNameIndex) != name) return nullptr;
- Code* maybe_code = Code::cast(cache->get(kCodeIndex));
- if (maybe_code->flags() != flags) return nullptr;
- return maybe_code;
- }
-
- static Code* LinearLookup(FixedArray* cache, Name* name, Code::Flags flags) {
- DCHECK_GE(cache->length(), kEntrySize);
- DCHECK(!cache->IsCodeCacheHashTable());
- int usage = GetLinearUsage(cache);
- for (int i = kLinearReservedSlots; i < usage; i += kEntrySize) {
- if (cache->get(i + kNameIndex) != name) continue;
- Code* code = Code::cast(cache->get(i + kCodeIndex));
- if (code->flags() == flags) return code;
- }
- return nullptr;
- }
-
- static Handle<FixedArray> PutFirstElement(Isolate* isolate, Handle<Name> name,
- Handle<Code> code) {
- Handle<FixedArray> cache = isolate->factory()->NewFixedArray(kEntrySize);
- cache->set(kNameIndex, *name);
- cache->set(kCodeIndex, *code);
- return cache;
- }
-
- static Handle<FixedArray> PutSecondElement(Isolate* isolate,
- Handle<FixedArray> cache,
- Handle<Name> name,
- Handle<Code> code) {
- DCHECK_EQ(cache->length(), kEntrySize);
- Handle<FixedArray> new_cache = isolate->factory()->NewFixedArray(
- LinearSizeFor(kLinearInitialCapacity));
- new_cache->set(kLinearReservedSlots + kNameIndex, cache->get(kNameIndex));
- new_cache->set(kLinearReservedSlots + kCodeIndex, cache->get(kCodeIndex));
- new_cache->set(LinearSizeFor(1) + kNameIndex, *name);
- new_cache->set(LinearSizeFor(1) + kCodeIndex, *code);
- new_cache->set(kLinearUsageIndex, Smi::FromInt(LinearSizeFor(2)));
- return new_cache;
- }
-
- static Handle<FixedArray> PutLinearElement(Isolate* isolate,
- Handle<FixedArray> cache,
- Handle<Name> name,
- Handle<Code> code) {
- int length = cache->length();
- int usage = GetLinearUsage(*cache);
- DCHECK_LE(usage, length);
- // Check if we need to grow.
- if (usage == length) {
- int new_length = LinearNewSize(length);
- if (new_length > kLinearMaxSize) return Handle<FixedArray>::null();
- Handle<FixedArray> new_cache =
- isolate->factory()->NewFixedArray(new_length);
- for (int i = kLinearReservedSlots; i < length; i++) {
- new_cache->set(i, cache->get(i));
- }
- cache = new_cache;
- }
- // Store new entry.
- DCHECK_GE(cache->length(), usage + kEntrySize);
- cache->set(usage + kNameIndex, *name);
- cache->set(usage + kCodeIndex, *code);
- cache->set(kLinearUsageIndex, Smi::FromInt(usage + kEntrySize));
- return cache;
- }
-
- static Handle<FixedArray> PutHashTableElement(Isolate* isolate,
- Handle<FixedArray> cache,
- Handle<Name> name,
- Handle<Code> code) {
- // Check if we need to transition from linear to hash table storage.
- if (!cache->IsCodeCacheHashTable()) {
- // Check that the initial hash table capacity is large enough.
- DCHECK_EQ(kLinearMaxSize, LinearSizeFor(128));
- STATIC_ASSERT(kHashTableInitialCapacity > 128);
-
- int length = cache->length();
- // Only migrate from linear storage when it's full.
- DCHECK_EQ(length, GetLinearUsage(*cache));
- DCHECK_EQ(length, kLinearMaxSize);
- Handle<CodeCacheHashTable> table =
- CodeCacheHashTable::New(isolate, kHashTableInitialCapacity);
- HandleScope scope(isolate);
- for (int i = kLinearReservedSlots; i < length; i += kEntrySize) {
- Handle<Name> old_name(Name::cast(cache->get(i + kNameIndex)), isolate);
- Handle<Code> old_code(Code::cast(cache->get(i + kCodeIndex)), isolate);
- CodeCacheHashTable::Put(table, old_name, old_code);
- }
- cache = table;
- }
- // Store new entry.
- DCHECK(cache->IsCodeCacheHashTable());
- return CodeCacheHashTable::Put(Handle<CodeCacheHashTable>::cast(cache),
- name, code);
- }
-
- static inline int GetLinearUsage(FixedArray* linear_cache) {
- DCHECK_GT(linear_cache->length(), kEntrySize);
- return Smi::ToInt(linear_cache->get(kLinearUsageIndex));
- }
-};
-
-void Map::UpdateCodeCache(Handle<Map> map,
- Handle<Name> name,
- Handle<Code> code) {
- Isolate* isolate = map->GetIsolate();
- Handle<FixedArray> cache(map->code_cache(), isolate);
- Handle<FixedArray> new_cache = CodeCache::Put(isolate, cache, name, code);
- map->set_code_cache(*new_cache);
-}
-
-Code* Map::LookupInCodeCache(Name* name, Code::Flags flags) {
- return CodeCache::Lookup(code_cache(), name, flags);
-}
-
-
-Handle<CodeCacheHashTable> CodeCacheHashTable::Put(
- Handle<CodeCacheHashTable> cache, Handle<Name> name, Handle<Code> code) {
- CodeCacheHashTableKey key(name, code);
-
- Handle<CodeCacheHashTable> new_cache = EnsureCapacity(cache, 1);
-
- int entry = new_cache->FindInsertionEntry(key.Hash());
- Handle<Object> k = key.AsHandle(cache->GetIsolate());
-
- new_cache->set(EntryToIndex(entry), *k);
- new_cache->ElementAdded();
- return new_cache;
-}
-
-Code* CodeCacheHashTable::Lookup(Name* name, Code::Flags flags) {
- DisallowHeapAllocation no_alloc;
- CodeCacheHashTableKey key(handle(name), flags);
- int entry = FindEntry(&key);
- if (entry == kNotFound) return nullptr;
- return Code::cast(FixedArray::cast(get(EntryToIndex(entry)))->get(1));
-}
-
Handle<FixedArray> FixedArray::SetAndGrow(Handle<FixedArray> array, int index,
Handle<Object> value) {
if (index < array->length()) {
@@ -10416,12 +10327,12 @@ Handle<DescriptorArray> DescriptorArray::Allocate(Isolate* isolate,
factory->NewFixedArray(LengthFor(size), pretenure);
result->set(kDescriptorLengthIndex, Smi::FromInt(number_of_descriptors));
- result->set(kEnumCacheBridgeIndex, Smi::kZero);
+ result->set(kEnumCacheIndex, isolate->heap()->empty_enum_cache());
return Handle<DescriptorArray>::cast(result);
}
void DescriptorArray::ClearEnumCache() {
- set(kEnumCacheBridgeIndex, Smi::kZero);
+ set(kEnumCacheIndex, GetHeap()->empty_enum_cache());
}
void DescriptorArray::Replace(int index, Descriptor* descriptor) {
@@ -10429,27 +10340,17 @@ void DescriptorArray::Replace(int index, Descriptor* descriptor) {
Set(index, descriptor);
}
-
// static
void DescriptorArray::SetEnumCache(Handle<DescriptorArray> descriptors,
- Isolate* isolate,
- Handle<FixedArray> new_cache,
- Handle<FixedArray> new_index_cache) {
- DCHECK(!descriptors->IsEmpty());
- FixedArray* bridge_storage;
- bool needs_new_enum_cache = !descriptors->HasEnumCache();
- if (needs_new_enum_cache) {
- bridge_storage = *isolate->factory()->NewFixedArray(
- DescriptorArray::kEnumCacheBridgeLength);
+ Isolate* isolate, Handle<FixedArray> keys,
+ Handle<FixedArray> indices) {
+ EnumCache* enum_cache = descriptors->GetEnumCache();
+ if (enum_cache == isolate->heap()->empty_enum_cache()) {
+ enum_cache = *isolate->factory()->NewEnumCache(keys, indices);
+ descriptors->set(kEnumCacheIndex, enum_cache);
} else {
- bridge_storage = FixedArray::cast(descriptors->get(kEnumCacheBridgeIndex));
- }
- bridge_storage->set(kEnumCacheBridgeCacheIndex, *new_cache);
- bridge_storage->set(
- kEnumCacheBridgeIndicesCacheIndex,
- new_index_cache.is_null() ? Object::cast(Smi::kZero) : *new_index_cache);
- if (needs_new_enum_cache) {
- descriptors->set(kEnumCacheBridgeIndex, bridge_storage);
+ enum_cache->set_keys(*keys);
+ enum_cache->set_indices(*indices);
}
}
@@ -10598,8 +10499,6 @@ int HandlerTable::LookupReturn(int pc_offset) {
#ifdef DEBUG
bool DescriptorArray::IsEqualTo(DescriptorArray* other) {
- if (IsEmpty()) return other->IsEmpty();
- if (other->IsEmpty()) return false;
if (length() != other->length()) return false;
for (int i = 0; i < length(); ++i) {
if (get(i) != other->get(i)) return false;
@@ -11233,11 +11132,8 @@ void String::WriteToFlat(String* src,
}
}
-
-
template <typename SourceChar>
-static void CalculateLineEndsImpl(Isolate* isolate,
- List<int>* line_ends,
+static void CalculateLineEndsImpl(Isolate* isolate, std::vector<int>* line_ends,
Vector<const SourceChar> src,
bool include_ending_line) {
const int src_len = src.length();
@@ -11245,16 +11141,16 @@ static void CalculateLineEndsImpl(Isolate* isolate,
for (int i = 0; i < src_len - 1; i++) {
SourceChar current = src[i];
SourceChar next = src[i + 1];
- if (cache->IsLineTerminatorSequence(current, next)) line_ends->Add(i);
+ if (cache->IsLineTerminatorSequence(current, next)) line_ends->push_back(i);
}
if (src_len > 0 && cache->IsLineTerminatorSequence(src[src_len - 1], 0)) {
- line_ends->Add(src_len - 1);
+ line_ends->push_back(src_len - 1);
}
if (include_ending_line) {
// Include one character beyond the end of script. The rewriter uses that
// position for the implicit return statement.
- line_ends->Add(src_len);
+ line_ends->push_back(src_len);
}
}
@@ -11265,7 +11161,8 @@ Handle<FixedArray> String::CalculateLineEnds(Handle<String> src,
// Rough estimate of line count based on a roughly estimated average
// length of (unpacked) code.
int line_count_estimate = src->length() >> 4;
- List<int> line_ends(line_count_estimate);
+ std::vector<int> line_ends;
+ line_ends.reserve(line_count_estimate);
Isolate* isolate = src->GetIsolate();
{ DisallowHeapAllocation no_allocation; // ensure vectors stay valid.
// Dispatch on type of strings.
@@ -11283,7 +11180,7 @@ Handle<FixedArray> String::CalculateLineEnds(Handle<String> src,
include_ending_line);
}
}
- int line_count = line_ends.length();
+ int line_count = static_cast<int>(line_ends.size());
Handle<FixedArray> array = isolate->factory()->NewFixedArray(line_count);
for (int i = 0; i < line_count; i++) {
array->set(i, Smi::FromInt(line_ends[i]));
@@ -11793,11 +11690,10 @@ MaybeHandle<String> String::GetSubstitution(Isolate* isolate, Match* match,
String::IndexOf(isolate, replacement, bracket_string, peek_ix + 1);
if (closing_bracket_ix == -1) {
- THROW_NEW_ERROR(
- isolate,
- NewSyntaxError(MessageTemplate::kRegExpInvalidReplaceString,
- replacement),
- String);
+      // No closing bracket was found; treat '$<' as a string literal.
+ builder.AppendCharacter('$');
+ continue_from_ix = peek_ix;
+ break;
}
Handle<String> capture_name =
@@ -11810,12 +11706,6 @@ MaybeHandle<String> String::GetSubstitution(Isolate* isolate, Match* match,
switch (capture_state) {
case CaptureState::INVALID:
- THROW_NEW_ERROR(
- isolate,
- NewSyntaxError(MessageTemplate::kRegExpInvalidReplaceString,
- replacement),
- String);
- break;
case CaptureState::UNMATCHED:
break;
case CaptureState::MATCHED:
@@ -12310,23 +12200,6 @@ bool Map::EquivalentToForNormalization(const Map* other,
}
-bool JSFunction::Inlines(SharedFunctionInfo* candidate) {
- DisallowHeapAllocation no_gc;
- if (shared() == candidate) return true;
- if (code()->kind() != Code::OPTIMIZED_FUNCTION) return false;
- DeoptimizationInputData* const data =
- DeoptimizationInputData::cast(code()->deoptimization_data());
- if (data->length() == 0) return false;
- FixedArray* const literals = data->LiteralArray();
- int const inlined_count = data->InlinedFunctionCount()->value();
- for (int i = 0; i < inlined_count; ++i) {
- if (SharedFunctionInfo::cast(literals->get(i)) == candidate) {
- return true;
- }
- }
- return false;
-}
-
void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
Isolate* isolate = GetIsolate();
if (!isolate->concurrent_recompilation_enabled() ||
@@ -12920,8 +12793,13 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case JS_VALUE_TYPE:
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
+ case WASM_INSTANCE_TYPE:
+ case WASM_MEMORY_TYPE:
+ case WASM_MODULE_TYPE:
+ case WASM_TABLE_TYPE:
return true;
+ case BIGINT_TYPE:
case BYTECODE_ARRAY_TYPE:
case BYTE_ARRAY_TYPE:
case CELL_TYPE:
@@ -13270,26 +13148,6 @@ void Oddball::Initialize(Isolate* isolate, Handle<Oddball> oddball,
oddball->set_kind(kind);
}
-void Script::SetEvalOrigin(Handle<Script> script,
- Handle<SharedFunctionInfo> outer_info,
- int eval_position) {
- if (eval_position == kNoSourcePosition) {
- // If the position is missing, attempt to get the code offset from the
- // current activation. Do not translate the code offset into source
- // position, but store it as negative value for lazy translation.
- StackTraceFrameIterator it(script->GetIsolate());
- if (!it.done() && it.is_javascript()) {
- FrameSummary summary = FrameSummary::GetTop(it.javascript_frame());
- script->set_eval_from_shared(summary.AsJavaScript().function()->shared());
- script->set_eval_from_position(-summary.code_offset());
- return;
- }
- eval_position = 0;
- }
- script->set_eval_from_shared(*outer_info);
- script->set_eval_from_position(eval_position);
-}
-
int Script::GetEvalPosition() {
DisallowHeapAllocation no_gc;
DCHECK(compilation_type() == Script::COMPILATION_TYPE_EVAL);
@@ -13650,7 +13508,6 @@ bool SharedFunctionInfo::HasCoverageInfo() const {
if (!HasDebugInfo()) return false;
DebugInfo* info = DebugInfo::cast(debug_info());
bool has_coverage_info = info->HasCoverageInfo();
- DCHECK_IMPLIES(has_coverage_info, FLAG_block_coverage);
return has_coverage_info;
}
@@ -13854,9 +13711,8 @@ void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
set_compiler_hints(
DisabledOptimizationReasonBits::update(compiler_hints(), reason));
- // Code should be the lazy compilation stub or else unoptimized.
- DCHECK(abstract_code()->kind() == AbstractCode::FUNCTION ||
- abstract_code()->kind() == AbstractCode::INTERPRETED_FUNCTION ||
+ // Code should be the lazy compilation stub or else interpreted.
+ DCHECK(abstract_code()->kind() == AbstractCode::INTERPRETED_FUNCTION ||
abstract_code()->kind() == AbstractCode::BUILTIN);
PROFILE(GetIsolate(), CodeDisableOptEvent(abstract_code(), this));
if (FLAG_trace_opt) {
@@ -13937,6 +13793,16 @@ void SharedFunctionInfo::SetExpectedNofPropertiesFromEstimate(
void SharedFunctionInfo::SetConstructStub(Code* code) {
if (code->kind() == Code::BUILTIN) code->set_is_construct_stub(true);
+#ifdef DEBUG
+ if (code->is_builtin()) {
+ // See https://crbug.com/v8/6787. Lazy deserialization currently cannot
+ // handle lazy construct stubs that differ from the code object.
+ int builtin_id = code->builtin_index();
+ DCHECK_NE(Builtins::kDeserializeLazy, builtin_id);
+ DCHECK(builtin_id == Builtins::kJSBuiltinsConstructStub ||
+ this->code() == code || !Builtins::IsLazy(builtin_id));
+ }
+#endif
set_construct_stub(code);
}
@@ -14053,58 +13919,6 @@ SafepointEntry Code::GetSafepointEntry(Address pc) {
}
-Object* Code::FindNthObject(int n, Map* match_map) {
- DCHECK(is_inline_cache_stub());
- DisallowHeapAllocation no_allocation;
- int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(this, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- Object* object = info->target_object();
- if (object->IsWeakCell()) object = WeakCell::cast(object)->value();
- if (object->IsHeapObject()) {
- if (HeapObject::cast(object)->map() == match_map) {
- if (--n == 0) return object;
- }
- }
- }
- return NULL;
-}
-
-
-AllocationSite* Code::FindFirstAllocationSite() {
- Object* result = FindNthObject(1, GetHeap()->allocation_site_map());
- return (result != NULL) ? AllocationSite::cast(result) : NULL;
-}
-
-
-Map* Code::FindFirstMap() {
- Object* result = FindNthObject(1, GetHeap()->meta_map());
- return (result != NULL) ? Map::cast(result) : NULL;
-}
-
-
-void Code::FindAndReplace(const FindAndReplacePattern& pattern) {
- DCHECK(is_inline_cache_stub() || is_handler());
- DisallowHeapAllocation no_allocation;
- int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- STATIC_ASSERT(FindAndReplacePattern::kMaxCount < 32);
- int current_pattern = 0;
- for (RelocIterator it(this, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- HeapObject* object = info->target_object();
- if (object->IsWeakCell()) {
- object = HeapObject::cast(WeakCell::cast(object)->value());
- }
- Map* map = object->map();
- if (map == *pattern.find_[current_pattern]) {
- info->set_target_object(*pattern.replace_[current_pattern]);
- if (++current_pattern == pattern.count_) return;
- }
- }
- UNREACHABLE();
-}
-
-
namespace {
template <typename Code>
void SetStackFrameCacheCommon(Handle<Code> code,
@@ -14186,7 +14000,10 @@ int AbstractCode::SourceStatementPosition(int offset) {
void JSFunction::ClearTypeFeedbackInfo() {
if (feedback_vector_cell()->value()->IsFeedbackVector()) {
FeedbackVector* vector = feedback_vector();
- vector->ClearSlots(this);
+ Isolate* isolate = GetIsolate();
+ if (vector->ClearSlots(isolate)) {
+ IC::OnFeedbackChanged(isolate, vector, this);
+ }
}
}
@@ -14246,7 +14063,6 @@ Handle<WeakCell> Code::WeakCellFor(Handle<Code> code) {
return cell;
}
-
WeakCell* Code::CachedWeakCell() {
DCHECK(kind() == OPTIMIZED_FUNCTION);
Object* weak_cell_cache =
@@ -14258,6 +14074,53 @@ WeakCell* Code::CachedWeakCell() {
return NULL;
}
+bool Code::Inlines(SharedFunctionInfo* sfi) {
+ // We can only check for inlining for optimized code.
+ DCHECK(is_optimized_code());
+ DisallowHeapAllocation no_gc;
+ DeoptimizationInputData* const data =
+ DeoptimizationInputData::cast(deoptimization_data());
+ if (data->length() == 0) return false;
+ if (data->SharedFunctionInfo() == sfi) return true;
+ FixedArray* const literals = data->LiteralArray();
+ int const inlined_count = data->InlinedFunctionCount()->value();
+ for (int i = 0; i < inlined_count; ++i) {
+ if (SharedFunctionInfo::cast(literals->get(i)) == sfi) return true;
+ }
+ return false;
+}
+
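Code::Inlines replaces the JSFunction::Inlines helper deleted from this file above; the check now lives on the optimized Code object itself, so callers go through the function's code. A usage sketch (assuming `function` currently holds optimized code):

    Code* code = function->code();
    if (code->kind() == Code::OPTIMIZED_FUNCTION && code->Inlines(shared)) {
      // `shared` is either this code's own SharedFunctionInfo or one of the
      // functions inlined into it.
    }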
+Code::OptimizedCodeIterator::OptimizedCodeIterator(Isolate* isolate) {
+ isolate_ = isolate;
+ Object* list = isolate->heap()->native_contexts_list();
+ next_context_ = list->IsUndefined(isolate_) ? nullptr : Context::cast(list);
+ current_code_ = nullptr;
+}
+
+Code* Code::OptimizedCodeIterator::Next() {
+ do {
+ Object* next;
+ if (current_code_ != nullptr) {
+ // Get next code in the linked list.
+ next = Code::cast(current_code_)->next_code_link();
+ } else if (next_context_ != nullptr) {
+ // Linked list of code exhausted. Get list of next context.
+ next = next_context_->OptimizedCodeListHead();
+ Object* next_context = next_context_->next_context_link();
+ next_context_ = next_context->IsUndefined(isolate_)
+ ? nullptr
+ : Context::cast(next_context);
+ } else {
+ // Exhausted contexts.
+ return nullptr;
+ }
+ current_code_ = next->IsUndefined(isolate_) ? nullptr : Code::cast(next);
+ } while (current_code_ == nullptr);
+ Code* code = Code::cast(current_code_);
+ DCHECK_EQ(Code::OPTIMIZED_FUNCTION, code->kind());
+ return code;
+}
+
#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
const char* Code::ICState2String(InlineCacheState state) {
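OptimizedCodeIterator walks a list of lists: each native context keeps a linked list of its optimized Code objects, and Next() hops to the following context whenever the current list is exhausted. The intended traversal, inferred from the constructor and Next() above:

    Code::OptimizedCodeIterator it(isolate);
    while (Code* code = it.Next()) {
      // Visits every OPTIMIZED_FUNCTION Code object in every native context;
      // Next() returns nullptr once all contexts are exhausted.
    }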
@@ -14280,17 +14143,6 @@ const char* Code::ICState2String(InlineCacheState state) {
UNREACHABLE();
}
-void Code::PrintExtraICState(std::ostream& os, // NOLINT
- Kind kind, ExtraICState extra) {
- os << "extra_ic_state = ";
- if ((kind == STORE_IC || kind == KEYED_STORE_IC) &&
- is_strict(static_cast<LanguageMode>(extra))) {
- os << "STRICT\n";
- } else {
- os << extra << "\n";
- }
-}
-
#endif // defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
#ifdef ENABLE_DISASSEMBLER
@@ -14498,8 +14350,9 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
case Translation::ARGUMENTS_ELEMENTS:
case Translation::ARGUMENTS_LENGTH: {
- bool is_rest = iterator.Next();
- os << "{is_rest=" << (is_rest ? "true" : "false") << "}";
+ CreateArgumentsType arguments_type =
+ static_cast<CreateArgumentsType>(iterator.Next());
+ os << "{arguments_type=" << arguments_type << "}";
break;
}
@@ -14546,7 +14399,7 @@ void HandlerTable::HandlerTableReturnPrint(std::ostream& os) {
void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
os << "kind = " << Kind2String(kind()) << "\n";
- if (IsCodeStubOrIC()) {
+ if (is_stub()) {
const char* n = CodeStub::MajorName(CodeStub::GetMajorKey(this));
os << "major_key = " << (n == NULL ? "null" : n) << "\n";
}
@@ -14568,11 +14421,7 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
if (kind() == OPTIMIZED_FUNCTION) {
os << "stack_slots = " << stack_slots() << "\n";
}
- os << "compiler = "
- << (is_turbofanned()
- ? "turbofan"
- : kind() == Code::FUNCTION ? "full-codegen" : "unknown")
- << "\n";
+ os << "compiler = " << (is_turbofanned() ? "turbofan" : "unknown") << "\n";
os << "Instructions (size = " << instruction_size() << ")\n";
{
@@ -14580,16 +14429,12 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
int size = instruction_size();
int safepoint_offset =
is_turbofanned() ? static_cast<int>(safepoint_table_offset()) : size;
- int back_edge_offset = (kind() == Code::FUNCTION)
- ? static_cast<int>(back_edge_table_offset())
- : size;
int constant_pool_offset = FLAG_enable_embedded_constant_pool
? this->constant_pool_offset()
: size;
// Stop before reaching any embedded tables
- int code_size = Min(safepoint_offset, back_edge_offset);
- code_size = Min(code_size, constant_pool_offset);
+ int code_size = Min(safepoint_offset, constant_pool_offset);
byte* begin = instruction_start();
byte* end = begin + code_size;
Disassembler::Decode(isolate, &os, begin, end, this);
@@ -14654,9 +14499,7 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
if (handler_table()->length() > 0) {
os << "Handler Table (size = " << handler_table()->Size() << ")\n";
- if (kind() == FUNCTION) {
- HandlerTable::cast(handler_table())->HandlerTableRangePrint(os);
- } else if (kind() == OPTIMIZED_FUNCTION) {
+ if (kind() == OPTIMIZED_FUNCTION) {
HandlerTable::cast(handler_table())->HandlerTableReturnPrint(os);
}
os << "\n";
@@ -15345,6 +15188,9 @@ bool JSObject::WouldConvertToSlowElements(uint32_t index) {
static ElementsKind BestFittingFastElementsKind(JSObject* object) {
+ if (!object->map()->CanHaveFastTransitionableElementsKind()) {
+ return HOLEY_ELEMENTS;
+ }
if (object->HasSloppyArgumentsElements()) {
return FAST_SLOPPY_ARGUMENTS_ELEMENTS;
}
@@ -16455,6 +16301,8 @@ template class HashTable<ObjectHashTable, ObjectHashTableShape>;
template class HashTable<WeakHashTable, WeakHashTableShape<2>>;
+template class HashTable<TemplateMap, TemplateMapShape>;
+
template class Dictionary<NameDictionary, NameDictionaryShape>;
template class Dictionary<GlobalDictionary, GlobalDictionaryShape>;
@@ -18748,15 +18596,13 @@ MaybeHandle<JSDate> JSDate::New(Handle<JSFunction> constructor,
// static
double JSDate::CurrentTimeValue(Isolate* isolate) {
- if (FLAG_log_timer_events || FLAG_prof_cpp) LOG(isolate, CurrentTimeEvent());
+ if (FLAG_log_internal_timer_events) LOG(isolate, CurrentTimeEvent());
// According to ECMA-262, section 15.9.1, page 117, the precision of
// the number in a Date object representing a particular instant in
// time is milliseconds. Therefore, we floor the result of getting
// the OS time.
- return Floor(FLAG_verify_predictable
- ? isolate->heap()->MonotonicallyIncreasingTimeInMs()
- : base::OS::TimeCurrentMillis());
+ return Floor(V8::GetCurrentPlatform()->CurrentClockTimeMillis());
}
@@ -19268,7 +19114,6 @@ void PropertyCell::SetValueWithInvalidation(Handle<PropertyCell> cell,
int JSGeneratorObject::source_position() const {
CHECK(is_suspended());
DCHECK(function()->shared()->HasBytecodeArray());
- DCHECK(!function()->shared()->HasBaselineCode());
int code_offset = Smi::ToInt(input_or_debug_pos());
@@ -19314,6 +19159,16 @@ bool JSReceiver::HasProxyInPrototype(Isolate* isolate) {
return false;
}
+bool JSReceiver::HasComplexElements() {
+ if (IsJSProxy()) return true;
+ JSObject* this_object = JSObject::cast(this);
+ if (this_object->HasIndexedInterceptor()) {
+ return true;
+ }
+ if (!this_object->HasDictionaryElements()) return false;
+ return this_object->element_dictionary()->HasComplexElements();
+}
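A hypothetical caller sketch: fast paths that reorder or bulk-copy elements can use this predicate to bail out whenever element access may have side effects (names below are illustrative only):

    if (receiver->HasComplexElements()) {
      // Proxies, indexed interceptors, and dictionary elements with
      // accessors can run arbitrary JS; fall back to the generic path.
      return SortSlowPath(receiver);  // hypothetical helper
    }
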
+
MaybeHandle<Name> FunctionTemplateInfo::TryGetCachedPropertyName(
Isolate* isolate, Handle<Object> getter) {
if (getter->IsFunctionTemplateInfo()) {
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index a5acf7c6c4..895d92ba31 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -89,7 +89,6 @@
// - StringTable
// - StringSet
// - CompilationCacheTable
-// - CodeCacheHashTable
// - MapCache
// - OrderedHashTable
// - OrderedHashSet
@@ -126,6 +125,7 @@
// - ExternalTwoByteInternalizedString
// - Symbol
// - HeapNumber
+// - BigInt
// - Cell
// - PropertyCell
// - PropertyArray
@@ -142,10 +142,12 @@
// - AccessorInfo
// - PromiseResolveThenableJobInfo
// - PromiseReactionJobInfo
+// - PromiseCapability
// - AccessorPair
// - AccessCheckInfo
// - InterceptorInfo
// - CallHandlerInfo
+// - EnumCache
// - TemplateInfo
// - FunctionTemplateInfo
// - ObjectTemplateInfo
@@ -171,6 +173,7 @@ namespace v8 {
namespace internal {
struct InliningPosition;
+class PropertyDescriptorObject;
enum KeyedAccessStoreMode {
STANDARD_STORE,
@@ -260,11 +263,6 @@ enum DescriptorFlag {
OWN_DESCRIPTORS
};
-// ICs store extra state in a Code object. The default extra state is
-// kNoExtraICState.
-typedef int ExtraICState;
-static const ExtraICState kNoExtraICState = 0;
-
// Instance size sentinel for objects of variable size.
const int kVariableSizeSentinel = 0;
@@ -322,6 +320,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
\
V(SYMBOL_TYPE) \
V(HEAP_NUMBER_TYPE) \
+ V(BIGINT_TYPE) \
V(ODDBALL_TYPE) \
\
V(MAP_TYPE) \
@@ -357,6 +356,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
V(PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE) \
V(PROMISE_REACTION_JOB_INFO_TYPE) \
+ V(PROMISE_CAPABILITY_TYPE) \
V(DEBUG_INFO_TYPE) \
V(STACK_FRAME_INFO_TYPE) \
V(PROTOTYPE_INFO_TYPE) \
@@ -405,7 +405,6 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_MAP_VALUE_ITERATOR_TYPE) \
V(JS_WEAK_MAP_TYPE) \
V(JS_WEAK_SET_TYPE) \
- V(JS_PROMISE_CAPABILITY_TYPE) \
V(JS_PROMISE_TYPE) \
V(JS_REGEXP_TYPE) \
V(JS_ERROR_TYPE) \
@@ -540,6 +539,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
promise_resolve_thenable_job_info) \
V(PROMISE_REACTION_JOB_INFO, PromiseReactionJobInfo, \
promise_reaction_job_info) \
+ V(PROMISE_CAPABILITY, PromiseCapability, promise_capability) \
V(DEBUG_INFO, DebugInfo, debug_info) \
V(STACK_FRAME_INFO, StackFrameInfo, stack_frame_info) \
V(PROTOTYPE_INFO, PrototypeInfo, prototype_info) \
@@ -670,6 +670,7 @@ enum InstanceType : uint8_t {
// Other primitives (cannot contain non-map-word pointers to heap objects).
HEAP_NUMBER_TYPE,
+ BIGINT_TYPE,
ODDBALL_TYPE, // LAST_PRIMITIVE_TYPE
// Objects allocated in their own spaces (never in new space).
@@ -708,6 +709,7 @@ enum InstanceType : uint8_t {
ALIASED_ARGUMENTS_ENTRY_TYPE,
PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE,
PROMISE_REACTION_JOB_INFO_TYPE,
+ PROMISE_CAPABILITY_TYPE,
DEBUG_INFO_TYPE,
STACK_FRAME_INFO_TYPE,
PROTOTYPE_INFO_TYPE,
@@ -763,7 +765,6 @@ enum InstanceType : uint8_t {
JS_MAP_VALUE_ITERATOR_TYPE,
JS_WEAK_MAP_TYPE,
JS_WEAK_SET_TYPE,
- JS_PROMISE_CAPABILITY_TYPE,
JS_PROMISE_TYPE,
JS_REGEXP_TYPE,
JS_ERROR_TYPE,
@@ -865,7 +866,6 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
V(HANDLER_TABLE_SUB_TYPE) \
V(JS_COLLECTION_SUB_TYPE) \
V(JS_WEAK_COLLECTION_SUB_TYPE) \
- V(MAP_CODE_CACHE_SUB_TYPE) \
V(NOSCRIPT_SHARED_FUNCTION_INFOS_SUB_TYPE) \
V(NUMBER_STRING_CACHE_SUB_TYPE) \
V(OBJECT_TO_CODE_SUB_TYPE) \
@@ -920,7 +920,7 @@ class AllocationSite;
class Cell;
class ConsString;
class ElementsAccessor;
-class FindAndReplacePattern;
+class EnumCache;
class FixedArrayBase;
class PropertyArray;
class FunctionLiteral;
@@ -945,6 +945,7 @@ class FeedbackVector;
class WeakCell;
class TransitionArray;
class TemplateList;
+class TemplateMap;
template <typename T>
class ZoneForwardList;
@@ -968,6 +969,7 @@ template <class C> inline bool Is(Object* obj);
V(AbstractCode) \
V(AccessCheckNeeded) \
V(ArrayList) \
+ V(BigInt) \
V(BoilerplateDescription) \
V(Boolean) \
V(BreakPoint) \
@@ -978,7 +980,6 @@ template <class C> inline bool Is(Object* obj);
V(CallHandlerInfo) \
V(Cell) \
V(Code) \
- V(CodeCacheHashTable) \
V(CompilationCacheTable) \
V(ConsString) \
V(ConstantElementsPair) \
@@ -988,6 +989,7 @@ template <class C> inline bool Is(Object* obj);
V(DeoptimizationInputData) \
V(DependentCode) \
V(DescriptorArray) \
+ V(EnumCache) \
V(External) \
V(ExternalOneByteString) \
V(ExternalString) \
@@ -1038,7 +1040,6 @@ template <class C> inline bool Is(Object* obj);
V(JSModuleNamespace) \
V(JSObject) \
V(JSPromise) \
- V(JSPromiseCapability) \
V(JSProxy) \
V(JSReceiver) \
V(JSRegExp) \
@@ -1064,6 +1065,7 @@ template <class C> inline bool Is(Object* obj);
V(PreParsedScopeData) \
V(PropertyArray) \
V(PropertyCell) \
+ V(PropertyDescriptorObject) \
V(RegExpMatchInfo) \
V(ScopeInfo) \
V(ScriptContextTable) \
@@ -1084,6 +1086,8 @@ template <class C> inline bool Is(Object* obj);
V(Symbol) \
V(TemplateInfo) \
V(TemplateList) \
+ V(TemplateMap) \
+ V(TemplateObjectDescription) \
V(ThinString) \
V(TransitionArray) \
V(TypeFeedbackInfo) \
@@ -1781,13 +1785,6 @@ class HeapObject: public Object {
// during marking GC.
static inline Object** RawField(HeapObject* obj, int offset);
- // Adds the |code| object related to |name| to the code cache of this map. If
- // this map is a dictionary map that is shared, the map copied and installed
- // onto the object.
- static void UpdateMapCodeCache(Handle<HeapObject> object,
- Handle<Name> name,
- Handle<Code> code);
-
DECL_CAST(HeapObject)
// Return the write barrier mode for this. Callers of this function
@@ -1953,12 +1950,10 @@ class PropertyArray : public HeapObject {
// No weak fields.
typedef BodyDescriptor BodyDescriptorWeak;
- static const int kLengthMask = 0x3ff;
- static const int kHashMask = 0x7ffffc00;
- STATIC_ASSERT(kLengthMask + kHashMask == 0x7fffffff);
-
- static const int kMaxLength = kLengthMask;
- STATIC_ASSERT(kMaxLength > kMaxNumberOfDescriptors);
+ static const int kLengthFieldSize = 10;
+ class LengthField : public BitField<int, 0, kLengthFieldSize> {};
+ class HashField : public BitField<int, kLengthFieldSize,
+ kSmiValueSize - kLengthFieldSize - 1> {};
static const int kNoHashSentinel = 0;
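An illustrative round-trip through the two new BitFields, showing how length and hash share the single Smi-sized word (the values are made up; encode/update/decode are the standard BitField helpers):

    int field = PropertyArray::LengthField::encode(3);        // length = 3
    field = PropertyArray::HashField::update(field, 0x1234);  // hash alongside
    DCHECK_EQ(3, PropertyArray::LengthField::decode(field));
    DCHECK_EQ(0x1234, PropertyArray::HashField::decode(field));
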
@@ -1982,6 +1977,9 @@ class JSReceiver: public HeapObject {
// Gets slow properties for non-global objects.
inline NameDictionary* property_dictionary() const;
+ // Sets the properties backing store and makes sure any existing hash is moved
+ // to the new properties store. To clear out the properties store, pass in the
+ // empty_fixed_array(); the hash will be maintained in this case as well.
void SetProperties(HeapObject* properties);
// There are five possible values for the properties offset.
@@ -2185,7 +2183,7 @@ class JSReceiver: public HeapObject {
MUST_USE_RESULT static MaybeHandle<FixedArray> GetOwnEntries(
Handle<JSReceiver> object, PropertyFilter filter);
- static const int kHashMask = PropertyArray::kHashMask;
+ static const int kHashMask = PropertyArray::HashField::kMask;
// Layout description.
static const int kPropertiesOrHashOffset = HeapObject::kHeaderSize;
@@ -2193,6 +2191,8 @@ class JSReceiver: public HeapObject {
bool HasProxyInPrototype(Isolate* isolate);
+ bool HasComplexElements();
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver);
};
@@ -3354,28 +3354,29 @@ class BytecodeArray : public FixedArrayBase {
inline void clear_padding();
// Layout description.
- static const int kConstantPoolOffset = FixedArrayBase::kHeaderSize;
- static const int kHandlerTableOffset = kConstantPoolOffset + kPointerSize;
- static const int kSourcePositionTableOffset =
- kHandlerTableOffset + kPointerSize;
- static const int kFrameSizeOffset = kSourcePositionTableOffset + kPointerSize;
- static const int kParameterSizeOffset = kFrameSizeOffset + kIntSize;
- static const int kIncomingNewTargetOrGeneratorRegisterOffset =
- kParameterSizeOffset + kIntSize;
- static const int kInterruptBudgetOffset =
- kIncomingNewTargetOrGeneratorRegisterOffset + kIntSize;
- static const int kOSRNestingLevelOffset = kInterruptBudgetOffset + kIntSize;
- static const int kBytecodeAgeOffset = kOSRNestingLevelOffset + kCharSize;
- static const int kHeaderSize = kBytecodeAgeOffset + kCharSize;
+#define BYTECODE_ARRAY_FIELDS(V) \
+ /* Pointer fields. */ \
+ V(kConstantPoolOffset, kPointerSize) \
+ V(kHandlerTableOffset, kPointerSize) \
+ V(kSourcePositionTableOffset, kPointerSize) \
+ V(kFrameSizeOffset, kIntSize) \
+ V(kParameterSizeOffset, kIntSize) \
+ V(kIncomingNewTargetOrGeneratorRegisterOffset, kIntSize) \
+ V(kInterruptBudgetOffset, kIntSize) \
+ V(kOSRNestingLevelOffset, kCharSize) \
+ V(kBytecodeAgeOffset, kCharSize) \
+ /* Total size. */ \
+ V(kHeaderSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(FixedArrayBase::kHeaderSize,
+ BYTECODE_ARRAY_FIELDS)
+#undef BYTECODE_ARRAY_FIELDS
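For readers unfamiliar with DEFINE_FIELD_OFFSET_CONSTANTS: it folds the (name, size) pairs into a chain of offsets in which each field starts where the previous one ended, equivalent in effect to the hand-written constants it replaces. A simplified model of the expansion:

    // Simplified model only; the actual macro expands differently.
    static const int kConstantPoolOffset = FixedArrayBase::kHeaderSize;
    static const int kHandlerTableOffset = kConstantPoolOffset + kPointerSize;
    // ... each entry starting where the previous one ended, ending with ...
    static const int kBytecodeAgeOffset = kOSRNestingLevelOffset + kCharSize;
    static const int kHeaderSize = kBytecodeAgeOffset + kCharSize;
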
// Maximal memory consumption for a single BytecodeArray.
static const int kMaxSize = 512 * MB;
// Maximal length of a single BytecodeArray.
static const int kMaxLength = kMaxSize - kHeaderSize;
- static const int kPointerFieldsBeginOffset = kConstantPoolOffset;
- static const int kPointerFieldsEndOffset = kFrameSizeOffset;
-
class BodyDescriptor;
// No weak fields.
typedef BodyDescriptor BodyDescriptorWeak;
@@ -3644,12 +3645,10 @@ class Code: public HeapObject {
// cache state, and arguments count.
typedef uint32_t Flags;
-#define NON_IC_KIND_LIST(V) \
- V(FUNCTION) \
+#define CODE_KIND_LIST(V) \
V(OPTIMIZED_FUNCTION) \
V(BYTECODE_HANDLER) \
V(STUB) \
- V(HANDLER) \
V(BUILTIN) \
V(REGEXP) \
V(WASM_FUNCTION) \
@@ -3658,18 +3657,6 @@ class Code: public HeapObject {
V(WASM_INTERPRETER_ENTRY) \
V(C_WASM_ENTRY)
-#define IC_KIND_LIST(V) \
- V(LOAD_IC) \
- V(LOAD_GLOBAL_IC) \
- V(KEYED_LOAD_IC) \
- V(STORE_IC) \
- V(STORE_GLOBAL_IC) \
- V(KEYED_STORE_IC)
-
-#define CODE_KIND_LIST(V) \
- NON_IC_KIND_LIST(V) \
- IC_KIND_LIST(V)
-
enum Kind {
#define DEFINE_CODE_KIND_ENUM(name) name,
CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM)
@@ -3679,13 +3666,9 @@ class Code: public HeapObject {
static const char* Kind2String(Kind kind);
- static const int kPrologueOffsetNotSet = -1;
-
#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
// Printing
static const char* ICState2String(InlineCacheState state);
- static void PrintExtraICState(std::ostream& os, // NOLINT
- Kind kind, ExtraICState extra);
#endif // defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
#ifdef ENABLE_DISASSEMBLER
@@ -3719,12 +3702,10 @@ class Code: public HeapObject {
// [raw_type_feedback_info]: This field stores various things, depending on
// the kind of the code object.
- // FUNCTION => type feedback information.
// STUB and ICs => major/minor key as Smi.
+ // TODO(mvstanton): rename raw_type_feedback_info to stub_key, since the
+ // field is no longer overloaded.
DECL_ACCESSORS(raw_type_feedback_info, Object)
- inline Object* type_feedback_info() const;
- inline void set_type_feedback_info(
- Object* value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline uint32_t stub_key() const;
inline void set_stub_key(uint32_t key);
@@ -3732,11 +3713,6 @@ class Code: public HeapObject {
// Note that storage for this field is overlapped with type_feedback_info.
DECL_ACCESSORS(next_code_link, Object)
- // [prologue_offset]: Offset of the function prologue, used for aging
- // FUNCTIONs and OPTIMIZED_FUNCTIONs.
- inline int prologue_offset() const;
- inline void set_prologue_offset(int offset);
-
// [constant_pool offset]: Offset of the constant pool.
// Valid for FLAG_enable_embedded_constant_pool only
inline int constant_pool_offset() const;
@@ -3747,22 +3723,14 @@ class Code: public HeapObject {
inline int relocation_size() const;
- // [flags]: Various code flags.
- inline Flags flags() const;
- inline void set_flags(Flags flags);
-
- // [flags]: Access to specific code flags.
+ // [kind]: Access to specific code kind.
inline Kind kind() const;
+ inline void set_kind(Kind kind);
- // Testers for IC stub kinds.
- inline bool is_inline_cache_stub() const;
- inline bool is_handler() const;
inline bool is_stub() const;
inline bool is_optimized_code() const;
inline bool is_wasm_code() const;
- inline bool IsCodeStubOrIC() const;
-
inline void set_raw_kind_specific_flags1(int value);
inline void set_raw_kind_specific_flags2(int value);
@@ -3794,24 +3762,12 @@ class Code: public HeapObject {
inline bool is_construct_stub() const;
inline void set_is_construct_stub(bool value);
- // [has_reloc_info_for_serialization]: For FUNCTION kind, tells if its
- // reloc info includes runtime and external references to support
- // serialization/deserialization.
- inline bool has_reloc_info_for_serialization() const;
- inline void set_has_reloc_info_for_serialization(bool value);
-
- // [allow_osr_at_loop_nesting_level]: For FUNCTION kind, tells for
- // how long the function has been marked for OSR and therefore which
- // level of loop nesting we are willing to do on-stack replacement
- // for.
- inline void set_allow_osr_at_loop_nesting_level(int level);
- inline int allow_osr_at_loop_nesting_level() const;
-
// [builtin_index]: For builtins, tells which builtin index the code object
- // has. Note that builtins can have a code kind other than BUILTIN. The
- // builtin index is a non-negative integer for builtins, and -1 otherwise.
+ // has. The builtin index is a non-negative integer for builtins, and -1
+ // otherwise.
inline int builtin_index() const;
inline void set_builtin_index(int id);
+ inline bool is_builtin() const;
// [stack_slots]: For kind OPTIMIZED_FUNCTION, the number of stack slots
// reserved in the code prologue.
@@ -3823,13 +3779,6 @@ class Code: public HeapObject {
inline unsigned safepoint_table_offset() const;
inline void set_safepoint_table_offset(unsigned offset);
- // [back_edge_table_start]: For kind FUNCTION, the offset in the
- // instruction stream where the back edge table starts.
- inline unsigned back_edge_table_offset() const;
- inline void set_back_edge_table_offset(unsigned offset);
-
- inline bool back_edges_patched_for_osr() const;
-
// [marked_for_deoptimization]: For kind OPTIMIZED_FUNCTION tells whether
// the code is going to be deoptimized because of dead embedded maps.
inline bool marked_for_deoptimization() const;
@@ -3858,23 +3807,6 @@ class Code: public HeapObject {
// Get the safepoint entry for the given pc.
SafepointEntry GetSafepointEntry(Address pc);
- // Find an object in a stub with a specified map
- Object* FindNthObject(int n, Map* match_map);
-
- // Find the first allocation site in an IC stub.
- AllocationSite* FindFirstAllocationSite();
-
- // Find the first map in an IC stub.
- Map* FindFirstMap();
-
- // For each (map-to-find, object-to-replace) pair in the pattern, this
- // function replaces the corresponding placeholder in the code with the
- // object-to-replace. The function assumes that pairs in the pattern come in
- // the same order as the placeholders in the code.
- // If the placeholder is a weak cell, then the value of weak cell is matched
- // against the map-to-find.
- void FindAndReplace(const FindAndReplacePattern& pattern);
-
// The entire code object including its header is copied verbatim to the
// snapshot so that it can be written in one, fast, memcpy during
// deserialization. The deserializer will overwrite some pointers, rather
@@ -3887,15 +3819,9 @@ class Code: public HeapObject {
// Clear uninitialized padding space. This ensures that the snapshot content
// is deterministic.
inline void clear_padding();
-
- // Flags operations.
- static inline Flags ComputeFlags(
- Kind kind, ExtraICState extra_ic_state = kNoExtraICState);
-
- static inline Flags ComputeHandlerFlags(Kind handler_kind);
-
- static inline Kind ExtractKindFromFlags(Flags flags);
- static inline ExtraICState ExtractExtraICStateFromFlags(Flags flags);
+ // Initialize the flags field. Similar to clear_padding above, this ensures
+ // that the snapshot content is deterministic.
+ inline void initialize_flags(Kind kind);
// Convert a target address into a code object.
static inline Code* GetCodeFromTargetAddress(Address address);
@@ -4008,7 +3934,6 @@ class Code: public HeapObject {
#ifdef DEBUG
enum VerifyMode { kNoContextSpecificPointers, kNoContextRetainingPointers };
void VerifyEmbeddedObjects(VerifyMode mode = kNoContextRetainingPointers);
- static void VerifyRecompiledCode(Code* old_code, Code* new_code);
#endif // DEBUG
inline bool CanContainWeakObjects();
@@ -4020,6 +3945,23 @@ class Code: public HeapObject {
static Handle<WeakCell> WeakCellFor(Handle<Code> code);
WeakCell* CachedWeakCell();
+ // Returns true if the given function is inlined in this code object.
+ bool Inlines(SharedFunctionInfo* sfi);
+
+ class OptimizedCodeIterator {
+ public:
+ explicit OptimizedCodeIterator(Isolate* isolate);
+ Code* Next();
+
+ private:
+ Context* next_context_;
+ Code* current_code_;
+ Isolate* isolate_;
+
+ DisallowHeapAllocation no_gc;
+ DISALLOW_COPY_AND_ASSIGN(OptimizedCodeIterator);
+ };
+
static const int kConstantPoolSize =
FLAG_enable_embedded_constant_pool ? kIntSize : 0;
@@ -4039,22 +3981,22 @@ class Code: public HeapObject {
static const int kKindSpecificFlags1Offset = kFlagsOffset + kIntSize;
static const int kKindSpecificFlags2Offset =
kKindSpecificFlags1Offset + kIntSize;
- // Note: We might be able to squeeze this into the flags above.
- static const int kPrologueOffset = kKindSpecificFlags2Offset + kIntSize;
- static const int kConstantPoolOffset = kPrologueOffset + kIntSize;
+ static const int kConstantPoolOffset = kKindSpecificFlags2Offset + kIntSize;
static const int kBuiltinIndexOffset =
kConstantPoolOffset + kConstantPoolSize;
static const int kTrapHandlerIndex = kBuiltinIndexOffset + kIntSize;
static const int kHeaderPaddingStart = kTrapHandlerIndex + kPointerSize;
- enum TrapFields { kTrapCodeOffset, kTrapLandingOffset, kTrapDataSize };
-
-
// Add padding to align the instruction start following right after
// the Code object header.
static const int kHeaderSize =
(kHeaderPaddingStart + kCodeAlignmentMask) & ~kCodeAlignmentMask;
+ // Data or code not directly visited by GC starts here.
+ // The serializer needs to copy bytes starting from here verbatim.
+ // Objects embedded into code are visited via reloc info.
+ static const int kDataStart = kInstructionSizeOffset;
+
inline int GetUnwindingInfoSizeOffset() const;
class BodyDescriptor;
@@ -4063,16 +4005,6 @@ class Code: public HeapObject {
class HasUnwindingInfoField : public BitField<bool, 0, 1> {};
class KindField : public BitField<Kind, HasUnwindingInfoField::kNext, 5> {};
STATIC_ASSERT(NUMBER_OF_KINDS <= KindField::kMax);
- class ExtraICStateField
- : public BitField<ExtraICState, KindField::kNext,
- PlatformSmiTagging::kSmiValueSize - KindField::kNext> {
- };
-
- // KindSpecificFlags1 layout (FUNCTION)
- static const int kFullCodeFlags = kKindSpecificFlags1Offset;
- static const int kFullCodeFlagsHasRelocInfoForSerialization = 0;
- class FullCodeFlagsHasRelocInfoForSerialization
- : public BitField<bool, kFullCodeFlagsHasRelocInfoForSerialization, 1> {};
// KindSpecificFlags1 layout (STUB, BUILTIN and OPTIMIZED_FUNCTION)
static const int kStackSlotsFirstBit = 0;
@@ -4123,12 +4055,6 @@ class Code: public HeapObject {
kSafepointTableOffsetFirstBit,
kSafepointTableOffsetBitCount> {}; // NOLINT
- // KindSpecificFlags2 layout (FUNCTION)
- class BackEdgeTableOffsetField
- : public BitField<int, kHasTaggedStackBit + 1, 27> {}; // NOLINT
- class AllowOSRAtLoopNestingLevelField
- : public BitField<int, kHasTaggedStackBit + 1 + 27, 4> {}; // NOLINT
-
static const int kArgumentsBits = 16;
static const int kMaxArguments = (1 << kArgumentsBits) - 1;
@@ -4197,8 +4123,6 @@ class AbstractCode : public HeapObject {
// Max loop nesting marker used to postpone OSR. We don't take loop
// nesting that is deeper than 5 levels into account.
static const int kMaxLoopNestingMarker = 6;
- STATIC_ASSERT(Code::AllowOSRAtLoopNestingLevelField::kMax >=
- kMaxLoopNestingMarker);
};
// Dependent code is a singly linked list of fixed arrays. Each array contains
@@ -4329,6 +4253,25 @@ class Struct: public HeapObject {
void BriefPrintDetails(std::ostream& os);
};
+class PromiseCapability : public Struct {
+ public:
+ DECL_CAST(PromiseCapability)
+ DECL_PRINTER(PromiseCapability)
+ DECL_VERIFIER(PromiseCapability)
+
+ DECL_ACCESSORS(promise, Object)
+ DECL_ACCESSORS(resolve, Object)
+ DECL_ACCESSORS(reject, Object)
+
+ static const int kPromiseOffset = Struct::kHeaderSize;
+ static const int kResolveOffset = kPromiseOffset + kPointerSize;
+ static const int kRejectOffset = kResolveOffset + kPointerSize;
+ static const int kSize = kRejectOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseCapability);
+};
+
// A container struct to hold state required for PromiseResolveThenableJob.
class PromiseResolveThenableJobInfo : public Struct {
public:
@@ -4579,6 +4522,7 @@ class ContextExtension : public Struct {
V(Function.prototype, call, FunctionCall) \
V(Object, assign, ObjectAssign) \
V(Object, create, ObjectCreate) \
+ V(Object, is, ObjectIs) \
V(Object.prototype, hasOwnProperty, ObjectHasOwnProperty) \
V(Object.prototype, isPrototypeOf, ObjectIsPrototypeOf) \
V(Object.prototype, toString, ObjectToString) \
@@ -4696,6 +4640,7 @@ enum BuiltinFunctionId {
kMathPowHalf,
// These are manually assigned to special getters during bootstrapping.
kArrayBufferByteLength,
+ kArrayBufferIsView,
kArrayEntries,
kArrayKeys,
kArrayValues,
@@ -4721,6 +4666,7 @@ enum BuiltinFunctionId {
kTypedArrayEntries,
kTypedArrayKeys,
kTypedArrayLength,
+ kTypedArrayToStringTag,
kTypedArrayValues,
kSharedArrayBufferByteLength,
kStringIterator,
@@ -4834,6 +4780,8 @@ class JSBoundFunction : public JSObject {
static MaybeHandle<String> GetName(Isolate* isolate,
Handle<JSBoundFunction> function);
+ static Maybe<int> GetLength(Isolate* isolate,
+ Handle<JSBoundFunction> function);
static MaybeHandle<Context> GetFunctionRealm(
Handle<JSBoundFunction> function);
@@ -4879,8 +4827,7 @@ class JSFunction: public JSObject {
inline Context* native_context();
static Handle<Object> GetName(Isolate* isolate, Handle<JSFunction> function);
- static MaybeHandle<Smi> GetLength(Isolate* isolate,
- Handle<JSFunction> function);
+ static Maybe<int> GetLength(Isolate* isolate, Handle<JSFunction> function);
static Handle<Context> GetFunctionRealm(Handle<JSFunction> function);
// [code]: The generated code object for this function. Executed
@@ -4890,15 +4837,11 @@ class JSFunction: public JSObject {
inline Code* code();
inline void set_code(Code* code);
inline void set_code_no_write_barrier(Code* code);
- inline void ReplaceCode(Code* code);
// Get the abstract code associated with the function, which will either be
// a Code object or a BytecodeArray.
inline AbstractCode* abstract_code();
- // Tells whether this function inlines the given shared function info.
- bool Inlines(SharedFunctionInfo* candidate);
-
// Tells whether or not this function is interpreted.
//
// Note: function->IsInterpreted() does not necessarily return the same value
@@ -4994,11 +4937,6 @@ class JSFunction: public JSObject {
// Returns if this function has been compiled to native code yet.
inline bool is_compiled();
- // [next_function_link]: Links functions into various lists, e.g. the list
- // of optimized functions hanging off the native_context. Treated weakly
- // by the garbage collector.
- DECL_ACCESSORS(next_function_link, Object)
-
// Prints the name of the function using PrintF.
void PrintName(FILE* out = stdout);
@@ -5014,13 +4952,8 @@ class JSFunction: public JSObject {
int requested_in_object_properties,
int* instance_size,
int* in_object_properties);
- enum BodyVisitingPolicy { kIgnoreWeakness, kRespectWeakness };
- // Iterates the function object according to the visiting policy.
- template <BodyVisitingPolicy>
- class BodyDescriptorImpl;
- typedef BodyDescriptorImpl<kIgnoreWeakness> BodyDescriptor;
- typedef BodyDescriptorImpl<kRespectWeakness> BodyDescriptorWeak;
+ class BodyDescriptor;
// Dispatched behavior.
DECL_PRINTER(JSFunction)
@@ -5054,9 +4987,7 @@ class JSFunction: public JSObject {
static const int kContextOffset = kSharedFunctionInfoOffset + kPointerSize;
static const int kFeedbackVectorOffset = kContextOffset + kPointerSize;
static const int kCodeOffset = kFeedbackVectorOffset + kPointerSize;
- static const int kNonWeakFieldsEndOffset = kCodeOffset + kPointerSize;
- static const int kNextFunctionLinkOffset = kNonWeakFieldsEndOffset;
- static const int kSize = kNextFunctionLinkOffset + kPointerSize;
+ static const int kSize = kCodeOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSFunction);
@@ -5290,6 +5221,8 @@ class JSMessageObject: public JSObject {
inline int end_position() const;
inline void set_end_position(int value);
+ // Returns the line number for the error message (1-based), or
+ // Message::kNoLineNumberInfo if the line cannot be determined.
int GetLineNumber() const;
// Returns the offset of the given position within the containing line.
@@ -5325,36 +5258,6 @@ class JSMessageObject: public JSObject {
typedef BodyDescriptor BodyDescriptorWeak;
};
-class JSPromise;
-
-// TODO(caitp): Make this a Struct once properties are no longer accessed from
-// JS
-class JSPromiseCapability : public JSObject {
- public:
- DECL_CAST(JSPromiseCapability)
-
- DECL_VERIFIER(JSPromiseCapability)
-
- DECL_ACCESSORS(promise, Object)
- DECL_ACCESSORS(resolve, Object)
- DECL_ACCESSORS(reject, Object)
-
- static const int kPromiseOffset = JSObject::kHeaderSize;
- static const int kResolveOffset = kPromiseOffset + kPointerSize;
- static const int kRejectOffset = kResolveOffset + kPointerSize;
- static const int kSize = kRejectOffset + kPointerSize;
-
- enum InObjectPropertyIndex {
- kPromiseIndex,
- kResolveIndex,
- kRejectIndex,
- kInObjectPropertyCount // Dummy.
- };
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSPromiseCapability);
-};
-
class JSPromise : public JSObject {
public:
DECL_ACCESSORS(result, Object)
@@ -5782,18 +5685,6 @@ class Relocatable BASE_EMBEDDED {
Relocatable* prev_;
};
-template <typename T>
-class VectorIterator {
- public:
- VectorIterator(T* d, int l) : data_(Vector<const T>(d, l)), index_(0) { }
- explicit VectorIterator(Vector<const T> data) : data_(data), index_(0) { }
- T GetNext() { return data_[index_++]; }
- bool has_more() { return index_ < data_.length(); }
- private:
- Vector<const T> data_;
- int index_;
-};
-
// The Oddball describes objects null, undefined, true, and false.
class Oddball: public HeapObject {
@@ -6049,10 +5940,13 @@ class JSProxy: public JSReceiver {
Isolate* isolate, Handle<JSProxy> proxy, Handle<Name> name,
Handle<Object> receiver, bool* was_found);
- static MaybeHandle<Object> CheckGetTrapResult(Isolate* isolate,
- Handle<Name> name,
- Handle<JSReceiver> target,
- Handle<Object> trap_result);
+ enum AccessKind { kGet, kSet };
+
+ static MaybeHandle<Object> CheckGetSetTrapResult(Isolate* isolate,
+ Handle<Name> name,
+ Handle<JSReceiver> target,
+ Handle<Object> trap_result,
+ AccessKind access_kind);
// ES6 9.5.9
MUST_USE_RESULT static Maybe<bool> SetProperty(Handle<JSProxy> proxy,
@@ -6487,7 +6381,7 @@ class JSTypedArray: public JSArrayBufferView {
DECL_PRINTER(JSTypedArray)
DECL_VERIFIER(JSTypedArray)
- static const int kLengthOffset = kViewSize + kPointerSize;
+ static const int kLengthOffset = kViewSize;
static const int kSize = kLengthOffset + kPointerSize;
static const int kSizeWithEmbedderFields =
@@ -6627,8 +6521,8 @@ class JSArray: public JSObject {
static const int kInitialMaxFastElementArray =
(kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - kSize -
- AllocationMemento::kSize) /
- kPointerSize;
+ AllocationMemento::kSize) >>
+ kDoubleSizeLog2;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSArray);
diff --git a/deps/v8/src/objects/bigint-inl.h b/deps/v8/src/objects/bigint-inl.h
new file mode 100644
index 0000000000..c22620176e
--- /dev/null
+++ b/deps/v8/src/objects/bigint-inl.h
@@ -0,0 +1,56 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_BIGINT_INL_H_
+#define V8_OBJECTS_BIGINT_INL_H_
+
+#include "src/objects/bigint.h"
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+int BigInt::length() const {
+ intptr_t bitfield = READ_INTPTR_FIELD(this, kBitfieldOffset);
+ return LengthBits::decode(static_cast<uint32_t>(bitfield));
+}
+void BigInt::set_length(int new_length) {
+ intptr_t bitfield = READ_INTPTR_FIELD(this, kBitfieldOffset);
+ bitfield = LengthBits::update(static_cast<uint32_t>(bitfield), new_length);
+ WRITE_INTPTR_FIELD(this, kBitfieldOffset, bitfield);
+}
+
+bool BigInt::sign() const {
+ intptr_t bitfield = READ_INTPTR_FIELD(this, kBitfieldOffset);
+ return SignBits::decode(static_cast<uint32_t>(bitfield));
+}
+void BigInt::set_sign(bool new_sign) {
+ intptr_t bitfield = READ_INTPTR_FIELD(this, kBitfieldOffset);
+ bitfield = SignBits::update(static_cast<uint32_t>(bitfield), new_sign);
+ WRITE_INTPTR_FIELD(this, kBitfieldOffset, bitfield);
+}
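An illustrative round-trip through the bitfield word these accessors manipulate (LengthBits and SignBits are the class's own BitFields; the values are made up):

    uint32_t bitfield = 0;
    bitfield = BigInt::LengthBits::update(bitfield, 4);   // 4 digits
    bitfield = BigInt::SignBits::update(bitfield, true);  // negative
    DCHECK_EQ(4, BigInt::LengthBits::decode(bitfield));
    DCHECK(BigInt::SignBits::decode(bitfield));
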
+
+BigInt::digit_t BigInt::digit(int n) const {
+ SLOW_DCHECK(0 <= n && n < length());
+ const byte* address = FIELD_ADDR_CONST(this, kDigitsOffset + n * kDigitSize);
+ return *reinterpret_cast<digit_t*>(reinterpret_cast<intptr_t>(address));
+}
+void BigInt::set_digit(int n, digit_t value) {
+ SLOW_DCHECK(0 <= n && n < length());
+ byte* address = FIELD_ADDR(this, kDigitsOffset + n * kDigitSize);
+ (*reinterpret_cast<digit_t*>(reinterpret_cast<intptr_t>(address))) = value;
+}
+
+TYPE_CHECKER(BigInt, BIGINT_TYPE)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_BIGINT_INL_H_
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
new file mode 100644
index 0000000000..e6fe89dbf1
--- /dev/null
+++ b/deps/v8/src/objects/bigint.cc
@@ -0,0 +1,1346 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Parts of the implementation below:
+
+// Copyright (c) 2014 the Dart project authors. Please see the AUTHORS file [1]
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file [2].
+//
+// [1] https://github.com/dart-lang/sdk/blob/master/AUTHORS
+// [2] https://github.com/dart-lang/sdk/blob/master/LICENSE
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file [3].
+//
+// [3] https://golang.org/LICENSE
+
+#include "src/objects/bigint.h"
+
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<BigInt> BigInt::UnaryMinus(Handle<BigInt> x) {
+ // Special case: There is no -0n.
+ if (x->is_zero()) {
+ return x;
+ }
+ Handle<BigInt> result = BigInt::Copy(x);
+ result->set_sign(!x->sign());
+ return result;
+}
+
+Handle<BigInt> BigInt::BitwiseNot(Handle<BigInt> x) {
+ UNIMPLEMENTED(); // TODO(jkummerow): Implement.
+}
+
+MaybeHandle<BigInt> BigInt::Exponentiate(Handle<BigInt> base,
+ Handle<BigInt> exponent) {
+ UNIMPLEMENTED(); // TODO(jkummerow): Implement.
+}
+
+Handle<BigInt> BigInt::Multiply(Handle<BigInt> x, Handle<BigInt> y) {
+ if (x->is_zero()) return x;
+ if (y->is_zero()) return y;
+ Handle<BigInt> result =
+ x->GetIsolate()->factory()->NewBigInt(x->length() + y->length());
+ for (int i = 0; i < x->length(); i++) {
+ MultiplyAccumulate(y, x->digit(i), result, i);
+ }
+ result->set_sign(x->sign() != y->sign());
+ result->RightTrim();
+ return result;
+}
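This is schoolbook O(n*m) multiplication: each digit of x is multiplied into all of y and accumulated at the matching offset. A base-10 trace of the loop (digits stored least-significant first):

    // 23 * 45 with x = [3, 2], y = [5, 4]:
    //   i=0: accumulate 45 * 3 at offset 0 -> result holds 135
    //   i=1: accumulate 45 * 2 at offset 1 -> result holds 135 + 900 = 1035
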
+
+MaybeHandle<BigInt> BigInt::Divide(Handle<BigInt> x, Handle<BigInt> y) {
+ // 1. If y is 0n, throw a RangeError exception.
+ if (y->is_zero()) {
+ THROW_NEW_ERROR(y->GetIsolate(),
+ NewRangeError(MessageTemplate::kBigIntDivZero), BigInt);
+ }
+ // 2. Let quotient be the mathematical value of x divided by y.
+ // 3. Return a BigInt representing quotient rounded towards 0 to the next
+ // integral value.
+ if (AbsoluteCompare(x, y) < 0) {
+ // TODO(jkummerow): Consider caching a canonical zero-BigInt.
+ return x->GetIsolate()->factory()->NewBigIntFromInt(0);
+ }
+ Handle<BigInt> quotient;
+ if (y->length() == 1) {
+ digit_t remainder;
+ AbsoluteDivSmall(x, y->digit(0), &quotient, &remainder);
+ } else {
+ AbsoluteDivLarge(x, y, &quotient, nullptr);
+ }
+ quotient->set_sign(x->sign() != y->sign());
+ quotient->RightTrim();
+ return quotient;
+}
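Worked instances of the rounding rule (the quotient is truncated toward zero, which is why the result sign is simply the XOR of the operand signs):

    //   7n /  2n ==  3n   (3.5 truncated toward zero)
    //   7n / -2n == -3n   (truncated toward zero, not floored to -4n)
    //  -7n / -2n ==  3n
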
+
+MaybeHandle<BigInt> BigInt::Remainder(Handle<BigInt> x, Handle<BigInt> y) {
+ // 1. If y is 0n, throw a RangeError exception.
+ if (y->is_zero()) {
+ THROW_NEW_ERROR(y->GetIsolate(),
+ NewRangeError(MessageTemplate::kBigIntDivZero), BigInt);
+ }
+ // 2. Return the BigInt representing x modulo y.
+ // See https://github.com/tc39/proposal-bigint/issues/84 though.
+ if (AbsoluteCompare(x, y) < 0) return x;
+ Handle<BigInt> remainder;
+ if (y->length() == 1) {
+ digit_t remainder_digit;
+ AbsoluteDivSmall(x, y->digit(0), nullptr, &remainder_digit);
+ if (remainder_digit == 0) {
+ return x->GetIsolate()->factory()->NewBigIntFromInt(0);
+ }
+ remainder = x->GetIsolate()->factory()->NewBigIntRaw(1);
+ remainder->set_digit(0, remainder_digit);
+ } else {
+ AbsoluteDivLarge(x, y, nullptr, &remainder);
+ }
+ remainder->set_sign(x->sign());
+ return remainder;
+}
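The remainder inherits the sign of the dividend, matching truncated division:

    //  -7n %  2n == -1n   (floored modulo would give +1n instead)
    //   7n % -2n ==  1n
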
+
+Handle<BigInt> BigInt::Add(Handle<BigInt> x, Handle<BigInt> y) {
+ bool xsign = x->sign();
+ if (xsign == y->sign()) {
+ // x + y == x + y
+ // -x + -y == -(x + y)
+ return AbsoluteAdd(x, y, xsign);
+ }
+ // x + -y == x - y == -(y - x)
+ // -x + y == y - x == -(x - y)
+ if (AbsoluteCompare(x, y) >= 0) {
+ return AbsoluteSub(x, y, xsign);
+ }
+ return AbsoluteSub(y, x, !xsign);
+}
+
+Handle<BigInt> BigInt::Subtract(Handle<BigInt> x, Handle<BigInt> y) {
+ bool xsign = x->sign();
+ if (xsign != y->sign()) {
+ // x - (-y) == x + y
+ // (-x) - y == -(x + y)
+ return AbsoluteAdd(x, y, xsign);
+ }
+ // x - y == -(y - x)
+ // (-x) - (-y) == y - x == -(x - y)
+ if (AbsoluteCompare(x, y) >= 0) {
+ return AbsoluteSub(x, y, xsign);
+ }
+ return AbsoluteSub(y, x, !xsign);
+}
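A worked trace of the sign handling in Add and Subtract above:

    // Add: x = 5n, y = -3n: signs differ, |5| >= |3| -> AbsoluteSub(5, 3, false) ==  2n
    // Add: x = 3n, y = -5n: signs differ, |3| <  |5| -> AbsoluteSub(5, 3, true)  == -2n
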
+
+MaybeHandle<BigInt> BigInt::LeftShift(Handle<BigInt> x, Handle<BigInt> y) {
+ if (y->is_zero() || x->is_zero()) return x;
+ if (y->sign()) return RightShiftByAbsolute(x, y);
+ return LeftShiftByAbsolute(x, y);
+}
+
+MaybeHandle<BigInt> BigInt::SignedRightShift(Handle<BigInt> x,
+ Handle<BigInt> y) {
+ if (y->is_zero() || x->is_zero()) return x;
+ if (y->sign()) return LeftShiftByAbsolute(x, y);
+ return RightShiftByAbsolute(x, y);
+}
+
+MaybeHandle<BigInt> BigInt::UnsignedRightShift(Handle<BigInt> x,
+ Handle<BigInt> y) {
+ THROW_NEW_ERROR(x->GetIsolate(), NewTypeError(MessageTemplate::kBigIntShr),
+ BigInt);
+}
+
+bool BigInt::LessThan(Handle<BigInt> x, Handle<BigInt> y) {
+ UNIMPLEMENTED(); // TODO(jkummerow): Implement.
+}
+
+bool BigInt::Equal(BigInt* x, BigInt* y) {
+ if (x->sign() != y->sign()) return false;
+ if (x->length() != y->length()) return false;
+ for (int i = 0; i < x->length(); i++) {
+ if (x->digit(i) != y->digit(i)) return false;
+ }
+ return true;
+}
+
+Handle<BigInt> BigInt::BitwiseAnd(Handle<BigInt> x, Handle<BigInt> y) {
+ Handle<BigInt> result;
+ if (!x->sign() && !y->sign()) {
+ result = AbsoluteAnd(x, y);
+ } else if (x->sign() && y->sign()) {
+ int result_length = Max(x->length(), y->length()) + 1;
+ // (-x) & (-y) == ~(x-1) & ~(y-1) == ~((x-1) | (y-1))
+ // == -(((x-1) | (y-1)) + 1)
+ result = AbsoluteSubOne(x, result_length);
+ result = AbsoluteOr(result, AbsoluteSubOne(y, y->length()), *result);
+ result = AbsoluteAddOne(result, true, *result);
+ } else {
+ DCHECK(x->sign() != y->sign());
+ // Assume that x is the positive BigInt.
+ if (x->sign()) std::swap(x, y);
+ // x & (-y) == x & ~(y-1) == x &~ (y-1)
+ result = AbsoluteAndNot(x, AbsoluteSubOne(y, y->length()));
+ }
+ result->RightTrim();
+ return result;
+}
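A worked instance of the both-negative identity (BitwiseXor and BitwiseOr below follow the same (x-1)/(y-1) pattern):

    // x = -2n, y = -3n, so the absolute values are 2 and 3:
    //   (2-1) | (3-1) == 1 | 2 == 3; 3 + 1 == 4; negated -> -4n
    // which matches two's-complement AND: ...11110 & ...11101 == ...11100.
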
+
+Handle<BigInt> BigInt::BitwiseXor(Handle<BigInt> x, Handle<BigInt> y) {
+ Handle<BigInt> result;
+ if (!x->sign() && !y->sign()) {
+ result = AbsoluteXor(x, y);
+ } else if (x->sign() && y->sign()) {
+ int result_length = Max(x->length(), y->length());
+ // (-x) ^ (-y) == ~(x-1) ^ ~(y-1) == (x-1) ^ (y-1)
+ result = AbsoluteSubOne(x, result_length);
+ result = AbsoluteXor(result, AbsoluteSubOne(y, y->length()), *result);
+ } else {
+ DCHECK(x->sign() != y->sign());
+ int result_length = Max(x->length(), y->length()) + 1;
+ // Assume that x is the positive BigInt.
+ if (x->sign()) std::swap(x, y);
+ // x ^ (-y) == x ^ ~(y-1) == ~(x ^ (y-1)) == -((x ^ (y-1)) + 1)
+ result = AbsoluteSubOne(y, result_length);
+ result = AbsoluteXor(result, x, *result);
+ result = AbsoluteAddOne(result, true, *result);
+ }
+ result->RightTrim();
+ return result;
+}
+
+Handle<BigInt> BigInt::BitwiseOr(Handle<BigInt> x, Handle<BigInt> y) {
+ Handle<BigInt> result;
+ int result_length = Max(x->length(), y->length());
+ if (!x->sign() && !y->sign()) {
+ result = AbsoluteOr(x, y);
+ } else if (x->sign() && y->sign()) {
+ // (-x) | (-y) == ~(x-1) | ~(y-1) == ~((x-1) & (y-1))
+ // == -(((x-1) & (y-1)) + 1)
+ result = AbsoluteSubOne(x, result_length);
+ result = AbsoluteAnd(result, AbsoluteSubOne(y, y->length()), *result);
+ result = AbsoluteAddOne(result, true, *result);
+ } else {
+ DCHECK(x->sign() != y->sign());
+ // Assume that x is the positive BigInt.
+ if (x->sign()) std::swap(x, y);
+ // x | (-y) == x | ~(y-1) == ~((y-1) &~ x) == -(((y-1) &~ x) + 1)
+ result = AbsoluteSubOne(y, result_length);
+ result = AbsoluteAndNot(result, x, *result);
+ result = AbsoluteAddOne(result, true, *result);
+ }
+ result->RightTrim();
+ return result;
+}
+
+MaybeHandle<String> BigInt::ToString(Handle<BigInt> bigint, int radix) {
+ Isolate* isolate = bigint->GetIsolate();
+ if (bigint->is_zero()) {
+ return isolate->factory()->NewStringFromStaticChars("0");
+ }
+ if (base::bits::IsPowerOfTwo(radix)) {
+ return ToStringBasePowerOfTwo(bigint, radix);
+ }
+ return ToStringGeneric(bigint, radix);
+}
+
+void BigInt::Initialize(int length, bool zero_initialize) {
+ set_length(length);
+ set_sign(false);
+ if (zero_initialize) {
+ memset(reinterpret_cast<void*>(reinterpret_cast<Address>(this) +
+ kDigitsOffset - kHeapObjectTag),
+ 0, length * kDigitSize);
+#ifdef DEBUG
+ } else {
+ memset(reinterpret_cast<void*>(reinterpret_cast<Address>(this) +
+ kDigitsOffset - kHeapObjectTag),
+ 0xbf, length * kDigitSize);
+#endif
+ }
+}
+
+void BigInt::BigIntShortPrint(std::ostream& os) {
+ if (sign()) os << "-";
+ int len = length();
+ if (len == 0) {
+ os << "0";
+ return;
+ }
+ if (len > 1) os << "...";
+ os << digit(0);
+}
+
+// Private helpers for public methods.
+
+Handle<BigInt> BigInt::AbsoluteAdd(Handle<BigInt> x, Handle<BigInt> y,
+ bool result_sign) {
+ if (x->length() < y->length()) return AbsoluteAdd(y, x, result_sign);
+ if (x->is_zero()) {
+ DCHECK(y->is_zero());
+ return x;
+ }
+ if (y->is_zero()) {
+ return result_sign == x->sign() ? x : UnaryMinus(x);
+ }
+ Handle<BigInt> result =
+ x->GetIsolate()->factory()->NewBigIntRaw(x->length() + 1);
+ digit_t carry = 0;
+ int i = 0;
+ for (; i < y->length(); i++) {
+ digit_t new_carry = 0;
+ digit_t sum = digit_add(x->digit(i), y->digit(i), &new_carry);
+ sum = digit_add(sum, carry, &new_carry);
+ result->set_digit(i, sum);
+ carry = new_carry;
+ }
+ for (; i < x->length(); i++) {
+ digit_t new_carry = 0;
+ digit_t sum = digit_add(x->digit(i), carry, &new_carry);
+ result->set_digit(i, sum);
+ carry = new_carry;
+ }
+ result->set_digit(i, carry);
+ result->set_sign(result_sign);
+ result->RightTrim();
+ return result;
+}
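A digit-level trace of the carry chain (8-bit digits assumed purely for readability; real digits are word-sized):

    // x = [0xFF, 0x01] (== 0x01FF), y = [0x01]:
    //   i=0: 0xFF + 0x01 -> digit 0x00, carry 1
    //   i=1: 0x01 + carry -> digit 0x02, carry 0
    //   finally: store carry 0 as the top digit; RightTrim drops it,
    //   leaving [0x00, 0x02] == 0x0200.
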
+
+Handle<BigInt> BigInt::AbsoluteSub(Handle<BigInt> x, Handle<BigInt> y,
+ bool result_sign) {
+ DCHECK(x->length() >= y->length());
+ SLOW_DCHECK(AbsoluteCompare(x, y) >= 0);
+ if (x->is_zero()) {
+ DCHECK(y->is_zero());
+ return x;
+ }
+ if (y->is_zero()) {
+ return result_sign == x->sign() ? x : UnaryMinus(x);
+ }
+ Handle<BigInt> result = x->GetIsolate()->factory()->NewBigIntRaw(x->length());
+ digit_t borrow = 0;
+ int i = 0;
+ for (; i < y->length(); i++) {
+ digit_t new_borrow = 0;
+ digit_t difference = digit_sub(x->digit(i), y->digit(i), &new_borrow);
+ difference = digit_sub(difference, borrow, &new_borrow);
+ result->set_digit(i, difference);
+ borrow = new_borrow;
+ }
+ for (; i < x->length(); i++) {
+ digit_t new_borrow = 0;
+ digit_t difference = digit_sub(x->digit(i), borrow, &new_borrow);
+ result->set_digit(i, difference);
+ borrow = new_borrow;
+ }
+ DCHECK_EQ(0, borrow);
+ result->set_sign(result_sign);
+ result->RightTrim();
+ return result;
+}
+
+// Adds 1 to the absolute value of {x}, stores the result in {result_storage}
+// and sets its sign to {sign}.
+// {result_storage} and {x} may refer to the same BigInt for in-place
+// modification.
+Handle<BigInt> BigInt::AbsoluteAddOne(Handle<BigInt> x, bool sign,
+ BigInt* result_storage) {
+ DCHECK(result_storage != nullptr);
+ int input_length = x->length();
+ int result_length = result_storage->length();
+ Isolate* isolate = x->GetIsolate();
+ Handle<BigInt> result(result_storage, isolate);
+ digit_t carry = 1;
+ for (int i = 0; i < input_length; i++) {
+ digit_t new_carry = 0;
+ result->set_digit(i, digit_add(x->digit(i), carry, &new_carry));
+ carry = new_carry;
+ }
+ if (result_length > input_length) {
+ result->set_digit(input_length, carry);
+ } else {
+ DCHECK(carry == 0);
+ }
+ result->set_sign(sign);
+ return result;
+}
+
+// Subtracts 1 from the absolute value of {x}. {x} must not be zero.
+// Allocates a new BigInt of length {result_length} for the result;
+// {result_length} must be at least as large as {x->length()}.
+Handle<BigInt> BigInt::AbsoluteSubOne(Handle<BigInt> x, int result_length) {
+ DCHECK(!x->is_zero());
+ DCHECK(result_length >= x->length());
+ Handle<BigInt> result =
+ x->GetIsolate()->factory()->NewBigIntRaw(result_length);
+ int length = x->length();
+ digit_t borrow = 1;
+ for (int i = 0; i < length; i++) {
+ digit_t new_borrow = 0;
+ result->set_digit(i, digit_sub(x->digit(i), borrow, &new_borrow));
+ borrow = new_borrow;
+ }
+ DCHECK(borrow == 0);
+ for (int i = length; i < result_length; i++) {
+ result->set_digit(i, borrow);
+ }
+ return result;
+}
+
+// Helper for Absolute{And,AndNot,Or,Xor}.
+// Performs the given binary {op} on digit pairs of {x} and {y}; when the
+// end of the shorter of the two is reached, {extra_digits} configures how
+// remaining digits in the longer input are handled: copied to the result
+// or ignored.
+// If {result_storage} is non-nullptr, it will be used for the result and
+// any extra digits in it will be zeroed out; otherwise a new BigInt (with
+// the same length as the longer input) will be allocated.
+// {result_storage} may alias {x} or {y} for in-place modification.
+// Example:
+// y: [ y2 ][ y1 ][ y0 ]
+// x: [ x3 ][ x2 ][ x1 ][ x0 ]
+// | | | |
+// (kCopy) (op) (op) (op)
+// | | | |
+// v v v v
+// result_storage: [ 0 ][ x3 ][ r2 ][ r1 ][ r0 ]
+inline Handle<BigInt> BigInt::AbsoluteBitwiseOp(
+ Handle<BigInt> x, Handle<BigInt> y, BigInt* result_storage,
+ ExtraDigitsHandling extra_digits,
+ std::function<digit_t(digit_t, digit_t)> op) {
+ int x_length = x->length();
+ int y_length = y->length();
+ if (x_length < y_length) {
+ return AbsoluteBitwiseOp(y, x, result_storage, extra_digits, op);
+ }
+ Isolate* isolate = x->GetIsolate();
+ Handle<BigInt> result(result_storage, isolate);
+ int result_length = extra_digits == kCopy ? x_length : y_length;
+ if (result_storage == nullptr) {
+ result = isolate->factory()->NewBigIntRaw(result_length);
+ } else {
+ DCHECK(result_storage->length() >= result_length);
+ result_length = result_storage->length();
+ }
+ int i = 0;
+ for (; i < y_length; i++) {
+ result->set_digit(i, op(x->digit(i), y->digit(i)));
+ }
+ if (extra_digits == kCopy) {
+ for (; i < x_length; i++) {
+ result->set_digit(i, x->digit(i));
+ }
+ }
+ for (; i < result_length; i++) {
+ result->set_digit(i, 0);
+ }
+ return result;
+}
+
+// If {result_storage} is non-nullptr, it will be used for the result,
+// otherwise a new BigInt of appropriate length will be allocated.
+// {result_storage} may alias {x} or {y} for in-place modification.
+Handle<BigInt> BigInt::AbsoluteAnd(Handle<BigInt> x, Handle<BigInt> y,
+ BigInt* result_storage) {
+ return AbsoluteBitwiseOp(x, y, result_storage, kSkip,
+ [](digit_t a, digit_t b) { return a & b; });
+}
+
+// If {result_storage} is non-nullptr, it will be used for the result,
+// otherwise a new BigInt of appropriate length will be allocated.
+// {result_storage} may alias {x} or {y} for in-place modification.
+Handle<BigInt> BigInt::AbsoluteAndNot(Handle<BigInt> x, Handle<BigInt> y,
+ BigInt* result_storage) {
+ return AbsoluteBitwiseOp(x, y, result_storage, kCopy,
+ [](digit_t a, digit_t b) { return a & ~b; });
+}
+
+// If {result_storage} is non-nullptr, it will be used for the result,
+// otherwise a new BigInt of appropriate length will be allocated.
+// {result_storage} may alias {x} or {y} for in-place modification.
+Handle<BigInt> BigInt::AbsoluteOr(Handle<BigInt> x, Handle<BigInt> y,
+ BigInt* result_storage) {
+ return AbsoluteBitwiseOp(x, y, result_storage, kCopy,
+ [](digit_t a, digit_t b) { return a | b; });
+}
+
+// If {result_storage} is non-nullptr, it will be used for the result,
+// otherwise a new BigInt of appropriate length will be allocated.
+// {result_storage} may alias {x} or {y} for in-place modification.
+Handle<BigInt> BigInt::AbsoluteXor(Handle<BigInt> x, Handle<BigInt> y,
+ BigInt* result_storage) {
+ return AbsoluteBitwiseOp(x, y, result_storage, kCopy,
+ [](digit_t a, digit_t b) { return a ^ b; });
+}
+
+// Returns a positive value if abs(x) > abs(y), a negative value if
+// abs(x) < abs(y), or zero if abs(x) == abs(y).
+int BigInt::AbsoluteCompare(Handle<BigInt> x, Handle<BigInt> y) {
+ int diff = x->length() - y->length();
+ if (diff != 0) return diff;
+ int i = x->length() - 1;
+ while (i >= 0 && x->digit(i) == y->digit(i)) i--;
+ if (i < 0) return 0;
+ return x->digit(i) > y->digit(i) ? 1 : -1;
+}
+
+// Multiplies {multiplicand} with {multiplier} and adds the result to
+// {accumulator}, starting at {accumulator_index} for the least-significant
+// digit.
+// Callers must ensure that {accumulator} is big enough to hold the result.
+void BigInt::MultiplyAccumulate(Handle<BigInt> multiplicand, digit_t multiplier,
+ Handle<BigInt> accumulator,
+ int accumulator_index) {
+ // This is a minimum requirement; the DCHECK in the second loop below
+ // will enforce more as needed.
+ DCHECK(accumulator->length() > multiplicand->length() + accumulator_index);
+ if (multiplier == 0L) return;
+ digit_t carry = 0;
+ digit_t high = 0;
+ for (int i = 0; i < multiplicand->length(); i++, accumulator_index++) {
+ digit_t acc = accumulator->digit(accumulator_index);
+ digit_t new_carry = 0;
+ // Add last round's carryovers.
+ acc = digit_add(acc, high, &new_carry);
+ acc = digit_add(acc, carry, &new_carry);
+ // Compute this round's multiplication.
+ digit_t m_digit = multiplicand->digit(i);
+ digit_t low = digit_mul(multiplier, m_digit, &high);
+ acc = digit_add(acc, low, &new_carry);
+ // Store result and prepare for next round.
+ accumulator->set_digit(accumulator_index, acc);
+ carry = new_carry;
+ }
+ for (; carry != 0 || high != 0; accumulator_index++) {
+ DCHECK(accumulator_index < accumulator->length());
+ digit_t acc = accumulator->digit(accumulator_index);
+ digit_t new_carry = 0;
+ acc = digit_add(acc, high, &new_carry);
+ high = 0;
+ acc = digit_add(acc, carry, &new_carry);
+ accumulator->set_digit(accumulator_index, acc);
+ carry = new_carry;
+ }
+}
+
+// Multiplies {source} with {factor} and adds {summand} to the result.
+// {result} and {source} may be the same BigInt for in-place modification.
+void BigInt::InternalMultiplyAdd(BigInt* source, digit_t factor,
+ digit_t summand, int n, BigInt* result) {
+ DCHECK(source->length() >= n);
+ DCHECK(result->length() >= n);
+ digit_t carry = summand;
+ digit_t high = 0;
+ for (int i = 0; i < n; i++) {
+ digit_t current = source->digit(i);
+ digit_t new_carry = 0;
+ // Compute this round's multiplication.
+ digit_t new_high = 0;
+ current = digit_mul(current, factor, &new_high);
+ // Add last round's carryovers.
+ current = digit_add(current, high, &new_carry);
+ current = digit_add(current, carry, &new_carry);
+ // Store result and prepare for next round.
+ result->set_digit(i, current);
+ carry = new_carry;
+ high = new_high;
+ }
+ if (result->length() > n) {
+ result->set_digit(n++, carry + high);
+ // Current callers don't pass in such large results, but let's be robust.
+ while (n < result->length()) {
+ result->set_digit(n++, 0);
+ }
+ } else {
+ CHECK((carry + high) == 0);
+ }
+}
+
+// Multiplies {this} with {factor} and adds {summand} to the result.
+void BigInt::InplaceMultiplyAdd(uintptr_t factor, uintptr_t summand) {
+ STATIC_ASSERT(sizeof(factor) == sizeof(digit_t));
+ STATIC_ASSERT(sizeof(summand) == sizeof(digit_t));
+ InternalMultiplyAdd(this, factor, summand, length(), this);
+}
+
+// Divides {x} by {divisor}, returning the result in {quotient} and {remainder}.
+// Mathematically, the contract is:
+// quotient = (x - remainder) / divisor, with 0 <= remainder < divisor.
+// If {quotient} is an empty handle, an appropriately sized BigInt will be
+// allocated for it; otherwise the caller must ensure that it is big enough.
+// {quotient} can be the same as {x} for an in-place division. {quotient} can
+// also be nullptr if the caller is only interested in the remainder.
+void BigInt::AbsoluteDivSmall(Handle<BigInt> x, digit_t divisor,
+ Handle<BigInt>* quotient, digit_t* remainder) {
+ DCHECK(divisor != 0);
+ DCHECK(!x->is_zero()); // Callers check anyway, no need to handle this.
+ *remainder = 0;
+ if (divisor == 1) {
+ if (quotient != nullptr) *quotient = x;
+ return;
+ }
+
+ int length = x->length();
+ if (quotient != nullptr) {
+ if ((*quotient).is_null()) {
+ *quotient = x->GetIsolate()->factory()->NewBigIntRaw(length);
+ }
+ for (int i = length - 1; i >= 0; i--) {
+ digit_t q = digit_div(*remainder, x->digit(i), divisor, remainder);
+ (*quotient)->set_digit(i, q);
+ }
+ } else {
+ for (int i = length - 1; i >= 0; i--) {
+ digit_div(*remainder, x->digit(i), divisor, remainder);
+ }
+ }
+}
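The loop is grade-school long division from the most significant digit down; a base-10 trace (single-decimal-digit "digits" assumed for readability):

    // x = 174 with digits [4, 7, 1] (least-significant first), divisor = 5:
    //   i=2: digit_div(rem=0, 1, 5) -> q=0, rem=1
    //   i=1: digit_div(rem=1, 7, 5) -> 17/5 -> q=3, rem=2
    //   i=0: digit_div(rem=2, 4, 5) -> 24/5 -> q=4, rem=4
    // quotient digits [4, 3, 0] == 34, remainder 4; check: 34 * 5 + 4 == 174.
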
+
+// Divides {dividend} by {divisor}, returning the result in {quotient} and
+// {remainder}. Mathematically, the contract is:
+// quotient = (dividend - remainder) / divisor, with 0 <= remainder < divisor.
+// Both {quotient} and {remainder} are optional, for callers that are only
+// interested in one of them.
+// See Knuth, Volume 2, section 4.3.1, Algorithm D.
+void BigInt::AbsoluteDivLarge(Handle<BigInt> dividend, Handle<BigInt> divisor,
+ Handle<BigInt>* quotient,
+ Handle<BigInt>* remainder) {
+ DCHECK(divisor->length() >= 2);
+ DCHECK(dividend->length() >= divisor->length());
+ Factory* factory = dividend->GetIsolate()->factory();
+ // The unusual variable names inside this function are consistent with
+ // Knuth's book, as well as with Go's implementation of this algorithm.
+ // Maintaining this consistency is probably more useful than trying to
+ // come up with more descriptive names for them.
+ int n = divisor->length();
+ int m = dividend->length() - n;
+
+ // The quotient to be computed.
+ Handle<BigInt> q;
+ if (quotient != nullptr) q = factory->NewBigIntRaw(m + 1);
+ // In each iteration, {qhatv} holds {divisor} * {current quotient digit}.
+ // "v" is the book's name for {divisor}, "qhat" the current quotient digit.
+ Handle<BigInt> qhatv = factory->NewBigIntRaw(n + 1);
+
+ // D1.
+ // Left-shift inputs so that the divisor's MSB is set. This is necessary
+ // to prevent the digit-wise divisions (see digit_div call below) from
+ // overflowing (they take a two digits wide input, and return a one digit
+ // result).
+ int shift = base::bits::CountLeadingZeros(divisor->digit(n - 1));
+ if (shift > 0) {
+ divisor = SpecialLeftShift(divisor, shift, kSameSizeResult);
+ }
+ // Holds the (continuously updated) remaining part of the dividend, which
+ // eventually becomes the remainder.
+ Handle<BigInt> u = SpecialLeftShift(dividend, shift, kAlwaysAddOneDigit);
+
+ // D2.
+ // Iterate over the dividend's digits (like the "grad school" algorithm).
+ // {vn1} is the divisor's most significant digit.
+ digit_t vn1 = divisor->digit(n - 1);
+ for (int j = m; j >= 0; j--) {
+ // D3.
+ // Estimate the current iteration's quotient digit (see Knuth for details).
+ // {qhat} is the current quotient digit.
+ digit_t qhat = std::numeric_limits<digit_t>::max();
+ // {ujn} is the dividend's most significant remaining digit.
+ digit_t ujn = u->digit(j + n);
+ if (ujn != vn1) {
+ // {rhat} is the current iteration's remainder.
+ digit_t rhat = 0;
+ // Estimate the current quotient digit by dividing the most significant
+ // digits of dividend and divisor. The result will not be too small,
+ // but could be a bit too large.
+ qhat = digit_div(ujn, u->digit(j + n - 1), vn1, &rhat);
+
+ // Decrement the quotient estimate as needed by looking at the next
+ // digit, i.e. by testing whether
+ // qhat * v_{n-2} > (rhat << kDigitBits) + u_{j+n-2}.
+ digit_t vn2 = divisor->digit(n - 2);
+ digit_t ujn2 = u->digit(j + n - 2);
+ while (ProductGreaterThan(qhat, vn2, rhat, ujn2)) {
+ qhat--;
+ digit_t prev_rhat = rhat;
+ rhat += vn1;
+ // v[n-1] >= 0, so this tests for overflow.
+ if (rhat < prev_rhat) break;
+ }
+ }
+
+ // D4.
+ // Multiply the divisor with the current quotient digit, and subtract
+ // it from the dividend. If there was "borrow", then the quotient digit
+ // was one too high, so we must correct it and undo one subtraction of
+ // the (shifted) divisor.
+ InternalMultiplyAdd(*divisor, qhat, 0, n, *qhatv);
+ digit_t c = u->InplaceSub(*qhatv, j);
+ if (c != 0) {
+ c = u->InplaceAdd(*divisor, j);
+ u->set_digit(j + n, u->digit(j + n) + c);
+ qhat--;
+ }
+
+ if (quotient != nullptr) q->set_digit(j, qhat);
+ }
+ if (quotient != nullptr) {
+ *quotient = q; // Caller will right-trim.
+ }
+ if (remainder != nullptr) {
+ u->InplaceRightShift(shift);
+ *remainder = u;
+ }
+}
+
+// Returns whether (factor1 * factor2) > (high << kDigitBits) + low.
+bool BigInt::ProductGreaterThan(digit_t factor1, digit_t factor2, digit_t high,
+ digit_t low) {
+ digit_t result_high;
+ digit_t result_low = digit_mul(factor1, factor2, &result_high);
+ return result_high > high || (result_high == high && result_low > low);
+}
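+
+// Worked example (illustrative, assuming kDigitBits == 8 for brevity):
+// ProductGreaterThan(20, 13, 1, 3) computes 20 * 13 == 260, i.e.
+// {result_high} == 1 and {result_low} == 4, and compares against
+// (1 << 8) + 3 == 259; the high halves tie and 4 > 3, so it returns true.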
+
+// Adds {summand} onto {this}, starting with {summand}'s 0th digit
+// at {this}'s {start_index}'th digit. Returns the "carry" (0 or 1).
+BigInt::digit_t BigInt::InplaceAdd(BigInt* summand, int start_index) {
+ digit_t carry = 0;
+ int n = summand->length();
+ DCHECK(length() >= start_index + n);
+ for (int i = 0; i < n; i++) {
+ digit_t new_carry = 0;
+ digit_t sum =
+ digit_add(digit(start_index + i), summand->digit(i), &new_carry);
+ sum = digit_add(sum, carry, &new_carry);
+ set_digit(start_index + i, sum);
+ carry = new_carry;
+ }
+ return carry;
+}
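+
+// Worked example (illustrative, assuming kDigitBits == 8 and little-endian
+// digits): adding {summand} == [0x01] into {this} == [0xFF, 0x02] at
+// start_index 0 sets digit 0 to 0x00 and returns carry 1; only {summand}'s
+// length is traversed, so the caller must propagate the carry into digit 1.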
+
+// Subtracts {subtrahend} from {this}, starting with {subtrahend}'s 0th digit
+// at {this}'s {start_index}-th digit. Returns the "borrow" (0 or 1).
+BigInt::digit_t BigInt::InplaceSub(BigInt* subtrahend, int start_index) {
+ digit_t borrow = 0;
+ int n = subtrahend->length();
+ DCHECK(length() >= start_index + n);
+ for (int i = 0; i < n; i++) {
+ digit_t new_borrow = 0;
+ digit_t difference =
+ digit_sub(digit(start_index + i), subtrahend->digit(i), &new_borrow);
+ difference = digit_sub(difference, borrow, &new_borrow);
+ set_digit(start_index + i, difference);
+ borrow = new_borrow;
+ }
+ return borrow;
+}
+
+void BigInt::InplaceRightShift(int shift) {
+ DCHECK(shift >= 0);
+ DCHECK(shift < kDigitBits);
+ DCHECK(length() > 0);
+ DCHECK((digit(0) & ((static_cast<digit_t>(1) << shift) - 1)) == 0);
+ if (shift == 0) return;
+ digit_t carry = digit(0) >> shift;
+ int last = length() - 1;
+ for (int i = 0; i < last; i++) {
+ digit_t d = digit(i + 1);
+ set_digit(i, (d << (kDigitBits - shift)) | carry);
+ carry = d >> shift;
+ }
+ set_digit(last, carry);
+ RightTrim();
+}
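+
+// Worked example (illustrative, assuming kDigitBits == 8): shifting
+// [0xF0, 0x0F] (the value 0x0FF0) right by 4 sets digit 0 to
+// (0x0F << 4) | (0xF0 >> 4) == 0xFF and digit 1 to 0x0F >> 4 == 0, which
+// RightTrim() drops, leaving [0xFF] == 0x0FF0 >> 4.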
+
+// Always copies the input, even when {shift} == 0.
+// {shift} must be less than kDigitBits, {x} must be non-zero.
+Handle<BigInt> BigInt::SpecialLeftShift(Handle<BigInt> x, int shift,
+ SpecialLeftShiftMode mode) {
+ DCHECK(shift >= 0);
+ DCHECK(shift < kDigitBits);
+ DCHECK(x->length() > 0);
+ int n = x->length();
+ int result_length = mode == kAlwaysAddOneDigit ? n + 1 : n;
+ Handle<BigInt> result =
+ x->GetIsolate()->factory()->NewBigIntRaw(result_length);
+ digit_t carry = 0;
+ for (int i = 0; i < n; i++) {
+ digit_t d = x->digit(i);
+ result->set_digit(i, (d << shift) | carry);
+ carry = d >> (kDigitBits - shift);
+ }
+ if (mode == kAlwaysAddOneDigit) {
+ result->set_digit(n, carry);
+ } else {
+ DCHECK(mode == kSameSizeResult);
+ DCHECK(carry == 0);
+ }
+ return result;
+}
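+
+// Worked example (illustrative, assuming kDigitBits == 8):
+// SpecialLeftShift([0x81], 1, kAlwaysAddOneDigit) yields [0x02, 0x01],
+// i.e. 0x102 == 0x81 << 1; the carry 0x81 >> 7 == 1 fills the extra digit.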
+
+MaybeHandle<BigInt> BigInt::LeftShiftByAbsolute(Handle<BigInt> x,
+ Handle<BigInt> y) {
+ Isolate* isolate = x->GetIsolate();
+ Maybe<digit_t> maybe_shift = ToShiftAmount(y);
+ if (maybe_shift.IsNothing()) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
+ BigInt);
+ }
+ digit_t shift = maybe_shift.FromJust();
+ int digit_shift = static_cast<int>(shift / kDigitBits);
+ int bits_shift = static_cast<int>(shift % kDigitBits);
+ int length = x->length();
+ bool grow = bits_shift != 0 &&
+ (x->digit(length - 1) >> (kDigitBits - bits_shift)) != 0;
+ int result_length = length + digit_shift + grow;
+ if (result_length > kMaxLength) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
+ BigInt);
+ }
+ Handle<BigInt> result = isolate->factory()->NewBigIntRaw(result_length);
+ if (bits_shift == 0) {
+ int i = 0;
+ for (; i < digit_shift; i++) result->set_digit(i, 0ul);
+ for (; i < result_length; i++) {
+ result->set_digit(i, x->digit(i - digit_shift));
+ }
+ } else {
+ digit_t carry = 0;
+ for (int i = 0; i < digit_shift; i++) result->set_digit(i, 0ul);
+ for (int i = 0; i < length; i++) {
+ digit_t d = x->digit(i);
+ result->set_digit(i + digit_shift, (d << bits_shift) | carry);
+ carry = d >> (kDigitBits - bits_shift);
+ }
+ if (grow) {
+ result->set_digit(length + digit_shift, carry);
+ } else {
+ DCHECK(carry == 0);
+ }
+ }
+ result->set_sign(x->sign());
+ result->RightTrim();
+ return result;
+}
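+
+// Worked example (illustrative): with 64-bit digits, a shift amount of 70
+// decomposes into digit_shift == 1 and bits_shift == 6, so the result is
+// the input shifted up by one whole digit, with each digit then shifted
+// left by 6 bits and the carry rippling into the next digit.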
+
+Handle<BigInt> BigInt::RightShiftByAbsolute(Handle<BigInt> x,
+ Handle<BigInt> y) {
+ Isolate* isolate = x->GetIsolate();
+ int length = x->length();
+ bool sign = x->sign();
+ Maybe<digit_t> maybe_shift = ToShiftAmount(y);
+ if (maybe_shift.IsNothing()) {
+ return RightShiftByMaximum(isolate, sign);
+ }
+ digit_t shift = maybe_shift.FromJust();
+ int digit_shift = static_cast<int>(shift / kDigitBits);
+ int bits_shift = static_cast<int>(shift % kDigitBits);
+ int result_length = length - digit_shift;
+ if (result_length <= 0) {
+ return RightShiftByMaximum(isolate, sign);
+ }
+ // For negative numbers, round down if any bit was shifted out (so that e.g.
+ // -5n >> 1n == -3n and not -2n). Check now whether this will happen and
+ // whether it can cause overflow into a new digit. If we allocate the result
+ // large enough up front, it avoids having to do a second allocation later.
+ bool must_round_down = false;
+ if (sign) {
+ if ((x->digit(digit_shift) & ((static_cast<digit_t>(1) << bits_shift) - 1)) != 0) {
+ must_round_down = true;
+ } else {
+ for (int i = 0; i < digit_shift; i++) {
+ if (x->digit(i) != 0) {
+ must_round_down = true;
+ break;
+ }
+ }
+ }
+ }
+ // If bits_shift is non-zero, it frees up bits, preventing overflow.
+ if (must_round_down && bits_shift == 0) {
+ // Overflow cannot happen if the most significant digit has unset bits.
+ digit_t msd = x->digit(length - 1);
+ bool rounding_can_overflow = digit_ismax(msd);
+ if (rounding_can_overflow) result_length++;
+ }
+
+ Handle<BigInt> result = isolate->factory()->NewBigIntRaw(result_length);
+ if (bits_shift == 0) {
+ for (int i = digit_shift; i < length; i++) {
+ result->set_digit(i - digit_shift, x->digit(i));
+ }
+ } else {
+ digit_t carry = x->digit(digit_shift) >> bits_shift;
+ int last = length - digit_shift - 1;
+ for (int i = 0; i < last; i++) {
+ digit_t d = x->digit(i + digit_shift + 1);
+ result->set_digit(i, (d << (kDigitBits - bits_shift)) | carry);
+ carry = d >> bits_shift;
+ }
+ result->set_digit(last, carry);
+ }
+
+ if (sign) {
+ result->set_sign(true);
+ if (must_round_down) {
+ // Since the result is negative, rounding down means adding one to
+ // its absolute value.
+ result = AbsoluteAddOne(result, true, *result);
+ }
+ }
+ result->RightTrim();
+ return result;
+}
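+
+// Worked example (illustrative): for -5n >> 1n, the absolute value 101b
+// shifts to 10b == 2 and a 1-bit was shifted out, so {must_round_down}
+// holds and the result becomes -(2 + 1) == -3, i.e. rounding towards
+// negative infinity as described above.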
+
+Handle<BigInt> BigInt::RightShiftByMaximum(Isolate* isolate, bool sign) {
+ if (sign) {
+ // TODO(jkummerow): Consider caching a canonical -1n BigInt.
+ return isolate->factory()->NewBigIntFromInt(-1);
+ } else {
+ // TODO(jkummerow): Consider caching a canonical zero BigInt.
+ return isolate->factory()->NewBigIntFromInt(0);
+ }
+}
+
+// Returns the value of {x} if it is at most the maximum bit length of
+// a BigInt, or Nothing otherwise.
+Maybe<BigInt::digit_t> BigInt::ToShiftAmount(Handle<BigInt> x) {
+ if (x->length() > 1) return Nothing<digit_t>();
+ digit_t value = x->digit(0);
+ STATIC_ASSERT(kMaxLength * kDigitBits < std::numeric_limits<digit_t>::max());
+ if (value > kMaxLength * kDigitBits) return Nothing<digit_t>();
+ return Just(value);
+}
+
+Handle<BigInt> BigInt::Copy(Handle<BigInt> source) {
+ int length = source->length();
+ Handle<BigInt> result = source->GetIsolate()->factory()->NewBigIntRaw(length);
+ memcpy(result->address() + HeapObject::kHeaderSize,
+ source->address() + HeapObject::kHeaderSize,
+ SizeFor(length) - HeapObject::kHeaderSize);
+ return result;
+}
+
+// Lookup table for the maximum number of bits required per character of a
+// base-N string representation of a number. To increase accuracy, the array
+// value is the actual value multiplied by 32. To generate this table:
+// for (var i = 0; i <= 36; i++) { print(Math.ceil(Math.log2(i) * 32) + ","); }
+constexpr uint8_t kMaxBitsPerChar[] = {
+ 0, 0, 32, 51, 64, 75, 83, 90, 96, // 0..8
+ 102, 107, 111, 115, 119, 122, 126, 128, // 9..16
+ 131, 134, 136, 139, 141, 143, 145, 147, // 17..24
+ 149, 151, 153, 154, 156, 158, 159, 160, // 25..32
+ 162, 163, 165, 166, // 33..36
+};
+
+static const int kBitsPerCharTableShift = 5;
+static const size_t kBitsPerCharTableMultiplier = 1u << kBitsPerCharTableShift;
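+
+// Worked example (illustrative): for radix 10, log2(10) * 32 is roughly
+// 106.3, so the table stores ceil(...) == 107; shifting a table value right
+// by kBitsPerCharTableShift recovers a slight overestimate of the true
+// bits-per-character ratio.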
+
+MaybeHandle<BigInt> BigInt::AllocateFor(Isolate* isolate, int radix,
+ int charcount) {
+ DCHECK(2 <= radix && radix <= 36);
+ DCHECK(charcount >= 0);
+ size_t bits_per_char = kMaxBitsPerChar[radix];
+ size_t chars = static_cast<size_t>(charcount);
+ const int roundup = kBitsPerCharTableMultiplier - 1;
+ if ((std::numeric_limits<size_t>::max() - roundup) / bits_per_char < chars) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
+ BigInt);
+ }
+ size_t bits_min = bits_per_char * chars;
+ // Divide by 32 (see table), rounding up.
+ bits_min = (bits_min + roundup) >> kBitsPerCharTableShift;
+ if (bits_min > static_cast<size_t>(kMaxInt)) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
+ BigInt);
+ }
+ // Divide by kDigitBits, rounding up.
+ int length = (static_cast<int>(bits_min) + kDigitBits - 1) / kDigitBits;
+ if (length > BigInt::kMaxLength) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
+ BigInt);
+ }
+ return isolate->factory()->NewBigInt(length);
+}
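+
+// Worked example (illustrative): for 100 decimal digits, {bits_min} starts
+// as 107 * 100 == 10700, becomes (10700 + 31) >> 5 == 335 bits after undoing
+// the table scaling, and with 64-bit digits gives
+// length == (335 + 63) / 64 == 6 digits, comfortably above the
+// 100 * log2(10) =~ 333 bits actually needed.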
+
+void BigInt::RightTrim() {
+ int old_length = length();
+ int new_length = old_length;
+ while (new_length > 0 && digit(new_length - 1) == 0) new_length--;
+ int to_trim = old_length - new_length;
+ if (to_trim == 0) return;
+ int size_delta = to_trim * kDigitSize;
+ Address new_end = this->address() + SizeFor(new_length);
+ Heap* heap = GetHeap();
+ heap->CreateFillerObjectAt(new_end, size_delta, ClearRecordedSlots::kNo);
+ // Canonicalize -0n.
+ if (new_length == 0) set_sign(false);
+ set_length(new_length);
+}
+
+static const char kConversionChars[] = "0123456789abcdefghijklmnopqrstuvwxyz";
+
+MaybeHandle<String> BigInt::ToStringBasePowerOfTwo(Handle<BigInt> x,
+ int radix) {
+ STATIC_ASSERT(base::bits::IsPowerOfTwo(kDigitBits));
+ DCHECK(base::bits::IsPowerOfTwo(radix));
+ DCHECK(radix >= 2 && radix <= 32);
+ DCHECK(!x->is_zero());
+ Isolate* isolate = x->GetIsolate();
+
+ const int length = x->length();
+ const bool sign = x->sign();
+ const int bits_per_char = base::bits::CountTrailingZeros32(radix);
+ const int char_mask = radix - 1;
+ // Compute the length of the resulting string: divide the bit length of the
+ // BigInt by the number of bits representable per character (rounding up).
+ const digit_t msd = x->digit(length - 1);
+ const int msd_leading_zeros = base::bits::CountLeadingZeros(msd);
+ const size_t bit_length = length * kDigitBits - msd_leading_zeros;
+ const size_t chars_required =
+ (bit_length + bits_per_char - 1) / bits_per_char + sign;
+
+ if (chars_required > String::kMaxLength) {
+ THROW_NEW_ERROR(isolate, NewInvalidStringLengthError(), String);
+ }
+
+ Handle<SeqOneByteString> result =
+ isolate->factory()
+ ->NewRawOneByteString(static_cast<int>(chars_required))
+ .ToHandleChecked();
+ DisallowHeapAllocation no_gc;
+ uint8_t* buffer = result->GetChars();
+ // Print the number into the string, starting from the last position.
+ int pos = static_cast<int>(chars_required - 1);
+ digit_t digit = 0;
+ // Keeps track of how many unprocessed bits there are in {digit}.
+ int available_bits = 0;
+ for (int i = 0; i < length - 1; i++) {
+ digit_t new_digit = x->digit(i);
+ // Take any leftover bits from the last iteration into account.
+ int current = (digit | (new_digit << available_bits)) & char_mask;
+ buffer[pos--] = kConversionChars[current];
+ int consumed_bits = bits_per_char - available_bits;
+ digit = new_digit >> consumed_bits;
+ available_bits = kDigitBits - consumed_bits;
+ while (available_bits >= bits_per_char) {
+ buffer[pos--] = kConversionChars[digit & char_mask];
+ digit >>= bits_per_char;
+ available_bits -= bits_per_char;
+ }
+ }
+ // Take any leftover bits from the last iteration into account.
+ int current = (digit | (msd << available_bits)) & char_mask;
+ buffer[pos--] = kConversionChars[current];
+ digit = msd >> (bits_per_char - available_bits);
+ while (digit != 0) {
+ buffer[pos--] = kConversionChars[digit & char_mask];
+ digit >>= bits_per_char;
+ }
+ if (sign) buffer[pos--] = '-';
+ DCHECK(pos == -1);
+ return result;
+}
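+
+// Worked example (illustrative): for radix 16, bits_per_char == 4, so a
+// positive BigInt with the single digit 0xFF has bit_length == 8 and
+// chars_required == 8 / 4 == 2; the loop above peels 4 bits per character
+// off the least significant end, producing "ff".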
+
+MaybeHandle<String> BigInt::ToStringGeneric(Handle<BigInt> x, int radix) {
+ DCHECK(radix >= 2 && radix <= 36);
+ DCHECK(!x->is_zero());
+ Heap* heap = x->GetHeap();
+ Isolate* isolate = heap->isolate();
+
+ const int length = x->length();
+ const bool sign = x->sign();
+
+ // Compute (an overapproximation of) the length of the resulting string:
+ // Divide bit length of the BigInt by bits representable per character.
+ const size_t bit_length =
+ length * kDigitBits - base::bits::CountLeadingZeros(x->digit(length - 1));
+ // Maximum number of bits we can represent with one character. We'll use this
+ // to find an appropriate chunk size below.
+ const uint8_t max_bits_per_char = kMaxBitsPerChar[radix];
+ // For estimating result length, we have to be pessimistic and work with
+ // the minimum number of bits one character can represent.
+ const uint8_t min_bits_per_char = max_bits_per_char - 1;
+ // Perform the following computation with uint64_t to avoid overflows.
+ uint64_t chars_required = bit_length;
+ chars_required *= kBitsPerCharTableMultiplier;
+ chars_required += min_bits_per_char - 1; // Round up.
+ chars_required /= min_bits_per_char;
+ chars_required += sign;
+
+ if (chars_required > String::kMaxLength) {
+ THROW_NEW_ERROR(isolate, NewInvalidStringLengthError(), String);
+ }
+ Handle<SeqOneByteString> result =
+ isolate->factory()
+ ->NewRawOneByteString(static_cast<int>(chars_required))
+ .ToHandleChecked();
+
+#if DEBUG
+ // Zap the string first.
+ {
+ DisallowHeapAllocation no_gc;
+ uint8_t* chars = result->GetChars();
+ for (int i = 0; i < static_cast<int>(chars_required); i++) chars[i] = '?';
+ }
+#endif
+
+ // We assemble the result string in reverse order, and then reverse it.
+ // TODO(jkummerow): Consider building the string from the right, and
+ // left-shifting it if the length estimate was too large.
+ int pos = 0;
+
+ digit_t last_digit;
+ if (length == 1) {
+ last_digit = x->digit(0);
+ } else {
+ int chunk_chars =
+ kDigitBits * kBitsPerCharTableMultiplier / max_bits_per_char;
+ digit_t chunk_divisor = digit_pow(radix, chunk_chars);
+ // By construction of chunk_chars, there can't have been overflow.
+ DCHECK(chunk_divisor != 0);
+ int nonzero_digit = length - 1;
+ DCHECK(x->digit(nonzero_digit) != 0);
+ // {rest} holds the part of the BigInt that we haven't looked at yet.
+ // Not to be confused with "remainder"!
+ Handle<BigInt> rest;
+ // In the first round, divide the input, allocating a new BigInt for
+ // the result == rest; from then on divide the rest in-place.
+ Handle<BigInt>* dividend = &x;
+ do {
+ digit_t chunk;
+ AbsoluteDivSmall(*dividend, chunk_divisor, &rest, &chunk);
+ DCHECK(!rest.is_null());
+ dividend = &rest;
+ DisallowHeapAllocation no_gc;
+ uint8_t* chars = result->GetChars();
+ for (int i = 0; i < chunk_chars; i++) {
+ chars[pos++] = kConversionChars[chunk % radix];
+ chunk /= radix;
+ }
+ DCHECK(chunk == 0);
+ if (rest->digit(nonzero_digit) == 0) nonzero_digit--;
+ // We can never clear more than one digit per iteration, because
+ // chunk_divisor is smaller than max digit value.
+ DCHECK(rest->digit(nonzero_digit) > 0);
+ } while (nonzero_digit > 0);
+ last_digit = rest->digit(0);
+ }
+ DisallowHeapAllocation no_gc;
+ uint8_t* chars = result->GetChars();
+ do {
+ chars[pos++] = kConversionChars[last_digit % radix];
+ last_digit /= radix;
+ } while (last_digit > 0);
+ DCHECK(pos >= 1);
+ DCHECK(pos <= static_cast<int>(chars_required));
+ // Remove leading zeroes.
+ while (pos > 1 && chars[pos - 1] == '0') pos--;
+ if (sign) chars[pos++] = '-';
+ // Trim any over-allocation (which can happen due to conservative estimates).
+ if (pos < static_cast<int>(chars_required)) {
+ result->synchronized_set_length(pos);
+ int string_size =
+ SeqOneByteString::SizeFor(static_cast<int>(chars_required));
+ int needed_size = SeqOneByteString::SizeFor(pos);
+ if (needed_size < string_size) {
+ Address new_end = result->address() + needed_size;
+ heap->CreateFillerObjectAt(new_end, (string_size - needed_size),
+ ClearRecordedSlots::kNo);
+ }
+ }
+ // Reverse the string.
+ for (int i = 0, j = pos - 1; i < j; i++, j--) {
+ uint8_t tmp = chars[i];
+ chars[i] = chars[j];
+ chars[j] = tmp;
+ }
+#if DEBUG
+ // Verify that all characters have been written.
+ DCHECK(result->length() == pos);
+ for (int i = 0; i < pos; i++) DCHECK(chars[i] != '?');
+#endif
+ return result;
+}
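+
+// Worked example (illustrative): for radix 10 with 64-bit digits,
+// chunk_chars == 64 * 32 / 107 == 19 and chunk_divisor == 10^19, the
+// largest power of ten that still fits into one digit (10^19 < 2^64), so
+// every division in ToStringGeneric peels off 19 decimal characters.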
+
+// Digit arithmetic helpers.
+
+#if V8_TARGET_ARCH_32_BIT
+#define HAVE_TWODIGIT_T 1
+typedef uint64_t twodigit_t;
+#elif defined(__SIZEOF_INT128__)
+// Both Clang and GCC support this on x64.
+#define HAVE_TWODIGIT_T 1
+typedef __uint128_t twodigit_t;
+#endif
+
+// {carry} must point to an initialized digit_t and will either be incremented
+// by one or left alone.
+inline BigInt::digit_t BigInt::digit_add(digit_t a, digit_t b, digit_t* carry) {
+#if HAVE_TWODIGIT_T
+ twodigit_t result = static_cast<twodigit_t>(a) + static_cast<twodigit_t>(b);
+ *carry += result >> kDigitBits;
+ return static_cast<digit_t>(result);
+#else
+ digit_t result = a + b;
+ if (result < a) *carry += 1;
+ return result;
+#endif
+}
+
+// {borrow} must point to an initialized digit_t and will either be incremented
+// by one or left alone.
+inline BigInt::digit_t BigInt::digit_sub(digit_t a, digit_t b,
+ digit_t* borrow) {
+#if HAVE_TWODIGIT_T
+ twodigit_t result = static_cast<twodigit_t>(a) - static_cast<twodigit_t>(b);
+ *borrow += (result >> kDigitBits) & 1;
+ return static_cast<digit_t>(result);
+#else
+ digit_t result = a - b;
+ if (result > a) *borrow += 1;
+ return static_cast<digit_t>(result);
+#endif
+}
+
+// Returns the low half of the result. High half is in {high}.
+inline BigInt::digit_t BigInt::digit_mul(digit_t a, digit_t b, digit_t* high) {
+#if HAVE_TWODIGIT_T
+ twodigit_t result = static_cast<twodigit_t>(a) * static_cast<twodigit_t>(b);
+ *high = result >> kDigitBits;
+ return static_cast<digit_t>(result);
+#else
+ // Multiply in half-pointer-sized chunks.
+ // For inputs [AH AL]*[BH BL], the result is:
+ //
+ // [AL*BL] // r_low
+ // + [AL*BH] // r_mid1
+ // + [AH*BL] // r_mid2
+ // + [AH*BH] // r_high
+ // = [R4 R3 R2 R1] // high = [R4 R3], low = [R2 R1]
+ //
+ // Where of course we must be careful with carries between the columns.
+ digit_t a_low = a & kHalfDigitMask;
+ digit_t a_high = a >> kHalfDigitBits;
+ digit_t b_low = b & kHalfDigitMask;
+ digit_t b_high = b >> kHalfDigitBits;
+
+ digit_t r_low = a_low * b_low;
+ digit_t r_mid1 = a_low * b_high;
+ digit_t r_mid2 = a_high * b_low;
+ digit_t r_high = a_high * b_high;
+
+ digit_t carry = 0;
+ digit_t low = digit_add(r_low, r_mid1 << kHalfDigitBits, &carry);
+ low = digit_add(low, r_mid2 << kHalfDigitBits, &carry);
+ *high =
+ (r_mid1 >> kHalfDigitBits) + (r_mid2 >> kHalfDigitBits) + r_high + carry;
+ return low;
+#endif
+}
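+
+// Worked example (illustrative, assuming kDigitBits == 8, i.e.
+// kHalfDigitBits == 4): for a == 0x12 and b == 0x34, the partial products
+// are r_low == 8, r_mid1 == 6, r_mid2 == 4, r_high == 3; the low half
+// assembles to 8 + (6 << 4) + (4 << 4) == 0xA8 with no carry, the high
+// half to 3, and indeed 0x12 * 0x34 == 0x3A8.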
+
+// Returns the quotient.
+// quotient = ((high << kDigitBits) + low - remainder) / divisor
+BigInt::digit_t BigInt::digit_div(digit_t high, digit_t low, digit_t divisor,
+ digit_t* remainder) {
+ DCHECK(high < divisor);
+#if V8_TARGET_ARCH_X64 && (__GNUC__ || __clang__)
+ digit_t quotient;
+ digit_t rem;
+ __asm__("divq %[divisor]"
+ // Outputs: {quotient} will be in rax, {rem} in rdx.
+ : "=a"(quotient), "=d"(rem)
+ // Inputs: put {high} into rdx, {low} into rax, and {divisor} into
+ // any register or stack slot.
+ : "d"(high), "a"(low), [divisor] "rm"(divisor));
+ *remainder = rem;
+ return quotient;
+#elif V8_TARGET_ARCH_IA32 && (__GNUC__ || __clang__)
+ digit_t quotient;
+ digit_t rem;
+ __asm__("divl %[divisor]"
+ // Outputs: {quotient} will be in eax, {rem} in edx.
+ : "=a"(quotient), "=d"(rem)
+ // Inputs: put {high} into edx, {low} into eax, and {divisor} into
+ // any register or stack slot.
+ : "d"(high), "a"(low), [divisor] "rm"(divisor));
+ *remainder = rem;
+ return quotient;
+#else
+ static const digit_t kHalfDigitBase = 1ull << kHalfDigitBits;
+ // Adapted from Warren, Hacker's Delight, p. 152.
+ int s = base::bits::CountLeadingZeros(divisor);
+ divisor <<= s;
+
+ digit_t vn1 = divisor >> kHalfDigitBits;
+ digit_t vn0 = divisor & kHalfDigitMask;
+ // {s} can be 0. "low >> kDigitBits == low" on x86, so we "&" it with
+ // {s_zero_mask} which is 0 if s == 0 and all 1-bits otherwise.
+ STATIC_ASSERT(sizeof(intptr_t) == sizeof(digit_t));
+ digit_t s_zero_mask =
+ static_cast<digit_t>(static_cast<intptr_t>(-s) >> (kDigitBits - 1));
+ digit_t un32 = (high << s) | ((low >> (kDigitBits - s)) & s_zero_mask);
+ digit_t un10 = low << s;
+ digit_t un1 = un10 >> kHalfDigitBits;
+ digit_t un0 = un10 & kHalfDigitMask;
+ digit_t q1 = un32 / vn1;
+ digit_t rhat = un32 - q1 * vn1;
+
+ while (q1 >= kHalfDigitBase || q1 * vn0 > rhat * kHalfDigitBase + un1) {
+ q1--;
+ rhat += vn1;
+ if (rhat >= kHalfDigitBase) break;
+ }
+
+ digit_t un21 = un32 * kHalfDigitBase + un1 - q1 * divisor;
+ digit_t q0 = un21 / vn1;
+ rhat = un21 - q0 * vn1;
+
+ while (q0 >= kHalfDigitBase || q0 * vn0 > rhat * kHalfDigitBase + un0) {
+ q0--;
+ rhat += vn1;
+ if (rhat >= kHalfDigitBase) break;
+ }
+
+ *remainder = (un21 * kHalfDigitBase + un0 - q0 * divisor) >> s;
+ return q1 * kHalfDigitBase + q0;
+#endif
+}
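+
+// Worked example (illustrative, assuming kDigitBits == 8):
+// digit_div(1, 0, 3, &r) divides the two-digit value (1 << 8) + 0 == 256
+// by 3, returning 85 with r == 1; the DCHECK(high < divisor) precondition
+// guarantees that the quotient fits into a single digit.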
+
+// Raises {base} to the power of {exponent}. Does not check for overflow.
+BigInt::digit_t BigInt::digit_pow(digit_t base, digit_t exponent) {
+ digit_t result = 1ull;
+ while (exponent > 0) {
+ if (exponent & 1) {
+ result *= base;
+ }
+ exponent >>= 1;
+ base *= base;
+ }
+ return result;
+}
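+
+// Worked example (illustrative): digit_pow(3, 5) runs square-and-multiply
+// as: exponent 5 (odd) -> result 3, base 9; exponent 2 -> base 81;
+// exponent 1 (odd) -> result 3 * 81 == 243, base 6561; returning
+// 243 == 3^5.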
+
+#undef HAVE_TWODIGIT_T
+
+#ifdef OBJECT_PRINT
+void BigInt::BigIntPrint(std::ostream& os) {
+ DisallowHeapAllocation no_gc;
+ HeapObject::PrintHeader(os, "BigInt");
+ int len = length();
+ os << "- length: " << len << "\n";
+ os << "- sign: " << sign() << "\n";
+ if (len > 0) {
+ os << "- digits:";
+ for (int i = 0; i < len; i++) {
+ os << "\n 0x" << std::hex << digit(i);
+ }
+ os << std::dec << "\n";
+ }
+}
+#endif // OBJECT_PRINT
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/bigint.h b/deps/v8/src/objects/bigint.h
new file mode 100644
index 0000000000..43a91b5133
--- /dev/null
+++ b/deps/v8/src/objects/bigint.h
@@ -0,0 +1,187 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_BIGINT_H_
+#define V8_OBJECTS_BIGINT_H_
+
+#include "src/globals.h"
+#include "src/objects.h"
+#include "src/utils.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// UNDER CONSTRUCTION!
+// Arbitrary precision integers in JavaScript.
+class BigInt : public HeapObject {
+ public:
+ // Implementation of the Spec methods, see:
+ // https://tc39.github.io/proposal-bigint/#sec-numeric-types
+ // Sections 1.1.1 through 1.1.19.
+ static Handle<BigInt> UnaryMinus(Handle<BigInt> x);
+ static Handle<BigInt> BitwiseNot(Handle<BigInt> x);
+ static MaybeHandle<BigInt> Exponentiate(Handle<BigInt> base,
+ Handle<BigInt> exponent);
+ static Handle<BigInt> Multiply(Handle<BigInt> x, Handle<BigInt> y);
+ static MaybeHandle<BigInt> Divide(Handle<BigInt> x, Handle<BigInt> y);
+ static MaybeHandle<BigInt> Remainder(Handle<BigInt> x, Handle<BigInt> y);
+ static Handle<BigInt> Add(Handle<BigInt> x, Handle<BigInt> y);
+ static Handle<BigInt> Subtract(Handle<BigInt> x, Handle<BigInt> y);
+ static MaybeHandle<BigInt> LeftShift(Handle<BigInt> x, Handle<BigInt> y);
+ static MaybeHandle<BigInt> SignedRightShift(Handle<BigInt> x,
+ Handle<BigInt> y);
+ static MaybeHandle<BigInt> UnsignedRightShift(Handle<BigInt> x,
+ Handle<BigInt> y);
+ static bool LessThan(Handle<BigInt> x, Handle<BigInt> y);
+ static bool Equal(BigInt* x, BigInt* y);
+ static Handle<BigInt> BitwiseAnd(Handle<BigInt> x, Handle<BigInt> y);
+ static Handle<BigInt> BitwiseXor(Handle<BigInt> x, Handle<BigInt> y);
+ static Handle<BigInt> BitwiseOr(Handle<BigInt> x, Handle<BigInt> y);
+
+ // Other parts of the public interface.
+ bool ToBoolean() { return !is_zero(); }
+ uint32_t Hash() {
+ // TODO(jkummerow): Improve this. At least use length and sign.
+ return is_zero() ? 0 : ComputeIntegerHash(static_cast<uint32_t>(digit(0)));
+ }
+
+ DECL_CAST(BigInt)
+ DECL_VERIFIER(BigInt)
+ DECL_PRINTER(BigInt)
+ void BigIntShortPrint(std::ostream& os);
+
+ // TODO(jkummerow): Do we need {synchronized_length} for GC purposes?
+ DECL_INT_ACCESSORS(length)
+
+ inline static int SizeFor(int length) {
+ return kHeaderSize + length * kDigitSize;
+ }
+ void Initialize(int length, bool zero_initialize);
+
+ static MaybeHandle<String> ToString(Handle<BigInt> bigint, int radix = 10);
+
+ // The maximum length that the current implementation supports would be
+ // kMaxInt / kDigitBits. However, we use a lower limit for now, because
+ // raising it later is easier than lowering it.
+ static const int kMaxLengthBits = 20;
+ static const int kMaxLength = (1 << kMaxLengthBits) - 1;
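+
+ // Worked example (illustrative): kMaxLength == 2^20 - 1 == 1048575 digits,
+ // i.e. roughly 67 million bits or 8 MB of digit storage on a 64-bit
+ // architecture.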
+
+ class BodyDescriptor;
+
+ private:
+ friend class Factory;
+ friend class BigIntParseIntHelper;
+
+ typedef uintptr_t digit_t;
+ static const int kDigitSize = sizeof(digit_t);
+ static const int kDigitBits = kDigitSize * kBitsPerByte;
+ static const int kHalfDigitBits = kDigitBits / 2;
+ static const digit_t kHalfDigitMask = (1ull << kHalfDigitBits) - 1;
+
+ // Private helpers for public methods.
+ static Handle<BigInt> Copy(Handle<BigInt> source);
+ static MaybeHandle<BigInt> AllocateFor(Isolate* isolate, int radix,
+ int charcount);
+ void RightTrim();
+
+ static Handle<BigInt> AbsoluteAdd(Handle<BigInt> x, Handle<BigInt> y,
+ bool result_sign);
+ static Handle<BigInt> AbsoluteSub(Handle<BigInt> x, Handle<BigInt> y,
+ bool result_sign);
+ static Handle<BigInt> AbsoluteAddOne(Handle<BigInt> x, bool sign,
+ BigInt* result_storage);
+ static Handle<BigInt> AbsoluteSubOne(Handle<BigInt> x, int result_length);
+
+ enum ExtraDigitsHandling { kCopy, kSkip };
+ static inline Handle<BigInt> AbsoluteBitwiseOp(
+ Handle<BigInt> x, Handle<BigInt> y, BigInt* result_storage,
+ ExtraDigitsHandling extra_digits,
+ std::function<digit_t(digit_t, digit_t)> op);
+ static Handle<BigInt> AbsoluteAnd(Handle<BigInt> x, Handle<BigInt> y,
+ BigInt* result_storage = nullptr);
+ static Handle<BigInt> AbsoluteAndNot(Handle<BigInt> x, Handle<BigInt> y,
+ BigInt* result_storage = nullptr);
+ static Handle<BigInt> AbsoluteOr(Handle<BigInt> x, Handle<BigInt> y,
+ BigInt* result_storage = nullptr);
+ static Handle<BigInt> AbsoluteXor(Handle<BigInt> x, Handle<BigInt> y,
+ BigInt* result_storage = nullptr);
+
+ static int AbsoluteCompare(Handle<BigInt> x, Handle<BigInt> y);
+
+ static void MultiplyAccumulate(Handle<BigInt> multiplicand,
+ digit_t multiplier, Handle<BigInt> accumulator,
+ int accumulator_index);
+ static void InternalMultiplyAdd(BigInt* source, digit_t factor,
+ digit_t summand, int n, BigInt* result);
+ void InplaceMultiplyAdd(uintptr_t factor, uintptr_t summand);
+
+ // Specialized helpers for Divide/Remainder.
+ static void AbsoluteDivSmall(Handle<BigInt> x, digit_t divisor,
+ Handle<BigInt>* quotient, digit_t* remainder);
+ static void AbsoluteDivLarge(Handle<BigInt> dividend, Handle<BigInt> divisor,
+ Handle<BigInt>* quotient,
+ Handle<BigInt>* remainder);
+ static bool ProductGreaterThan(digit_t factor1, digit_t factor2, digit_t high,
+ digit_t low);
+ digit_t InplaceAdd(BigInt* summand, int start_index);
+ digit_t InplaceSub(BigInt* subtrahend, int start_index);
+ void InplaceRightShift(int shift);
+ enum SpecialLeftShiftMode {
+ kSameSizeResult,
+ kAlwaysAddOneDigit,
+ };
+ static Handle<BigInt> SpecialLeftShift(Handle<BigInt> x, int shift,
+ SpecialLeftShiftMode mode);
+
+ // Specialized helpers for shift operations.
+ static MaybeHandle<BigInt> LeftShiftByAbsolute(Handle<BigInt> x,
+ Handle<BigInt> y);
+ static Handle<BigInt> RightShiftByAbsolute(Handle<BigInt> x,
+ Handle<BigInt> y);
+ static Handle<BigInt> RightShiftByMaximum(Isolate* isolate, bool sign);
+ static Maybe<digit_t> ToShiftAmount(Handle<BigInt> x);
+
+ static MaybeHandle<String> ToStringBasePowerOfTwo(Handle<BigInt> x,
+ int radix);
+ static MaybeHandle<String> ToStringGeneric(Handle<BigInt> x, int radix);
+
+ // Digit arithmetic helpers.
+ static inline digit_t digit_add(digit_t a, digit_t b, digit_t* carry);
+ static inline digit_t digit_sub(digit_t a, digit_t b, digit_t* borrow);
+ static inline digit_t digit_mul(digit_t a, digit_t b, digit_t* high);
+ static inline digit_t digit_div(digit_t high, digit_t low, digit_t divisor,
+ digit_t* remainder);
+ static digit_t digit_pow(digit_t base, digit_t exponent);
+ static inline bool digit_ismax(digit_t x) {
+ return static_cast<digit_t>(~x) == 0;
+ }
+
+ class LengthBits : public BitField<int, 0, kMaxLengthBits> {};
+ class SignBits : public BitField<bool, LengthBits::kNext, 1> {};
+
+ // Low-level accessors.
+ // sign() == true means negative.
+ DECL_BOOLEAN_ACCESSORS(sign)
+ inline digit_t digit(int n) const;
+ inline void set_digit(int n, digit_t value);
+
+ bool is_zero() {
+ DCHECK(length() > 0 || !sign()); // There is no -0n.
+ return length() == 0;
+ }
+ static const int kBitfieldOffset = HeapObject::kHeaderSize;
+ static const int kDigitsOffset = kBitfieldOffset + kPointerSize;
+ static const int kHeaderSize = kDigitsOffset;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BigInt);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_BIGINT_H_
diff --git a/deps/v8/src/objects/code-cache-inl.h b/deps/v8/src/objects/code-cache-inl.h
deleted file mode 100644
index 5d08d3c122..0000000000
--- a/deps/v8/src/objects/code-cache-inl.h
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_OBJECTS_CODE_CACHE_INL_H_
-#define V8_OBJECTS_CODE_CACHE_INL_H_
-
-#include "src/objects/code-cache.h"
-
-// Has to be the last include (doesn't have include guards):
-#include "src/objects/object-macros.h"
-
-namespace v8 {
-namespace internal {
-
-CAST_ACCESSOR(CodeCacheHashTable)
-
-Handle<Object> CodeCacheHashTableShape::AsHandle(Isolate* isolate,
- CodeCacheHashTableKey* key) {
- return key->AsHandle(isolate);
-}
-
-} // namespace internal
-} // namespace v8
-
-#include "src/objects/object-macros-undef.h"
-
-#endif // V8_OBJECTS_CODE_CACHE_INL_H_
diff --git a/deps/v8/src/objects/code-cache.h b/deps/v8/src/objects/code-cache.h
deleted file mode 100644
index 25bdaf2159..0000000000
--- a/deps/v8/src/objects/code-cache.h
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_OBJECTS_CODE_CACHE_H_
-#define V8_OBJECTS_CODE_CACHE_H_
-
-#include "src/objects/hash-table.h"
-
-// Has to be the last include (doesn't have include guards):
-#include "src/objects/object-macros.h"
-
-namespace v8 {
-namespace internal {
-
-// The key in the code cache hash table consists of the property name and the
-// code object. The actual match is on the name and the code flags. If a key
-// is created using the flags and not a code object it can only be used for
-// lookup not to create a new entry.
-class CodeCacheHashTableKey final {
- public:
- CodeCacheHashTableKey(Handle<Name> name, Code::Flags flags)
- : name_(name), flags_(flags), code_() {
- DCHECK(name_->IsUniqueName());
- }
-
- CodeCacheHashTableKey(Handle<Name> name, Handle<Code> code)
- : name_(name), flags_(code->flags()), code_(code) {
- DCHECK(name_->IsUniqueName());
- }
-
- bool IsMatch(Object* other) {
- DCHECK(other->IsFixedArray());
- FixedArray* pair = FixedArray::cast(other);
- Name* name = Name::cast(pair->get(0));
- Code::Flags flags = Code::cast(pair->get(1))->flags();
- if (flags != flags_) return false;
- DCHECK(name->IsUniqueName());
- return *name_ == name;
- }
-
- static uint32_t NameFlagsHashHelper(Name* name, Code::Flags flags) {
- return name->Hash() ^ flags;
- }
-
- uint32_t Hash() { return NameFlagsHashHelper(*name_, flags_); }
-
- MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) {
- Handle<Code> code = code_.ToHandleChecked();
- Handle<FixedArray> pair = isolate->factory()->NewFixedArray(2);
- pair->set(0, *name_);
- pair->set(1, *code);
- return pair;
- }
-
- private:
- Handle<Name> name_;
- Code::Flags flags_;
- // TODO(jkummerow): We should be able to get by without this.
- MaybeHandle<Code> code_;
-};
-
-class CodeCacheHashTableShape : public BaseShape<CodeCacheHashTableKey*> {
- public:
- static inline bool IsMatch(CodeCacheHashTableKey* key, Object* value) {
- return key->IsMatch(value);
- }
-
- static inline uint32_t Hash(Isolate* isolate, CodeCacheHashTableKey* key) {
- return key->Hash();
- }
-
- static inline uint32_t HashForObject(Isolate* isolate, Object* object) {
- FixedArray* pair = FixedArray::cast(object);
- Name* name = Name::cast(pair->get(0));
- Code* code = Code::cast(pair->get(1));
- return CodeCacheHashTableKey::NameFlagsHashHelper(name, code->flags());
- }
-
- static inline Handle<Object> AsHandle(Isolate* isolate,
- CodeCacheHashTableKey* key);
-
- static const int kPrefixSize = 0;
- // Both the key (name + flags) and value (code object) can be derived from
- // the fixed array that stores both the name and code.
- // TODO(verwaest): Don't allocate a fixed array but inline name and code.
- // Rewrite IsMatch to get table + index as input rather than just the raw key.
- static const int kEntrySize = 1;
-};
-
-class CodeCacheHashTable
- : public HashTable<CodeCacheHashTable, CodeCacheHashTableShape> {
- public:
- static Handle<CodeCacheHashTable> Put(Handle<CodeCacheHashTable> table,
- Handle<Name> name, Handle<Code> code);
-
- Code* Lookup(Name* name, Code::Flags flags);
-
- DECL_CAST(CodeCacheHashTable)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(CodeCacheHashTable);
-};
-
-} // namespace internal
-} // namespace v8
-
-#include "src/objects/object-macros-undef.h"
-
-#endif // V8_OBJECTS_CODE_CACHE_H_
diff --git a/deps/v8/src/objects/debug-objects.cc b/deps/v8/src/objects/debug-objects.cc
index 59e9b20a09..1efe39c1d9 100644
--- a/deps/v8/src/objects/debug-objects.cc
+++ b/deps/v8/src/objects/debug-objects.cc
@@ -175,7 +175,6 @@ bool DebugInfo::HasCoverageInfo() const {
}
bool DebugInfo::ClearCoverageInfo() {
- DCHECK(FLAG_block_coverage);
if (HasCoverageInfo()) {
Isolate* isolate = GetIsolate();
@@ -301,34 +300,29 @@ int BreakPointInfo::GetBreakPointCount() {
}
int CoverageInfo::SlotCount() const {
- DCHECK(FLAG_block_coverage);
DCHECK_EQ(kFirstSlotIndex, length() % kSlotIndexCount);
return (length() - kFirstSlotIndex) / kSlotIndexCount;
}
int CoverageInfo::StartSourcePosition(int slot_index) const {
- DCHECK(FLAG_block_coverage);
DCHECK_LT(slot_index, SlotCount());
const int slot_start = CoverageInfo::FirstIndexForSlot(slot_index);
return Smi::ToInt(get(slot_start + kSlotStartSourcePositionIndex));
}
int CoverageInfo::EndSourcePosition(int slot_index) const {
- DCHECK(FLAG_block_coverage);
DCHECK_LT(slot_index, SlotCount());
const int slot_start = CoverageInfo::FirstIndexForSlot(slot_index);
return Smi::ToInt(get(slot_start + kSlotEndSourcePositionIndex));
}
int CoverageInfo::BlockCount(int slot_index) const {
- DCHECK(FLAG_block_coverage);
DCHECK_LT(slot_index, SlotCount());
const int slot_start = CoverageInfo::FirstIndexForSlot(slot_index);
return Smi::ToInt(get(slot_start + kSlotBlockCountIndex));
}
void CoverageInfo::InitializeSlot(int slot_index, int from_pos, int to_pos) {
- DCHECK(FLAG_block_coverage);
DCHECK_LT(slot_index, SlotCount());
const int slot_start = CoverageInfo::FirstIndexForSlot(slot_index);
set(slot_start + kSlotStartSourcePositionIndex, Smi::FromInt(from_pos));
@@ -337,7 +331,6 @@ void CoverageInfo::InitializeSlot(int slot_index, int from_pos, int to_pos) {
}
void CoverageInfo::IncrementBlockCount(int slot_index) {
- DCHECK(FLAG_block_coverage);
DCHECK_LT(slot_index, SlotCount());
const int slot_start = CoverageInfo::FirstIndexForSlot(slot_index);
const int old_count = BlockCount(slot_index);
@@ -345,7 +338,6 @@ void CoverageInfo::IncrementBlockCount(int slot_index) {
}
void CoverageInfo::ResetBlockCount(int slot_index) {
- DCHECK(FLAG_block_coverage);
DCHECK_LT(slot_index, SlotCount());
const int slot_start = CoverageInfo::FirstIndexForSlot(slot_index);
set(slot_start + kSlotBlockCountIndex, Smi::kZero);
diff --git a/deps/v8/src/objects/descriptor-array.h b/deps/v8/src/objects/descriptor-array.h
index b74b1dac7c..71537d3d38 100644
--- a/deps/v8/src/objects/descriptor-array.h
+++ b/deps/v8/src/objects/descriptor-array.h
@@ -18,25 +18,33 @@ class Handle;
class Isolate;
+// An EnumCache is a pair used to hold keys and indices caches.
+class EnumCache : public Tuple2 {
+ public:
+ DECL_ACCESSORS(keys, FixedArray)
+ DECL_ACCESSORS(indices, FixedArray)
+
+ DECL_CAST(EnumCache)
+
+ // Layout description.
+ static const int kKeysOffset = kValue1Offset;
+ static const int kIndicesOffset = kValue2Offset;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(EnumCache);
+};
+
// A DescriptorArray is a fixed array used to hold instance descriptors.
-// The format of the these objects is:
+// The format of these objects is:
// [0]: Number of descriptors
-// [1]: Either Smi(0) if uninitialized,
-// or enum cache bridge (FixedArray[2]):
-// [0]: enum cache: FixedArray containing all own enumerable keys
-// [1]: either Smi(0) or pointer to fixed array with indices
+// [1]: Enum cache.
// [2]: first key (and internalized String)
// [3]: first descriptor details (see PropertyDetails)
-// [4]: first value for constants | Smi(1) when not usedA
+// [4]: first value for constants | Smi(1) when not used
//
// [2 + number of descriptors * 3]: start of slack
class DescriptorArray : public FixedArray {
public:
- // Returns true for both shared empty_descriptor_array and for smis, which the
- // map uses to encode additional bit fields when the descriptor array is not
- // yet used.
- inline bool IsEmpty();
-
// Returns the number of descriptors in the array.
inline int number_of_descriptors();
inline int number_of_descriptors_storage();
@@ -45,18 +53,14 @@ class DescriptorArray : public FixedArray {
inline void SetNumberOfDescriptors(int number_of_descriptors);
inline int number_of_entries();
- inline bool HasEnumCache();
- inline bool HasEnumIndicesCache();
- inline FixedArray* GetEnumCache();
- inline FixedArray* GetEnumIndicesCache();
+ inline EnumCache* GetEnumCache();
void ClearEnumCache();
inline void CopyEnumCacheFrom(DescriptorArray* array);
// Initialize or change the enum cache,
- // using the supplied storage for the small "bridge".
static void SetEnumCache(Handle<DescriptorArray> descriptors,
- Isolate* isolate, Handle<FixedArray> new_cache,
- Handle<FixedArray> new_index_cache);
+ Isolate* isolate, Handle<FixedArray> keys,
+ Handle<FixedArray> indices);
// Accessors for fetching instance descriptor at descriptor number.
inline Name* GetKey(int descriptor_number);
@@ -122,22 +126,13 @@ class DescriptorArray : public FixedArray {
static const int kNotFound = -1;
static const int kDescriptorLengthIndex = 0;
- static const int kEnumCacheBridgeIndex = 1;
+ static const int kEnumCacheIndex = 1;
static const int kFirstIndex = 2;
- // The length of the "bridge" to the enum cache.
- static const int kEnumCacheBridgeLength = 2;
- static const int kEnumCacheBridgeCacheIndex = 0;
- static const int kEnumCacheBridgeIndicesCacheIndex = 1;
-
// Layout description.
static const int kDescriptorLengthOffset = FixedArray::kHeaderSize;
- static const int kEnumCacheBridgeOffset =
- kDescriptorLengthOffset + kPointerSize;
- static const int kFirstOffset = kEnumCacheBridgeOffset + kPointerSize;
-
- // Layout description for the bridge array.
- static const int kEnumCacheBridgeCacheOffset = FixedArray::kHeaderSize;
+ static const int kEnumCacheOffset = kDescriptorLengthOffset + kPointerSize;
+ static const int kFirstOffset = kEnumCacheOffset + kPointerSize;
// Layout of descriptor.
// Naming is consistent with Dictionary classes for easy templating.
diff --git a/deps/v8/src/objects/dictionary.h b/deps/v8/src/objects/dictionary.h
index a989c8fc8a..11cf8b1163 100644
--- a/deps/v8/src/objects/dictionary.h
+++ b/deps/v8/src/objects/dictionary.h
@@ -138,14 +138,16 @@ class BaseNameDictionary : public Dictionary<Derived, Shape> {
return Smi::ToInt(this->get(kNextEnumerationIndexIndex));
}
- void SetHash(int masked_hash) {
- DCHECK_EQ(masked_hash & JSReceiver::kHashMask, masked_hash);
- this->set(kObjectHashIndex, Smi::FromInt(masked_hash));
+ void SetHash(int hash) {
+ DCHECK(PropertyArray::HashField::is_valid(hash));
+ this->set(kObjectHashIndex, Smi::FromInt(hash));
}
int Hash() const {
Object* hash_obj = this->get(kObjectHashIndex);
- return Smi::ToInt(hash_obj);
+ int hash = Smi::ToInt(hash_obj);
+ DCHECK(PropertyArray::HashField::is_valid(hash));
+ return hash;
}
// Creates a new dictionary.
@@ -217,7 +219,7 @@ class GlobalDictionary
inline void SetEntry(int entry, Object* key, Object* value,
PropertyDetails details);
inline Name* NameAt(int entry);
- void ValueAtPut(int entry, Object* value) { set(EntryToIndex(entry), value); }
+ inline void ValueAtPut(int entry, Object* value);
};
class NumberDictionaryShape : public BaseDictionaryShape<uint32_t> {
diff --git a/deps/v8/src/objects/frame-array-inl.h b/deps/v8/src/objects/frame-array-inl.h
index 5ada507b9f..1e9ac1002e 100644
--- a/deps/v8/src/objects/frame-array-inl.h
+++ b/deps/v8/src/objects/frame-array-inl.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_FRAME_ARRAY_INL_H_
#include "src/objects/frame-array.h"
+#include "src/wasm/wasm-objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index aab6e78668..a5421a32ca 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -42,6 +42,39 @@ bool Map::IsInplaceGeneralizableField(PropertyConstness constness,
return false;
}
+bool Map::CanHaveFastTransitionableElementsKind(InstanceType instance_type) {
+ return instance_type == JS_ARRAY_TYPE || instance_type == JS_VALUE_TYPE ||
+ instance_type == JS_ARGUMENTS_TYPE;
+}
+
+bool Map::CanHaveFastTransitionableElementsKind() const {
+ return CanHaveFastTransitionableElementsKind(instance_type());
+}
+
+// static
+void Map::GeneralizeIfCanHaveTransitionableFastElementsKind(
+ Isolate* isolate, InstanceType instance_type, PropertyConstness* constness,
+ Representation* representation, Handle<FieldType>* field_type) {
+ if (CanHaveFastTransitionableElementsKind(instance_type)) {
+ // We don't support propagation of field generalization through elements
+ // kind transitions because they are inserted into the transition tree
+ // before field transitions. In order to avoid complexity of handling
+ // such a case we ensure that all maps with transitionable elements kinds
+ // do not have fields that can be generalized in-place (without creation
+ // of a new map).
+ if (FLAG_track_constant_fields && FLAG_modify_map_inplace) {
+ // The constness is either already kMutable or should become kMutable if
+ // it was kConst.
+ *constness = kMutable;
+ }
+ if (representation->IsHeapObject()) {
+ // The field type is either already Any or should become Any if it was
+ // something else.
+ *field_type = FieldType::Any(isolate);
+ }
+ }
+}
+
int NormalizedMapCache::GetIndex(Handle<Map> map) {
return map->Hash() % NormalizedMapCache::kEntries;
}
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index 114ec0b430..5806a24ae0 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -17,6 +17,7 @@ namespace internal {
#define VISITOR_ID_LIST(V) \
V(AllocationSite) \
+ V(BigInt) \
V(ByteArray) \
V(BytecodeArray) \
V(Cell) \
@@ -76,6 +77,82 @@ typedef std::vector<Handle<Map>> MapHandles;
// A Map contains information about:
// - Size information about the object
// - How to iterate over an object (for garbage collection)
+//
+// Map layout:
+// +---------------+---------------------------------------------+
+// | _ Type _ | _ Description _ |
+// +---------------+---------------------------------------------+
+// | TaggedPointer | map - Always a pointer to the MetaMap root |
+// +---------------+---------------------------------------------+
+// | Int | instance_sizes (the first int field) |
+// `---+----------+---------------------------------------------+
+// | Byte | [instance_size] |
+// +----------+---------------------------------------------+
+// | Byte | If Map for a primitive type: |
+// | | native context index for constructor fn |
+// | | If Map for an Object type: |
+// | | number of in-object properties |
+// +----------+---------------------------------------------+
+// | Byte | unused |
+// +----------+---------------------------------------------+
+// | Byte | [visitor_id] |
+// +----+----------+---------------------------------------------+
+// | Int | instance_attributes (second int field) |
+// `---+----------+---------------------------------------------+
+// | Word16 | [instance_type] in low byte |
+// | | [bit_field] in high byte |
+// | | - has_non_instance_prototype (bit 0) |
+// | | - is_callable (bit 1) |
+// | | - has_named_interceptor (bit 2) |
+// | | - has_indexed_interceptor (bit 3) |
+// | | - is_undetectable (bit 4) |
+// | | - is_access_check_needed (bit 5) |
+// | | - is_constructor (bit 6) |
+// | | - unused (bit 7) |
+// +----------+---------------------------------------------+
+// | Byte | [bit_field2] |
+// | | - is_extensible (bit 0) |
+// | | - is_prototype_map (bit 2) |
+// | | - elements_kind (bits 3..7) |
+// +----------+---------------------------------------------+
+// | Byte | [unused_property_fields] number of unused |
+// | | property fields in JSObject (for fast-mode) |
+// +----+----------+---------------------------------------------+
+// | Word | [bit_field3] |
+// | | - number_of_own_descriptors (bit 0..19) |
+// | | - is_dictionary_map (bit 20) |
+// | | - owns_descriptors (bit 21) |
+// | | - has_hidden_prototype (bit 22) |
+// | | - is_deprecated (bit 23) |
+// | | - is_unstable (bit 24) |
+// | | - is_migration_target (bit 25) |
+// | | - is_immutable_proto (bit 26) |
+// | | - new_target_is_base (bit 27) |
+// | | - may_have_interesting_symbols (bit 28) |
+// | | - construction_counter (bit 29..31) |
+// | | |
+// | | On systems with 64bit pointer types, there |
+// | | is an unused 32bits after bit_field3 |
+// +---------------+---------------------------------------------+
+// | TaggedPointer | [prototype] |
+// +---------------+---------------------------------------------+
+// | TaggedPointer | [constructor_or_backpointer] |
+// +---------------+---------------------------------------------+
+// | TaggedPointer | If Map is a prototype map: |
+// | | [prototype_info] |
+// | | Else: |
+// | | [raw_transitions] |
+// +---------------+---------------------------------------------+
+// | TaggedPointer | [instance_descriptors] |
+// +*************************************************************+
+// ! TaggedPointer ! [layout_descriptors] !
+// ! ! Field is only present on 64 bit arch !
+// +*************************************************************+
+// | TaggedPointer | [dependent_code] |
+// +---------------+---------------------------------------------+
+// | TaggedPointer | [weak_cell_cache] |
+// +---------------+---------------------------------------------+
+
class Map : public HeapObject {
public:
// Instance size.
@@ -329,6 +406,8 @@ class Map : public HeapObject {
int NumberOfFields() const;
+ bool HasOutOfObjectProperties() const;
+
// Returns true if transition to the given map requires special
// synchronization with the concurrent marker.
bool TransitionRequiresSynchronizationWithGC(Map* target) const;
@@ -359,6 +438,18 @@ class Map : public HeapObject {
Representation representation,
FieldType* field_type);
+ // Generalizes constness, representation and field_type if objects with given
+ // instance type can have fast elements that can be transitioned by stubs or
+ // optimized code to more general elements kind.
+ // This generalization is necessary in order to ensure that elements kind
+ // transitions performed by stubs / optimized code don't silently transition
+ // kMutable fields back to kConst state or fields with HeapObject
+ // representation and "Any" type back to "Class" type.
+ static inline void GeneralizeIfCanHaveTransitionableFastElementsKind(
+ Isolate* isolate, InstanceType instance_type,
+ PropertyConstness* constness, Representation* representation,
+ Handle<FieldType>* field_type);
+
static Handle<Map> ReconfigureProperty(Handle<Map> map, int modify_index,
PropertyKind new_kind,
PropertyAttributes new_attributes,
@@ -430,9 +521,6 @@ class Map : public HeapObject {
inline void InitializeDescriptors(DescriptorArray* descriptors,
LayoutDescriptor* layout_descriptor);
- // [stub cache]: contains stubs compiled for this map.
- DECL_ACCESSORS(code_cache, FixedArray)
-
// [dependent code]: list of optimized codes that weakly embed this map.
DECL_ACCESSORS(dependent_code, DependentCode)
@@ -561,15 +649,6 @@ class Map : public HeapObject {
DECL_CAST(Map)
- // Code cache operations.
-
- // Clears the code cache.
- inline void ClearCodeCache(Heap* heap);
-
- // Update code cache.
- static void UpdateCodeCache(Handle<Map> map, Handle<Name> name,
- Handle<Code> code);
-
// Extend the descriptor array of the map with the list of descriptors.
// In case of duplicates, the latest descriptor is used.
static void AppendCallbackDescriptors(Handle<Map> map,
@@ -579,8 +658,6 @@ class Map : public HeapObject {
static void EnsureDescriptorSlack(Handle<Map> map, int slack);
- Code* LookupInCodeCache(Name* name, Code::Flags code);
-
static Handle<Map> GetObjectCreateMap(Handle<HeapObject> prototype);
// Computes a hash value for this map, to be used in HashTables and such.
@@ -652,12 +729,12 @@ class Map : public HeapObject {
kTransitionsOrPrototypeInfoOffset + kPointerSize;
#if V8_DOUBLE_FIELDS_UNBOXING
static const int kLayoutDescriptorOffset = kDescriptorsOffset + kPointerSize;
- static const int kCodeCacheOffset = kLayoutDescriptorOffset + kPointerSize;
+ static const int kDependentCodeOffset =
+ kLayoutDescriptorOffset + kPointerSize;
#else
static const int kLayoutDescriptorOffset = 1; // Must not be ever accessed.
- static const int kCodeCacheOffset = kDescriptorsOffset + kPointerSize;
+ static const int kDependentCodeOffset = kDescriptorsOffset + kPointerSize;
#endif
- static const int kDependentCodeOffset = kCodeCacheOffset + kPointerSize;
static const int kWeakCellCacheOffset = kDependentCodeOffset + kPointerSize;
static const int kSize = kWeakCellCacheOffset + kPointerSize;
@@ -760,6 +837,16 @@ class Map : public HeapObject {
static VisitorId GetVisitorId(Map* map);
+ // Returns true if objects with given instance type are allowed to have
+ // fast transitionable elements kinds. This predicate is used to ensure
+ // that objects that can have transitionable fast elements kind will not
+ // get in-place generalizable fields because the elements kind transition
+ // performed by stubs or optimized code can't properly generalize such
+ // fields.
+ static inline bool CanHaveFastTransitionableElementsKind(
+ InstanceType instance_type);
+ inline bool CanHaveFastTransitionableElementsKind() const;
+
private:
// Returns the map that this (root) map transitions to if its elements_kind
// is changed to |elements_kind|, or |nullptr| if no such map is cached yet.
diff --git a/deps/v8/src/objects/module.cc b/deps/v8/src/objects/module.cc
index a73d91ace8..7d1bf42cfd 100644
--- a/deps/v8/src/objects/module.cc
+++ b/deps/v8/src/objects/module.cc
@@ -8,6 +8,7 @@
#include "src/objects/module.h"
#include "src/accessors.h"
+#include "src/api.h"
#include "src/ast/modules.h"
#include "src/objects-inl.h"
@@ -157,7 +158,6 @@ Cell* Module::GetCell(int cell_index) {
break;
case ModuleDescriptor::kInvalid:
UNREACHABLE();
- cell = nullptr;
break;
}
return Cell::cast(cell);
@@ -840,15 +840,25 @@ Handle<JSModuleNamespace> Module::GetModuleNamespace(Handle<Module> module) {
ns->set_module(*module);
module->set_module_namespace(*ns);
- // Create the properties in the namespace object.
+ // Create the properties in the namespace object. Transition the object
+ // to dictionary mode so that property addition is faster.
PropertyAttributes attr = DONT_DELETE;
+ JSObject::NormalizeProperties(ns, CLEAR_INOBJECT_PROPERTIES,
+ static_cast<int>(names.size()),
+ "JSModuleNamespace");
for (const auto& name : names) {
- JSObject::SetAccessor(
- ns, Accessors::ModuleNamespaceEntryInfo(isolate, name, attr))
- .Check();
+ JSObject::SetNormalizedProperty(
+ ns, name, Accessors::ModuleNamespaceEntryInfo(isolate, name, attr),
+ PropertyDetails(kAccessor, attr, PropertyCellType::kMutable));
}
JSObject::PreventExtensions(ns, THROW_ON_ERROR).ToChecked();
+ // Optimize the namespace object as a prototype, for two reasons:
+ // - The object's map is guaranteed not to be shared. ICs rely on this.
+ // - We can store a pointer from the map back to the namespace object.
+ // Turbofan can use this for inlining the access.
+ JSObject::OptimizeAsPrototype(ns);
+ Map::GetOrCreatePrototypeWeakCell(ns, isolate);
return ns;
}
diff --git a/deps/v8/src/objects/name-inl.h b/deps/v8/src/objects/name-inl.h
index 389427cd0d..610f930116 100644
--- a/deps/v8/src/objects/name-inl.h
+++ b/deps/v8/src/objects/name-inl.h
@@ -7,6 +7,8 @@
#include "src/objects/name.h"
+#include "src/heap/heap-inl.h"
+
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/property-descriptor-object-inl.h b/deps/v8/src/objects/property-descriptor-object-inl.h
new file mode 100644
index 0000000000..a65d5d65e0
--- /dev/null
+++ b/deps/v8/src/objects/property-descriptor-object-inl.h
@@ -0,0 +1,23 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_PROPERTY_DESCRIPTOR_OBJECT_INL_H_
+#define V8_OBJECTS_PROPERTY_DESCRIPTOR_OBJECT_INL_H_
+
+#include "src/objects-inl.h"
+#include "src/objects/property-descriptor-object.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+CAST_ACCESSOR(PropertyDescriptorObject)
+}
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_PROPERTY_DESCRIPTOR_OBJECT_INL_H_
diff --git a/deps/v8/src/objects/property-descriptor-object.h b/deps/v8/src/objects/property-descriptor-object.h
new file mode 100644
index 0000000000..880aaa9c30
--- /dev/null
+++ b/deps/v8/src/objects/property-descriptor-object.h
@@ -0,0 +1,64 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_PROPERTY_DESCRIPTOR_OBJECT_H_
+#define V8_OBJECTS_PROPERTY_DESCRIPTOR_OBJECT_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+class PropertyDescriptorObject : public FixedArray {
+ public:
+#define FLAGS_BIT_FIELDS(V, _) \
+ V(IsEnumerableBit, bool, 1, _) \
+ V(HasEnumerableBit, bool, 1, _) \
+ V(IsConfigurableBit, bool, 1, _) \
+ V(HasConfigurableBit, bool, 1, _) \
+ V(IsWritableBit, bool, 1, _) \
+ V(HasWritableBit, bool, 1, _) \
+ V(HasValueBit, bool, 1, _) \
+ V(HasGetBit, bool, 1, _) \
+ V(HasSetBit, bool, 1, _)
+
+ DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
+#undef FLAGS_BIT_FIELDS
+
+ enum { kFlagsIndex, kValueIndex, kGetIndex, kSetIndex, kLength };
+
+ DECL_CAST(PropertyDescriptorObject)
+
+ static const int kRegularAccessorPropertyBits =
+ HasEnumerableBit::kMask | HasConfigurableBit::kMask | HasGetBit::kMask |
+ HasSetBit::kMask;
+
+ static const int kRegularDataPropertyBits =
+ HasEnumerableBit::kMask | HasConfigurableBit::kMask |
+ HasWritableBit::kMask | HasValueBit::kMask;
+
+ static const int kHasMask = HasEnumerableBit::kMask |
+ HasConfigurableBit::kMask |
+ HasWritableBit::kMask | HasValueBit::kMask |
+ HasGetBit::kMask | HasSetBit::kMask;
+
+ static const int kValueOffset =
+ FixedArray::OffsetOfElementAt(PropertyDescriptorObject::kValueIndex);
+ static const int kFlagsOffset =
+ FixedArray::OffsetOfElementAt(PropertyDescriptorObject::kFlagsIndex);
+ static const int kGetOffset =
+ FixedArray::OffsetOfElementAt(PropertyDescriptorObject::kGetIndex);
+ static const int kSetOffset =
+ FixedArray::OffsetOfElementAt(PropertyDescriptorObject::kSetIndex);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_PROPERTY_DESCRIPTOR_OBJECT_H_
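(Annotation, not part of the patch: the mask constants above let a caller classify a descriptor with a single flag comparison. A minimal standalone sketch; the helper names are hypothetical.)

#include "src/objects/property-descriptor-object.h"

namespace v8 {
namespace internal {

// Hypothetical helpers, for illustration only: a descriptor is a "regular"
// data/accessor descriptor when exactly the corresponding Has* bits are set.
bool IsRegularDataDescriptor(int flags) {
  return (flags & PropertyDescriptorObject::kHasMask) ==
         PropertyDescriptorObject::kRegularDataPropertyBits;
}

bool IsRegularAccessorDescriptor(int flags) {
  return (flags & PropertyDescriptorObject::kHasMask) ==
         PropertyDescriptorObject::kRegularAccessorPropertyBits;
}

}  // namespace internal
}  // namespace v8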
diff --git a/deps/v8/src/objects/scope-info.cc b/deps/v8/src/objects/scope-info.cc
index 87045d044e..59baa0c5c0 100644
--- a/deps/v8/src/objects/scope-info.cc
+++ b/deps/v8/src/objects/scope-info.cc
@@ -827,7 +827,7 @@ Handle<ModuleInfoEntry> ModuleInfoEntry::New(Isolate* isolate,
int module_request, int cell_index,
int beg_pos, int end_pos) {
Handle<ModuleInfoEntry> result = Handle<ModuleInfoEntry>::cast(
- isolate->factory()->NewStruct(MODULE_INFO_ENTRY_TYPE));
+ isolate->factory()->NewStruct(MODULE_INFO_ENTRY_TYPE, TENURED));
result->set_export_name(*export_name);
result->set_local_name(*local_name);
result->set_import_name(*import_name);
diff --git a/deps/v8/src/objects/scope-info.h b/deps/v8/src/objects/scope-info.h
index db8bd3a5a8..27f6b83f0d 100644
--- a/deps/v8/src/objects/scope-info.h
+++ b/deps/v8/src/objects/scope-info.h
@@ -83,11 +83,9 @@ class ScopeInfo : public FixedArray {
bool HasContext();
// Return if this is a function scope with "use asm".
- inline bool IsAsmModule() { return AsmModuleField::decode(Flags()); }
+ inline bool IsAsmModule();
- inline bool HasSimpleParameters() {
- return HasSimpleParametersField::decode(Flags());
- }
+ inline bool HasSimpleParameters();
// Return the function_name if present.
String* FunctionName();
@@ -205,16 +203,9 @@ class ScopeInfo : public FixedArray {
V(StackLocalCount) \
V(ContextLocalCount)
-#define FIELD_ACCESSORS(name) \
- inline void Set##name(int value) { set(k##name, Smi::FromInt(value)); } \
- inline int name() { \
- if (length() > 0) { \
- return Smi::ToInt(get(k##name)); \
- } else { \
- return 0; \
- } \
- }
-
+#define FIELD_ACCESSORS(name) \
+ inline void Set##name(int value); \
+ inline int name();
FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(FIELD_ACCESSORS)
#undef FIELD_ACCESSORS
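(Annotation, not part of the patch: the accessor bodies move out of line but keep the behavior shown in the removed inline definitions above. A sketch of the expansion for StackLocalCount, one of the listed numeric fields.)

// Sketch of what FIELD_ACCESSORS(StackLocalCount) defines, based on the
// removed inline bodies above.
void ScopeInfo::SetStackLocalCount(int value) {
  set(kStackLocalCount, Smi::FromInt(value));
}

int ScopeInfo::StackLocalCount() {
  return length() > 0 ? Smi::ToInt(get(kStackLocalCount)) : 0;
}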
diff --git a/deps/v8/src/objects/script.h b/deps/v8/src/objects/script.h
index a6d2c1d825..ae4a87914d 100644
--- a/deps/v8/src/objects/script.h
+++ b/deps/v8/src/objects/script.h
@@ -115,10 +115,6 @@ class Script : public Struct {
Object* GetNameOrSourceURL();
- // Set eval origin for stack trace formatting.
- static void SetEvalOrigin(Handle<Script> script,
- Handle<SharedFunctionInfo> outer,
- int eval_position);
// Retrieve source position from where eval was called.
int GetEvalPosition();
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index d9e0407617..248f160ea9 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -16,7 +16,7 @@ namespace v8 {
namespace internal {
CAST_ACCESSOR(PreParsedScopeData)
-ACCESSORS(PreParsedScopeData, scope_data, PodArray<uint32_t>, kScopeDataOffset)
+ACCESSORS(PreParsedScopeData, scope_data, PodArray<uint8_t>, kScopeDataOffset)
ACCESSORS(PreParsedScopeData, child_data, FixedArray, kChildDataOffset)
TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
@@ -94,8 +94,6 @@ BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, is_declaration,
BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, native,
SharedFunctionInfo::IsNativeBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, force_inline,
- SharedFunctionInfo::ForceInlineBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, is_asm_wasm_broken,
SharedFunctionInfo::IsAsmWasmBrokenBit)
@@ -216,22 +214,10 @@ void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
CONDITIONAL_WRITE_BARRIER(value->GetHeap(), this, kCodeOffset, value, mode);
}
-void SharedFunctionInfo::ReplaceCode(Code* value) {
-#ifdef DEBUG
- Code::VerifyRecompiledCode(code(), value);
-#endif // DEBUG
-
- set_code(value);
-}
-
bool SharedFunctionInfo::IsInterpreted() const {
return code()->is_interpreter_trampoline_builtin();
}
-bool SharedFunctionInfo::HasBaselineCode() const {
- return code()->kind() == Code::FUNCTION;
-}
-
ScopeInfo* SharedFunctionInfo::scope_info() const {
return reinterpret_cast<ScopeInfo*>(READ_FIELD(this, kScopeInfoOffset));
}
@@ -325,6 +311,25 @@ void SharedFunctionInfo::ClearAsmWasmData() {
set_function_data(GetHeap()->undefined_value());
}
+bool SharedFunctionInfo::HasLazyDeserializationBuiltinId() const {
+ return function_data()->IsSmi();
+}
+
+int SharedFunctionInfo::lazy_deserialization_builtin_id() const {
+ DCHECK(HasLazyDeserializationBuiltinId());
+ int id = Smi::ToInt(function_data());
+ DCHECK(Builtins::IsBuiltinId(id));
+ return id;
+}
+
+void SharedFunctionInfo::set_lazy_deserialization_builtin_id(int builtin_id) {
+ DCHECK(function_data()->IsUndefined(GetIsolate()) ||
+ HasLazyDeserializationBuiltinId());
+ DCHECK(Builtins::IsBuiltinId(builtin_id));
+ DCHECK(Builtins::IsLazy(builtin_id));
+ set_function_data(Smi::FromInt(builtin_id));
+}
+
bool SharedFunctionInfo::HasBuiltinFunctionId() {
return function_identifier()->IsSmi();
}
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index 6676c619a6..57c07f9b13 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -19,7 +19,7 @@ class DebugInfo;
class PreParsedScopeData : public Struct {
public:
- DECL_ACCESSORS(scope_data, PodArray<uint32_t>)
+ DECL_ACCESSORS(scope_data, PodArray<uint8_t>)
DECL_ACCESSORS(child_data, FixedArray)
static const int kScopeDataOffset = Struct::kHeaderSize;
@@ -58,9 +58,6 @@ class SharedFunctionInfo : public HeapObject {
// optimized.
inline bool IsInterpreted() const;
- inline void ReplaceCode(Code* code);
- inline bool HasBaselineCode() const;
-
// Set up the link between shared function info and the script. The shared
// function info is added to the list on the script.
V8_EXPORT_PRIVATE static void SetScript(
@@ -146,6 +143,7 @@ class SharedFunctionInfo : public HeapObject {
// - a FunctionTemplateInfo to make benefit the API [IsApiFunction()].
// - a BytecodeArray for the interpreter [HasBytecodeArray()].
// - a FixedArray with Asm->Wasm conversion [HasAsmWasmData()].
+ // - a Smi containing the builtin id [HasLazyDeserializationBuiltinId()]
DECL_ACCESSORS(function_data, Object)
inline bool IsApiFunction();
@@ -159,6 +157,14 @@ class SharedFunctionInfo : public HeapObject {
inline FixedArray* asm_wasm_data() const;
inline void set_asm_wasm_data(FixedArray* data);
inline void ClearAsmWasmData();
+ // A brief note to clear up possible confusion:
+ // lazy_deserialization_builtin_id corresponds to the auto-generated
+ // Builtins::Name id, while builtin_function_id corresponds to
+ // BuiltinFunctionId (a manually maintained list of 'interesting' functions
+ // mainly used during optimization).
+ inline bool HasLazyDeserializationBuiltinId() const;
+ inline int lazy_deserialization_builtin_id() const;
+ inline void set_lazy_deserialization_builtin_id(int builtin_id);
// [function identifier]: This field holds an additional identifier for the
// function.
@@ -294,9 +300,6 @@ class SharedFunctionInfo : public HeapObject {
// global object.
DECL_BOOLEAN_ACCESSORS(native)
- // Indicate that this function should always be inlined in optimized code.
- DECL_BOOLEAN_ACCESSORS(force_inline)
-
// Whether this function was created from a FunctionDeclaration.
DECL_BOOLEAN_ACCESSORS(is_declaration)
@@ -438,6 +441,7 @@ class SharedFunctionInfo : public HeapObject {
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
SHARED_FUNCTION_INFO_FIELDS)
+#undef SHARED_FUNCTION_INFO_FIELDS
static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
@@ -464,7 +468,6 @@ class SharedFunctionInfo : public HeapObject {
V(AllowLazyCompilationBit, bool, 1, _) \
V(UsesArgumentsBit, bool, 1, _) \
V(NeedsHomeObjectBit, bool, 1, _) \
- V(ForceInlineBit, bool, 1, _) \
V(IsDeclarationBit, bool, 1, _) \
V(IsAsmWasmBrokenBit, bool, 1, _) \
V(FunctionMapIndexBits, int, 5, _) \
@@ -517,13 +520,6 @@ class SharedFunctionInfo : public HeapObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
};
-// Result of searching in an optimized code map of a SharedFunctionInfo. Note
-// that both {code} and {vector} can be NULL to pass search result status.
-struct CodeAndVector {
- Code* code; // Cached optimized code.
- FeedbackVector* vector; // Cached feedback vector.
-};
-
// Printing support.
struct SourceCodeOf {
explicit SourceCodeOf(SharedFunctionInfo* v, int max = -1)
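(Annotation, not part of the patch: after this change the single function_data field encodes four mutually exclusive states, distinguished by the dynamic type of the stored value. A standalone sketch; the enum is a hypothetical stand-in, mirroring the comment in shared-function-info.h.)

// Hypothetical stand-in for illustration; V8 itself tags nothing explicitly
// and instead tests the type of function_data.
enum class FunctionDataState {
  kFunctionTemplateInfo,  // API function        [IsApiFunction()]
  kBytecodeArray,         // interpreter         [HasBytecodeArray()]
  kAsmWasmData,           // asm->wasm FixedArray [HasAsmWasmData()]
  kLazyBuiltinId,         // Smi builtin id      [HasLazyDeserializationBuiltinId()]
};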
diff --git a/deps/v8/src/objects/template-objects.cc b/deps/v8/src/objects/template-objects.cc
new file mode 100644
index 0000000000..5e374b4722
--- /dev/null
+++ b/deps/v8/src/objects/template-objects.cc
@@ -0,0 +1,129 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/template-objects.h"
+
+#include "src/factory.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/property-descriptor.h"
+
+namespace v8 {
+namespace internal {
+
+bool TemplateObjectDescription::Equals(
+ TemplateObjectDescription const* that) const {
+ if (this->raw_strings()->length() == that->raw_strings()->length()) {
+ for (int i = this->raw_strings()->length(); --i >= 0;) {
+ if (this->raw_strings()->get(i) != that->raw_strings()->get(i)) {
+ return false;
+ }
+ }
+ return true;
+ }
+ return false;
+}
+
+// static
+Handle<JSArray> TemplateObjectDescription::GetTemplateObject(
+ Handle<TemplateObjectDescription> description,
+ Handle<Context> native_context) {
+ DCHECK(native_context->IsNativeContext());
+ Isolate* const isolate = native_context->GetIsolate();
+
+ // Check if we already have a [[TemplateMap]] for the {native_context},
+ // and if not, just allocate one on the fly (which will be set below).
+ Handle<TemplateMap> template_map =
+ native_context->template_map()->IsUndefined(isolate)
+ ? TemplateMap::New(isolate)
+ : handle(TemplateMap::cast(native_context->template_map()), isolate);
+
+ // Check if we already have an appropriate entry.
+ Handle<JSArray> template_object;
+ if (!TemplateMap::Lookup(template_map, description)
+ .ToHandle(&template_object)) {
+ // Create the raw object from the {raw_strings}.
+ Handle<FixedArray> raw_strings(description->raw_strings(), isolate);
+ Handle<JSArray> raw_object = isolate->factory()->NewJSArrayWithElements(
+ raw_strings, PACKED_ELEMENTS, raw_strings->length(), TENURED);
+
+ // Create the template object from the {cooked_strings}.
+ Handle<FixedArray> cooked_strings(description->cooked_strings(), isolate);
+ template_object = isolate->factory()->NewJSArrayWithElements(
+ cooked_strings, PACKED_ELEMENTS, cooked_strings->length(), TENURED);
+
+ // Freeze the {raw_object}.
+ JSObject::SetIntegrityLevel(raw_object, FROZEN, Object::THROW_ON_ERROR)
+ .ToChecked();
+
+ // Install a "raw" data property for {raw_object} on {template_object}.
+ PropertyDescriptor raw_desc;
+ raw_desc.set_value(raw_object);
+ raw_desc.set_configurable(false);
+ raw_desc.set_enumerable(false);
+ raw_desc.set_writable(false);
+ JSArray::DefineOwnProperty(isolate, template_object,
+ isolate->factory()->raw_string(), &raw_desc,
+ Object::THROW_ON_ERROR)
+ .ToChecked();
+
+ // Freeze the {template_object} as well.
+ JSObject::SetIntegrityLevel(template_object, FROZEN, Object::THROW_ON_ERROR)
+ .ToChecked();
+
+ // Remember the {template_object} in the {template_map}.
+ template_map = TemplateMap::Add(template_map, description, template_object);
+ native_context->set_template_map(*template_map);
+ }
+
+ return template_object;
+}
+
+// static
+bool TemplateMapShape::IsMatch(TemplateObjectDescription* key, Object* value) {
+ return key->Equals(TemplateObjectDescription::cast(value));
+}
+
+// static
+uint32_t TemplateMapShape::Hash(Isolate* isolate,
+ TemplateObjectDescription* key) {
+ return key->hash();
+}
+
+// static
+uint32_t TemplateMapShape::HashForObject(Isolate* isolate, Object* object) {
+ return Hash(isolate, TemplateObjectDescription::cast(object));
+}
+
+// static
+Handle<TemplateMap> TemplateMap::New(Isolate* isolate) {
+ return HashTable::New(isolate, 0);
+}
+
+// static
+MaybeHandle<JSArray> TemplateMap::Lookup(
+ Handle<TemplateMap> template_map, Handle<TemplateObjectDescription> key) {
+ int const entry = template_map->FindEntry(*key);
+ if (entry == kNotFound) return MaybeHandle<JSArray>();
+ int const index = EntryToIndex(entry);
+ return handle(JSArray::cast(template_map->get(index + 1)));
+}
+
+// static
+Handle<TemplateMap> TemplateMap::Add(Handle<TemplateMap> template_map,
+ Handle<TemplateObjectDescription> key,
+ Handle<JSArray> value) {
+ DCHECK_EQ(kNotFound, template_map->FindEntry(*key));
+ template_map = EnsureCapacity(template_map, 1);
+ uint32_t const hash = ShapeT::Hash(key->GetIsolate(), *key);
+ int const entry = template_map->FindInsertionEntry(hash);
+ int const index = EntryToIndex(entry);
+ template_map->set(index + 0, *key);
+ template_map->set(index + 1, *value);
+ template_map->ElementAdded();
+ return template_map;
+}
+
+} // namespace internal
+} // namespace v8
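(Annotation, not part of the patch: the cache contract implemented above is that two tagged-template call sites share one frozen template object per native context exactly when their raw string lists match element for element. A standalone sketch of that equality, using plain STL stand-ins rather than the V8 types.)

#include <cstdint>
#include <string>
#include <vector>

// Plain stand-in for TemplateObjectDescription, for illustration only.
struct Description {
  uint32_t hash;
  std::vector<std::string> raw_strings;
};

// Mirrors TemplateObjectDescription::Equals above: element-wise comparison
// of the raw strings, walked from the back.
bool Equals(const Description& a, const Description& b) {
  if (a.raw_strings.size() != b.raw_strings.size()) return false;
  for (size_t i = a.raw_strings.size(); i-- > 0;) {
    if (a.raw_strings[i] != b.raw_strings[i]) return false;
  }
  return true;
}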
diff --git a/deps/v8/src/objects/template-objects.h b/deps/v8/src/objects/template-objects.h
new file mode 100644
index 0000000000..cac29a3530
--- /dev/null
+++ b/deps/v8/src/objects/template-objects.h
@@ -0,0 +1,79 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_TEMPLATE_OBJECTS_H_
+#define V8_OBJECTS_TEMPLATE_OBJECTS_H_
+
+#include "src/objects.h"
+#include "src/objects/hash-table.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// TemplateObjectDescription is a triple of hash, raw strings and cooked
+// strings for tagged template literals. Used to communicate with the runtime
+// for template object creation within the {Runtime_GetTemplateObject} method.
+class TemplateObjectDescription final : public Tuple3 {
+ public:
+ DECL_INT_ACCESSORS(hash)
+ DECL_ACCESSORS(raw_strings, FixedArray)
+ DECL_ACCESSORS(cooked_strings, FixedArray)
+
+ bool Equals(TemplateObjectDescription const* that) const;
+
+ static Handle<JSArray> GetTemplateObject(
+ Handle<TemplateObjectDescription> description,
+ Handle<Context> native_context);
+
+ DECL_CAST(TemplateObjectDescription)
+
+ static constexpr int kHashOffset = kValue1Offset;
+ static constexpr int kRawStringsOffset = kValue2Offset;
+ static constexpr int kCookedStringsOffset = kValue3Offset;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateObjectDescription);
+};
+
+class TemplateMapShape final : public BaseShape<TemplateObjectDescription*> {
+ public:
+ static bool IsMatch(TemplateObjectDescription* key, Object* value);
+ static uint32_t Hash(Isolate* isolate, TemplateObjectDescription* key);
+ static uint32_t HashForObject(Isolate* isolate, Object* object);
+
+ static constexpr int kPrefixSize = 0;
+ static constexpr int kEntrySize = 2;
+};
+
+class TemplateMap final : public HashTable<TemplateMap, TemplateMapShape> {
+ public:
+ static Handle<TemplateMap> New(Isolate* isolate);
+
+ // Tries to look up the given {key} in the {template_map}. Returns the

+ // value if it's found, otherwise returns an empty MaybeHandle.
+ WARN_UNUSED_RESULT static MaybeHandle<JSArray> Lookup(
+ Handle<TemplateMap> template_map, Handle<TemplateObjectDescription> key);
+
+ // Adds the {key} / {value} pair to the {template_map} and returns the
+ // new TemplateMap (we might need to re-allocate). This assumes that
+ // there's no entry for {key} in the {template_map} already.
+ static Handle<TemplateMap> Add(Handle<TemplateMap> template_map,
+ Handle<TemplateObjectDescription> key,
+ Handle<JSArray> value);
+
+ DECL_CAST(TemplateMap)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateMap);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_TEMPLATE_OBJECTS_H_
diff --git a/deps/v8/src/parsing/OWNERS b/deps/v8/src/parsing/OWNERS
index 87f67ac610..fbbdd8b715 100644
--- a/deps/v8/src/parsing/OWNERS
+++ b/deps/v8/src/parsing/OWNERS
@@ -1,11 +1,11 @@
set noparent
adamk@chromium.org
+gsathya@chromium.org
littledan@chromium.org
marja@chromium.org
neis@chromium.org
rossberg@chromium.org
verwaest@chromium.org
-vogelheim@chromium.org
# COMPONENT: Blink>JavaScript>Parser
diff --git a/deps/v8/src/parsing/func-name-inferrer.cc b/deps/v8/src/parsing/func-name-inferrer.cc
index e973616534..a6cc179b82 100644
--- a/deps/v8/src/parsing/func-name-inferrer.cc
+++ b/deps/v8/src/parsing/func-name-inferrer.cc
@@ -6,7 +6,6 @@
#include "src/ast/ast-value-factory.h"
#include "src/ast/ast.h"
-#include "src/list-inl.h"
#include "src/objects-inl.h"
namespace v8 {
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index 6499be5806..f17916ccb6 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -22,7 +22,6 @@ ParseInfo::ParseInfo(AccountingAllocator* zone_allocator)
extension_(nullptr),
compile_options_(ScriptCompiler::kNoCompileOptions),
script_scope_(nullptr),
- asm_function_scope_(nullptr),
unicode_cache_(nullptr),
stack_limit_(0),
hash_seed_(0),
@@ -72,9 +71,10 @@ ParseInfo::ParseInfo(Handle<SharedFunctionInfo> shared)
// FeedbackMetadata, we can only collect type profile if the feedback vector
// has the appropriate slots.
set_collect_type_profile(
- shared->feedback_metadata()->length() == 0
- ? FLAG_type_profile && script->IsUserJavaScript()
- : shared->feedback_metadata()->HasTypeProfileSlot());
+ isolate->is_collecting_type_profile() &&
+ (shared->feedback_metadata()->length() == 0
+ ? script->IsUserJavaScript()
+ : shared->feedback_metadata()->HasTypeProfileSlot()));
if (block_coverage_enabled() && script->IsUserJavaScript()) {
AllocateSourceRangeMap();
}
@@ -91,7 +91,8 @@ ParseInfo::ParseInfo(Handle<Script> script)
set_native(script->type() == Script::TYPE_NATIVE);
set_eval(script->compilation_type() == Script::COMPILATION_TYPE_EVAL);
- set_collect_type_profile(FLAG_type_profile && script->IsUserJavaScript());
+ set_collect_type_profile(script->GetIsolate()->is_collecting_type_profile() &&
+ script->IsUserJavaScript());
if (block_coverage_enabled() && script->IsUserJavaScript()) {
AllocateSourceRangeMap();
}
@@ -151,9 +152,8 @@ void ParseInfo::InitFromIsolate(Isolate* isolate) {
set_unicode_cache(isolate->unicode_cache());
set_runtime_call_stats(isolate->counters()->runtime_call_stats());
set_ast_string_constants(isolate->ast_string_constants());
- if (FLAG_block_coverage && isolate->is_block_code_coverage()) {
- set_block_coverage_enabled();
- }
+ if (isolate->is_block_code_coverage()) set_block_coverage_enabled();
+ if (isolate->is_collecting_type_profile()) set_collect_type_profile();
}
void ParseInfo::UpdateStatisticsAfterBackgroundParse(Isolate* isolate) {
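(Annotation, not part of the patch: the type-profile condition now gates on the isolate first. A sketch of the combined predicate as a hypothetical free function; the patch inlines this logic in the two ParseInfo constructors.)

bool ShouldCollectTypeProfile(bool isolate_collecting,
                              int feedback_metadata_length,
                              bool is_user_javascript,
                              bool has_type_profile_slot) {
  if (!isolate_collecting) return false;
  // Without feedback metadata, fall back to the script-level check;
  // otherwise require the dedicated type-profile slot.
  return feedback_metadata_length == 0 ? is_user_javascript
                                       : has_type_profile_slot;
}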
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index 909f4c58e0..1426f94bbf 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -65,6 +65,7 @@ class V8_EXPORT_PRIVATE ParseInfo {
void setter(bool val) { SetFlag(flag, val); }
FLAG_ACCESSOR(kToplevel, is_toplevel, set_toplevel)
+ FLAG_ACCESSOR(kEager, is_eager, set_eager)
FLAG_ACCESSOR(kEval, is_eval, set_eval)
FLAG_ACCESSOR(kStrictMode, is_strict_mode, set_strict_mode)
FLAG_ACCESSOR(kNative, is_native, set_native)
@@ -72,7 +73,6 @@ class V8_EXPORT_PRIVATE ParseInfo {
FLAG_ACCESSOR(kAllowLazyParsing, allow_lazy_parsing, set_allow_lazy_parsing)
FLAG_ACCESSOR(kIsNamedExpression, is_named_expression,
set_is_named_expression)
- FLAG_ACCESSOR(kDebug, is_debug, set_is_debug)
FLAG_ACCESSOR(kSerializing, will_serialize, set_will_serialize)
FLAG_ACCESSOR(kLazyCompile, lazy_compile, set_lazy_compile)
FLAG_ACCESSOR(kCollectTypeProfile, collect_type_profile,
@@ -120,11 +120,6 @@ class V8_EXPORT_PRIVATE ParseInfo {
script_scope_ = script_scope;
}
- DeclarationScope* asm_function_scope() const { return asm_function_scope_; }
- void set_asm_function_scope(DeclarationScope* scope) {
- asm_function_scope_ = scope;
- }
-
AstValueFactory* ast_value_factory() const {
DCHECK(ast_value_factory_.get());
return ast_value_factory_.get();
@@ -241,7 +236,7 @@ class V8_EXPORT_PRIVATE ParseInfo {
enum Flag {
// ---------- Input flags ---------------------------
kToplevel = 1 << 0,
- kLazy = 1 << 1,
+ kEager = 1 << 1,
kEval = 1 << 2,
kStrictMode = 1 << 3,
kNative = 1 << 4,
@@ -249,12 +244,11 @@ class V8_EXPORT_PRIVATE ParseInfo {
kModule = 1 << 6,
kAllowLazyParsing = 1 << 7,
kIsNamedExpression = 1 << 8,
- kDebug = 1 << 9,
- kSerializing = 1 << 10,
- kLazyCompile = 1 << 11,
- kCollectTypeProfile = 1 << 12,
- kBlockCoverageEnabled = 1 << 13,
- kIsAsmWasmBroken = 1 << 14,
+ kSerializing = 1 << 9,
+ kLazyCompile = 1 << 10,
+ kCollectTypeProfile = 1 << 11,
+ kBlockCoverageEnabled = 1 << 12,
+ kIsAsmWasmBroken = 1 << 13,
};
//------------- Inputs to parsing and scope analysis -----------------------
@@ -263,7 +257,6 @@ class V8_EXPORT_PRIVATE ParseInfo {
v8::Extension* extension_;
ScriptCompiler::CompileOptions compile_options_;
DeclarationScope* script_scope_;
- DeclarationScope* asm_function_scope_;
UnicodeCache* unicode_cache_;
uintptr_t stack_limit_;
uint32_t hash_seed_;
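(Annotation, not part of the patch: two things change in the flag set. kLazy gives way to kEager at the same bit position, with the inverted sense matching the new FLAG_ACCESSOR, and dropping kDebug shifts every later flag down one bit. A sketch of what the accessor macro expands to for the new flag; the setter body is shown at the top of this hunk, and the getter is assumed to mirror it via GetFlag.)

// Expansion sketch of FLAG_ACCESSOR(kEager, is_eager, set_eager).
bool is_eager() const { return GetFlag(kEager); }
void set_eager(bool val) { SetFlag(kEager, val); }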
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index f555dbdbe0..b211b85d2a 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -5,6 +5,8 @@
#ifndef V8_PARSING_PARSER_BASE_H
#define V8_PARSING_PARSER_BASE_H
+#include <vector>
+
#include "src/ast/ast-source-ranges.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
@@ -275,6 +277,7 @@ class ParserBase {
allow_harmony_class_fields_(false),
allow_harmony_object_rest_spread_(false),
allow_harmony_dynamic_import_(false),
+ allow_harmony_import_meta_(false),
allow_harmony_async_iteration_(false),
allow_harmony_template_escapes_(false) {}
@@ -289,6 +292,7 @@ class ParserBase {
ALLOW_ACCESSORS(harmony_class_fields);
ALLOW_ACCESSORS(harmony_object_rest_spread);
ALLOW_ACCESSORS(harmony_dynamic_import);
+ ALLOW_ACCESSORS(harmony_import_meta);
ALLOW_ACCESSORS(harmony_async_iteration);
ALLOW_ACCESSORS(harmony_template_escapes);
@@ -507,12 +511,11 @@ class ParserBase {
};
DeclarationParsingResult()
- : declarations(4),
- first_initializer_loc(Scanner::Location::invalid()),
+ : first_initializer_loc(Scanner::Location::invalid()),
bindings_loc(Scanner::Location::invalid()) {}
DeclarationDescriptor descriptor;
- List<Declaration> declarations;
+ std::vector<Declaration> declarations;
Scanner::Location first_initializer_loc;
Scanner::Location bindings_loc;
};
@@ -550,7 +553,7 @@ class ParserBase {
struct ClassInfo {
public:
explicit ClassInfo(ParserBase* parser)
- : proxy(nullptr),
+ : variable(nullptr),
extends(parser->impl()->NullExpression()),
properties(parser->impl()->NewClassPropertyList(4)),
constructor(parser->impl()->NullExpression()),
@@ -558,7 +561,7 @@ class ParserBase {
has_name_static_property(false),
has_static_computed_names(false),
is_anonymous(false) {}
- VariableProxy* proxy;
+ Variable* variable;
ExpressionT extends;
typename Types::ClassPropertyList properties;
FunctionLiteralT constructor;
@@ -1087,10 +1090,11 @@ class ParserBase {
ObjectLiteralChecker* checker, bool* is_computed_name,
bool* is_rest_property, bool* ok);
ExpressionListT ParseArguments(Scanner::Location* first_spread_pos,
- bool maybe_arrow, bool* ok);
+ bool maybe_arrow,
+ bool* is_simple_parameter_list, bool* ok);
ExpressionListT ParseArguments(Scanner::Location* first_spread_pos,
bool* ok) {
- return ParseArguments(first_spread_pos, false, ok);
+ return ParseArguments(first_spread_pos, false, nullptr, ok);
}
ExpressionT ParseAssignmentExpression(bool accept_IN, bool* ok);
@@ -1123,7 +1127,7 @@ class ParserBase {
ExpressionT ParseTemplateLiteral(ExpressionT tag, int start, bool tagged,
bool* ok);
ExpressionT ParseSuperExpression(bool is_new, bool* ok);
- ExpressionT ParseDynamicImportExpression(bool* ok);
+ ExpressionT ParseImportExpressions(bool* ok);
ExpressionT ParseNewTargetExpression(bool* ok);
void ParseFormalParameter(FormalParametersT* parameters, bool* ok);
@@ -1180,8 +1184,6 @@ class ParserBase {
StatementT ParseStatement(ZoneList<const AstRawString*>* labels,
AllowLabelledFunctionStatement allow_function,
bool* ok);
- StatementT ParseStatementAsUnlabelled(ZoneList<const AstRawString*>* labels,
- bool* ok);
BlockT ParseBlock(ZoneList<const AstRawString*>* labels, bool* ok);
// Parse a SubStatement in strict mode, or with an extra block scope in
@@ -1500,6 +1502,7 @@ class ParserBase {
bool allow_harmony_class_fields_;
bool allow_harmony_object_rest_spread_;
bool allow_harmony_dynamic_import_;
+ bool allow_harmony_import_meta_;
bool allow_harmony_async_iteration_;
bool allow_harmony_template_escapes_;
@@ -2666,7 +2669,8 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseObjectLiteral(
template <typename Impl>
typename ParserBase<Impl>::ExpressionListT ParserBase<Impl>::ParseArguments(
- Scanner::Location* first_spread_arg_loc, bool maybe_arrow, bool* ok) {
+ Scanner::Location* first_spread_arg_loc, bool maybe_arrow,
+ bool* is_simple_parameter_list, bool* ok) {
// Arguments ::
// '(' (AssignmentExpression)*[','] ')'
@@ -2681,10 +2685,17 @@ typename ParserBase<Impl>::ExpressionListT ParserBase<Impl>::ParseArguments(
ExpressionT argument =
ParseAssignmentExpression(true, CHECK_OK_CUSTOM(NullExpressionList));
+ if (!impl()->IsIdentifier(argument) &&
+ is_simple_parameter_list != nullptr) {
+ *is_simple_parameter_list = false;
+ }
if (!maybe_arrow) {
impl()->RewriteNonPattern(CHECK_OK_CUSTOM(NullExpressionList));
}
if (is_spread) {
+ if (is_simple_parameter_list != nullptr) {
+ *is_simple_parameter_list = false;
+ }
if (!spread_arg.IsValid()) {
spread_arg.beg_pos = start_pos;
spread_arg.end_pos = peek_position();
@@ -3244,7 +3255,9 @@ ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
ExpressionListT args;
if (V8_UNLIKELY(is_async && impl()->IsIdentifier(result))) {
ExpressionClassifier async_classifier(this);
- args = ParseArguments(&spread_pos, true, CHECK_OK);
+ bool is_simple_parameter_list = true;
+ args = ParseArguments(&spread_pos, true, &is_simple_parameter_list,
+ CHECK_OK);
if (peek() == Token::ARROW) {
if (fni_) {
fni_->RemoveAsyncKeywordFromEnd();
@@ -3259,6 +3272,9 @@ ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
}
if (args->length()) {
// async ( Arguments ) => ...
+ if (!is_simple_parameter_list) {
+ async_classifier.previous()->RecordNonSimpleParameter();
+ }
return impl()->ExpressionListToExpression(args);
}
// async () => ...
@@ -3267,7 +3283,7 @@ ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
AccumulateFormalParameterContainmentErrors();
}
} else {
- args = ParseArguments(&spread_pos, false, CHECK_OK);
+ args = ParseArguments(&spread_pos, CHECK_OK);
}
ArrowFormalParametersUnexpectedToken();
@@ -3365,7 +3381,8 @@ ParserBase<Impl>::ParseMemberWithNewPrefixesExpression(bool* is_async,
if (peek() == Token::SUPER) {
const bool is_new = true;
result = ParseSuperExpression(is_new, CHECK_OK);
- } else if (allow_harmony_dynamic_import() && peek() == Token::IMPORT) {
+ } else if (allow_harmony_dynamic_import() && peek() == Token::IMPORT &&
+ (!allow_harmony_import_meta() || PeekAhead() == Token::LPAREN)) {
impl()->ReportMessageAt(scanner()->peek_location(),
MessageTemplate::kImportCallNotNewExpression);
*ok = false;
@@ -3473,7 +3490,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseMemberExpression(
const bool is_new = false;
result = ParseSuperExpression(is_new, CHECK_OK);
} else if (allow_harmony_dynamic_import() && peek() == Token::IMPORT) {
- result = ParseDynamicImportExpression(CHECK_OK);
+ result = ParseImportExpressions(CHECK_OK);
} else {
result = ParsePrimaryExpression(is_async, CHECK_OK);
}
@@ -3483,11 +3500,26 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseMemberExpression(
}
template <typename Impl>
-typename ParserBase<Impl>::ExpressionT
-ParserBase<Impl>::ParseDynamicImportExpression(bool* ok) {
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseImportExpressions(
+ bool* ok) {
DCHECK(allow_harmony_dynamic_import());
Consume(Token::IMPORT);
int pos = position();
+ if (allow_harmony_import_meta() && peek() == Token::PERIOD) {
+ classifier()->RecordPatternError(
+ Scanner::Location(pos, scanner()->location().end_pos),
+ MessageTemplate::kInvalidDestructuringTarget);
+ ArrowFormalParametersUnexpectedToken();
+ ExpectMetaProperty(Token::META, "import.meta", pos, CHECK_OK);
+ if (!parsing_module_) {
+ impl()->ReportMessageAt(scanner()->location(),
+ MessageTemplate::kImportMetaOutsideModule);
+ *ok = false;
+ return impl()->NullExpression();
+ }
+
+ return impl()->ExpressionFromLiteral(Token::NULL_LITERAL, pos);
+ }
Expect(Token::LPAREN, CHECK_OK);
ExpressionT arg = ParseAssignmentExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
@@ -3838,7 +3870,7 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseVariableDeclarations(
decl.value_beg_position = value_beg_position;
if (var_context == kForStatement) {
// Save the declaration for further handling in ParseForStatement.
- parsing_result->declarations.Add(decl);
+ parsing_result->declarations.push_back(decl);
} else {
// Immediately declare the variable otherwise. This avoids O(N^2)
// behavior (where N is the number of variables in a single
@@ -4871,23 +4903,25 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatement(
}
return ParseForStatement(labels, ok);
case Token::CONTINUE:
+ return ParseContinueStatement(ok);
case Token::BREAK:
+ return ParseBreakStatement(labels, ok);
case Token::RETURN:
+ return ParseReturnStatement(ok);
case Token::THROW:
+ return ParseThrowStatement(ok);
case Token::TRY: {
- // These statements must have their labels preserved in an enclosing
- // block, as the corresponding AST nodes do not currently store their
- // labels.
- // TODO(nikolaos, marja): Consider adding the labels to the AST nodes.
- if (labels == nullptr) {
- return ParseStatementAsUnlabelled(labels, ok);
- } else {
- BlockT result = factory()->NewBlock(1, false, labels);
- typename Types::Target target(this, result);
- StatementT statement = ParseStatementAsUnlabelled(labels, CHECK_OK);
- result->statements()->Add(statement, zone());
- return result;
- }
+ // It is somewhat complicated to have labels on try-statements.
+ // When breaking out of a try-finally statement, one must take
+ // great care not to treat it as a fall-through. It is much easier
+ // just to wrap the entire try-statement in a statement block and
+ // put the labels there.
+ if (labels == nullptr) return ParseTryStatement(ok);
+ BlockT result = factory()->NewBlock(1, false, labels);
+ typename Types::Target target(this, result);
+ StatementT statement = ParseTryStatement(CHECK_OK);
+ result->statements()->Add(statement, zone());
+ return result;
}
case Token::WITH:
return ParseWithStatement(labels, ok);
@@ -4924,29 +4958,6 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatement(
}
}
-// This method parses a subset of statements (break, continue, return, throw,
-// try) which are to be grouped because they all require their labeles to be
-// preserved in an enclosing block.
-template <typename Impl>
-typename ParserBase<Impl>::StatementT
-ParserBase<Impl>::ParseStatementAsUnlabelled(
- ZoneList<const AstRawString*>* labels, bool* ok) {
- switch (peek()) {
- case Token::CONTINUE:
- return ParseContinueStatement(ok);
- case Token::BREAK:
- return ParseBreakStatement(labels, ok);
- case Token::RETURN:
- return ParseReturnStatement(ok);
- case Token::THROW:
- return ParseThrowStatement(ok);
- case Token::TRY:
- return ParseTryStatement(ok);
- default:
- UNREACHABLE();
- }
-}
-
template <typename Impl>
typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseBlock(
ZoneList<const AstRawString*>* labels, bool* ok) {
@@ -5396,7 +5407,8 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseSwitchStatement(
ExpressionT tag = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- auto switch_statement = factory()->NewSwitchStatement(labels, switch_pos);
+ auto switch_statement =
+ factory()->NewSwitchStatement(labels, tag, switch_pos);
{
BlockState cases_block_state(zone(), &scope_);
@@ -5405,7 +5417,6 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseSwitchStatement(
typename Types::Target target(this, switch_statement);
bool default_seen = false;
- auto cases = impl()->NewCaseClauseList(4);
Expect(Token::LBRACE, CHECK_OK);
while (peek() != Token::RBRACE) {
// An empty label indicates the default case.
@@ -5424,24 +5435,26 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseSwitchStatement(
default_seen = true;
}
Expect(Token::COLON, CHECK_OK);
- int clause_pos = position();
StatementListT statements = impl()->NewStatementList(5);
while (peek() != Token::CASE && peek() != Token::DEFAULT &&
peek() != Token::RBRACE) {
StatementT stat = ParseStatementListItem(CHECK_OK);
statements->Add(stat, zone());
}
- auto clause = factory()->NewCaseClause(label, statements, clause_pos);
+ auto clause = factory()->NewCaseClause(label, statements);
impl()->RecordCaseClauseSourceRange(clause, range_scope.Finalize());
- cases->Add(clause, zone());
+ switch_statement->cases()->Add(clause, zone());
}
Expect(Token::RBRACE, CHECK_OK);
int end_position = scanner()->location().end_pos;
scope()->set_end_position(end_position);
impl()->RecordSwitchStatementSourceRange(switch_statement, end_position);
- return impl()->RewriteSwitchStatement(tag, switch_statement, cases,
- scope()->FinalizeBlockScope());
+ Scope* switch_scope = scope()->FinalizeBlockScope();
+ if (switch_scope != nullptr) {
+ return impl()->RewriteSwitchStatement(switch_statement, switch_scope);
+ }
+ return switch_statement;
}
}
@@ -5648,7 +5661,7 @@ ParserBase<Impl>::ParseForEachStatementWithDeclarations(
int stmt_pos, ForInfo* for_info, ZoneList<const AstRawString*>* labels,
Scope* inner_block_scope, bool* ok) {
// Just one declaration followed by in/of.
- if (for_info->parsing_result.declarations.length() != 1) {
+ if (for_info->parsing_result.declarations.size() != 1) {
impl()->ReportMessageAt(for_info->parsing_result.bindings_loc,
MessageTemplate::kForInOfLoopMultiBindings,
ForEachStatement::VisitModeString(for_info->mode));
@@ -5916,7 +5929,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
for_info.position = scanner()->location().beg_pos;
// Only a single declaration is allowed in for-await-of loops
- if (for_info.parsing_result.declarations.length() != 1) {
+ if (for_info.parsing_result.declarations.size() != 1) {
impl()->ReportMessageAt(for_info.parsing_result.bindings_loc,
MessageTemplate::kForInOfLoopMultiBindings,
"for-await-of");
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index 9915a18881..a554d7d242 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -332,21 +332,6 @@ Expression* Parser::BuildUnaryExpression(Expression* expression,
}
}
}
- // Desugar '+foo' => 'foo*1'
- if (op == Token::ADD) {
- return factory()->NewBinaryOperation(
- Token::MUL, expression, factory()->NewNumberLiteral(1, pos), pos);
- }
- // The same idea for '-foo' => 'foo*(-1)'.
- if (op == Token::SUB) {
- return factory()->NewBinaryOperation(
- Token::MUL, expression, factory()->NewNumberLiteral(-1, pos), pos);
- }
- // ...and one more time for '~foo' => 'foo^(~0)'.
- if (op == Token::BIT_NOT) {
- return factory()->NewBinaryOperation(
- Token::BIT_XOR, expression, factory()->NewNumberLiteral(~0, pos), pos);
- }
return factory()->NewUnaryOperation(op, expression, pos);
}
@@ -479,7 +464,7 @@ Parser::Parser(ParseInfo* info)
: ParserBase<Parser>(info->zone(), &scanner_, info->stack_limit(),
info->extension(), info->GetOrCreateAstValueFactory(),
info->runtime_call_stats(), true),
- scanner_(info->unicode_cache()),
+ scanner_(info->unicode_cache(), use_counts_),
reusable_preparser_(nullptr),
mode_(PARSE_EAGERLY), // Lazy mode must be set explicitly.
source_range_map_(info->source_range_map()),
@@ -504,10 +489,8 @@ Parser::Parser(ParseInfo* info)
// aggressive about lazy compilation, because it might trigger compilation
// of functions without an outer context when setting a breakpoint through
// Debug::FindSharedFunctionInfoInScript
- bool can_compile_lazily = FLAG_lazy && !info->is_debug();
-
- // Consider compiling eagerly when targeting the code cache.
- can_compile_lazily &= !(FLAG_serialize_eager && info->will_serialize());
+ // We also compile eagerly for kProduceExhaustiveCodeCache.
+ bool can_compile_lazily = FLAG_lazy && !info->is_eager();
set_default_eager_compile_hint(can_compile_lazily
? FunctionLiteral::kShouldLazyCompile
@@ -521,6 +504,7 @@ Parser::Parser(ParseInfo* info)
set_allow_harmony_class_fields(FLAG_harmony_class_fields);
set_allow_harmony_object_rest_spread(FLAG_harmony_object_rest_spread);
set_allow_harmony_dynamic_import(FLAG_harmony_dynamic_import);
+ set_allow_harmony_import_meta(FLAG_harmony_import_meta);
set_allow_harmony_async_iteration(FLAG_harmony_async_iteration);
set_allow_harmony_template_escapes(FLAG_harmony_template_escapes);
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
@@ -742,12 +726,7 @@ FunctionLiteral* Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
timer.Start();
}
DeserializeScopeChain(info, info->maybe_outer_scope_info());
- if (info->asm_function_scope()) {
- original_scope_ = info->asm_function_scope();
- factory()->set_zone(info->zone());
- } else {
- DCHECK_EQ(factory()->zone(), info->zone());
- }
+ DCHECK_EQ(factory()->zone(), info->zone());
// Initialize parser state.
Handle<String> name(shared_info->name());
@@ -933,12 +912,15 @@ Statement* Parser::ParseModuleItem(bool* ok) {
return ParseExportDeclaration(ok);
}
- // We must be careful not to parse a dynamic import expression as an import
- // declaration.
- if (next == Token::IMPORT &&
- (!allow_harmony_dynamic_import() || PeekAhead() != Token::LPAREN)) {
- ParseImportDeclaration(CHECK_OK);
- return factory()->NewEmptyStatement(kNoSourcePosition);
+ if (next == Token::IMPORT) {
+ // We must be careful not to parse a dynamic import expression as an import
+ // declaration. Same for import.meta expressions.
+ Token::Value peek_ahead = PeekAhead();
+ if ((!allow_harmony_dynamic_import() || peek_ahead != Token::LPAREN) &&
+ (!allow_harmony_import_meta() || peek_ahead != Token::PERIOD)) {
+ ParseImportDeclaration(CHECK_OK);
+ return factory()->NewEmptyStatement(kNoSourcePosition);
+ }
}
return ParseStatementListItem(ok);
@@ -1584,9 +1566,7 @@ Expression* Parser::RewriteDoExpression(Block* body, int pos, bool* ok) {
return expr;
}
-Statement* Parser::RewriteSwitchStatement(Expression* tag,
- SwitchStatement* switch_statement,
- ZoneList<CaseClause*>* cases,
+Statement* Parser::RewriteSwitchStatement(SwitchStatement* switch_statement,
Scope* scope) {
// In order to get the CaseClauses to execute in their own lexical scope,
// but without requiring downstream code to have special scope handling
@@ -1597,35 +1577,29 @@ Statement* Parser::RewriteSwitchStatement(Expression* tag,
// switch (.tag_variable) { CaseClause* }
// }
// }
+ DCHECK_NOT_NULL(scope);
+ DCHECK(scope->is_block_scope());
+ DCHECK_GE(switch_statement->position(), scope->start_position());
+ DCHECK_LT(switch_statement->position(), scope->end_position());
Block* switch_block = factory()->NewBlock(2, false);
+ Expression* tag = switch_statement->tag();
Variable* tag_variable =
NewTemporary(ast_value_factory()->dot_switch_tag_string());
Assignment* tag_assign = factory()->NewAssignment(
Token::ASSIGN, factory()->NewVariableProxy(tag_variable), tag,
tag->position());
- Statement* tag_statement =
- factory()->NewExpressionStatement(tag_assign, kNoSourcePosition);
+ // Wrap with IgnoreCompletion so the tag isn't returned as the completion
+ // value, in case the switch statements don't have a value.
+ Statement* tag_statement = IgnoreCompletion(
+ factory()->NewExpressionStatement(tag_assign, kNoSourcePosition));
switch_block->statements()->Add(tag_statement, zone());
- // make statement: undefined;
- // This is needed so the tag isn't returned as the value, in case the switch
- // statements don't have a value.
- switch_block->statements()->Add(
- factory()->NewExpressionStatement(
- factory()->NewUndefinedLiteral(kNoSourcePosition), kNoSourcePosition),
- zone());
-
- Expression* tag_read = factory()->NewVariableProxy(tag_variable);
- switch_statement->Initialize(tag_read, cases);
+ switch_statement->set_tag(factory()->NewVariableProxy(tag_variable));
Block* cases_block = factory()->NewBlock(1, false);
cases_block->statements()->Add(switch_statement, zone());
cases_block->set_scope(scope);
- DCHECK_IMPLIES(scope != nullptr,
- switch_statement->position() >= scope->start_position());
- DCHECK_IMPLIES(scope != nullptr,
- switch_statement->position() < scope->end_position());
switch_block->statements()->Add(cases_block, zone());
return switch_block;
}
@@ -1956,7 +1930,7 @@ void Parser::DesugarBindingInForEachStatement(ForInfo* for_info,
Block** body_block,
Expression** each_variable,
bool* ok) {
- DCHECK(for_info->parsing_result.declarations.length() == 1);
+ DCHECK_EQ(1, for_info->parsing_result.declarations.size());
DeclarationParsingResult::Declaration& decl =
for_info->parsing_result.declarations[0];
Variable* temp = NewTemporary(ast_value_factory()->dot_for_string());
@@ -2417,9 +2391,8 @@ void Parser::AddArrowFunctionFormalParameters(
expr = expr->AsSpread()->expression();
parameters->has_rest = true;
}
- if (parameters->is_simple) {
- parameters->is_simple = !is_rest && expr->IsVariableProxy();
- }
+ DCHECK_IMPLIES(parameters->is_simple, !is_rest);
+ DCHECK_IMPLIES(parameters->is_simple, expr->IsVariableProxy());
Expression* initializer = nullptr;
if (expr->IsAssignment()) {
@@ -2817,7 +2790,7 @@ Parser::LazyParsingResult Parser::SkipFunction(
DCHECK(log_);
log_->LogFunction(function_scope->start_position(),
function_scope->end_position(), *num_parameters,
- language_mode(), function_scope->uses_super_property(),
+ language_mode(), function_scope->NeedsHomeObject(),
logger->num_inner_functions());
}
return kLazyParsingComplete;
@@ -3155,11 +3128,12 @@ void Parser::DeclareClassVariable(const AstRawString* name,
#endif
if (name != nullptr) {
- class_info->proxy = factory()->NewVariableProxy(name, NORMAL_VARIABLE);
+ VariableProxy* proxy = factory()->NewVariableProxy(name, NORMAL_VARIABLE);
Declaration* declaration =
- factory()->NewVariableDeclaration(class_info->proxy, class_token_pos);
- Declare(declaration, DeclarationDescriptor::NORMAL, CONST,
- Variable::DefaultInitializationFlag(CONST), ok);
+ factory()->NewVariableDeclaration(proxy, class_token_pos);
+ class_info->variable =
+ Declare(declaration, DeclarationDescriptor::NORMAL, CONST,
+ Variable::DefaultInitializationFlag(CONST), ok);
}
}
@@ -3213,12 +3187,12 @@ Expression* Parser::RewriteClassLiteral(Scope* block_scope,
}
if (name != nullptr) {
- DCHECK_NOT_NULL(class_info->proxy);
- class_info->proxy->var()->set_initializer_position(end_pos);
+ DCHECK_NOT_NULL(class_info->variable);
+ class_info->variable->set_initializer_position(end_pos);
}
ClassLiteral* class_literal = factory()->NewClassLiteral(
- block_scope, class_info->proxy, class_info->extends,
+ block_scope, class_info->variable, class_info->extends,
class_info->constructor, class_info->properties, pos, end_pos,
class_info->has_name_static_property,
class_info->has_static_computed_names, class_info->is_anonymous);
@@ -3409,8 +3383,9 @@ void Parser::ParseOnBackground(ParseInfo* info) {
if (result != NULL) *info->cached_data() = logger.GetScriptData();
log_ = NULL;
}
- if (FLAG_runtime_stats &
- v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
+ if (runtime_call_stats_ &&
+ (FLAG_runtime_stats &
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
auto value = v8::tracing::TracedValue::Create();
runtime_call_stats_->Dump(value.get());
TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats"),
@@ -3450,8 +3425,8 @@ Expression* Parser::CloseTemplateLiteral(TemplateLiteralState* state, int start,
Expression* tag) {
TemplateLiteral* lit = *state;
int pos = lit->position();
- const ZoneList<Expression*>* cooked_strings = lit->cooked();
- const ZoneList<Expression*>* raw_strings = lit->raw();
+ const ZoneList<Literal*>* cooked_strings = lit->cooked();
+ const ZoneList<Literal*>* raw_strings = lit->raw();
const ZoneList<Expression*>* expressions = lit->expressions();
DCHECK_EQ(cooked_strings->length(), raw_strings->length());
DCHECK_EQ(cooked_strings->length(), expressions->length() + 1);
@@ -3478,38 +3453,39 @@ Expression* Parser::CloseTemplateLiteral(TemplateLiteralState* state, int start,
}
return expr;
} else {
- uint32_t hash = ComputeTemplateLiteralHash(lit);
-
- // $getTemplateCallSite
- ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(4, zone());
- args->Add(factory()->NewArrayLiteral(
- const_cast<ZoneList<Expression*>*>(cooked_strings), pos),
- zone());
- args->Add(factory()->NewArrayLiteral(
- const_cast<ZoneList<Expression*>*>(raw_strings), pos),
- zone());
-
- // Truncate hash to Smi-range.
- Smi* hash_obj = Smi::cast(Internals::IntToSmi(static_cast<int>(hash)));
- args->Add(factory()->NewNumberLiteral(hash_obj->value(), pos), zone());
-
- Expression* call_site = factory()->NewCallRuntime(
- Context::GET_TEMPLATE_CALL_SITE_INDEX, args, start);
+ // GetTemplateObject
+ const int32_t hash = ComputeTemplateLiteralHash(lit);
+ Expression* template_object = factory()->NewGetTemplateObject(
+ const_cast<ZoneList<Literal*>*>(cooked_strings),
+ const_cast<ZoneList<Literal*>*>(raw_strings), hash, pos);
// Call TagFn
ZoneList<Expression*>* call_args =
new (zone()) ZoneList<Expression*>(expressions->length() + 1, zone());
- call_args->Add(call_site, zone());
+ call_args->Add(template_object, zone());
call_args->AddAll(*expressions, zone());
return factory()->NewCall(tag, call_args, pos);
}
}
+namespace {
+
+// http://burtleburtle.net/bob/hash/integer.html
+uint32_t HalfAvalance(uint32_t a) {
+ a = (a + 0x479ab41d) + (a << 8);
+ a = (a ^ 0xe4aa10ce) ^ (a >> 5);
+ a = (a + 0x9942f0a6) - (a << 14);
+ a = (a ^ 0x5aedd67d) ^ (a >> 3);
+ a = (a + 0x17bea992) + (a << 7);
+ return a;
+}
+
+} // namespace
-uint32_t Parser::ComputeTemplateLiteralHash(const TemplateLiteral* lit) {
- const ZoneList<Expression*>* raw_strings = lit->raw();
+int32_t Parser::ComputeTemplateLiteralHash(const TemplateLiteral* lit) {
+ const ZoneList<Literal*>* raw_strings = lit->raw();
int total = raw_strings->length();
- DCHECK(total);
+ DCHECK_GT(total, 0);
uint32_t running_hash = 0;
@@ -3532,7 +3508,10 @@ uint32_t Parser::ComputeTemplateLiteralHash(const TemplateLiteral* lit) {
}
}
- return running_hash;
+ // Pass {running_hash} through a decent 'half avalanche' hash function
+ // and take the most significant bits (in Smi range).
+ return static_cast<int32_t>(HalfAvalance(running_hash)) >>
+ (sizeof(int32_t) * CHAR_BIT - kSmiValueSize);
}
namespace {
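(Annotation, not part of the patch: the returned hash must fit in a Smi, so after mixing with HalfAvalance the arithmetic shift keeps only the top kSmiValueSize bits. A standalone sketch of the computation, assuming kSmiValueSize is 31 as on 32-bit targets; V8 defines it per platform.)

#include <climits>
#include <cstdint>

constexpr int kSmiValueSize = 31;  // assumption; platform-dependent in V8

// Same mixing constants as the HalfAvalance function in the patch.
uint32_t HalfAvalance(uint32_t a) {
  a = (a + 0x479ab41d) + (a << 8);
  a = (a ^ 0xe4aa10ce) ^ (a >> 5);
  a = (a + 0x9942f0a6) - (a << 14);
  a = (a ^ 0x5aedd67d) ^ (a >> 3);
  a = (a + 0x17bea992) + (a << 7);
  return a;
}

int32_t ToSmiRangeHash(uint32_t running_hash) {
  // Arithmetic right shift by (32 - 31) = 1 under the assumption above,
  // keeping the most significant kSmiValueSize bits, sign included.
  return static_cast<int32_t>(HalfAvalance(running_hash)) >>
         (sizeof(int32_t) * CHAR_BIT - kSmiValueSize);
}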
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index 1b2c98ccf5..296762c14d 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -295,8 +295,10 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
SET_ALLOW(harmony_class_fields);
SET_ALLOW(harmony_object_rest_spread);
SET_ALLOW(harmony_dynamic_import);
+ SET_ALLOW(harmony_import_meta);
SET_ALLOW(harmony_async_iteration);
SET_ALLOW(harmony_template_escapes);
+ SET_ALLOW(harmony_restrictive_generators);
#undef SET_ALLOW
}
return reusable_preparser_;
@@ -331,9 +333,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
bool ContainsLabel(ZoneList<const AstRawString*>* labels,
const AstRawString* label);
Expression* RewriteReturn(Expression* return_value, int pos);
- Statement* RewriteSwitchStatement(Expression* tag,
- SwitchStatement* switch_statement,
- ZoneList<CaseClause*>* cases, Scope* scope);
+ Statement* RewriteSwitchStatement(SwitchStatement* switch_statement,
+ Scope* scope);
void RewriteCatchPattern(CatchInfo* catch_info, bool* ok);
void ValidateCatchBlock(const CatchInfo& catch_info, bool* ok);
Statement* RewriteTryStatement(Block* try_block, Block* catch_block,
@@ -498,8 +499,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
TemplateLiteral(Zone* zone, int pos)
: cooked_(8, zone), raw_(8, zone), expressions_(8, zone), pos_(pos) {}
- const ZoneList<Expression*>* cooked() const { return &cooked_; }
- const ZoneList<Expression*>* raw() const { return &raw_; }
+ const ZoneList<Literal*>* cooked() const { return &cooked_; }
+ const ZoneList<Literal*>* raw() const { return &raw_; }
const ZoneList<Expression*>* expressions() const { return &expressions_; }
int position() const { return pos_; }
@@ -516,8 +517,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
}
private:
- ZoneList<Expression*> cooked_;
- ZoneList<Expression*> raw_;
+ ZoneList<Literal*> cooked_;
+ ZoneList<Literal*> raw_;
ZoneList<Expression*> expressions_;
int pos_;
};
@@ -538,7 +539,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Expression* expression);
Expression* CloseTemplateLiteral(TemplateLiteralState* state, int start,
Expression* tag);
- uint32_t ComputeTemplateLiteralHash(const TemplateLiteral* lit);
+ int32_t ComputeTemplateLiteralHash(const TemplateLiteral* lit);
ZoneList<Expression*>* PrepareSpreadArguments(ZoneList<Expression*>* list);
Expression* SpreadCall(Expression* function, ZoneList<Expression*>* args,
@@ -913,9 +914,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
V8_INLINE ZoneList<Statement*>* NewStatementList(int size) const {
return new (zone()) ZoneList<Statement*>(size, zone());
}
- V8_INLINE ZoneList<CaseClause*>* NewCaseClauseList(int size) const {
- return new (zone()) ZoneList<CaseClause*>(size, zone());
- }
V8_INLINE Expression* NewV8Intrinsic(const AstRawString* name,
ZoneList<Expression*>* args, int pos,
diff --git a/deps/v8/src/parsing/pattern-rewriter.cc b/deps/v8/src/parsing/pattern-rewriter.cc
index c5e4b8c024..2f5d248aed 100644
--- a/deps/v8/src/parsing/pattern-rewriter.cc
+++ b/deps/v8/src/parsing/pattern-rewriter.cc
@@ -731,7 +731,6 @@ NOT_A_PATTERN(BreakStatement)
NOT_A_PATTERN(Call)
NOT_A_PATTERN(CallNew)
NOT_A_PATTERN(CallRuntime)
-NOT_A_PATTERN(CaseClause)
NOT_A_PATTERN(ClassLiteral)
NOT_A_PATTERN(CompareOperation)
NOT_A_PATTERN(CompoundAssignment)
@@ -750,6 +749,7 @@ NOT_A_PATTERN(ForStatement)
NOT_A_PATTERN(FunctionDeclaration)
NOT_A_PATTERN(FunctionLiteral)
NOT_A_PATTERN(GetIterator)
+NOT_A_PATTERN(GetTemplateObject)
NOT_A_PATTERN(IfStatement)
NOT_A_PATTERN(ImportCallExpression)
NOT_A_PATTERN(Literal)
diff --git a/deps/v8/src/parsing/preparsed-scope-data.cc b/deps/v8/src/parsing/preparsed-scope-data.cc
index db78a46194..8d2ce2d1a5 100644
--- a/deps/v8/src/parsing/preparsed-scope-data.cc
+++ b/deps/v8/src/parsing/preparsed-scope-data.cc
@@ -28,14 +28,15 @@ class VariableContextAllocatedField
const int kMagicValue = 0xc0de0de;
-enum SkippableFunctionDataOffsets {
- kStartPosition,
- kEndPosition,
- kNumParameters,
- kNumInnerFunctions,
- kLanguageAndSuper,
- kSize
-};
+#ifdef DEBUG
+const size_t kUint32Size = 5;
+const size_t kUint8Size = 2;
+#else
+const size_t kUint32Size = 4;
+const size_t kUint8Size = 1;
+#endif
+
+const int kSkippableFunctionDataSize = 4 * kUint32Size + 1 * kUint8Size;
STATIC_ASSERT(LANGUAGE_END == 2);
class LanguageField : public BitField<int, 0, 1> {};
@@ -45,17 +46,20 @@ class UsesSuperField : public BitField<bool, LanguageField::kNext, 1> {};
/*
- Internal data format for the backing store of ProducedPreparsedScopeData:
+ Internal data format for the backing store of ProducedPreparsedScopeData and
+ PreParsedScopeData::scope_data (on the heap):
(Skippable function data:)
------------------------------------
+ | scope_data_start |
+ ------------------------------------
| data for inner function 1 |
| ... |
------------------------------------
| data for inner function n |
| ... |
------------------------------------
- (Scope allocation data:)
+ (Scope allocation data:) << scope_data_start points here
------------------------------------
magic value
------------------------------------
@@ -78,31 +82,83 @@ class UsesSuperField : public BitField<bool, LanguageField::kNext, 1> {};
| ... |
------------------------------------
+ PreParsedScopeData::child_data is an array of PreParsedScopeData objects, one
+ for each skippable inner function.
- Data format for PreParsedScopeData (on the heap):
+ ConsumedPreParsedScopeData wraps a PreParsedScopeData and reads data from it.
- PreParsedScopeData::scope_data:
+ */
- ------------------------------------
- | scope_data_start |
- ------------------------------------
- | Skippable function data |
- | (see above) |
- | ... |
- ------------------------------------
- ------------------------------------
- | Scope allocation data | << scope_data_start points here
- | (see above) |
- | ... |
- ------------------------------------
+void ProducedPreParsedScopeData::ByteData::WriteUint32(uint32_t data) {
+#ifdef DEBUG
+ // Save expected item size in debug mode.
+ backing_store_.push_back(kUint32Size);
+#endif
+ const uint8_t* d = reinterpret_cast<uint8_t*>(&data);
+ for (int i = 0; i < 4; ++i) {
+ backing_store_.push_back(*d++);
+ }
+}
- PreParsedScopeData::child_data is an array of PreParsedScopeData objects, one
- for each skippable inner function.
+void ProducedPreParsedScopeData::ByteData::OverwriteFirstUint32(uint32_t data) {
+ auto it = backing_store_.begin();
+#ifdef DEBUG
+ // Check that the position already holds an item of the expected size.
+ DCHECK_GE(backing_store_.size(), kUint32Size);
+ DCHECK_EQ(*it, kUint32Size);
+ ++it;
+#endif
+ const uint8_t* d = reinterpret_cast<uint8_t*>(&data);
+ for (size_t i = 0; i < 4; ++i) {
+ *it++ = *d++;
+ }
+}
+void ProducedPreParsedScopeData::ByteData::WriteUint8(uint8_t data) {
+#ifdef DEBUG
+ // Save expected item size in debug mode.
+ backing_store_.push_back(kUint8Size);
+#endif
+ backing_store_.push_back(data);
+}
- ConsumedPreParsedScopeData wraps a PreParsedScopeData and reads data from it.
+Handle<PodArray<uint8_t>> ProducedPreParsedScopeData::ByteData::Serialize(
+ Isolate* isolate) {
+ Handle<PodArray<uint8_t>> array = PodArray<uint8_t>::New(
+ isolate, static_cast<int>(backing_store_.size()), TENURED);
- */
+ DisallowHeapAllocation no_gc;
+ PodArray<uint8_t>* raw_array = *array;
+
+ int i = 0;
+ for (uint8_t item : backing_store_) {
+ raw_array->set(i++, item);
+ }
+ return array;
+}
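(Annotation, not part of the patch: the move from a uint32 backing store to raw bytes is the space win flagged by the removed FIXME below. A standalone sketch of the encoding with a std::vector stand-in; debug builds tag each item with its size so a consumer can verify item boundaries. Per skippable function this is 4*4 + 1 = 17 bytes in release builds and 4*5 + 2 = 22 in debug, versus five uint32 slots, 20 bytes, before.)

#include <cstdint>
#include <cstring>
#include <vector>

void WriteUint32(std::vector<uint8_t>* out, uint32_t data) {
#ifdef DEBUG
  out->push_back(5);  // kUint32Size in debug builds: size tag plus payload
#endif
  uint8_t bytes[4];
  std::memcpy(bytes, &data, sizeof(bytes));  // host endianness, as above
  out->insert(out->end(), bytes, bytes + 4);
}

void WriteUint8(std::vector<uint8_t>* out, uint8_t data) {
#ifdef DEBUG
  out->push_back(2);  // kUint8Size in debug builds
#endif
  out->push_back(data);
}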
+
+ProducedPreParsedScopeData::ProducedPreParsedScopeData(
+ Zone* zone, ProducedPreParsedScopeData* parent)
+ : parent_(parent),
+ byte_data_(new (zone) ByteData(zone)),
+ data_for_inner_functions_(zone),
+ bailed_out_(false) {
+ if (parent != nullptr) {
+ parent->data_for_inner_functions_.push_back(this);
+ }
+ // Reserve space for scope_data_start, written later:
+ byte_data_->WriteUint32(0);
+}
+
+// Create a ProducedPreParsedScopeData which is just a proxy for a previously
+// produced PreParsedScopeData.
+ProducedPreParsedScopeData::ProducedPreParsedScopeData(
+ Handle<PreParsedScopeData> data, Zone* zone)
+ : parent_(nullptr),
+ byte_data_(nullptr),
+ data_for_inner_functions_(zone),
+ bailed_out_(false),
+ previously_produced_preparsed_scope_data_(data) {}
ProducedPreParsedScopeData::DataGatheringScope::DataGatheringScope(
DeclarationScope* function_scope, PreParser* preparser)
@@ -137,7 +193,7 @@ void ProducedPreParsedScopeData::DataGatheringScope::MarkFunctionAsSkippable(
produced_preparsed_scope_data_->parent_->AddSkippableFunction(
function_scope_->start_position(), end_position,
function_scope_->num_parameters(), num_inner_functions,
- function_scope_->language_mode(), function_scope_->uses_super_property());
+ function_scope_->language_mode(), function_scope_->NeedsHomeObject());
}
void ProducedPreParsedScopeData::AddSkippableFunction(
@@ -145,66 +201,59 @@ void ProducedPreParsedScopeData::AddSkippableFunction(
int num_inner_functions, LanguageMode language_mode,
bool uses_super_property) {
DCHECK(FLAG_preparser_scope_analysis);
- DCHECK_EQ(scope_data_start_, -1);
DCHECK(previously_produced_preparsed_scope_data_.is_null());
if (bailed_out_) {
return;
}
- size_t current_size = backing_store_.size();
- backing_store_.resize(current_size + SkippableFunctionDataOffsets::kSize);
- backing_store_[current_size + SkippableFunctionDataOffsets::kStartPosition] =
- start_position;
- backing_store_[current_size + SkippableFunctionDataOffsets::kEndPosition] =
- end_position;
- backing_store_[current_size + SkippableFunctionDataOffsets::kNumParameters] =
- num_parameters;
- backing_store_[current_size +
- SkippableFunctionDataOffsets::kNumInnerFunctions] =
- num_inner_functions;
-
- uint32_t language_and_super = LanguageField::encode(language_mode) |
- UsesSuperField::encode(uses_super_property);
-
- backing_store_[current_size +
- SkippableFunctionDataOffsets::kLanguageAndSuper] =
- language_and_super;
+ byte_data_->WriteUint32(start_position);
+ byte_data_->WriteUint32(end_position);
+ byte_data_->WriteUint32(num_parameters);
+ byte_data_->WriteUint32(num_inner_functions);
+
+ uint8_t language_and_super = LanguageField::encode(language_mode) |
+ UsesSuperField::encode(uses_super_property);
+
+ byte_data_->WriteUint8(language_and_super);
}
void ProducedPreParsedScopeData::SaveScopeAllocationData(
DeclarationScope* scope) {
DCHECK(FLAG_preparser_scope_analysis);
DCHECK(previously_produced_preparsed_scope_data_.is_null());
- DCHECK_EQ(scope_data_start_, -1);
- DCHECK_EQ(backing_store_.size() % SkippableFunctionDataOffsets::kSize, 0);
+ // The data contains a uint32 (reserved space for scope_data_start) and
+ // function data items, kSkippableFunctionDataSize each.
+ DCHECK_GE(byte_data_->size(), kUint32Size);
+ DCHECK_LE(byte_data_->size(), std::numeric_limits<uint32_t>::max());
+ DCHECK_EQ(byte_data_->size() % kSkippableFunctionDataSize, kUint32Size);
if (bailed_out_) {
return;
}
- scope_data_start_ = static_cast<int>(backing_store_.size());
+ uint32_t scope_data_start = static_cast<uint32_t>(byte_data_->size());
// If there are no skippable inner functions, we don't need to save anything.
- if (backing_store_.size() == 0) {
+ if (scope_data_start == kUint32Size) {
return;
}
- // For sanity checks.
- backing_store_.push_back(kMagicValue);
- backing_store_.push_back(scope->start_position());
- backing_store_.push_back(scope->end_position());
+ byte_data_->OverwriteFirstUint32(scope_data_start);
// For a data integrity check, write a value between data about skipped inner
// funcs and data about variables.
+ byte_data_->WriteUint32(kMagicValue);
+ byte_data_->WriteUint32(scope->start_position());
+ byte_data_->WriteUint32(scope->end_position());
+
SaveDataForScope(scope);
}
MaybeHandle<PreParsedScopeData> ProducedPreParsedScopeData::Serialize(
- Isolate* isolate) const {
+ Isolate* isolate) {
if (!previously_produced_preparsed_scope_data_.is_null()) {
DCHECK(!bailed_out_);
- DCHECK_EQ(backing_store_.size(), 0);
DCHECK_EQ(data_for_inner_functions_.size(), 0);
return previously_produced_preparsed_scope_data_;
}
@@ -214,27 +263,16 @@ MaybeHandle<PreParsedScopeData> ProducedPreParsedScopeData::Serialize(
DCHECK(!ThisOrParentBailedOut());
- // FIXME(marja): save space by using a byte array and converting
- // function data to bytes.
- size_t length = backing_store_.size();
- if (length == 0) {
+ if (byte_data_->size() <= kUint32Size) {
+ // The data contains only the placeholder.
return MaybeHandle<PreParsedScopeData>();
}
- Handle<PodArray<uint32_t>> data_array =
- PodArray<uint32_t>::New(isolate, static_cast<int>(length + 1), TENURED);
-
- DCHECK_GE(scope_data_start_, 0);
- data_array->set(0, scope_data_start_);
- {
- int i = 1;
- for (const auto& item : backing_store_) {
- data_array->set(i++, item);
- }
- }
-
Handle<PreParsedScopeData> data = isolate->factory()->NewPreParsedScopeData();
+ Handle<PodArray<uint8_t>> scope_data_array = byte_data_->Serialize(isolate);
+ data->set_scope_data(*scope_data_array);
+
int child_data_length = static_cast<int>(data_for_inner_functions_.size());
if (child_data_length == 0) {
data->set_child_data(*(isolate->factory()->empty_fixed_array()));
@@ -256,7 +294,6 @@ MaybeHandle<PreParsedScopeData> ProducedPreParsedScopeData::Serialize(
data->set_child_data(*child_array);
}
- data->set_scope_data(*data_array);
return data;
}
@@ -299,26 +336,20 @@ bool ProducedPreParsedScopeData::ScopeIsSkippableFunctionScope(Scope* scope) {
void ProducedPreParsedScopeData::SaveDataForScope(Scope* scope) {
DCHECK_NE(scope->end_position(), kNoSourcePosition);
- // We're not trying to save data for default constructors because the
- // PreParser doesn't construct them.
- DCHECK_IMPLIES(scope->scope_type() == ScopeType::FUNCTION_SCOPE,
- (scope->AsDeclarationScope()->function_kind() &
- kDefaultConstructor) == 0);
-
if (!ScopeNeedsData(scope)) {
return;
}
#ifdef DEBUG
- backing_store_.push_back(scope->scope_type());
+ byte_data_->WriteUint8(scope->scope_type());
#endif
- uint32_t eval =
+ uint8_t eval =
ScopeCallsSloppyEvalField::encode(
scope->is_declaration_scope() &&
scope->AsDeclarationScope()->calls_sloppy_eval()) |
InnerScopeCallsEvalField::encode(scope->inner_scope_calls_eval());
- backing_store_.push_back(eval);
+ byte_data_->WriteUint8(eval);
if (scope->scope_type() == ScopeType::FUNCTION_SCOPE) {
Variable* function = scope->AsDeclarationScope()->function_var();
@@ -341,9 +372,9 @@ void ProducedPreParsedScopeData::SaveDataForVariable(Variable* var) {
// Store the variable name in debug mode; this way we can check that we
// restore data to the correct variable.
const AstRawString* name = var->raw_name();
- backing_store_.push_back(name->length());
+ byte_data_->WriteUint32(name->length());
for (int i = 0; i < name->length(); ++i) {
- backing_store_.push_back(name->raw_data()[i]);
+ byte_data_->WriteUint8(name->raw_data()[i]);
}
#endif
// FIXME(marja): Only 3 bits needed, not a full byte.
@@ -353,7 +384,7 @@ void ProducedPreParsedScopeData::SaveDataForVariable(Variable* var) {
VariableContextAllocatedField::encode(
var->has_forced_context_allocation());
- backing_store_.push_back(variable_data);
+ byte_data_->WriteUint8(variable_data);
}
void ProducedPreParsedScopeData::SaveDataForInnerScopes(Scope* scope) {
@@ -377,15 +408,51 @@ void ProducedPreParsedScopeData::SaveDataForInnerScopes(Scope* scope) {
}
}
+ConsumedPreParsedScopeData::ByteData::ReadingScope::ReadingScope(
+ ConsumedPreParsedScopeData* parent)
+ : ReadingScope(parent->scope_data_.get(), parent->data_->scope_data()) {}
+
+int32_t ConsumedPreParsedScopeData::ByteData::ReadUint32() {
+ DCHECK_NOT_NULL(data_);
+ DCHECK_GE(RemainingBytes(), kUint32Size);
+#ifdef DEBUG
+ // Check that there indeed is an integer following.
+ DCHECK_EQ(data_->get(index_++), kUint32Size);
+#endif
+ int32_t result = 0;
+ byte* p = reinterpret_cast<byte*>(&result);
+ for (int i = 0; i < 4; ++i) {
+ *p++ = data_->get(index_++);
+ }
+ return result;
+}
+
+uint8_t ConsumedPreParsedScopeData::ByteData::ReadUint8() {
+ DCHECK_NOT_NULL(data_);
+ DCHECK_GE(RemainingBytes(), kUint8Size);
+#ifdef DEBUG
+ // Check that there indeed is a byte following.
+ DCHECK_EQ(data_->get(index_++), kUint8Size);
+#endif
+ return data_->get(index_++);
+}
+
+ConsumedPreParsedScopeData::ConsumedPreParsedScopeData()
+ : scope_data_(new ByteData()), child_index_(0) {}
+
+ConsumedPreParsedScopeData::~ConsumedPreParsedScopeData() {}
+
void ConsumedPreParsedScopeData::SetData(Handle<PreParsedScopeData> data) {
DCHECK(data->IsPreParsedScopeData());
data_ = data;
#ifdef DEBUG
- DisallowHeapAllocation no_gc;
- PodArray<uint32_t>* scope_data = data_->scope_data();
- DCHECK_GT(scope_data->length(), 2);
- DCHECK_EQ(scope_data->get(scope_data->get(0) + 1), kMagicValue);
+ ByteData::ReadingScope reading_scope(this);
+ int scope_data_start = scope_data_->ReadUint32();
+ scope_data_->SetPosition(scope_data_start);
+ DCHECK_EQ(scope_data_->ReadUint32(), kMagicValue);
#endif
+ // The first data item is scope_data_start. Skip over it.
+ scope_data_->SetPosition(kUint32Size);
}
ProducedPreParsedScopeData*
@@ -393,31 +460,22 @@ ConsumedPreParsedScopeData::GetDataForSkippableFunction(
Zone* zone, int start_position, int* end_position, int* num_parameters,
int* num_inner_functions, bool* uses_super_property,
LanguageMode* language_mode) {
- DisallowHeapAllocation no_gc;
- PodArray<uint32_t>* scope_data = data_->scope_data();
-
// The skippable function *must* be the next function in the data. Use the
// start position as a sanity check.
- CHECK_GE(scope_data->length(), index_ + SkippableFunctionDataOffsets::kSize);
- int start_position_from_data =
- scope_data->get(index_ + SkippableFunctionDataOffsets::kStartPosition);
+ ByteData::ReadingScope reading_scope(this);
+ CHECK_GE(scope_data_->RemainingBytes(), kSkippableFunctionDataSize);
+ int start_position_from_data = scope_data_->ReadUint32();
CHECK_EQ(start_position, start_position_from_data);
- *end_position =
- scope_data->get(index_ + SkippableFunctionDataOffsets::kEndPosition);
+ *end_position = scope_data_->ReadUint32();
DCHECK_GT(*end_position, start_position);
- *num_parameters =
- scope_data->get(index_ + SkippableFunctionDataOffsets::kNumParameters);
- *num_inner_functions = scope_data->get(
- index_ + SkippableFunctionDataOffsets::kNumInnerFunctions);
+ *num_parameters = scope_data_->ReadUint32();
+ *num_inner_functions = scope_data_->ReadUint32();
- int language_and_super =
- scope_data->get(index_ + SkippableFunctionDataOffsets::kLanguageAndSuper);
+ uint8_t language_and_super = scope_data_->ReadUint8();
*language_mode = LanguageMode(LanguageField::decode(language_and_super));
*uses_super_property = UsesSuperField::decode(language_and_super);
- index_ += SkippableFunctionDataOffsets::kSize;
-
// Retrieve the corresponding PreParsedScopeData and associate it to the
// skipped function. If the skipped functions contains inner functions, those
// can be skipped when the skipped function is eagerly parsed.
@@ -438,34 +496,31 @@ void ConsumedPreParsedScopeData::RestoreScopeAllocationData(
DCHECK_EQ(scope->scope_type(), ScopeType::FUNCTION_SCOPE);
DCHECK(!data_.is_null());
- DisallowHeapAllocation no_gc;
- PodArray<uint32_t>* scope_data = data_->scope_data();
- int magic_value_from_data = scope_data->get(index_++);
+ ByteData::ReadingScope reading_scope(this);
+
+ int magic_value_from_data = scope_data_->ReadUint32();
// Check that we've consumed all inner function data.
CHECK_EQ(magic_value_from_data, kMagicValue);
- int start_position_from_data = scope_data->get(index_++);
- int end_position_from_data = scope_data->get(index_++);
+ int start_position_from_data = scope_data_->ReadUint32();
+ int end_position_from_data = scope_data_->ReadUint32();
CHECK_EQ(start_position_from_data, scope->start_position());
CHECK_EQ(end_position_from_data, scope->end_position());
- RestoreData(scope, scope_data);
+ RestoreData(scope);
// Check that we consumed all scope data.
- DCHECK_EQ(index_, scope_data->length());
+ DCHECK_EQ(scope_data_->RemainingBytes(), 0);
}
void ConsumedPreParsedScopeData::SkipFunctionDataForTesting() {
- DCHECK_EQ(index_, 1);
- DisallowHeapAllocation no_gc;
- PodArray<uint32_t>* scope_data = data_->scope_data();
- DCHECK_GT(scope_data->length(), 2);
- index_ = scope_data->get(0) + 1;
- DCHECK_EQ(scope_data->get(index_), kMagicValue);
+ ByteData::ReadingScope reading_scope(this);
+ scope_data_->SetPosition(0);
+ uint32_t scope_data_start = scope_data_->ReadUint32();
+ scope_data_->SetPosition(scope_data_start);
}
-void ConsumedPreParsedScopeData::RestoreData(Scope* scope,
- PodArray<uint32_t>* scope_data) {
+void ConsumedPreParsedScopeData::RestoreData(Scope* scope) {
if (scope->is_declaration_scope() &&
scope->AsDeclarationScope()->is_skipped_function()) {
return;
@@ -478,12 +533,18 @@ void ConsumedPreParsedScopeData::RestoreData(Scope* scope,
return;
}
+ if (scope_data_->RemainingBytes() < kUint8Size) {
+ // Temporary debugging code for detecting inconsistent data. Write debug
+ // information on the stack, then crash.
+ data_->GetIsolate()->PushStackTraceAndDie(0xc0defee, nullptr, nullptr,
+ 0xc0defee);
+ }
+
// scope_type is stored only in debug mode.
- CHECK_GE(scope_data->length(), index_ + 1);
- DCHECK_GE(scope_data->length(), index_ + 2);
- DCHECK_EQ(scope_data->get(index_++), scope->scope_type());
+ CHECK_GE(scope_data_->RemainingBytes(), kUint8Size);
+ DCHECK_EQ(scope_data_->ReadUint8(), scope->scope_type());
- uint32_t eval = scope_data->get(index_++);
+ uint32_t eval = scope_data_->ReadUint8();
if (ScopeCallsSloppyEvalField::decode(eval)) {
scope->RecordEvalCall();
}
@@ -494,31 +555,29 @@ void ConsumedPreParsedScopeData::RestoreData(Scope* scope,
if (scope->scope_type() == ScopeType::FUNCTION_SCOPE) {
Variable* function = scope->AsDeclarationScope()->function_var();
if (function != nullptr) {
- RestoreDataForVariable(function, scope_data);
+ RestoreDataForVariable(function);
}
}
for (Variable* var : *scope->locals()) {
if (IsDeclaredVariableMode(var->mode())) {
- RestoreDataForVariable(var, scope_data);
+ RestoreDataForVariable(var);
}
}
- RestoreDataForInnerScopes(scope, scope_data);
+ RestoreDataForInnerScopes(scope);
}
-void ConsumedPreParsedScopeData::RestoreDataForVariable(
- Variable* var, PodArray<uint32_t>* scope_data) {
+void ConsumedPreParsedScopeData::RestoreDataForVariable(Variable* var) {
#ifdef DEBUG
const AstRawString* name = var->raw_name();
- DCHECK_GT(scope_data->length(), index_ + name->length());
- DCHECK_EQ(scope_data->get(index_++), static_cast<uint32_t>(name->length()));
+ DCHECK_EQ(scope_data_->ReadUint32(), static_cast<uint32_t>(name->length()));
for (int i = 0; i < name->length(); ++i) {
- DCHECK_EQ(scope_data->get(index_++), name->raw_data()[i]);
+ DCHECK_EQ(scope_data_->ReadUint8(), name->raw_data()[i]);
}
#endif
- CHECK_GT(scope_data->length(), index_);
- byte variable_data = scope_data->get(index_++);
+ CHECK_GE(scope_data_->RemainingBytes(), kUint8Size);
+ uint8_t variable_data = scope_data_->ReadUint8();
if (VariableIsUsedField::decode(variable_data)) {
var->set_is_used();
}
@@ -530,15 +589,14 @@ void ConsumedPreParsedScopeData::RestoreDataForVariable(
}
}
-void ConsumedPreParsedScopeData::RestoreDataForInnerScopes(
- Scope* scope, PodArray<uint32_t>* scope_data) {
+void ConsumedPreParsedScopeData::RestoreDataForInnerScopes(Scope* scope) {
std::vector<Scope*> scopes;
for (Scope* inner = scope->inner_scope(); inner != nullptr;
inner = inner->sibling()) {
scopes.push_back(inner);
}
for (auto it = scopes.rbegin(); it != scopes.rend(); ++it) {
- RestoreData(*it, scope_data);
+ RestoreData(*it);
}
}
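
Taken together, the writer above and the consumer below implement one byte protocol: every field is stored byte-by-byte, prefixed in debug builds by a one-byte size tag that the reader verifies before decoding. A minimal standalone sketch of that protocol, assuming std::vector in place of V8's ZoneChunkList/PodArray and a runtime flag in place of #ifdef DEBUG (all names illustrative, not V8 API):

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

constexpr bool kDebugTags = true;   // stands in for #ifdef DEBUG
constexpr uint8_t kUint32Size = 4;
constexpr uint8_t kUint8Size = 1;

class ByteWriter {
 public:
  void WriteUint32(uint32_t v) {
    if (kDebugTags) store_.push_back(kUint32Size);  // size tag first
    const uint8_t* p = reinterpret_cast<const uint8_t*>(&v);
    store_.insert(store_.end(), p, p + sizeof(v));  // host byte order
  }
  void WriteUint8(uint8_t v) {
    if (kDebugTags) store_.push_back(kUint8Size);
    store_.push_back(v);
  }
  const std::vector<uint8_t>& data() const { return store_; }

 private:
  std::vector<uint8_t> store_;
};

class ByteReader {
 public:
  explicit ByteReader(const std::vector<uint8_t>& d) : data_(d) {}
  uint32_t ReadUint32() {
    ConsumeTag(kUint32Size);
    uint32_t v;
    std::memcpy(&v, data_.data() + index_, sizeof(v));
    index_ += sizeof(v);
    return v;
  }
  uint8_t ReadUint8() {
    ConsumeTag(kUint8Size);
    return data_[index_++];
  }

 private:
  void ConsumeTag(uint8_t expected) {
    if (!kDebugTags) return;
    uint8_t tag = data_[index_++];  // reader checks what the writer wrote
    assert(tag == expected);
    (void)tag;
  }
  const std::vector<uint8_t>& data_;
  size_t index_ = 0;
};

int main() {
  ByteWriter w;
  w.WriteUint32(42);  // e.g. start_position
  w.WriteUint8(3);    // e.g. packed language/uses-super bits
  ByteReader r(w.data());
  assert(r.ReadUint32() == 42);
  assert(r.ReadUint8() == 3);
}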
diff --git a/deps/v8/src/parsing/preparsed-scope-data.h b/deps/v8/src/parsing/preparsed-scope-data.h
index 52cf5f8843..290bfba2fd 100644
--- a/deps/v8/src/parsing/preparsed-scope-data.h
+++ b/deps/v8/src/parsing/preparsed-scope-data.h
@@ -13,7 +13,7 @@
#include "src/handles.h"
#include "src/objects/shared-function-info.h"
#include "src/parsing/preparse-data.h"
-#include "src/zone/zone-containers.h"
+#include "src/zone/zone-chunk-list.h"
namespace v8 {
namespace internal {
@@ -67,29 +67,31 @@ class PreParsedScopeData;
class ProducedPreParsedScopeData : public ZoneObject {
public:
+ class ByteData : public ZoneObject {
+ public:
+ explicit ByteData(Zone* zone) : backing_store_(zone) {}
+
+ void WriteUint32(uint32_t data);
+ void WriteUint8(uint8_t data);
+
+ // For overwriting previously written data at position 0.
+ void OverwriteFirstUint32(uint32_t data);
+
+ Handle<PodArray<uint8_t>> Serialize(Isolate* isolate);
+
+ size_t size() const { return backing_store_.size(); }
+
+ private:
+ ZoneChunkList<uint8_t> backing_store_;
+ };
+
// Create a ProducedPreParsedScopeData object which will collect data as we
// parse.
- explicit ProducedPreParsedScopeData(Zone* zone,
- ProducedPreParsedScopeData* parent)
- : parent_(parent),
- backing_store_(zone),
- data_for_inner_functions_(zone),
- scope_data_start_(-1),
- bailed_out_(false) {
- if (parent != nullptr) {
- parent->data_for_inner_functions_.push_back(this);
- }
- }
+ ProducedPreParsedScopeData(Zone* zone, ProducedPreParsedScopeData* parent);
// Create a ProducedPreParsedScopeData which is just a proxy for a previous
// produced PreParsedScopeData.
- ProducedPreParsedScopeData(Handle<PreParsedScopeData> data, Zone* zone)
- : parent_(nullptr),
- backing_store_(zone),
- data_for_inner_functions_(zone),
- scope_data_start_(-1),
- bailed_out_(false),
- previously_produced_preparsed_scope_data_(data) {}
+ ProducedPreParsedScopeData(Handle<PreParsedScopeData> data, Zone* zone);
ProducedPreParsedScopeData* parent() const { return parent_; }
@@ -143,7 +145,7 @@ class ProducedPreParsedScopeData : public ZoneObject {
// If there is data (if the Scope contains skippable inner functions), move
// the data into the heap and return a Handle to it; otherwise return a null
// MaybeHandle.
- MaybeHandle<PreParsedScopeData> Serialize(Isolate* isolate) const;
+ MaybeHandle<PreParsedScopeData> Serialize(Isolate* isolate);
static bool ScopeNeedsData(Scope* scope);
static bool ScopeIsSkippableFunctionScope(Scope* scope);
@@ -160,14 +162,8 @@ class ProducedPreParsedScopeData : public ZoneObject {
ProducedPreParsedScopeData* parent_;
- // TODO(marja): Make the backing store more efficient once we know exactly
- // what data is needed.
- ZoneDeque<uint32_t> backing_store_;
- ZoneDeque<ProducedPreParsedScopeData*> data_for_inner_functions_;
- // The backing store contains data about inner functions and then data about
- // this scope's (and its subscopes') variables. scope_data_start_ marks where
- // the latter starts.
- int scope_data_start_;
+ ByteData* byte_data_;
+ ZoneChunkList<ProducedPreParsedScopeData*> data_for_inner_functions_;
// Whether we've given up producing the data for this function.
bool bailed_out_;
@@ -181,10 +177,44 @@ class ProducedPreParsedScopeData : public ZoneObject {
class ConsumedPreParsedScopeData {
public:
- // Real data starts from index 1 (see data format description in the .cc
- // file).
- ConsumedPreParsedScopeData() : index_(1), child_index_(0) {}
- ~ConsumedPreParsedScopeData() {}
+ class ByteData {
+ public:
+ ByteData() : data_(nullptr), index_(0) {}
+
+ // Reading from the ByteData is only allowed when a ReadingScope is on the
+ // stack. This ensures that we have a DisallowHeapAllocation in place
+ // whenever ByteData holds a raw pointer into the heap.
+ class ReadingScope {
+ public:
+ ReadingScope(ByteData* consumed_data, PodArray<uint8_t>* data)
+ : consumed_data_(consumed_data) {
+ consumed_data->data_ = data;
+ }
+ explicit ReadingScope(ConsumedPreParsedScopeData* parent);
+ ~ReadingScope() { consumed_data_->data_ = nullptr; }
+
+ private:
+ ByteData* consumed_data_;
+ DisallowHeapAllocation no_gc;
+ };
+
+ void SetPosition(int position) { index_ = position; }
+
+ int32_t ReadUint32();
+ uint8_t ReadUint8();
+
+ size_t RemainingBytes() const {
+ DCHECK_NOT_NULL(data_);
+ return data_->length() - index_;
+ }
+
+ // private:
+ PodArray<uint8_t>* data_;
+ int index_;
+ };
+
+ ConsumedPreParsedScopeData();
+ ~ConsumedPreParsedScopeData();
void SetData(Handle<PreParsedScopeData> data);
@@ -205,14 +235,14 @@ class ConsumedPreParsedScopeData {
void SkipFunctionDataForTesting();
private:
- void RestoreData(Scope* scope, PodArray<uint32_t>* scope_data);
- void RestoreDataForVariable(Variable* var, PodArray<uint32_t>* scope_data);
- void RestoreDataForInnerScopes(Scope* scope, PodArray<uint32_t>* scope_data);
+ void RestoreData(Scope* scope);
+ void RestoreDataForVariable(Variable* var);
+ void RestoreDataForInnerScopes(Scope* scope);
Handle<PreParsedScopeData> data_;
+ std::unique_ptr<ByteData> scope_data_;
// When consuming the data, these indexes point to the data we're going to
// consume next.
- int index_;
int child_index_;
DISALLOW_COPY_AND_ASSIGN(ConsumedPreParsedScopeData);
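
The ReadingScope class above encodes a reusable rule: a raw pointer into movable, GC-managed storage may only be held while an RAII scope pins that storage, so the pointer can never dangle. A toy version of the pattern with an explicit pin counter standing in for DisallowHeapAllocation (names illustrative, not V8 types):

#include <cassert>

struct Arena {
  int pinned = 0;  // stand-in for DisallowHeapAllocation
  int payload[4] = {1, 2, 3, 4};
};

class Reader {
 public:
  // Reading is only legal while a Scope is alive, mirroring ReadingScope.
  class Scope {
   public:
    Scope(Reader* r, Arena* a) : r_(r) {
      ++a->pinned;    // "no GC / no object moves" while this scope lives
      r_->data_ = a;
    }
    ~Scope() {
      --r_->data_->pinned;
      r_->data_ = nullptr;  // the raw pointer never outlives the pin
    }

   private:
    Reader* r_;
  };

  int ReadAt(int i) {
    assert(data_ != nullptr && data_->pinned > 0);
    return data_->payload[i];
  }

 private:
  Arena* data_ = nullptr;
};

int main() {
  Arena arena;
  Reader reader;
  {
    Reader::Scope scope(&reader, &arena);
    assert(reader.ReadAt(2) == 3);
  }  // pointer cleared, pin released
}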
diff --git a/deps/v8/src/parsing/preparser.cc b/deps/v8/src/parsing/preparser.cc
index 259e0c31a9..c31fd4af8e 100644
--- a/deps/v8/src/parsing/preparser.cc
+++ b/deps/v8/src/parsing/preparser.cc
@@ -9,7 +9,6 @@
#include "src/conversions-inl.h"
#include "src/conversions.h"
#include "src/globals.h"
-#include "src/list.h"
#include "src/parsing/duplicate-finder.h"
#include "src/parsing/parser-base.h"
#include "src/parsing/preparse-data-format.h"
@@ -207,7 +206,8 @@ PreParser::PreParseResult PreParser::PreParseFunction(
}
}
- if (!IsArrowFunction(kind) && track_unresolved_variables_) {
+ if (!IsArrowFunction(kind) && track_unresolved_variables_ &&
+ result == kLazyParsingComplete) {
CreateFunctionNameAssignment(function_name, function_type, function_scope);
// Declare arguments after parsing the function since lexical 'arguments'
@@ -375,8 +375,8 @@ PreParserStatement PreParser::BuildParameterInitializationBlock(
DCHECK(!parameters.is_simple);
DCHECK(scope()->is_function_scope());
if (FLAG_preparser_scope_analysis &&
- scope()->AsDeclarationScope()->calls_sloppy_eval()) {
- DCHECK_NOT_NULL(produced_preparsed_scope_data_);
+ scope()->AsDeclarationScope()->calls_sloppy_eval() &&
+ produced_preparsed_scope_data_ != nullptr) {
// We cannot replicate the Scope structure constructed by the Parser,
// because we've lost information whether each individual parameter was
// simple or not. Give up trying to produce data to skip inner functions.
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index c85923c0e7..275c5e9e0b 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -464,7 +464,11 @@ class PreParserStatement {
// and PreParser.
PreParserStatement* operator->() { return this; }
+ // TODO(adamk): These should return something even lighter-weight than
+ // PreParserStatementList.
PreParserStatementList statements() { return PreParserStatementList(); }
+ PreParserStatementList cases() { return PreParserStatementList(); }
+
void set_scope(Scope* scope) {}
void Initialize(const PreParserExpression& cond, PreParserStatement body,
const SourceRange& body_range = {}) {}
@@ -713,12 +717,13 @@ class PreParserFactory {
}
PreParserStatement NewSwitchStatement(ZoneList<const AstRawString*>* labels,
+ const PreParserExpression& tag,
int pos) {
return PreParserStatement::Default();
}
PreParserStatement NewCaseClause(const PreParserExpression& label,
- PreParserStatementList statements, int pos) {
+ PreParserStatementList statements) {
return PreParserStatement::Default();
}
@@ -996,9 +1001,8 @@ class PreParser : public ParserBase<PreParser> {
RewriteReturn(const PreParserExpression& return_value, int pos) {
return return_value;
}
- V8_INLINE PreParserStatement RewriteSwitchStatement(
- const PreParserExpression& tag, PreParserStatement switch_statement,
- PreParserStatementList cases, Scope* scope) {
+ V8_INLINE PreParserStatement
+ RewriteSwitchStatement(PreParserStatement switch_statement, Scope* scope) {
return PreParserStatement::Default();
}
@@ -1008,11 +1012,7 @@ class PreParser : public ParserBase<PreParser> {
if (catch_name == nullptr) {
catch_name = ast_value_factory()->dot_catch_string();
}
- // Unlike in the parser, we need to declare the catch variable as LET
- // variable, so that it won't get hoisted out of the scope. (Parser uses
- // DeclareLocal instead of DeclareVariable to prevent hoisting.) Another
- // solution would've been to add DeclareLocalName just for this purpose.
- catch_info->scope->DeclareVariableName(catch_name, LET);
+ catch_info->scope->DeclareCatchVariableName(catch_name);
if (catch_info->pattern.variables_ != nullptr) {
for (auto variable : *catch_info->pattern.variables_) {
@@ -1124,7 +1124,22 @@ class PreParser : public ParserBase<PreParser> {
ClassInfo* class_info, int pos, int end_pos, bool* ok) {
bool has_default_constructor = !class_info->has_seen_constructor;
// Account for the default constructor.
- if (has_default_constructor) GetNextFunctionLiteralId();
+ if (has_default_constructor) {
+ // Creating and disposing of a FunctionState makes tracking of
+ // next_function_is_likely_called match what Parser does. TODO(marja):
+ // Make the lazy function + next_function_is_likely_called + default ctor
+ // logic less surprising. Default ctors shouldn't affect the laziness of
+ // functions.
+ bool has_extends = !class_info->extends.IsNull();
+ FunctionKind kind = has_extends ? FunctionKind::kDefaultDerivedConstructor
+ : FunctionKind::kDefaultBaseConstructor;
+ DeclarationScope* function_scope = NewFunctionScope(kind);
+ SetLanguageMode(function_scope, STRICT);
+ function_scope->set_start_position(pos);
+ function_scope->set_end_position(pos);
+ FunctionState function_state(&function_state_, &scope_, function_scope);
+ GetNextFunctionLiteralId();
+ }
return PreParserExpression::Default();
}
@@ -1290,7 +1305,7 @@ class PreParser : public ParserBase<PreParser> {
ForInfo* for_info, PreParserStatement* body_block,
PreParserExpression* each_variable, bool* ok) {
if (track_unresolved_variables_) {
- DCHECK(for_info->parsing_result.declarations.length() == 1);
+ DCHECK_EQ(1, for_info->parsing_result.declarations.size());
bool is_for_var_of =
for_info->mode == ForEachStatement::ITERATE &&
for_info->parsing_result.descriptor.mode == VariableMode::VAR;
@@ -1512,10 +1527,6 @@ class PreParser : public ParserBase<PreParser> {
return PreParserStatementList();
}
- PreParserStatementList NewCaseClauseList(int size) {
- return PreParserStatementList();
- }
-
V8_INLINE PreParserExpression
NewV8Intrinsic(const PreParserIdentifier& name,
const PreParserExpressionList& arguments, int pos, bool* ok) {
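
The FunctionState created for the default constructor above follows the standard parser idiom: a stack-allocated object links itself into a parser-wide chain in its constructor and unlinks in its destructor, so nested parses restore state automatically even on early returns. A stripped-down sketch of the idiom (illustrative, not the V8 types):

#include <cassert>

struct FunctionState {
  FunctionState(FunctionState** stack, int id)
      : stack_(stack), outer_(*stack), id(id) {
    *stack_ = this;  // push onto the parser-wide chain
  }
  ~FunctionState() { *stack_ = outer_; }  // pop on scope exit

  FunctionState** stack_;
  FunctionState* outer_;
  int id;
};

int main() {
  FunctionState* top = nullptr;
  FunctionState outer(&top, 1);
  {
    FunctionState inner(&top, 2);  // e.g. the synthesized default ctor
    assert(top->id == 2);
  }
  assert(top->id == 1);  // outer state restored automatically
}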
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index c8bea599b4..5229aed780 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -443,7 +443,9 @@ size_t Utf8ExternalStreamingStream::FillBuffer(size_t position) {
SearchPosition(position);
bool out_of_data = current_.chunk_no != chunks_.size() &&
- chunks_[current_.chunk_no].length == 0;
+ chunks_[current_.chunk_no].length == 0 &&
+ current_.pos.incomplete_char == 0;
+
if (out_of_data) return 0;
// Fill the buffer, until we have at least one char (or are out of data).
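
The fix above tightens one predicate: a zero-length chunk at the cursor only means out-of-data when no partially decoded UTF-8 character is pending, because buffered lead bytes can still yield a character (or a replacement character at true EOF). The condition in isolation, under assumed names rather than the real stream API:

#include <cassert>
#include <cstddef>

struct Cursor {
  size_t chunk_no;      // index of the current chunk
  size_t chunk_length;  // length of that chunk; 0 marks the EOF sentinel
  size_t num_chunks;
  int incomplete_char;  // bytes buffered from a partial code point
};

bool OutOfData(const Cursor& c) {
  return c.chunk_no != c.num_chunks &&  // not already past the last chunk
         c.chunk_length == 0 &&         // sitting on the empty EOF chunk
         c.incomplete_char == 0;        // and nothing is half-decoded
}

int main() {
  // Empty final chunk, but two bytes of a 3-byte sequence still pending:
  Cursor pending{0, 0, 1, 2};
  assert(!OutOfData(pending));  // must flush the pending bytes first
  Cursor done{0, 0, 1, 0};
  assert(OutOfData(done));
}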
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index 3d707288be..f0ce0012ae 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -13,8 +13,8 @@
#include "src/ast/ast-value-factory.h"
#include "src/char-predicates-inl.h"
#include "src/conversions-inl.h"
-#include "src/list-inl.h"
#include "src/parsing/duplicate-finder.h" // For Scanner::FindSymbol
+#include "src/unicode-cache-inl.h"
namespace v8 {
namespace internal {
@@ -173,14 +173,30 @@ bool Scanner::BookmarkScope::HasBeenApplied() {
return bookmark_ == kBookmarkWasApplied;
}
+// LineTerminator: 'JS_Line_Terminator' in point.properties
+// ES#sec-line-terminators lists exactly 4 code points:
+// LF (U+000A), CR (U+000D), LS (U+2028), PS (U+2029)
+bool Scanner::IsLineTerminator(uc32 c) {
+ if (c == 0x000A || c == 0x000D) {
+ return true;
+ }
+ if (c == 0x2028 || c == 0x2029) {
+ ++use_counts_[v8::Isolate::UseCounterFeature::
+ kLineOrParagraphSeparatorAsLineTerminator];
+ return true;
+ }
+ return false;
+}
+
// ----------------------------------------------------------------------------
// Scanner
-Scanner::Scanner(UnicodeCache* unicode_cache)
+Scanner::Scanner(UnicodeCache* unicode_cache, int* use_counts)
: unicode_cache_(unicode_cache),
octal_pos_(Location::invalid()),
octal_message_(MessageTemplate::kNone),
- found_html_comment_(false) {}
+ found_html_comment_(false),
+ use_counts_(use_counts) {}
void Scanner::Initialize(Utf16CharacterStream* source, bool is_module) {
DCHECK_NOT_NULL(source);
@@ -439,7 +455,7 @@ Token::Value Scanner::SkipWhiteSpace() {
// Advance as long as character is a WhiteSpace or LineTerminator.
// Remember if the latter is the case.
- if (unicode_cache_->IsLineTerminator(c0_)) {
+ if (IsLineTerminator(c0_)) {
has_line_terminator_before_next_ = true;
} else if (!unicode_cache_->IsWhiteSpace(c0_)) {
break;
@@ -496,7 +512,7 @@ Token::Value Scanner::SkipSingleLineComment() {
// separately by the lexical grammar and becomes part of the
// stream of input elements for the syntactic grammar (see
// ECMA-262, section 7.4).
- while (c0_ != kEndOfInput && !unicode_cache_->IsLineTerminator(c0_)) {
+ while (c0_ != kEndOfInput && !IsLineTerminator(c0_)) {
Advance();
}
@@ -506,7 +522,7 @@ Token::Value Scanner::SkipSingleLineComment() {
Token::Value Scanner::SkipSourceURLComment() {
TryToParseSourceURLComment();
- while (c0_ != kEndOfInput && !unicode_cache_->IsLineTerminator(c0_)) {
+ while (c0_ != kEndOfInput && !IsLineTerminator(c0_)) {
Advance();
}
@@ -542,7 +558,7 @@ void Scanner::TryToParseSourceURLComment() {
while (c0_ != kEndOfInput && unicode_cache_->IsWhiteSpace(c0_)) {
Advance();
}
- while (c0_ != kEndOfInput && !unicode_cache_->IsLineTerminator(c0_)) {
+ while (c0_ != kEndOfInput && !IsLineTerminator(c0_)) {
// Disallowed characters.
if (c0_ == '"' || c0_ == '\'') {
value->Reset();
@@ -555,7 +571,7 @@ void Scanner::TryToParseSourceURLComment() {
Advance();
}
// Allow whitespace at the end.
- while (c0_ != kEndOfInput && !unicode_cache_->IsLineTerminator(c0_)) {
+ while (c0_ != kEndOfInput && !IsLineTerminator(c0_)) {
if (!unicode_cache_->IsWhiteSpace(c0_)) {
value->Reset();
break;
@@ -572,7 +588,7 @@ Token::Value Scanner::SkipMultiLineComment() {
while (c0_ != kEndOfInput) {
uc32 ch = c0_;
Advance();
- if (c0_ != kEndOfInput && unicode_cache_->IsLineTerminator(ch)) {
+ if (c0_ != kEndOfInput && IsLineTerminator(ch)) {
// Following ECMA-262, section 7.4, a comment containing
// a newline will make the comment count as a line-terminator.
has_multiline_comment_before_next_ = true;
@@ -968,8 +984,7 @@ bool Scanner::ScanEscape() {
Advance<capture_raw>();
// Skip escaped newlines.
- if (!in_template_literal && c0_ != kEndOfInput &&
- unicode_cache_->IsLineTerminator(c)) {
+ if (!in_template_literal && c0_ != kEndOfInput && IsLineTerminator(c)) {
// Allow escaped CR+LF newlines in multiline string literals.
if (IsCarriageReturn(c) && IsLineFeed(c0_)) Advance<capture_raw>();
return true;
@@ -1062,8 +1077,7 @@ Token::Value Scanner::ScanString() {
AddLiteralChar(c);
}
- while (c0_ != quote && c0_ != kEndOfInput &&
- !unicode_cache_->IsLineTerminator(c0_)) {
+ while (c0_ != quote && c0_ != kEndOfInput && !IsLineTerminator(c0_)) {
uc32 c = c0_;
Advance();
if (c == '\\') {
@@ -1119,7 +1133,7 @@ Token::Value Scanner::ScanTemplateSpan() {
ReduceRawLiteralLength(2);
break;
} else if (c == '\\') {
- if (c0_ != kEndOfInput && unicode_cache_->IsLineTerminator(c0_)) {
+ if (c0_ != kEndOfInput && IsLineTerminator(c0_)) {
// The TV of LineContinuation :: \ LineTerminatorSequence is the empty
// code unit sequence.
uc32 lastChar = c0_;
@@ -1423,6 +1437,8 @@ uc32 Scanner::ScanUnicodeEscape() {
KEYWORD("interface", Token::FUTURE_STRICT_RESERVED_WORD) \
KEYWORD_GROUP('l') \
KEYWORD("let", Token::LET) \
+ KEYWORD_GROUP('m') \
+ KEYWORD("meta", Token::META) \
KEYWORD_GROUP('n') \
KEYWORD("name", Token::NAME) \
KEYWORD("new", Token::NEW) \
@@ -1660,12 +1676,14 @@ bool Scanner::ScanRegExpPattern() {
}
while (c0_ != '/' || in_character_class) {
- if (c0_ == kEndOfInput || unicode_cache_->IsLineTerminator(c0_))
+ if (c0_ == kEndOfInput || IsLineTerminator(c0_)) {
return false;
+ }
if (c0_ == '\\') { // Escape sequence.
AddLiteralCharAdvance();
- if (c0_ == kEndOfInput || unicode_cache_->IsLineTerminator(c0_))
+ if (c0_ == kEndOfInput || IsLineTerminator(c0_)) {
return false;
+ }
AddLiteralCharAdvance();
// If the escape allows more characters, i.e., \x??, \u????, or \c?,
// only "safe" characters are allowed (letters, digits, underscore),
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index 38fd6b1b74..200054d893 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -207,7 +207,7 @@ class Scanner {
static const int kNoOctalLocation = -1;
static const uc32 kEndOfInput = Utf16CharacterStream::kEndOfInput;
- explicit Scanner(UnicodeCache* scanner_contants);
+ explicit Scanner(UnicodeCache* unicode_cache, int* use_counts);
void Initialize(Utf16CharacterStream* source, bool is_module);
@@ -735,6 +735,8 @@ class Scanner {
bool is_module_;
+ bool IsLineTerminator(uc32 c);
+
Token::Value ScanTemplateSpan();
// Return the current source position.
@@ -799,6 +801,8 @@ class Scanner {
// Whether this scanner encountered an HTML comment.
bool found_html_comment_;
+ int* use_counts_;
+
MessageTemplate::Template scanner_error_;
Location scanner_error_location_;
};
diff --git a/deps/v8/src/parsing/token.h b/deps/v8/src/parsing/token.h
index e109cb3ad1..4cc4db288a 100644
--- a/deps/v8/src/parsing/token.h
+++ b/deps/v8/src/parsing/token.h
@@ -187,6 +187,7 @@ namespace internal {
C(OF, "of", 0) \
C(TARGET, "target", 0) \
C(SENT, "sent", 0) \
+ C(META, "meta", 0) \
C(AS, "as", 0) \
C(FROM, "from", 0) \
C(NAME, "name", 0) \
diff --git a/deps/v8/src/perf-jit.cc b/deps/v8/src/perf-jit.cc
index 46597a9685..1699064bda 100644
--- a/deps/v8/src/perf-jit.cc
+++ b/deps/v8/src/perf-jit.cc
@@ -37,6 +37,7 @@
#if V8_OS_LINUX
#include <fcntl.h>
#include <sys/mman.h>
+#undef MAP_TYPE // jumbo: conflicts with v8::internal::InstanceType::MAP_TYPE
#include <unistd.h>
#endif // V8_OS_LINUX
@@ -196,8 +197,7 @@ void PerfJitLogger::LogRecordedBuffer(AbstractCode* abstract_code,
SharedFunctionInfo* shared,
const char* name, int length) {
if (FLAG_perf_basic_prof_only_functions &&
- (abstract_code->kind() != AbstractCode::FUNCTION &&
- abstract_code->kind() != AbstractCode::INTERPRETED_FUNCTION &&
+ (abstract_code->kind() != AbstractCode::INTERPRETED_FUNCTION &&
abstract_code->kind() != AbstractCode::OPTIMIZED_FUNCTION)) {
return;
}
@@ -398,7 +398,8 @@ void PerfJitLogger::LogWriteHeader() {
header.reserved_ = 0xdeadbeef;
header.process_id_ = base::OS::GetCurrentProcessId();
header.time_stamp_ =
- static_cast<uint64_t>(base::OS::TimeCurrentMillis() * 1000.0);
+ static_cast<uint64_t>(V8::GetCurrentPlatform()->CurrentClockTimeMillis() *
+ base::Time::kMicrosecondsPerMillisecond);
header.flags_ = 0;
LogWriteBytes(reinterpret_cast<const char*>(&header), sizeof(header));
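
The header timestamp above changes time source but not units: a millisecond clock reading is scaled to the microseconds the perf header stores. The arithmetic in isolation (the constant equals base::Time::kMicrosecondsPerMillisecond by definition; the function name is illustrative):

#include <cassert>
#include <cstdint>

constexpr int64_t kMicrosecondsPerMillisecond = 1000;

uint64_t HeaderTimestamp(double clock_time_millis) {
  // Matches the expression in LogWriteHeader: millis * us-per-ms, truncated.
  return static_cast<uint64_t>(clock_time_millis *
                               kMicrosecondsPerMillisecond);
}

int main() {
  assert(HeaderTimestamp(1234.5) == 1234500u);  // 1234.5 ms -> 1234500 us
}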
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index 08336cb310..e458364027 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -254,47 +254,7 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
}
}
-Operand::Operand(intptr_t immediate, RelocInfo::Mode rmode) {
- rm_ = no_reg;
- value_.immediate = immediate;
- rmode_ = rmode;
-}
-
-Operand::Operand(const ExternalReference& f) {
- rm_ = no_reg;
- value_.immediate = reinterpret_cast<intptr_t>(f.address());
- rmode_ = RelocInfo::EXTERNAL_REFERENCE;
-}
-
-Operand::Operand(Smi* value) {
- rm_ = no_reg;
- value_.immediate = reinterpret_cast<intptr_t>(value);
- rmode_ = kRelocInfo_NONEPTR;
-}
-
-Operand::Operand(Register rm) {
- rm_ = rm;
- rmode_ = kRelocInfo_NONEPTR; // PPC -why doesn't ARM do this?
-}
-
-void Assembler::CheckBuffer() {
- if (buffer_space() <= kGap) {
- GrowBuffer();
- }
-}
-
-void Assembler::TrackBranch() {
- DCHECK(!trampoline_emitted_);
- int count = tracked_branch_count_++;
- if (count == 0) {
- // We leave space (kMaxBlockTrampolineSectionSize)
- // for BlockTrampolinePoolScope buffer.
- next_trampoline_check_ =
- pc_offset() + kMaxCondBranchReach - kMaxBlockTrampolineSectionSize;
- } else {
- next_trampoline_check_ -= kTrampolineSlotsSize;
- }
-}
+Operand::Operand(Register rm) : rm_(rm), rmode_(kRelocInfo_NONEPTR) {}
void Assembler::UntrackBranch() {
DCHECK(!trampoline_emitted_);
@@ -308,22 +268,6 @@ void Assembler::UntrackBranch() {
}
}
-void Assembler::CheckTrampolinePoolQuick() {
- if (pc_offset() >= next_trampoline_check_) {
- CheckTrampolinePool();
- }
-}
-
-void Assembler::emit(Instr x) {
- CheckBuffer();
- *reinterpret_cast<Instr*>(pc_) = x;
- pc_ += kInstrSize;
- CheckTrampolinePoolQuick();
-}
-
-bool Operand::is_reg() const { return rm_.is_valid(); }
-
-
// Fetch the 32bit value from the FIXED_SEQUENCE lis/ori
Address Assembler::target_address_at(Address pc, Address constant_pool) {
if (FLAG_enable_embedded_constant_pool && constant_pool) {
@@ -374,7 +318,7 @@ bool Assembler::IsConstantPoolLoadStart(Address pc,
ConstantPoolEntry::Access* access) {
Instr instr = instr_at(pc);
uint32_t opcode = instr & kOpcodeMask;
- if (!GetRA(instr).is(kConstantPoolRegister)) return false;
+ if (GetRA(instr) != kConstantPoolRegister) return false;
bool overflowed = (opcode == ADDIS);
#ifdef DEBUG
if (overflowed) {
@@ -396,10 +340,10 @@ bool Assembler::IsConstantPoolLoadEnd(Address pc,
uint32_t opcode = instr & kOpcodeMask;
bool overflowed = false;
if (!(opcode == kLoadIntptrOpcode || opcode == LFD)) return false;
- if (!GetRA(instr).is(kConstantPoolRegister)) {
+ if (GetRA(instr) != kConstantPoolRegister) {
instr = instr_at(pc - kInstrSize);
opcode = instr & kOpcodeMask;
- if ((opcode != ADDIS) || !GetRA(instr).is(kConstantPoolRegister)) {
+ if ((opcode != ADDIS) || GetRA(instr) != kConstantPoolRegister) {
return false;
}
overflowed = true;
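
The constant-pool probes above share one decoding style: mask the opcode bits out of a fixed-width instruction word, then pull a register field out with shift-and-mask, as GetRA does. A sketch with made-up field positions (not real PPC encodings):

#include <cassert>
#include <cstdint>

using Instr = uint32_t;
constexpr Instr kOpcodeMask = 0xFC000000;  // top 6 bits, as on PPC
constexpr int kRAShift = 16;               // illustrative field position
constexpr Instr kRAMask = 0x1Fu << kRAShift;

int GetRA(Instr instr) { return (instr & kRAMask) >> kRAShift; }

// The shape of IsConstantPoolLoadStart: right opcode, right base register.
bool IsLoadFromRegister(Instr instr, Instr load_opcode, int reg_code) {
  return (instr & kOpcodeMask) == load_opcode && GetRA(instr) == reg_code;
}

int main() {
  constexpr Instr kLoad = 0x80000000u;      // pretend "load" opcode
  Instr instr = kLoad | (28u << kRAShift);  // RA = r28
  assert(IsLoadFromRegister(instr, kLoad, 28));
  assert(!IsLoadFromRegister(instr, kLoad, 0));
}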
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index 98f5451cdb..32758092c4 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -208,18 +208,11 @@ Operand Operand::EmbeddedCode(CodeStub* stub) {
return result;
}
-MemOperand::MemOperand(Register rn, int32_t offset) {
- ra_ = rn;
- rb_ = no_reg;
- offset_ = offset;
-}
-
+MemOperand::MemOperand(Register rn, int32_t offset)
+ : ra_(rn), offset_(offset), rb_(no_reg) {}
-MemOperand::MemOperand(Register ra, Register rb) {
- ra_ = ra;
- rb_ = rb;
- offset_ = 0;
-}
+MemOperand::MemOperand(Register ra, Register rb)
+ : ra_(ra), offset_(0), rb_(rb) {}
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
for (auto& request : heap_object_requests_) {
@@ -308,12 +301,12 @@ Condition Assembler::GetCondition(Instr instr) {
bool Assembler::IsLis(Instr instr) {
- return ((instr & kOpcodeMask) == ADDIS) && GetRA(instr).is(r0);
+ return ((instr & kOpcodeMask) == ADDIS) && GetRA(instr) == r0;
}
bool Assembler::IsLi(Instr instr) {
- return ((instr & kOpcodeMask) == ADDI) && GetRA(instr).is(r0);
+ return ((instr & kOpcodeMask) == ADDI) && GetRA(instr) == r0;
}
@@ -327,16 +320,12 @@ bool Assembler::IsBranch(Instr instr) { return ((instr & kOpcodeMask) == BCX); }
Register Assembler::GetRA(Instr instr) {
- Register reg;
- reg.reg_code = Instruction::RAValue(instr);
- return reg;
+ return Register::from_code(Instruction::RAValue(instr));
}
Register Assembler::GetRB(Instr instr) {
- Register reg;
- reg.reg_code = Instruction::RBValue(instr);
- return reg;
+ return Register::from_code(Instruction::RBValue(instr));
}
@@ -914,13 +903,13 @@ void Assembler::divwu(Register dst, Register src1, Register src2, OEBit o,
void Assembler::addi(Register dst, Register src, const Operand& imm) {
- DCHECK(!src.is(r0)); // use li instead to show intent
+ DCHECK(src != r0); // use li instead to show intent
d_form(ADDI, dst, src, imm.immediate(), true);
}
void Assembler::addis(Register dst, Register src, const Operand& imm) {
- DCHECK(!src.is(r0)); // use lis instead to show intent
+ DCHECK(src != r0); // use lis instead to show intent
d_form(ADDIS, dst, src, imm.immediate(), true);
}
@@ -1031,31 +1020,31 @@ void Assembler::mr(Register dst, Register src) {
void Assembler::lbz(Register dst, const MemOperand& src) {
- DCHECK(!src.ra_.is(r0));
+ DCHECK(src.ra_ != r0);
d_form(LBZ, dst, src.ra(), src.offset(), true);
}
void Assembler::lhz(Register dst, const MemOperand& src) {
- DCHECK(!src.ra_.is(r0));
+ DCHECK(src.ra_ != r0);
d_form(LHZ, dst, src.ra(), src.offset(), true);
}
void Assembler::lwz(Register dst, const MemOperand& src) {
- DCHECK(!src.ra_.is(r0));
+ DCHECK(src.ra_ != r0);
d_form(LWZ, dst, src.ra(), src.offset(), true);
}
void Assembler::lwzu(Register dst, const MemOperand& src) {
- DCHECK(!src.ra_.is(r0));
+ DCHECK(src.ra_ != r0);
d_form(LWZU, dst, src.ra(), src.offset(), true);
}
void Assembler::lha(Register dst, const MemOperand& src) {
- DCHECK(!src.ra_.is(r0));
+ DCHECK(src.ra_ != r0);
d_form(LHA, dst, src.ra(), src.offset(), true);
}
@@ -1063,7 +1052,7 @@ void Assembler::lha(Register dst, const MemOperand& src) {
void Assembler::lwa(Register dst, const MemOperand& src) {
#if V8_TARGET_ARCH_PPC64
int offset = src.offset();
- DCHECK(!src.ra_.is(r0));
+ DCHECK(src.ra_ != r0);
CHECK(!(offset & 3) && is_int16(offset));
offset = kImm16Mask & offset;
emit(LD | dst.code() * B21 | src.ra().code() * B16 | offset | 2);
@@ -1073,25 +1062,25 @@ void Assembler::lwa(Register dst, const MemOperand& src) {
}
void Assembler::stb(Register dst, const MemOperand& src) {
- DCHECK(!src.ra_.is(r0));
+ DCHECK(src.ra_ != r0);
d_form(STB, dst, src.ra(), src.offset(), true);
}
void Assembler::sth(Register dst, const MemOperand& src) {
- DCHECK(!src.ra_.is(r0));
+ DCHECK(src.ra_ != r0);
d_form(STH, dst, src.ra(), src.offset(), true);
}
void Assembler::stw(Register dst, const MemOperand& src) {
- DCHECK(!src.ra_.is(r0));
+ DCHECK(src.ra_ != r0);
d_form(STW, dst, src.ra(), src.offset(), true);
}
void Assembler::stwu(Register dst, const MemOperand& src) {
- DCHECK(!src.ra_.is(r0));
+ DCHECK(src.ra_ != r0);
d_form(STWU, dst, src.ra(), src.offset(), true);
}
@@ -1105,7 +1094,7 @@ void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) {
// 64bit specific instructions
void Assembler::ld(Register rd, const MemOperand& src) {
int offset = src.offset();
- DCHECK(!src.ra_.is(r0));
+ DCHECK(src.ra_ != r0);
CHECK(!(offset & 3) && is_int16(offset));
offset = kImm16Mask & offset;
emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset);
@@ -1114,7 +1103,7 @@ void Assembler::ld(Register rd, const MemOperand& src) {
void Assembler::ldu(Register rd, const MemOperand& src) {
int offset = src.offset();
- DCHECK(!src.ra_.is(r0));
+ DCHECK(src.ra_ != r0);
CHECK(!(offset & 3) && is_int16(offset));
offset = kImm16Mask & offset;
emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset | 1);
@@ -1123,7 +1112,7 @@ void Assembler::ldu(Register rd, const MemOperand& src) {
void Assembler::std(Register rs, const MemOperand& src) {
int offset = src.offset();
- DCHECK(!src.ra_.is(r0));
+ DCHECK(src.ra_ != r0);
CHECK(!(offset & 3) && is_int16(offset));
offset = kImm16Mask & offset;
emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset);
@@ -1132,7 +1121,7 @@ void Assembler::std(Register rs, const MemOperand& src) {
void Assembler::stdu(Register rs, const MemOperand& src) {
int offset = src.offset();
- DCHECK(!src.ra_.is(r0));
+ DCHECK(src.ra_ != r0);
CHECK(!(offset & 3) && is_int16(offset));
offset = kImm16Mask & offset;
emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset | 1);
@@ -1272,9 +1261,9 @@ bool Assembler::use_constant_pool_for_mov(Register dst, const Operand& src,
}
intptr_t value = src.immediate();
#if V8_TARGET_ARCH_PPC64
- bool allowOverflow = !((canOptimize && is_int32(value)) || dst.is(r0));
+ bool allowOverflow = !((canOptimize && is_int32(value)) || dst == r0);
#else
- bool allowOverflow = !(canOptimize || dst.is(r0));
+ bool allowOverflow = !(canOptimize || dst == r0);
#endif
if (canOptimize && is_int16(value)) {
// Prefer a single-instruction load-immediate.
@@ -1664,7 +1653,7 @@ void Assembler::isync() { emit(EXT1 | ISYNC); }
void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
- DCHECK(!ra.is(r0));
+ DCHECK(ra != r0);
CHECK(is_int16(offset));
int imm16 = offset & kImm16Mask;
// could be x_form instruction with some casting magic
@@ -1675,7 +1664,7 @@ void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) {
void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
- DCHECK(!ra.is(r0));
+ DCHECK(ra != r0);
CHECK(is_int16(offset));
int imm16 = offset & kImm16Mask;
// could be x_form instruction with some casting magic
@@ -1687,7 +1676,7 @@ void Assembler::lfs(const DoubleRegister frt, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
CHECK(is_int16(offset));
- DCHECK(!ra.is(r0));
+ DCHECK(ra != r0);
int imm16 = offset & kImm16Mask;
// could be x_form instruction with some casting magic
emit(LFS | frt.code() * B21 | ra.code() * B16 | imm16);
@@ -1698,7 +1687,7 @@ void Assembler::lfsu(const DoubleRegister frt, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
CHECK(is_int16(offset));
- DCHECK(!ra.is(r0));
+ DCHECK(ra != r0);
int imm16 = offset & kImm16Mask;
// could be x_form instruction with some casting magic
emit(LFSU | frt.code() * B21 | ra.code() * B16 | imm16);
@@ -1709,7 +1698,7 @@ void Assembler::stfd(const DoubleRegister frs, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
CHECK(is_int16(offset));
- DCHECK(!ra.is(r0));
+ DCHECK(ra != r0);
int imm16 = offset & kImm16Mask;
// could be x_form instruction with some casting magic
emit(STFD | frs.code() * B21 | ra.code() * B16 | imm16);
@@ -1720,7 +1709,7 @@ void Assembler::stfdu(const DoubleRegister frs, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
CHECK(is_int16(offset));
- DCHECK(!ra.is(r0));
+ DCHECK(ra != r0);
int imm16 = offset & kImm16Mask;
// could be x_form instruction with some casting magic
emit(STFDU | frs.code() * B21 | ra.code() * B16 | imm16);
@@ -1731,7 +1720,7 @@ void Assembler::stfs(const DoubleRegister frs, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
CHECK(is_int16(offset));
- DCHECK(!ra.is(r0));
+ DCHECK(ra != r0);
int imm16 = offset & kImm16Mask;
// could be x_form instruction with some casting magic
emit(STFS | frs.code() * B21 | ra.code() * B16 | imm16);
@@ -1742,7 +1731,7 @@ void Assembler::stfsu(const DoubleRegister frs, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
CHECK(is_int16(offset));
- DCHECK(!ra.is(r0));
+ DCHECK(ra != r0);
int imm16 = offset & kImm16Mask;
// could be x_form instruction with some casting magic
emit(STFSU | frs.code() * B21 | ra.code() * B16 | imm16);
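
Most of the churn above is mechanical: Register::is() becomes operator==, which is sound once registers are trivially copyable value types identified solely by their code. A minimal shape of the RegisterBase pattern, reconstructed from its usage in this patch rather than from the actual V8 base class:

#include <cassert>

template <typename SubType, int kAfterLast>
class RegisterBase {
 public:
  static constexpr SubType no_reg() { return SubType(-1); }
  template <int code>
  static constexpr SubType from_code() {
    static_assert(code >= 0 && code < kAfterLast, "code out of range");
    return SubType(code);
  }
  bool is_valid() const { return code_ >= 0; }
  int code() const { return code_; }
  // Identity is the register code, so equality replaces the old .is().
  bool operator==(const SubType& other) const { return code_ == other.code_; }
  bool operator!=(const SubType& other) const { return code_ != other.code_; }

 protected:
  explicit constexpr RegisterBase(int code) : code_(code) {}

 private:
  int code_;
};

class Register : public RegisterBase<Register, 32> {
  friend class RegisterBase<Register, 32>;
  explicit constexpr Register(int code) : RegisterBase(code) {}
};

int main() {
  constexpr Register r0 = Register::from_code<0>();
  constexpr Register r3 = Register::from_code<3>();
  assert(r0 != r3);  // replaces !r0.is(r3)
  assert(r3 == Register::from_code<3>());
  assert(!Register::no_reg().is_valid());
}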
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index abd306c33f..d1411c142d 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -104,12 +104,17 @@ namespace internal {
V(r24) V(r25) V(r26) V(r27) V(r28) V(r30)
#endif
-#define DOUBLE_REGISTERS(V) \
+#define LOW_DOUBLE_REGISTERS(V) \
V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
- V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15) \
+ V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15)
+
+#define NON_LOW_DOUBLE_REGISTERS(V) \
V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
+#define DOUBLE_REGISTERS(V) \
+ LOW_DOUBLE_REGISTERS(V) NON_LOW_DOUBLE_REGISTERS(V)
+
#define FLOAT_REGISTERS DOUBLE_REGISTERS
#define SIMD128_REGISTERS DOUBLE_REGISTERS
@@ -118,6 +123,10 @@ namespace internal {
V(d8) V(d9) V(d10) V(d11) V(d12) V(d15) \
V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
+
+#define C_REGISTERS(V) \
+ V(cr0) V(cr1) V(cr2) V(cr3) V(cr4) V(cr5) V(cr6) V(cr7) \
+ V(cr8) V(cr9) V(cr10) V(cr11) V(cr12) V(cr15)
// clang-format on
// Register list in load/store instructions
@@ -250,69 +259,21 @@ const int kStackFrameLRSlot = 1;
const int kStackFrameExtraParamSlot = 2;
#endif
-// CPU Registers.
-//
-// 1) We would prefer to use an enum, but enum values are assignment-
-// compatible with int, which has caused code-generation bugs.
-//
-// 2) We would prefer to use a class instead of a struct but we don't like
-// the register initialization to depend on the particular initialization
-// order (which appears to be different on OS X, Linux, and Windows for the
-// installed versions of C++ we tried). Using a struct permits C-style
-// "initialization". Also, the Register objects cannot be const as this
-// forces initialization stubs in MSVC, making us dependent on initialization
-// order.
-//
-// 3) By not using an enum, we are possibly preventing the compiler from
-// doing certain constant folds, which may significantly reduce the
-// code generated for some assembly instructions (because they boil down
-// to a few constants). If this is a problem, we could change the code
-// such that we use an enum in optimized mode, and the struct in debug
-// mode. This way we get the compile-time error checking in debug mode
-// and best performance in optimized code.
-
-struct Register {
- enum Code {
-#define REGISTER_CODE(R) kCode_##R,
- GENERAL_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kAfterLast,
- kCode_no_reg = -1
- };
-
- static constexpr int kNumRegisters = Code::kAfterLast;
-
-#define REGISTER_COUNT(R) 1 +
- static constexpr int kNumAllocatable =
- ALLOCATABLE_GENERAL_REGISTERS(REGISTER_COUNT) 0;
-#undef REGISTER_COUNT
-
-#define REGISTER_BIT(R) 1 << kCode_##R |
- static constexpr RegList kAllocatable =
- ALLOCATABLE_GENERAL_REGISTERS(REGISTER_BIT) 0;
-#undef REGISTER_BIT
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
- static Register from_code(int code) {
- DCHECK(code >= 0);
- DCHECK(code < kNumRegisters);
- Register r = {code};
- return r;
- }
- bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
- bool is(Register reg) const { return reg_code == reg.reg_code; }
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
- int bit() const {
- DCHECK(is_valid());
- return 1 << reg_code;
- }
- void set_code(int code) {
- reg_code = code;
- DCHECK(is_valid());
- }
+enum RegisterCode {
+#define REGISTER_CODE(R) kRegCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kRegAfterLast
+};
+class Register : public RegisterBase<Register, kRegAfterLast> {
+ public:
#if V8_TARGET_LITTLE_ENDIAN
static constexpr int kMantissaOffset = 0;
static constexpr int kExponentOffset = 4;
@@ -321,14 +282,20 @@ struct Register {
static constexpr int kExponentOffset = 0;
#endif
- // Unfortunately we can't make this private in a struct.
- int reg_code;
+ private:
+ friend class RegisterBase;
+ explicit constexpr Register(int code) : RegisterBase(code) {}
};
-#define DEFINE_REGISTER(R) constexpr Register R = {Register::kCode_##R};
+static_assert(IS_TRIVIALLY_COPYABLE(Register) &&
+ sizeof(Register) == sizeof(int),
+ "Register can efficiently be passed by value");
+
+#define DEFINE_REGISTER(R) \
+ constexpr Register R = Register::from_code<kRegCode_##R>();
GENERAL_REGISTERS(DEFINE_REGISTER)
#undef DEFINE_REGISTER
-constexpr Register no_reg = {Register::kCode_no_reg};
+constexpr Register no_reg = Register::no_reg();
// Aliases
constexpr Register kLithiumScratch = r11; // lithium scratch.
@@ -339,48 +306,42 @@ constexpr Register cp = r30; // JavaScript context pointer.
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
-// Double word FP register.
-struct DoubleRegister {
- enum Code {
-#define REGISTER_CODE(R) kCode_##R,
- DOUBLE_REGISTERS(REGISTER_CODE)
+enum DoubleRegisterCode {
+#define REGISTER_CODE(R) kDoubleCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
- kAfterLast,
- kCode_no_reg = -1
- };
-
- static constexpr int kNumRegisters = Code::kAfterLast;
- static constexpr int kMaxNumRegisters = kNumRegisters;
-
- bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
- bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
- int bit() const {
- DCHECK(is_valid());
- return 1 << reg_code;
- }
+ kDoubleAfterLast
+};
- static DoubleRegister from_code(int code) {
- DoubleRegister r = {code};
- return r;
- }
+// Double word FP register.
+class DoubleRegister : public RegisterBase<DoubleRegister, kDoubleAfterLast> {
+ public:
+ // A few double registers are reserved: one as a scratch register and one
+ // to hold 0.0.
+ // d14: 0.0
+ // d13: scratch register.
+ static constexpr int kSizeInBytes = 8;
+ inline static int NumRegisters();
- int reg_code;
+ private:
+ friend class RegisterBase;
+ explicit constexpr DoubleRegister(int code) : RegisterBase(code) {}
};
+static_assert(IS_TRIVIALLY_COPYABLE(DoubleRegister) &&
+ sizeof(DoubleRegister) == sizeof(int),
+ "DoubleRegister can efficiently be passed by value");
+
typedef DoubleRegister FloatRegister;
// TODO(ppc) Define SIMD registers.
typedef DoubleRegister Simd128Register;
#define DEFINE_REGISTER(R) \
- constexpr DoubleRegister R = {DoubleRegister::kCode_##R};
+ constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
DOUBLE_REGISTERS(DEFINE_REGISTER)
#undef DEFINE_REGISTER
-constexpr Register no_dreg = {Register::kCode_no_reg};
+constexpr Register no_dreg = Register::no_reg();
constexpr DoubleRegister kFirstCalleeSavedDoubleReg = d14;
constexpr DoubleRegister kLastCalleeSavedDoubleReg = d31;
@@ -389,33 +350,24 @@ constexpr DoubleRegister kScratchDoubleReg = d13;
Register ToRegister(int num);
-// Coprocessor register
-struct CRegister {
- bool is_valid() const { return 0 <= reg_code && reg_code < 8; }
- bool is(CRegister creg) const { return reg_code == creg.reg_code; }
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
- int bit() const {
- DCHECK(is_valid());
- return 1 << reg_code;
- }
-
- // Unfortunately we can't make this private in a struct.
- int reg_code;
+enum CRegisterCode {
+#define REGISTER_CODE(R) kCCode_##R,
+ C_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kCAfterLast
};
-constexpr CRegister no_creg = {-1};
+// Coprocessor register
+class CRegister : public RegisterBase<CRegister, kCAfterLast> {
+ friend class RegisterBase;
+ explicit constexpr CRegister(int code) : RegisterBase(code) {}
+};
-constexpr CRegister cr0 = {0};
-constexpr CRegister cr1 = {1};
-constexpr CRegister cr2 = {2};
-constexpr CRegister cr3 = {3};
-constexpr CRegister cr4 = {4};
-constexpr CRegister cr5 = {5};
-constexpr CRegister cr6 = {6};
-constexpr CRegister cr7 = {7};
+constexpr CRegister no_creg = CRegister::no_reg();
+#define DECLARE_C_REGISTER(R) \
+ constexpr CRegister R = CRegister::from_code<kCCode_##R>();
+C_REGISTERS(DECLARE_C_REGISTER)
+#undef DECLARE_C_REGISTER
// -----------------------------------------------------------------------------
// Machine instruction Operands
@@ -431,12 +383,19 @@ class Operand BASE_EMBEDDED {
public:
// immediate
INLINE(explicit Operand(intptr_t immediate,
- RelocInfo::Mode rmode = kRelocInfo_NONEPTR));
+ RelocInfo::Mode rmode = kRelocInfo_NONEPTR)
+ : rmode_(rmode)) {
+ value_.immediate = immediate;
+ }
INLINE(static Operand Zero()) { return Operand(static_cast<intptr_t>(0)); }
- INLINE(explicit Operand(const ExternalReference& f));
+ INLINE(explicit Operand(const ExternalReference& f)
+ : rmode_(RelocInfo::EXTERNAL_REFERENCE)) {
+ value_.immediate = reinterpret_cast<intptr_t>(f.address());
+ }
explicit Operand(Handle<HeapObject> handle);
- INLINE(explicit Operand(Smi* value));
-
+ INLINE(explicit Operand(Smi* value) : rmode_(kRelocInfo_NONEPTR)) {
+ value_.immediate = reinterpret_cast<intptr_t>(value);
+ }
// rm
INLINE(explicit Operand(Register rm));
@@ -444,7 +403,7 @@ class Operand BASE_EMBEDDED {
static Operand EmbeddedCode(CodeStub* stub);
// Return true if this is a register operand.
- INLINE(bool is_reg() const);
+ INLINE(bool is_reg() const) { return rm_.is_valid(); }
bool must_output_reloc_info(const Assembler* assembler) const;
@@ -471,7 +430,7 @@ class Operand BASE_EMBEDDED {
}
private:
- Register rm_;
+ Register rm_ = no_reg;
union Value {
Value() {}
HeapObjectRequest heap_object_request; // if is_heap_object_request_
@@ -496,18 +455,18 @@ class MemOperand BASE_EMBEDDED {
explicit MemOperand(Register ra, Register rb);
int32_t offset() const {
- DCHECK(rb_.is(no_reg));
+ DCHECK(rb_ == no_reg);
return offset_;
}
// PowerPC - base register
Register ra() const {
- DCHECK(!ra_.is(no_reg));
+ DCHECK(ra_ != no_reg);
return ra_;
}
Register rb() const {
- DCHECK(offset_ == 0 && !rb_.is(no_reg));
+ DCHECK(offset_ == 0 && rb_ != no_reg);
return rb_;
}
@@ -710,16 +669,16 @@ class Assembler : public AssemblerBase {
x_form(instr_name, src, dst, r0, rc); \
}
-#define DECLARE_PPC_X_INSTRUCTIONS_D_FORM(name, instr_name, instr_value) \
- template <class R> \
- inline void name(const R rt, const Register ra, \
- const Register rb, const RCBit rc = LeaveRC) { \
- DCHECK(!ra.is(r0)); \
- x_form(instr_name, rt.code(), ra.code(), rb.code(), rc); \
- } \
- template <class R> \
- inline void name(const R dst, const MemOperand& src) { \
- name(dst, src.ra(), src.rb()); \
+#define DECLARE_PPC_X_INSTRUCTIONS_D_FORM(name, instr_name, instr_value) \
+ template <class R> \
+ inline void name(const R rt, const Register ra, const Register rb, \
+ const RCBit rc = LeaveRC) { \
+ DCHECK(ra != r0); \
+ x_form(instr_name, rt.code(), ra.code(), rb.code(), rc); \
+ } \
+ template <class R> \
+ inline void name(const R dst, const MemOperand& src) { \
+ name(dst, src.ra(), src.rb()); \
}
#define DECLARE_PPC_X_INSTRUCTIONS_E_FORM(name, instr_name, instr_value) \
@@ -744,7 +703,7 @@ class Assembler : public AssemblerBase {
}
#define DECLARE_PPC_X_INSTRUCTIONS_EH_L_FORM(name, instr_name, instr_value) \
inline void name(const Register dst, const MemOperand& src) { \
- DCHECK(!src.ra_.is(r0)); \
+ DCHECK(src.ra_ != r0); \
x_form(instr_name, src.ra(), dst, src.rb(), SetEH); \
}
@@ -786,7 +745,7 @@ class Assembler : public AssemblerBase {
#if V8_TARGET_ARCH_PPC64
Register ra = src.ra();
Register rb = src.rb();
- DCHECK(!ra.is(r0));
+ DCHECK(ra != r0);
x_form(LWAX, rt, ra, rb, LeaveRC);
#else
lwzx(rt, src);
@@ -797,7 +756,7 @@ class Assembler : public AssemblerBase {
emit(EXT2 | EXTSW | ra.code() * B21 | rs.code() * B16 | rc);
#else
// nop on 32-bit
- DCHECK(rs.is(ra) && rc == LeaveRC);
+ DCHECK(rs == ra && rc == LeaveRC);
#endif
}
@@ -1577,17 +1536,43 @@ class Assembler : public AssemblerBase {
int last_bound_pos_;
// Optimizable cmpi information.
int optimizable_cmpi_pos_;
- CRegister cmpi_cr_;
+ CRegister cmpi_cr_ = CRegister::no_reg();
ConstantPoolBuilder constant_pool_builder_;
- // Code emission
- inline void CheckBuffer();
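+ // Grow the buffer when the remaining space has shrunk to the reserved gap.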
+ void CheckBuffer() {
+ if (buffer_space() <= kGap) {
+ GrowBuffer();
+ }
+ }
+
void GrowBuffer(int needed = 0);
- inline void emit(Instr x);
- inline void TrackBranch();
+ // Code emission
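+ // Write a single instruction at pc_, advance pc_, and run the cheap
+ // trampoline pool check.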
+ void emit(Instr x) {
+ CheckBuffer();
+ *reinterpret_cast<Instr*>(pc_) = x;
+ pc_ += kInstrSize;
+ CheckTrampolinePoolQuick();
+ }
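+ // Register a newly emitted tracked branch: the first branch schedules the
+ // trampoline check; each additional branch pulls the check point forward
+ // by one trampoline slot.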
+ void TrackBranch() {
+ DCHECK(!trampoline_emitted_);
+ int count = tracked_branch_count_++;
+ if (count == 0) {
+ // We leave space (kMaxBlockTrampolineSectionSize)
+ // for BlockTrampolinePoolScope buffer.
+ next_trampoline_check_ =
+ pc_offset() + kMaxCondBranchReach - kMaxBlockTrampolineSectionSize;
+ } else {
+ next_trampoline_check_ -= kTrampolineSlotsSize;
+ }
+ }
+
inline void UntrackBranch();
- inline void CheckTrampolinePoolQuick();
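+ // Run the full trampoline pool check once pc_offset() reaches the
+ // scheduled check point.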
+ void CheckTrampolinePoolQuick() {
+ if (pc_offset() >= next_trampoline_check_) {
+ CheckTrampolinePool();
+ }
+ }
// Instruction generation
void a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index 8523ade0cb..7dcc543b87 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -5,6 +5,7 @@
#if V8_TARGET_ARCH_PPC
#include "src/api-arguments.h"
+#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
@@ -55,7 +56,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ push(scratch);
// Account for saved regs if input is sp.
- if (input_reg.is(sp)) double_offset += kPointerSize;
+ if (input_reg == sp) double_offset += kPointerSize;
if (!skip_fastpath()) {
// Load double input.
@@ -79,7 +80,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ Push(scratch_high, scratch_low);
// Account for saved regs if input is sp.
- if (input_reg.is(sp)) double_offset += 2 * kPointerSize;
+ if (input_reg == sp) double_offset += 2 * kPointerSize;
__ lwz(scratch_high,
MemOperand(input_reg, double_offset + Register::kExponentOffset));
@@ -198,7 +199,7 @@ void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
void MathPowStub::Generate(MacroAssembler* masm) {
const Register exponent = MathPowTaggedDescriptor::exponent();
- DCHECK(exponent.is(r5));
+ DCHECK(exponent == r5);
const DoubleRegister double_base = d1;
const DoubleRegister double_exponent = d2;
const DoubleRegister double_result = d3;
@@ -380,7 +381,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Pass buffer for return value on stack if necessary
bool needs_return_buffer =
- result_size() > 2 ||
(result_size() == 2 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS);
if (needs_return_buffer) {
arg_stack_space += result_size();
@@ -436,7 +436,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// If return value is on the stack, pop it to registers.
if (needs_return_buffer) {
- if (result_size() > 2) __ LoadP(r5, MemOperand(r3, 2 * kPointerSize));
__ LoadP(r4, MemOperand(r3, kPointerSize));
__ LoadP(r3, MemOperand(r3));
}
@@ -466,14 +465,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// r3:r4: result
// sp: stack pointer
// fp: frame pointer
- Register argc;
- if (argv_in_register()) {
- // We don't want to pop arguments so set argc to no_reg.
- argc = no_reg;
- } else {
- // r14: still holds argc (callee-saved).
- argc = r14;
- }
+ Register argc = argv_in_register()
+ // We don't want to pop arguments so set argc to no_reg.
+ ? no_reg
+ // r14: still holds argc (callee-saved).
+ : r14;
__ LeaveExitFrame(save_doubles(), argc, true);
__ blr();
@@ -862,7 +858,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(
__ add(tmp, properties, ip);
__ LoadP(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
- DCHECK(!tmp.is(entity_name));
+ DCHECK(tmp != entity_name);
__ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
__ cmp(entity_name, tmp);
__ beq(done);
@@ -1010,6 +1006,49 @@ void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
stub2.GetCode();
}
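+// Infer the stub's current mode from the branch/nop pair at the start of its
+// instruction stream (see Patch below).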
+RecordWriteStub::Mode RecordWriteStub::GetMode(Code* stub) {
+ Instr first_instruction =
+ Assembler::instr_at(stub->instruction_start() + Assembler::kInstrSize);
+ Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
+ (Assembler::kInstrSize * 2));
+
+ // Consider adding a DCHECK here to catch unexpected instruction sequences.
+ if (BF == (first_instruction & kBOfieldMask)) {
+ return INCREMENTAL;
+ }
+
+ if (BF == (second_instruction & kBOfieldMask)) {
+ return INCREMENTAL_COMPACTION;
+ }
+
+ return STORE_BUFFER_ONLY;
+}
+
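+// Rewrite the branch/nop pair to switch the stub's mode, then flush the
+// instruction cache so the patched code takes effect.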
+void RecordWriteStub::Patch(Code* stub, Mode mode) {
+ MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
+ stub->instruction_size(), CodeObjectRequired::kNo);
+ switch (mode) {
+ case STORE_BUFFER_ONLY:
+ DCHECK(GetMode(stub) == INCREMENTAL ||
+ GetMode(stub) == INCREMENTAL_COMPACTION);
+
+ PatchBranchIntoNop(&masm, Assembler::kInstrSize);
+ PatchBranchIntoNop(&masm, Assembler::kInstrSize * 2);
+ break;
+ case INCREMENTAL:
+ DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
+ PatchNopIntoBranch(&masm, Assembler::kInstrSize);
+ break;
+ case INCREMENTAL_COMPACTION:
+ DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
+ PatchNopIntoBranch(&masm, Assembler::kInstrSize * 2);
+ break;
+ }
+ DCHECK(GetMode(stub) == mode);
+ Assembler::FlushICache(stub->GetIsolate(),
+ stub->instruction_start() + Assembler::kInstrSize,
+ 2 * Assembler::kInstrSize);
+}
// Takes the input in 3 registers: address_, value_ and object_. A pointer to
// the value has just been written into the object; now this stub makes sure
@@ -1031,8 +1070,7 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
__ blt(&skip_to_incremental_compacting, cr2);
if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
}
__ Ret();
@@ -1067,8 +1105,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
InformIncrementalMarker(masm);
regs_.Restore(masm);
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
__ bind(&dont_need_remembered_set);
}
@@ -1085,10 +1122,9 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
- Register address =
- r3.is(regs_.address()) ? regs_.scratch0() : regs_.address();
- DCHECK(!address.is(regs_.object()));
- DCHECK(!address.is(r3));
+ Register address = r3 == regs_.address() ? regs_.scratch0() : regs_.address();
+ DCHECK(address != regs_.object());
+ DCHECK(address != r3);
__ mr(address, regs_.address());
__ mr(r3, regs_.object());
__ mr(r4, address);
@@ -1101,6 +1137,9 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}
+void RecordWriteStub::Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+}
void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
@@ -1116,8 +1155,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
} else {
__ Ret();
}
@@ -1156,8 +1194,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
} else {
__ Ret();
}
@@ -1575,7 +1612,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
ExternalReference::handle_scope_level_address(isolate), next_address);
// Additional parameter is the address of the actual callback.
- DCHECK(function_address.is(r4) || function_address.is(r5));
+ DCHECK(function_address == r4 || function_address == r5);
Register scratch = r6;
__ mov(scratch, Operand(ExternalReference::is_profiling_address(isolate)));
@@ -1801,7 +1838,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
- DCHECK(!api_function_address.is(r3) && !scratch.is(r3));
+ DCHECK(api_function_address != r3 && scratch != r3);
// r3 = FunctionCallbackInfo&
// Arguments is after the return address.
__ addi(r3, sp, Operand(kFunctionCallbackInfoOffset));
diff --git a/deps/v8/src/ppc/code-stubs-ppc.h b/deps/v8/src/ppc/code-stubs-ppc.h
index 967e97303a..70da70831c 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.h
+++ b/deps/v8/src/ppc/code-stubs-ppc.h
@@ -94,49 +94,9 @@ class RecordWriteStub : public PlatformCodeStub {
masm->instr_at_put(pos, (masm->instr_at(pos) & ~kBOfieldMask) | BF);
}
- static Mode GetMode(Code* stub) {
- Instr first_instruction =
- Assembler::instr_at(stub->instruction_start() + Assembler::kInstrSize);
- Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
- (Assembler::kInstrSize * 2));
-
- // Consider adding DCHECK here to catch unexpected instruction sequence
- if (BF == (first_instruction & kBOfieldMask)) {
- return INCREMENTAL;
- }
-
- if (BF == (second_instruction & kBOfieldMask)) {
- return INCREMENTAL_COMPACTION;
- }
-
- return STORE_BUFFER_ONLY;
- }
+ static Mode GetMode(Code* stub);
- static void Patch(Code* stub, Mode mode) {
- MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
- stub->instruction_size(), CodeObjectRequired::kNo);
- switch (mode) {
- case STORE_BUFFER_ONLY:
- DCHECK(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
-
- PatchBranchIntoNop(&masm, Assembler::kInstrSize);
- PatchBranchIntoNop(&masm, Assembler::kInstrSize * 2);
- break;
- case INCREMENTAL:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, Assembler::kInstrSize);
- break;
- case INCREMENTAL_COMPACTION:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, Assembler::kInstrSize * 2);
- break;
- }
- DCHECK(GetMode(stub) == mode);
- Assembler::FlushICache(stub->GetIsolate(),
- stub->instruction_start() + Assembler::kInstrSize,
- 2 * Assembler::kInstrSize);
- }
+ static void Patch(Code* stub, Mode mode);
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
@@ -147,7 +107,10 @@ class RecordWriteStub : public PlatformCodeStub {
class RegisterAllocation {
public:
RegisterAllocation(Register object, Register address, Register scratch0)
- : object_(object), address_(address), scratch0_(scratch0) {
+ : object_(object),
+ address_(address),
+ scratch0_(scratch0),
+ scratch1_(no_reg) {
DCHECK(!AreAliased(scratch0, object, address, no_reg));
scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
}
@@ -213,9 +176,7 @@ class RecordWriteStub : public PlatformCodeStub {
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
- void Activate(Code* code) override {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
- }
+ void Activate(Code* code) override;
Register object() const {
return Register::from_code(ObjectBits::decode(minor_key_));
diff --git a/deps/v8/src/ppc/deoptimizer-ppc.cc b/deps/v8/src/ppc/deoptimizer-ppc.cc
index 75206ee4a4..7bc47d4644 100644
--- a/deps/v8/src/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/ppc/deoptimizer-ppc.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/assembler-inl.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/register-configuration.h"
@@ -203,9 +204,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ lfd(dreg, MemOperand(r4, src_offset));
}
- // Push state, pc, and continuation from the last output frame.
- __ LoadP(r9, MemOperand(r5, FrameDescription::state_offset()));
- __ push(r9);
+ // Push pc, and continuation from the last output frame.
__ LoadP(r9, MemOperand(r5, FrameDescription::pc_offset()));
__ push(r9);
__ LoadP(r9, MemOperand(r5, FrameDescription::continuation_offset()));
@@ -248,6 +247,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ push(ip);
}
+bool Deoptimizer::PadTopOfStackRegister() { return false; }
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
diff --git a/deps/v8/src/ppc/frame-constants-ppc.cc b/deps/v8/src/ppc/frame-constants-ppc.cc
index bc6a649f9b..6497ad440d 100644
--- a/deps/v8/src/ppc/frame-constants-ppc.cc
+++ b/deps/v8/src/ppc/frame-constants-ppc.cc
@@ -23,6 +23,10 @@ Register JavaScriptFrame::constant_pool_pointer_register() {
return kConstantPoolRegister;
}
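+// On PPC each interpreter register occupies exactly one stack slot.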
+int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
+ return register_count;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index 90ed9670fd..7f0b8a5961 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -22,9 +22,14 @@ void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
void RecordWriteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // TODO(albertnetymk): Use default for now; should call
- // RestrictAllocatableRegisters like src/x64/interface-descriptors-x64.cc
- DefaultInitializePlatformSpecific(data, kParameterCount);
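+ // Restrict the descriptor to a fixed subset of registers, mirroring the
+ // x64 implementation, so the RecordWrite builtin only uses r3-r7.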
+ const Register default_stub_registers[] = {r3, r4, r5, r6, r7};
+
+ data->RestrictAllocatableRegisters(default_stub_registers,
+ arraysize(default_stub_registers));
+
+ CHECK_LE(static_cast<size_t>(kParameterCount),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
}
const Register FastNewFunctionContextDescriptor::FunctionRegister() {
@@ -83,27 +88,6 @@ void TypeofDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void FastCloneRegExpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r6, r5, r4, r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r6, r5, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r6, r5, r4, r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4};
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index 2bd14f09fd..efb6c2bab9 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -10,6 +10,7 @@
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
+#include "src/callable.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
@@ -26,46 +27,93 @@ MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: TurboAssembler(isolate, buffer, size, create_code_object) {}
-void TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
- Register exclusion1, Register exclusion2,
- Register exclusion3) {
+TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
+ CodeObjectRequired create_code_object)
+ : Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
+ if (create_code_object == CodeObjectRequired::kYes) {
+ code_object_ =
+ Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
+ }
+}
+
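+// Compute, without emitting any code, the number of bytes PushCallerSaved
+// would push for the same arguments.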
+int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1,
+ Register exclusion2,
+ Register exclusion3) const {
+ int bytes = 0;
RegList exclusions = 0;
- if (!exclusion1.is(no_reg)) {
+ if (exclusion1 != no_reg) {
exclusions |= exclusion1.bit();
- if (!exclusion2.is(no_reg)) {
+ if (exclusion2 != no_reg) {
exclusions |= exclusion2.bit();
- if (!exclusion3.is(no_reg)) {
+ if (exclusion3 != no_reg) {
exclusions |= exclusion3.bit();
}
}
}
- MultiPush(kJSCallerSaved & ~exclusions);
+ RegList list = kJSCallerSaved & ~exclusions;
+ bytes += NumRegs(list) * kPointerSize;
if (fp_mode == kSaveFPRegs) {
- MultiPushDoubles(kCallerSavedDoubles);
+ bytes += kNumCallerSavedDoubles * kDoubleSize;
}
+
+ return bytes;
}
-void TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
+ int bytes = 0;
+ RegList exclusions = 0;
+ if (exclusion1 != no_reg) {
+ exclusions |= exclusion1.bit();
+ if (exclusion2 != no_reg) {
+ exclusions |= exclusion2.bit();
+ if (exclusion3 != no_reg) {
+ exclusions |= exclusion3.bit();
+ }
+ }
+ }
+
+ RegList list = kJSCallerSaved & ~exclusions;
+ MultiPush(list);
+ bytes += NumRegs(list) * kPointerSize;
+
+ if (fp_mode == kSaveFPRegs) {
+ MultiPushDoubles(kCallerSavedDoubles);
+ bytes += kNumCallerSavedDoubles * kDoubleSize;
+ }
+
+ return bytes;
+}
+
+int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+ Register exclusion2, Register exclusion3) {
+ int bytes = 0;
if (fp_mode == kSaveFPRegs) {
MultiPopDoubles(kCallerSavedDoubles);
+ bytes += kNumCallerSavedDoubles * kDoubleSize;
}
RegList exclusions = 0;
- if (!exclusion1.is(no_reg)) {
+ if (exclusion1 != no_reg) {
exclusions |= exclusion1.bit();
- if (!exclusion2.is(no_reg)) {
+ if (exclusion2 != no_reg) {
exclusions |= exclusion2.bit();
- if (!exclusion3.is(no_reg)) {
+ if (exclusion3 != no_reg) {
exclusions |= exclusion3.bit();
}
}
}
- MultiPop(kJSCallerSaved & ~exclusions);
+ RegList list = kJSCallerSaved & ~exclusions;
+ MultiPop(list);
+ bytes += NumRegs(list) * kPointerSize;
+
+ return bytes;
}
+
void TurboAssembler::Jump(Register target) {
mtctr(target);
bctr();
@@ -98,10 +146,10 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
}
void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond) {
+ Condition cond, CRegister cr) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
// 'code' is always generated ppc code, never THUMB code
- Jump(reinterpret_cast<intptr_t>(code.address()), rmode, cond);
+ Jump(reinterpret_cast<intptr_t>(code.address()), rmode, cond, cr);
}
int TurboAssembler::CallSize(Register target) { return 2 * kInstrSize; }
@@ -119,7 +167,7 @@ void TurboAssembler::Call(Register target) {
}
void MacroAssembler::CallJSEntry(Register target) {
- DCHECK(target.is(ip));
+ DCHECK(target == ip);
Call(target);
}
@@ -212,13 +260,13 @@ void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
void TurboAssembler::Move(Register dst, Register src, Condition cond) {
DCHECK(cond == al);
- if (!dst.is(src)) {
+ if (dst != src) {
mr(dst, src);
}
}
void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) {
- if (!dst.is(src)) {
+ if (dst != src) {
fmr(dst, src);
}
}
@@ -287,12 +335,12 @@ void MacroAssembler::InNewSpace(Register object, Register scratch,
CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cond, branch);
}
-
-void MacroAssembler::RecordWriteField(
- Register object, int offset, Register value, Register dst,
- LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action, SmiCheck smi_check,
- PointersToHereCheck pointers_to_here_check_for_value) {
+void MacroAssembler::RecordWriteField(Register object, int offset,
+ Register value, Register dst,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -316,7 +364,7 @@ void MacroAssembler::RecordWriteField(
}
RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
- OMIT_SMI_CHECK, pointers_to_here_check_for_value);
+ OMIT_SMI_CHECK);
bind(&done);
@@ -328,85 +376,78 @@ void MacroAssembler::RecordWriteField(
}
}
-
-// Will clobber 4 registers: object, map, dst, ip. The
-// register 'object' contains a heap object pointer.
-void MacroAssembler::RecordWriteForMap(Register object, Register map,
- Register dst,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode fp_mode) {
- if (emit_debug_code()) {
- LoadP(dst, FieldMemOperand(map, HeapObject::kMapOffset));
- Cmpi(dst, Operand(isolate()->factory()->meta_map()), r0);
- Check(eq, kWrongAddressOrValuePassedToRecordWrite);
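+// Spill all registers named in |registers| with a single MultiPush; paired
+// with RestoreRegisters below.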
+void TurboAssembler::SaveRegisters(RegList registers) {
+ DCHECK(NumRegs(registers) > 0);
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
}
- if (!FLAG_incremental_marking) {
- return;
- }
+ MultiPush(regs);
+}
- if (emit_debug_code()) {
- LoadP(ip, FieldMemOperand(object, HeapObject::kMapOffset));
- cmp(ip, map);
- Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+void TurboAssembler::RestoreRegisters(RegList registers) {
+ DCHECK(NumRegs(registers) > 0);
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
}
+ MultiPop(regs);
+}
- Label done;
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
+ // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
+ // i.e. we always emit the remembered set and save FP registers in
+ // RecordWriteStub. If a large performance regression is observed, we should
+ // use these values to avoid unnecessary work.
- // A single check of the map's pages interesting flag suffices, since it is
- // only set during incremental collection, and then it's also guaranteed that
- // the from object's page's interesting flag is also set. This optimization
- // relies on the fact that maps can never be in new space.
- CheckPageFlag(map,
- map, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
+ RegList registers = callable.descriptor().allocatable_registers();
- addi(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
- if (emit_debug_code()) {
- Label ok;
- andi(r0, dst, Operand(kPointerSize - 1));
- beq(&ok, cr0);
- stop("Unaligned cell in write barrier");
- bind(&ok);
- }
+ SaveRegisters(registers);
- // Record the actual write.
- if (lr_status == kLRHasNotBeenSaved) {
- mflr(r0);
- push(r0);
- }
- RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
- fp_mode);
- CallStub(&stub);
- if (lr_status == kLRHasNotBeenSaved) {
- pop(r0);
- mtlr(r0);
- }
+ Register object_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kObject));
+ Register slot_parameter(
+ callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
+ Register isolate_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kIsolate));
+ Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kRememberedSet));
+ Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kFPMode));
- bind(&done);
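+ // Move |object| and |address| into the descriptor's parameter registers via
+ // the stack, which is safe even when the source and target registers alias.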
+ push(object);
+ push(address);
- // Count number of write barriers in generated code.
- isolate()->counters()->write_barriers_static()->Increment();
- IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);
+ pop(slot_parameter);
+ pop(object_parameter);
- // Clobber clobbered registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 12)));
- mov(map, Operand(bit_cast<intptr_t>(kZapValue + 16)));
- }
-}
+ mov(isolate_parameter,
+ Operand(ExternalReference::isolate_address(isolate())));
+ Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
+ Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
+ Call(callable.code(), RelocInfo::CODE_TARGET);
+ RestoreRegisters(registers);
+}
// Will clobber 4 registers: object, address, scratch, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
-void MacroAssembler::RecordWrite(
- Register object, Register address, Register value,
- LinkRegisterStatus lr_status, SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action, SmiCheck smi_check,
- PointersToHereCheck pointers_to_here_check_for_value) {
- DCHECK(!object.is(value));
+void MacroAssembler::RecordWrite(Register object, Register address,
+ Register value, LinkRegisterStatus lr_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ DCHECK(object != value);
if (emit_debug_code()) {
LoadP(r0, MemOperand(address));
cmp(r0, value);
@@ -426,11 +467,9 @@ void MacroAssembler::RecordWrite(
JumpIfSmi(value, &done);
}
- if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
- }
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
CheckPageFlag(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
@@ -440,9 +479,13 @@ void MacroAssembler::RecordWrite(
mflr(r0);
push(r0);
}
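+ // With the CSA write barrier enabled, call the RecordWrite builtin instead
+ // of the platform RecordWriteStub.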
+#ifdef V8_CSA_WRITE_BARRIER
+ CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
+#else
RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
fp_mode);
CallStub(&stub);
+#endif
if (lr_status == kLRHasNotBeenSaved) {
pop(r0);
mtlr(r0);
@@ -465,8 +508,7 @@ void MacroAssembler::RecordWrite(
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Register address, Register scratch,
- SaveFPRegsMode fp_mode,
- RememberedSetFinalAction and_then) {
+ SaveFPRegsMode fp_mode) {
Label done;
if (emit_debug_code()) {
Label ok;
@@ -488,12 +530,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
// Check for end of buffer.
TestBitMask(scratch, StoreBuffer::kStoreBufferMask, r0);
- if (and_then == kFallThroughAtEnd) {
- bne(&done, cr0);
- } else {
- DCHECK(and_then == kReturnAtEnd);
- Ret(ne, cr0);
- }
+ Ret(ne, cr0);
mflr(r0);
push(r0);
StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
@@ -501,9 +538,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
pop(r0);
mtlr(r0);
bind(&done);
- if (and_then == kReturnAtEnd) {
- Ret();
- }
+ Ret();
}
void TurboAssembler::PushCommonFrame(Register marker_reg) {
@@ -563,10 +598,6 @@ void TurboAssembler::RestoreFrameStateForTailCall() {
mtlr(r0);
}
-const RegList MacroAssembler::kSafepointSavedRegisters = Register::kAllocatable;
-const int MacroAssembler::kNumSafepointSavedRegisters =
- Register::kNumAllocatable;
-
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of kNumSafepointRegisters values on the
@@ -866,7 +897,7 @@ void TurboAssembler::StubPrologue(StackFrame::Type type, Register base,
PushCommonFrame(r11);
}
if (FLAG_enable_embedded_constant_pool) {
- if (!base.is(no_reg)) {
+ if (base != no_reg) {
// base contains prologue address
LoadConstantPoolPointerRegister(base, -prologue_offset);
} else {
@@ -877,7 +908,7 @@ void TurboAssembler::StubPrologue(StackFrame::Type type, Register base,
}
void TurboAssembler::Prologue(Register base, int prologue_offset) {
- DCHECK(!base.is(no_reg));
+ DCHECK(base != no_reg);
PushStandardFrame(r4);
if (FLAG_enable_embedded_constant_pool) {
// base contains prologue address
@@ -1201,8 +1232,8 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// passed in registers.
// ARM has some sanity checks as per below; consider adding them for PPC
- // DCHECK(actual.is_immediate() || actual.reg().is(r3));
- // DCHECK(expected.is_immediate() || expected.reg().is(r5));
+ // DCHECK(actual.is_immediate() || actual.reg() == r3);
+ // DCHECK(expected.is_immediate() || expected.reg() == r5);
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
@@ -1296,8 +1327,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
InvokeFlag flag) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- DCHECK(function.is(r4));
- DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r6));
+ DCHECK(function == r4);
+ DCHECK_IMPLIES(new_target.is_valid(), new_target == r6);
// On function call, call into the debugger if necessary.
CheckDebugHook(function, new_target, expected, actual);
@@ -1337,7 +1368,7 @@ void MacroAssembler::InvokeFunction(Register fun, Register new_target,
DCHECK(flag == JUMP_FUNCTION || has_frame());
// Contract with called JS functions requires that function is passed in r4.
- DCHECK(fun.is(r4));
+ DCHECK(fun == r4);
Register expected_reg = r5;
Register temp_reg = r7;
@@ -1360,7 +1391,7 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK(flag == JUMP_FUNCTION || has_frame());
// Contract with called JS functions requires that function is passed in r4.
- DCHECK(function.is(r4));
+ DCHECK(function == r4);
// Get the function and setup the context.
LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
@@ -1415,105 +1446,9 @@ void MacroAssembler::PopStackHandler() {
}
-void MacroAssembler::Allocate(int object_size, Register result,
- Register scratch1, Register scratch2,
- Label* gc_required, AllocationFlags flags) {
- DCHECK(object_size <= kMaxRegularHeapObjectSize);
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- li(result, Operand(0x7091));
- li(scratch1, Operand(0x7191));
- li(scratch2, Operand(0x7291));
- }
- b(gc_required);
- return;
- }
-
- DCHECK(!AreAliased(result, scratch1, scratch2, ip));
-
- // Make object size into bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- object_size *= kPointerSize;
- }
- DCHECK_EQ(0, static_cast<int>(object_size & kObjectAlignmentMask));
-
- // Check relative positions of allocation top and limit addresses.
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
- ExternalReference allocation_limit =
- AllocationUtils::GetAllocationLimitReference(isolate(), flags);
-
- intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
- intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
- DCHECK((limit - top) == kPointerSize);
-
- // Set up allocation top address register.
- Register top_address = scratch1;
- // This code stores a temporary value in ip. This is OK, as the code below
- // does not need ip for implicit literal generation.
- Register alloc_limit = ip;
- Register result_end = scratch2;
- mov(top_address, Operand(allocation_top));
-
- if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into ip.
- LoadP(result, MemOperand(top_address));
- LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
- } else {
- if (emit_debug_code()) {
- // Assert that result actually contains top on entry.
- LoadP(alloc_limit, MemOperand(top_address));
- cmp(result, alloc_limit);
- Check(eq, kUnexpectedAllocationTop);
- }
- // Load allocation limit. Result already contains allocation top.
- LoadP(alloc_limit, MemOperand(top_address, limit - top));
- }
-
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- // Align the next allocation. Storing the filler map without checking top is
- // safe in new-space because the limit of the heap is aligned there.
-#if V8_TARGET_ARCH_PPC64
- STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
-#else
- STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
- andi(result_end, result, Operand(kDoubleAlignmentMask));
- Label aligned;
- beq(&aligned, cr0);
- if ((flags & PRETENURE) != 0) {
- cmpl(result, alloc_limit);
- bge(gc_required);
- }
- mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
- stw(result_end, MemOperand(result));
- addi(result, result, Operand(kDoubleSize / 2));
- bind(&aligned);
-#endif
- }
-
- // Calculate new top and bail out if new space is exhausted. Use result
- // to calculate the new top.
- sub(r0, alloc_limit, result);
- if (is_int16(object_size)) {
- cmpi(r0, Operand(object_size));
- blt(gc_required);
- addi(result_end, result, Operand(object_size));
- } else {
- Cmpi(r0, Operand(object_size), result_end);
- blt(gc_required);
- add(result_end, result, result_end);
- }
-
- StoreP(result_end, MemOperand(top_address));
-
- // Tag object.
- addi(result, result, Operand(kHeapObjectTag));
-}
-
void MacroAssembler::CompareObjectType(Register object, Register map,
Register type_reg, InstanceType type) {
- const Register temp = type_reg.is(no_reg) ? r0 : type_reg;
+ const Register temp = type_reg == no_reg ? r0 : type_reg;
LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
CompareInstanceType(map, temp, type);
@@ -1530,7 +1465,7 @@ void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
- DCHECK(!obj.is(r0));
+ DCHECK(obj != r0);
LoadRoot(r0, index);
cmp(obj, r0);
}
@@ -1539,22 +1474,22 @@ void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left,
Register right,
Register overflow_dst,
Register scratch) {
- DCHECK(!dst.is(overflow_dst));
- DCHECK(!dst.is(scratch));
- DCHECK(!overflow_dst.is(scratch));
- DCHECK(!overflow_dst.is(left));
- DCHECK(!overflow_dst.is(right));
+ DCHECK(dst != overflow_dst);
+ DCHECK(dst != scratch);
+ DCHECK(overflow_dst != scratch);
+ DCHECK(overflow_dst != left);
+ DCHECK(overflow_dst != right);
- bool left_is_right = left.is(right);
+ bool left_is_right = left == right;
RCBit xorRC = left_is_right ? SetRC : LeaveRC;
// C = A+B; C overflows if A/B have same sign and C has diff sign than A
- if (dst.is(left)) {
+ if (dst == left) {
mr(scratch, left); // Preserve left.
add(dst, left, right); // Left is overwritten.
xor_(overflow_dst, dst, scratch, xorRC); // Original left.
if (!left_is_right) xor_(scratch, dst, right);
- } else if (dst.is(right)) {
+ } else if (dst == right) {
mr(scratch, right); // Preserve right.
add(dst, left, right); // Right is overwritten.
xor_(overflow_dst, dst, left, xorRC);
@@ -1572,13 +1507,13 @@ void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left,
Register overflow_dst,
Register scratch) {
Register original_left = left;
- DCHECK(!dst.is(overflow_dst));
- DCHECK(!dst.is(scratch));
- DCHECK(!overflow_dst.is(scratch));
- DCHECK(!overflow_dst.is(left));
+ DCHECK(dst != overflow_dst);
+ DCHECK(dst != scratch);
+ DCHECK(overflow_dst != scratch);
+ DCHECK(overflow_dst != left);
// C = A+B; C overflows if A/B have same sign and C has diff sign than A
- if (dst.is(left)) {
+ if (dst == left) {
// Preserve left.
original_left = overflow_dst;
mr(original_left, left);
@@ -1596,20 +1531,20 @@ void TurboAssembler::SubAndCheckForOverflow(Register dst, Register left,
Register right,
Register overflow_dst,
Register scratch) {
- DCHECK(!dst.is(overflow_dst));
- DCHECK(!dst.is(scratch));
- DCHECK(!overflow_dst.is(scratch));
- DCHECK(!overflow_dst.is(left));
- DCHECK(!overflow_dst.is(right));
+ DCHECK(dst != overflow_dst);
+ DCHECK(dst != scratch);
+ DCHECK(overflow_dst != scratch);
+ DCHECK(overflow_dst != left);
+ DCHECK(overflow_dst != right);
// C = A-B; C overflows if A/B have diff signs and C has diff sign than A
- if (dst.is(left)) {
+ if (dst == left) {
mr(scratch, left); // Preserve left.
sub(dst, left, right); // Left is overwritten.
xor_(overflow_dst, dst, scratch);
xor_(scratch, scratch, right);
and_(overflow_dst, overflow_dst, scratch, SetRC);
- } else if (dst.is(right)) {
+ } else if (dst == right) {
mr(scratch, right); // Preserve right.
sub(dst, left, right); // Right is overwritten.
xor_(overflow_dst, dst, left);
@@ -1624,46 +1559,6 @@ void TurboAssembler::SubAndCheckForOverflow(Register dst, Register left,
}
-void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map,
- Label* early_success) {
- LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- CompareMap(scratch, map, early_success);
-}
-
-
-void MacroAssembler::CompareMap(Register obj_map, Handle<Map> map,
- Label* early_success) {
- mov(r0, Operand(map));
- cmp(obj_map, r0);
-}
-
-
-void MacroAssembler::CheckMap(Register obj, Register scratch, Handle<Map> map,
- Label* fail, SmiCheckType smi_check_type) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
-
- Label success;
- CompareMap(obj, scratch, map, &success);
- bne(fail);
- bind(&success);
-}
-
-
-void MacroAssembler::CheckMap(Register obj, Register scratch,
- Heap::RootListIndex index, Label* fail,
- SmiCheckType smi_check_type) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
- LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- LoadRoot(r0, index);
- cmp(scratch, r0);
- bne(fail);
-}
-
-
void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
mov(value, Operand(cell));
LoadP(value, FieldMemOperand(value, WeakCell::kValueOffset));
@@ -1723,7 +1618,7 @@ void MacroAssembler::TryDoubleToInt32Exact(Register result,
Register scratch,
DoubleRegister double_scratch) {
Label done;
- DCHECK(!double_input.is(double_scratch));
+ DCHECK(double_input != double_scratch);
ConvertDoubleToInt64(double_input,
#if !V8_TARGET_ARCH_PPC64
@@ -1850,16 +1745,6 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
}
-void MacroAssembler::SetCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
- if (FLAG_native_code_counters && counter->Enabled()) {
- mov(scratch1, Operand(value));
- mov(scratch2, Operand(ExternalReference(counter)));
- stw(scratch1, MemOperand(scratch2));
- }
-}
-
-
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK(value > 0);
@@ -1932,22 +1817,6 @@ void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
}
-void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
- Register map,
- Register scratch) {
- // Load the initial map. The global functions all have initial maps.
- LoadP(map,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (emit_debug_code()) {
- Label ok, fail;
- CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
- b(&ok);
- bind(&fail);
- Abort(kGlobalFunctionsMustHaveInitialMap);
- bind(&ok);
- }
-}
-
void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
Label* smi_case) {
STATIC_ASSERT(kSmiTag == 0);
@@ -2057,20 +1926,6 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
-void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
- Register first, Register second, Register scratch1, Register scratch2,
- Label* failure) {
- // Test that both first and second are sequential one-byte strings.
- // Assume that they are non-smis.
- LoadP(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
- LoadP(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
- lbz(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- lbz(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
-
- JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
- scratch2, failure);
-}
-
void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
Label* not_unique_name) {
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
@@ -2083,44 +1938,6 @@ void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
bind(&succeed);
}
-void MacroAssembler::AllocateJSValue(Register result, Register constructor,
- Register value, Register scratch1,
- Register scratch2, Label* gc_required) {
- DCHECK(!result.is(constructor));
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!result.is(value));
-
- // Allocate JSValue in new space.
- Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Initialize the JSValue.
- LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
- StoreP(scratch1, FieldMemOperand(result, HeapObject::kMapOffset), r0);
- LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
- StoreP(scratch1, FieldMemOperand(result, JSObject::kPropertiesOrHashOffset),
- r0);
- StoreP(scratch1, FieldMemOperand(result, JSObject::kElementsOffset), r0);
- StoreP(value, FieldMemOperand(result, JSValue::kValueOffset), r0);
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
-}
-
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
- Register first, Register second, Register scratch1, Register scratch2,
- Label* failure) {
- const int kFlatOneByteStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatOneByteStringTag =
- kStringTag | kOneByteStringTag | kSeqStringTag;
- andi(scratch1, first, Operand(kFlatOneByteStringMask));
- andi(scratch2, second, Operand(kFlatOneByteStringMask));
- cmpi(scratch1, Operand(kFlatOneByteStringTag));
- bne(failure);
- cmpi(scratch2, Operand(kFlatOneByteStringTag));
- bne(failure);
-}
-
static const int kRegisterPassedArguments = 8;
int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
@@ -2174,8 +1991,8 @@ void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); }
void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
DoubleRegister src2) {
- if (src2.is(d1)) {
- DCHECK(!src1.is(d2));
+ if (src2 == d1) {
+ DCHECK(src1 != d2);
Move(d2, src2);
Move(d1, src1);
} else {
@@ -2372,7 +2189,7 @@ void TurboAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
void TurboAssembler::LoadDoubleLiteral(DoubleRegister result, Double value,
Register scratch) {
if (FLAG_enable_embedded_constant_pool && is_constant_pool_available() &&
- !(scratch.is(r0) && ConstantPoolAccessIsInOverflow())) {
+ !(scratch == r0 && ConstantPoolAccessIsInOverflow())) {
ConstantPoolEntry::Access access = ConstantPoolAddEntry(value);
if (access == ConstantPoolEntry::OVERFLOWED) {
addis(scratch, kConstantPoolRegister, Operand::Zero());
@@ -2428,7 +2245,7 @@ void TurboAssembler::MovIntToDouble(DoubleRegister dst, Register src,
}
#endif
- DCHECK(!src.is(scratch));
+ DCHECK(src != scratch);
subi(sp, sp, Operand(kDoubleSize));
#if V8_TARGET_ARCH_PPC64
extsw(scratch, src);
@@ -2453,7 +2270,7 @@ void TurboAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
}
#endif
- DCHECK(!src.is(scratch));
+ DCHECK(src != scratch);
subi(sp, sp, Operand(kDoubleSize));
#if V8_TARGET_ARCH_PPC64
clrldi(scratch, src, Operand(32));
@@ -2690,7 +2507,7 @@ void MacroAssembler::And(Register ra, Register rs, const Operand& rb,
andi(ra, rs, rb);
} else {
// mov handles the relocation.
- DCHECK(!rs.is(r0));
+ DCHECK(rs != r0);
mov(r0, rb);
and_(ra, rs, r0, rc);
}
@@ -2707,7 +2524,7 @@ void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) {
ori(ra, rs, rb);
} else {
// mov handles the relocation.
- DCHECK(!rs.is(r0));
+ DCHECK(rs != r0);
mov(r0, rb);
orx(ra, rs, r0, rc);
}
@@ -2725,7 +2542,7 @@ void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
xori(ra, rs, rb);
} else {
// mov handles the relocation.
- DCHECK(!rs.is(r0));
+ DCHECK(rs != r0);
mov(r0, rb);
xor_(ra, rs, r0, rc);
}
@@ -2795,7 +2612,7 @@ void TurboAssembler::LoadP(Register dst, const MemOperand& mem,
if (!is_int16(offset)) {
/* cannot use d-form */
- DCHECK(!scratch.is(no_reg));
+ DCHECK(scratch != no_reg);
mov(scratch, Operand(offset));
LoadPX(dst, MemOperand(mem.ra(), scratch));
} else {
@@ -2804,7 +2621,7 @@ void TurboAssembler::LoadP(Register dst, const MemOperand& mem,
if (misaligned) {
// adjust base to conform to offset alignment requirements
// TODO: enhance to use scratch if dst is unsuitable
- DCHECK(!dst.is(r0));
+ DCHECK(dst != r0);
addi(dst, mem.ra(), Operand((offset & 3) - 4));
ld(dst, MemOperand(dst, (offset & ~3) + 4));
} else {
@@ -2822,7 +2639,7 @@ void TurboAssembler::LoadPU(Register dst, const MemOperand& mem,
if (!is_int16(offset)) {
/* cannot use d-form */
- DCHECK(!scratch.is(no_reg));
+ DCHECK(scratch != no_reg);
mov(scratch, Operand(offset));
LoadPUX(dst, MemOperand(mem.ra(), scratch));
} else {
@@ -2841,7 +2658,7 @@ void TurboAssembler::StoreP(Register src, const MemOperand& mem,
if (!is_int16(offset)) {
/* cannot use d-form */
- DCHECK(!scratch.is(no_reg));
+ DCHECK(scratch != no_reg);
mov(scratch, Operand(offset));
StorePX(src, MemOperand(mem.ra(), scratch));
} else {
@@ -2850,8 +2667,8 @@ void TurboAssembler::StoreP(Register src, const MemOperand& mem,
if (misaligned) {
// adjust base to conform to offset alignment requirements
// a suitable scratch is required here
- DCHECK(!scratch.is(no_reg));
- if (scratch.is(r0)) {
+ DCHECK(scratch != no_reg);
+ if (scratch == r0) {
LoadIntLiteral(scratch, offset);
stdx(src, MemOperand(mem.ra(), scratch));
} else {
@@ -2873,7 +2690,7 @@ void TurboAssembler::StorePU(Register src, const MemOperand& mem,
if (!is_int16(offset)) {
/* cannot use d-form */
- DCHECK(!scratch.is(no_reg));
+ DCHECK(scratch != no_reg);
mov(scratch, Operand(offset));
StorePUX(src, MemOperand(mem.ra(), scratch));
} else {
@@ -2885,12 +2702,12 @@ void TurboAssembler::StorePU(Register src, const MemOperand& mem,
}
}
-void MacroAssembler::LoadWordArith(Register dst, const MemOperand& mem,
+void TurboAssembler::LoadWordArith(Register dst, const MemOperand& mem,
Register scratch) {
int offset = mem.offset();
if (!is_int16(offset)) {
- DCHECK(!scratch.is(no_reg));
+ DCHECK(scratch != no_reg);
mov(scratch, Operand(offset));
lwax(dst, MemOperand(mem.ra(), scratch));
} else {
@@ -2899,7 +2716,7 @@ void MacroAssembler::LoadWordArith(Register dst, const MemOperand& mem,
if (misaligned) {
// adjust base to conform to offset alignment requirements
// TODO: enhance to use scratch if dst is unsuitable
- DCHECK(!dst.is(r0));
+ DCHECK(dst != r0);
addi(dst, mem.ra(), Operand((offset & 3) - 4));
lwa(dst, MemOperand(dst, (offset & ~3) + 4));
} else {
@@ -2949,7 +2766,7 @@ void MacroAssembler::LoadHalfWordArith(Register dst, const MemOperand& mem,
int offset = mem.offset();
if (!is_int16(offset)) {
- DCHECK(!scratch.is(no_reg));
+ DCHECK(scratch != no_reg);
mov(scratch, Operand(offset));
lhax(dst, MemOperand(mem.ra(), scratch));
} else {
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index ded1ec63ca..cc1d7a151e 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -9,25 +9,26 @@
#include "src/bailout-reason.h"
#include "src/double.h"
#include "src/globals.h"
+#include "src/ppc/assembler-ppc.h"
namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
-const Register kReturnRegister0 = {Register::kCode_r3};
-const Register kReturnRegister1 = {Register::kCode_r4};
-const Register kReturnRegister2 = {Register::kCode_r5};
-const Register kJSFunctionRegister = {Register::kCode_r4};
-const Register kContextRegister = {Register::kCode_r30};
-const Register kAllocateSizeRegister = {Register::kCode_r4};
-const Register kInterpreterAccumulatorRegister = {Register::kCode_r3};
-const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r15};
-const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r16};
-const Register kInterpreterDispatchTableRegister = {Register::kCode_r17};
-const Register kJavaScriptCallArgCountRegister = {Register::kCode_r3};
-const Register kJavaScriptCallNewTargetRegister = {Register::kCode_r6};
-const Register kRuntimeCallFunctionRegister = {Register::kCode_r4};
-const Register kRuntimeCallArgCountRegister = {Register::kCode_r3};
+const Register kReturnRegister0 = r3;
+const Register kReturnRegister1 = r4;
+const Register kReturnRegister2 = r5;
+const Register kJSFunctionRegister = r4;
+const Register kContextRegister = r30;
+const Register kAllocateSizeRegister = r4;
+const Register kInterpreterAccumulatorRegister = r3;
+const Register kInterpreterBytecodeOffsetRegister = r15;
+const Register kInterpreterBytecodeArrayRegister = r16;
+const Register kInterpreterDispatchTableRegister = r17;
+const Register kJavaScriptCallArgCountRegister = r3;
+const Register kJavaScriptCallNewTargetRegister = r6;
+const Register kRuntimeCallFunctionRegister = r4;
+const Register kRuntimeCallArgCountRegister = r3;
// ----------------------------------------------------------------------------
// Static helper functions
@@ -49,10 +50,6 @@ enum TaggingMode {
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-enum PointersToHereCheck {
- kPointersToHereMaybeInteresting,
- kPointersToHereAreAlwaysInteresting
-};
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
@@ -107,13 +104,8 @@ bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
class TurboAssembler : public Assembler {
public:
TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
- CodeObjectRequired create_code_object)
- : Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
- if (create_code_object == CodeObjectRequired::kYes) {
- code_object_ =
- Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
- }
- }
+ CodeObjectRequired create_code_object);
+
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() { return has_frame_; }
@@ -199,6 +191,8 @@ class TurboAssembler : public Assembler {
// These exist to provide portability between 32-bit and 64-bit
void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
void LoadPU(Register dst, const MemOperand& mem, Register scratch = no_reg);
+ void LoadWordArith(Register dst, const MemOperand& mem,
+ Register scratch = no_reg);
void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
void StorePU(Register src, const MemOperand& mem, Register scratch = no_reg);
@@ -309,18 +303,37 @@ class TurboAssembler : public Assembler {
LoadP(src1, MemOperand(sp, 4 * kPointerSize));
addi(sp, sp, Operand(5 * kPointerSize));
}
+
+ void SaveRegisters(RegList registers);
+ void RestoreRegisters(RegList registers);
+
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode);
+
void MultiPush(RegList regs, Register location = sp);
void MultiPop(RegList regs, Register location = sp);
void MultiPushDoubles(RegList dregs, Register location = sp);
void MultiPopDoubles(RegList dregs, Register location = sp);
- void PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
- Register exclusion2 = no_reg,
- Register exclusion3 = no_reg);
- void PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ // Calculate how much stack space (in bytes) is required to store
+ // caller-saved registers, excluding those specified in the arguments.
+ int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg) const;
+
+ // Push caller-saved registers on the stack and return the number of bytes
+ // by which the stack pointer is adjusted.
+ int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
+ // Restore caller-saved registers from the stack and return the number of
+ // bytes by which the stack pointer is adjusted.
+ int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
// Load an object from the root table.
void LoadRoot(Register destination, Heap::RootListIndex index,
@@ -404,7 +417,8 @@ class TurboAssembler : public Assembler {
void Jump(Register target);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
CRegister cr = cr7);
- void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+ void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al,
+ CRegister cr = cr7);
void Call(Register target);
void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(Handle<Code> code,
@@ -653,15 +667,12 @@ class MacroAssembler : public TurboAssembler {
void IncrementalMarkingRecordWriteHelper(Register object, Register value,
Register address);
- enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
-
// Record in the remembered set the fact that we have a pointer to new space
// at the address pointed to by the addr register. Only works if addr is not
// in new space.
void RememberedSetHelper(Register object, // Used for debug code.
Register addr, Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetFinalAction and_then);
+ SaveFPRegsMode save_fp);
void JumpToJSEntry(Register target);
// Check if object is in new space. Jumps if the object is not in new space.
@@ -697,26 +708,7 @@ class MacroAssembler : public TurboAssembler {
Register object, int offset, Register value, Register scratch,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting);
-
- // As above, but the offset has the tag presubtracted. For use with
- // MemOperand(reg, off).
- inline void RecordWriteContextSlot(
- Register context, int offset, Register value, Register scratch,
- LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting) {
- RecordWriteField(context, offset + kHeapObjectTag, value, scratch,
- lr_status, save_fp, remembered_set_action, smi_check,
- pointers_to_here_check_for_value);
- }
-
- void RecordWriteForMap(Register object, Register map, Register dst,
- LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);
+ SmiCheck smi_check = INLINE_SMI_CHECK);
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
@@ -725,9 +717,7 @@ class MacroAssembler : public TurboAssembler {
Register object, Register address, Register value,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting);
+ SmiCheck smi_check = INLINE_SMI_CHECK);
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
@@ -756,11 +746,6 @@ class MacroAssembler : public TurboAssembler {
bool restore_context,
bool argument_count_is_length = false);
- // Load the global object from the current context.
- void LoadGlobalObject(Register dst) {
- LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
- }
-
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst) {
LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
@@ -768,18 +753,12 @@ class MacroAssembler : public TurboAssembler {
void LoadNativeContextSlot(int index, Register dst);
- // Load the initial map from the global function. The registers
- // function and map can be the same, function is then overwritten.
- void LoadGlobalFunctionInitialMap(Register function, Register map,
- Register scratch);
// ----------------------------------------------------------------
// new PPC macro-assembler interfaces that are slightly higher level
// than assembler-ppc and may generate variable length sequences
// load a literal double value <value> to FPR <result>
void LoadWord(Register dst, const MemOperand& mem, Register scratch);
- void LoadWordArith(Register dst, const MemOperand& mem,
- Register scratch = no_reg);
void StoreWord(Register src, const MemOperand& mem, Register scratch);
void LoadHalfWord(Register dst, const MemOperand& mem, Register scratch);
@@ -861,25 +840,6 @@ class MacroAssembler : public TurboAssembler {
void PopStackHandler();
// ---------------------------------------------------------------------------
- // Allocation support
-
- // Allocate an object in new space or old space. The object_size is
- // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
- // is passed. If the space is exhausted control continues at the gc_required
- // label. The allocated object is returned in result. If the flag
- // tag_allocated_object is true the result is tagged as a heap object.
- // All registers are clobbered also when control continues at the gc_required
- // label.
- void Allocate(int object_size, Register result, Register scratch1,
- Register scratch2, Label* gc_required, AllocationFlags flags);
-
- // Allocate and initialize a JSValue wrapper with the specified {constructor}
- // and {value}.
- void AllocateJSValue(Register result, Register constructor, Register value,
- Register scratch1, Register scratch2,
- Label* gc_required);
-
- // ---------------------------------------------------------------------------
// Support functions.
// Machine code version of Map::GetConstructor().
@@ -903,28 +863,6 @@ class MacroAssembler : public TurboAssembler {
// sets the flags and leaves the object type in the type_reg register.
void CompareInstanceType(Register map, Register type_reg, InstanceType type);
- // Compare an object's map with the specified map and its transitioned
- // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
- // set with result of map compare. If multiple map compares are required, the
- // compare sequences branches to early_success.
- void CompareMap(Register obj, Register scratch, Handle<Map> map,
- Label* early_success);
-
- // As above, but the map of the object is already loaded into the register
- // which is preserved by the code generated.
- void CompareMap(Register obj_map, Handle<Map> map, Label* early_success);
-
- // Check if the map of an object is equal to a specified map and branch to
- // label if not. Skip the smi check if not required (object is known to be a
- // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
- // against maps that are ElementsKind transition maps of the specified map.
- void CheckMap(Register obj, Register scratch, Handle<Map> map, Label* fail,
- SmiCheckType smi_check_type);
-
-
- void CheckMap(Register obj, Register scratch, Heap::RootListIndex index,
- Label* fail, SmiCheckType smi_check_type);
-
void GetWeakValue(Register value, Handle<WeakCell> cell);
// Load the value of the weak cell in the value register. Branch to the given
@@ -1005,8 +943,6 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// StatsCounter support
- void SetCounter(StatsCounter* counter, int value, Register scratch1,
- Register scratch2);
void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
Register scratch2);
void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
@@ -1081,20 +1017,6 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// String utilities
- // Checks if both objects are sequential one-byte strings and jumps to label
- // if either is not. Assumes that neither object is a smi.
- void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
- Register object2,
- Register scratch1,
- Register scratch2,
- Label* failure);
-
- // Checks if both instance types are sequential one-byte strings and jumps to
- // label if either is not.
- void JumpIfBothInstanceTypesAreNotSequentialOneByte(
- Register first_object_instance_type, Register second_object_instance_type,
- Register scratch1, Register scratch2, Label* failure);
-
void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
// ---------------------------------------------------------------------------
@@ -1137,9 +1059,6 @@ class MacroAssembler : public TurboAssembler {
inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
Register mask_reg);
- static const RegList kSafepointSavedRegisters;
- static const int kNumSafepointSavedRegisters;
-
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
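
The new PushCallerSaved/PopCallerSaved pair replaces the removed ad-hoc save/restore helpers: a caller first asks RequiredStackSizeForCallerSaved for the frame delta, then pushes and later pops the same register set, with up to three exclusions. A minimal standalone sketch of the size computation, assuming a bitmask RegList and an invented register numbering (not V8's actual PPC register set):

```cpp
#include <bitset>
#include <cstdint>
#include <iostream>

using RegList = uint32_t;
constexpr RegList kCallerSaved = 0x0FFF;  // pretend r0..r11 are caller-saved
constexpr int kPointerSize = 8;
constexpr int kNoReg = -1;

// Count the caller-saved registers left after dropping up to three
// exclusions, and convert that count to a stack size in bytes.
int RequiredStackSizeForCallerSaved(int ex1 = kNoReg, int ex2 = kNoReg,
                                    int ex3 = kNoReg) {
  RegList regs = kCallerSaved;
  for (int ex : {ex1, ex2, ex3}) {
    if (ex != kNoReg) regs &= ~(RegList{1} << ex);
  }
  return static_cast<int>(std::bitset<32>(regs).count()) * kPointerSize;
}

int main() {
  std::cout << RequiredStackSizeForCallerSaved() << "\n";      // 96
  std::cout << RequiredStackSizeForCallerSaved(3, 4) << "\n";  // 80
}
```

Push/PopCallerSaved return the same byte count, so the caller's stack bookkeeping stays symmetric across the save/restore pair.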
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index b643004aa3..0f90700c81 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -1300,10 +1300,6 @@ typedef intptr_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1,
typedef ObjectPair (*SimulatorRuntimePairCall)(intptr_t arg0, intptr_t arg1,
intptr_t arg2, intptr_t arg3,
intptr_t arg4, intptr_t arg5);
-typedef ObjectTriple (*SimulatorRuntimeTripleCall)(intptr_t arg0, intptr_t arg1,
- intptr_t arg2, intptr_t arg3,
- intptr_t arg4,
- intptr_t arg5);
// These prototypes handle the four types of FP calls.
typedef int (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
@@ -1338,7 +1334,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
int arg0_regnum = 3;
intptr_t result_buffer = 0;
bool uses_result_buffer =
- redirection->type() == ExternalReference::BUILTIN_CALL_TRIPLE ||
(redirection->type() == ExternalReference::BUILTIN_CALL_PAIR &&
!ABI_RETURNS_OBJECT_PAIRS_IN_REGS);
if (uses_result_buffer) {
@@ -1541,52 +1536,35 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
- if (redirection->type() == ExternalReference::BUILTIN_CALL_TRIPLE) {
- SimulatorRuntimeTripleCall target =
- reinterpret_cast<SimulatorRuntimeTripleCall>(external);
- ObjectTriple result =
+ if (redirection->type() == ExternalReference::BUILTIN_CALL_PAIR) {
+ SimulatorRuntimePairCall target =
+ reinterpret_cast<SimulatorRuntimePairCall>(external);
+ ObjectPair result =
target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+ intptr_t x;
+ intptr_t y;
+ decodeObjectPair(&result, &x, &y);
if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned {%08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
- "}\n",
- reinterpret_cast<intptr_t>(result.x),
- reinterpret_cast<intptr_t>(result.y),
- reinterpret_cast<intptr_t>(result.z));
+ PrintF("Returned {%08" V8PRIxPTR ", %08" V8PRIxPTR "}\n", x, y);
}
- memcpy(reinterpret_cast<void*>(result_buffer), &result,
- sizeof(ObjectTriple));
- set_register(r3, result_buffer);
- } else {
- if (redirection->type() == ExternalReference::BUILTIN_CALL_PAIR) {
- SimulatorRuntimePairCall target =
- reinterpret_cast<SimulatorRuntimePairCall>(external);
- ObjectPair result =
- target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
- intptr_t x;
- intptr_t y;
- decodeObjectPair(&result, &x, &y);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned {%08" V8PRIxPTR ", %08" V8PRIxPTR "}\n", x, y);
- }
- if (ABI_RETURNS_OBJECT_PAIRS_IN_REGS) {
- set_register(r3, x);
- set_register(r4, y);
- } else {
- memcpy(reinterpret_cast<void*>(result_buffer), &result,
- sizeof(ObjectPair));
- set_register(r3, result_buffer);
- }
+ if (ABI_RETURNS_OBJECT_PAIRS_IN_REGS) {
+ set_register(r3, x);
+ set_register(r4, y);
} else {
- DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
- intptr_t result = target(arg[0], arg[1], arg[2], arg[3], arg[4],
- arg[5], arg[6], arg[7], arg[8]);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %08" V8PRIxPTR "\n", result);
- }
- set_register(r3, result);
+ memcpy(reinterpret_cast<void*>(result_buffer), &result,
+ sizeof(ObjectPair));
+ set_register(r3, result_buffer);
+ }
+ } else {
+ DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+ intptr_t result = target(arg[0], arg[1], arg[2], arg[3], arg[4],
+ arg[5], arg[6], arg[7], arg[8]);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %08" V8PRIxPTR "\n", result);
}
+ set_register(r3, result);
}
}
set_pc(saved_lr);
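
With BUILTIN_CALL_TRIPLE gone, the simulator only has to model the two remaining pair-return conventions: in registers (r3/r4) when ABI_RETURNS_OBJECT_PAIRS_IN_REGS holds, or through a caller-supplied result buffer otherwise. A rough standalone model of that dispatch, where the struct and the flag are stand-ins rather than V8's definitions:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

struct ObjectPair { intptr_t x; intptr_t y; };  // stand-in for V8's type

// Mirrors the simulator helper: split the pair into its two halves.
void decodeObjectPair(const ObjectPair* pair, intptr_t* x, intptr_t* y) {
  *x = pair->x;
  *y = pair->y;
}

int main() {
  // Stand-in for the ABI_RETURNS_OBJECT_PAIRS_IN_REGS build flag.
  const bool returns_pairs_in_regs = false;

  ObjectPair result{0x1234, 0x5678};
  intptr_t r3 = 0, r4 = 0;

  intptr_t x, y;
  decodeObjectPair(&result, &x, &y);
  if (returns_pairs_in_regs) {
    r3 = x;  // the pair comes back split across two result registers
    r4 = y;
  } else {
    // Otherwise spill the pair to a buffer and return its address in r3.
    static ObjectPair result_buffer;
    std::memcpy(&result_buffer, &result, sizeof(ObjectPair));
    r3 = reinterpret_cast<intptr_t>(&result_buffer);
  }
  std::printf("r3=%llx r4=%llx\n", static_cast<unsigned long long>(r3),
              static_cast<unsigned long long>(r4));
}
```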
diff --git a/deps/v8/src/profiler/allocation-tracker.cc b/deps/v8/src/profiler/allocation-tracker.cc
index 8d8a3c7e1d..3a7a88a0c5 100644
--- a/deps/v8/src/profiler/allocation-tracker.cc
+++ b/deps/v8/src/profiler/allocation-tracker.cc
@@ -22,14 +22,13 @@ AllocationTraceNode::AllocationTraceNode(
AllocationTraceNode::~AllocationTraceNode() {
- for (int i = 0; i < children_.length(); i++) delete children_[i];
+ for (AllocationTraceNode* node : children_) delete node;
}
AllocationTraceNode* AllocationTraceNode::FindChild(
unsigned function_info_index) {
- for (int i = 0; i < children_.length(); i++) {
- AllocationTraceNode* node = children_[i];
+ for (AllocationTraceNode* node : children_) {
if (node->function_info_index() == function_info_index) return node;
}
return NULL;
@@ -41,7 +40,7 @@ AllocationTraceNode* AllocationTraceNode::FindOrAddChild(
AllocationTraceNode* child = FindChild(function_info_index);
if (child == NULL) {
child = new AllocationTraceNode(tree_, function_info_index);
- children_.Add(child);
+ children_.push_back(child);
}
return child;
}
@@ -64,8 +63,8 @@ void AllocationTraceNode::Print(int indent, AllocationTracker* tracker) {
}
base::OS::Print("\n");
indent += 2;
- for (int i = 0; i < children_.length(); i++) {
- children_[i]->Print(indent, tracker);
+ for (AllocationTraceNode* node : children_) {
+ node->Print(indent, tracker);
}
}
@@ -98,13 +97,6 @@ void AllocationTraceTree::Print(AllocationTracker* tracker) {
root()->Print(0, tracker);
}
-
-void AllocationTracker::DeleteUnresolvedLocation(
- UnresolvedLocation** location) {
- delete *location;
-}
-
-
AllocationTracker::FunctionInfo::FunctionInfo()
: name(""),
function_id(0),
@@ -185,11 +177,6 @@ void AddressToTraceMap::RemoveRange(Address start, Address end) {
}
}
-
-void AllocationTracker::DeleteFunctionInfo(FunctionInfo** info) {
- delete *info;
-}
-
AllocationTracker::AllocationTracker(HeapObjectsMap* ids, StringsStorage* names)
: ids_(ids),
names_(names),
@@ -197,24 +184,23 @@ AllocationTracker::AllocationTracker(HeapObjectsMap* ids, StringsStorage* names)
info_index_for_other_state_(0) {
FunctionInfo* info = new FunctionInfo();
info->name = "(root)";
- function_info_list_.Add(info);
+ function_info_list_.push_back(info);
}
AllocationTracker::~AllocationTracker() {
- unresolved_locations_.Iterate(DeleteUnresolvedLocation);
- function_info_list_.Iterate(&DeleteFunctionInfo);
+ for (UnresolvedLocation* location : unresolved_locations_) delete location;
+ for (FunctionInfo* info : function_info_list_) delete info;
}
void AllocationTracker::PrepareForSerialization() {
- List<UnresolvedLocation*> copy(unresolved_locations_.length());
- copy.AddAll(unresolved_locations_);
- unresolved_locations_.Clear();
- for (int i = 0; i < copy.length(); i++) {
- copy[i]->Resolve();
- delete copy[i];
+ for (UnresolvedLocation* location : unresolved_locations_) {
+ location->Resolve();
+ delete location;
}
+ unresolved_locations_.clear();
+ unresolved_locations_.shrink_to_fit();
}
@@ -273,13 +259,11 @@ unsigned AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
info->script_id = script->id();
// Converting start offset into line and column may cause heap
// allocations so we postpone them until snapshot serialization.
- unresolved_locations_.Add(new UnresolvedLocation(
- script,
- shared->start_position(),
- info));
+ unresolved_locations_.push_back(
+ new UnresolvedLocation(script, shared->start_position(), info));
}
- entry->value = reinterpret_cast<void*>(function_info_list_.length());
- function_info_list_.Add(info);
+ entry->value = reinterpret_cast<void*>(function_info_list_.size());
+ function_info_list_.push_back(info);
}
return static_cast<unsigned>(reinterpret_cast<intptr_t>((entry->value)));
}
@@ -290,8 +274,9 @@ unsigned AllocationTracker::functionInfoIndexForVMState(StateTag state) {
if (info_index_for_other_state_ == 0) {
FunctionInfo* info = new FunctionInfo();
info->name = "(V8 API)";
- info_index_for_other_state_ = function_info_list_.length();
- function_info_list_.Add(info);
+ info_index_for_other_state_ =
+ static_cast<unsigned>(function_info_list_.size());
+ function_info_list_.push_back(info);
}
return info_index_for_other_state_;
}
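
The recurring shape of this migration: a List<T*> owning raw pointers becomes std::vector<T*>, Iterate(callback) destructors become range-for deletes, and Add/length() become push_back/size(). A condensed sketch of the resulting ownership pattern, with illustrative class and field names:

```cpp
#include <vector>

struct FunctionInfo { const char* name = ""; };

class Tracker {
 public:
  ~Tracker() {
    // Replaces function_info_list_.Iterate(&DeleteFunctionInfo).
    for (FunctionInfo* info : infos_) delete info;
  }
  void Add(FunctionInfo* info) { infos_.push_back(info); }  // was List::Add
  size_t count() const { return infos_.size(); }            // was length()

 private:
  std::vector<FunctionInfo*> infos_;  // owns its elements
};

int main() {
  Tracker tracker;
  tracker.Add(new FunctionInfo{"(root)"});
  return 0;
}
```

PrepareForSerialization pairs clear() with shrink_to_fit(), which roughly recovers List::Clear's behavior of releasing the backing store rather than just zeroing the length.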
diff --git a/deps/v8/src/profiler/allocation-tracker.h b/deps/v8/src/profiler/allocation-tracker.h
index 45bd446714..a84fd4a8fd 100644
--- a/deps/v8/src/profiler/allocation-tracker.h
+++ b/deps/v8/src/profiler/allocation-tracker.h
@@ -6,11 +6,11 @@
#define V8_PROFILER_ALLOCATION_TRACKER_H_
#include <map>
+#include <vector>
#include "include/v8-profiler.h"
#include "src/base/hashmap.h"
#include "src/handles.h"
-#include "src/list.h"
#include "src/vector.h"
namespace v8 {
@@ -36,7 +36,9 @@ class AllocationTraceNode {
unsigned allocation_size() const { return total_size_; }
unsigned allocation_count() const { return allocation_count_; }
unsigned id() const { return id_; }
- Vector<AllocationTraceNode*> children() const { return children_.ToVector(); }
+ const std::vector<AllocationTraceNode*>& children() const {
+ return children_;
+ }
void Print(int indent, AllocationTracker* tracker);
@@ -46,7 +48,7 @@ class AllocationTraceNode {
unsigned total_size_;
unsigned allocation_count_;
unsigned id_;
- List<AllocationTraceNode*> children_;
+ std::vector<AllocationTraceNode*> children_;
DISALLOW_COPY_AND_ASSIGN(AllocationTraceNode);
};
@@ -112,14 +114,13 @@ class AllocationTracker {
void AllocationEvent(Address addr, int size);
AllocationTraceTree* trace_tree() { return &trace_tree_; }
- const List<FunctionInfo*>& function_info_list() const {
+ const std::vector<FunctionInfo*>& function_info_list() const {
return function_info_list_;
}
AddressToTraceMap* address_to_trace() { return &address_to_trace_; }
private:
unsigned AddFunctionInfo(SharedFunctionInfo* info, SnapshotObjectId id);
- static void DeleteFunctionInfo(FunctionInfo** info);
unsigned functionInfoIndexForVMState(StateTag state);
class UnresolvedLocation {
@@ -135,16 +136,15 @@ class AllocationTracker {
int start_position_;
FunctionInfo* info_;
};
- static void DeleteUnresolvedLocation(UnresolvedLocation** location);
static const int kMaxAllocationTraceLength = 64;
HeapObjectsMap* ids_;
StringsStorage* names_;
AllocationTraceTree trace_tree_;
unsigned allocation_trace_buffer_[kMaxAllocationTraceLength];
- List<FunctionInfo*> function_info_list_;
+ std::vector<FunctionInfo*> function_info_list_;
base::HashMap id_to_function_info_index_;
- List<UnresolvedLocation*> unresolved_locations_;
+ std::vector<UnresolvedLocation*> unresolved_locations_;
unsigned info_index_for_other_state_;
AddressToTraceMap address_to_trace_;
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index 80d488f12c..bae592b36d 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -197,7 +197,7 @@ void ProfilerEventsProcessor::operator delete(void* ptr) {
int CpuProfiler::GetProfilesCount() {
// The count of profiles doesn't depend on a security token.
- return profiles_->profiles()->length();
+ return static_cast<int>(profiles_->profiles()->size());
}
@@ -215,7 +215,7 @@ void CpuProfiler::DeleteAllProfiles() {
void CpuProfiler::DeleteProfile(CpuProfile* profile) {
profiles_->RemoveProfile(profile);
delete profile;
- if (profiles_->profiles()->is_empty() && !is_profiling_) {
+ if (profiles_->profiles()->empty() && !is_profiling_) {
// If this was the last profile, clean up all accessory data as well.
ResetProfiles();
}
diff --git a/deps/v8/src/profiler/heap-profiler.cc b/deps/v8/src/profiler/heap-profiler.cc
index 4706b914e7..e28f267176 100644
--- a/deps/v8/src/profiler/heap-profiler.cc
+++ b/deps/v8/src/profiler/heap-profiler.cc
@@ -20,35 +20,34 @@ HeapProfiler::HeapProfiler(Heap* heap)
is_tracking_object_moves_(false),
get_retainer_infos_callback_(nullptr) {}
-static void DeleteHeapSnapshot(HeapSnapshot** snapshot_ptr) {
- delete *snapshot_ptr;
+static void DeleteHeapSnapshot(HeapSnapshot* snapshot_ptr) {
+ delete snapshot_ptr;
}
HeapProfiler::~HeapProfiler() {
- snapshots_.Iterate(DeleteHeapSnapshot);
- snapshots_.Clear();
+ std::for_each(snapshots_.begin(), snapshots_.end(), &DeleteHeapSnapshot);
}
void HeapProfiler::DeleteAllSnapshots() {
- snapshots_.Iterate(DeleteHeapSnapshot);
- snapshots_.Clear();
+ std::for_each(snapshots_.begin(), snapshots_.end(), &DeleteHeapSnapshot);
+ snapshots_.clear();
names_.reset(new StringsStorage(heap()));
}
void HeapProfiler::RemoveSnapshot(HeapSnapshot* snapshot) {
- snapshots_.RemoveElement(snapshot);
+ snapshots_.erase(std::find(snapshots_.begin(), snapshots_.end(), snapshot));
}
void HeapProfiler::DefineWrapperClass(
uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback) {
DCHECK(class_id != v8::HeapProfiler::kPersistentHandleNoClassId);
- if (wrapper_callbacks_.length() <= class_id) {
- wrapper_callbacks_.AddBlock(
- NULL, class_id - wrapper_callbacks_.length() + 1);
+ if (wrapper_callbacks_.size() <= class_id) {
+ wrapper_callbacks_.insert(wrapper_callbacks_.end(),
+ class_id - wrapper_callbacks_.size() + 1, NULL);
}
wrapper_callbacks_[class_id] = callback;
}
@@ -56,7 +55,7 @@ void HeapProfiler::DefineWrapperClass(
v8::RetainedObjectInfo* HeapProfiler::ExecuteWrapperClassCallback(
uint16_t class_id, Object** wrapper) {
- if (wrapper_callbacks_.length() <= class_id) return NULL;
+ if (wrapper_callbacks_.size() <= class_id) return NULL;
return wrapper_callbacks_[class_id](
class_id, Utils::ToLocal(Handle<Object>(wrapper)));
}
@@ -85,7 +84,7 @@ HeapSnapshot* HeapProfiler::TakeSnapshot(
delete result;
result = NULL;
} else {
- snapshots_.Add(result);
+ snapshots_.push_back(result);
}
}
ids_->RemoveDeadEntries();
@@ -149,7 +148,7 @@ void HeapProfiler::StopHeapObjectsTracking() {
}
int HeapProfiler::GetSnapshotsCount() {
- return snapshots_.length();
+ return static_cast<int>(snapshots_.size());
}
HeapSnapshot* HeapProfiler::GetSnapshot(int index) {
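
Two vector idioms carry most of this file: growing wrapper_callbacks_ with a ranged insert (the std::vector analogue of List::AddBlock) and removing a snapshot with erase(find(...)). A small sketch of both, with invented types:

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

using Callback = void (*)();

// Pad the vector with null entries so class_id becomes a valid index;
// when growing, this is equivalent to callbacks.resize(class_id + 1, nullptr).
void DefineWrapperClass(std::vector<Callback>& callbacks, uint16_t class_id,
                        Callback cb) {
  if (callbacks.size() <= class_id) {
    callbacks.insert(callbacks.end(), class_id - callbacks.size() + 1,
                     nullptr);
  }
  callbacks[class_id] = cb;
}

// List::RemoveElement becomes erase(find(...)); the element is assumed
// present, matching the original's semantics.
void RemoveSnapshot(std::vector<int>& snapshots, int snapshot) {
  snapshots.erase(std::find(snapshots.begin(), snapshots.end(), snapshot));
}

int main() {
  std::vector<Callback> callbacks;
  DefineWrapperClass(callbacks, 3, nullptr);
  std::vector<int> snapshots = {1, 2, 3};
  RemoveSnapshot(snapshots, 2);  // snapshots is now {1, 3}
}
```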
diff --git a/deps/v8/src/profiler/heap-profiler.h b/deps/v8/src/profiler/heap-profiler.h
index 354c48ea54..da6814ddcb 100644
--- a/deps/v8/src/profiler/heap-profiler.h
+++ b/deps/v8/src/profiler/heap-profiler.h
@@ -6,9 +6,9 @@
#define V8_PROFILER_HEAP_PROFILER_H_
#include <memory>
+#include <vector>
#include "src/isolate.h"
-#include "src/list.h"
namespace v8 {
namespace internal {
@@ -85,9 +85,9 @@ class HeapProfiler {
// Mapping from HeapObject addresses to objects' uids.
std::unique_ptr<HeapObjectsMap> ids_;
- List<HeapSnapshot*> snapshots_;
+ std::vector<HeapSnapshot*> snapshots_;
std::unique_ptr<StringsStorage> names_;
- List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
+ std::vector<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
std::unique_ptr<AllocationTracker> allocation_tracker_;
bool is_tracking_object_moves_;
base::Mutex profiler_mutex_;
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 98e4600cd3..5c80706a3c 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -406,11 +406,7 @@ SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr,
return id;
}
-
-void HeapObjectsMap::StopHeapObjectsTracking() {
- time_intervals_.Clear();
-}
-
+void HeapObjectsMap::StopHeapObjectsTracking() { time_intervals_.clear(); }
void HeapObjectsMap::UpdateHeapObjectsMap() {
if (FLAG_heap_profiler_trace_objects) {
@@ -440,15 +436,14 @@ void HeapObjectsMap::UpdateHeapObjectsMap() {
SnapshotObjectId HeapObjectsMap::PushHeapObjectsStats(OutputStream* stream,
int64_t* timestamp_us) {
UpdateHeapObjectsMap();
- time_intervals_.Add(TimeInterval(next_id_));
+ time_intervals_.emplace_back(next_id_);
int prefered_chunk_size = stream->GetChunkSize();
- List<v8::HeapStatsUpdate> stats_buffer;
+ std::vector<v8::HeapStatsUpdate> stats_buffer;
DCHECK(!entries_.empty());
EntryInfo* entry_info = &entries_.front();
EntryInfo* end_entry_info = &entries_.back() + 1;
- for (int time_interval_index = 0;
- time_interval_index < time_intervals_.length();
- ++time_interval_index) {
+ for (size_t time_interval_index = 0;
+ time_interval_index < time_intervals_.size(); ++time_interval_index) {
TimeInterval& time_interval = time_intervals_[time_interval_index];
SnapshotObjectId time_interval_id = time_interval.id;
uint32_t entries_size = 0;
@@ -461,28 +456,28 @@ SnapshotObjectId HeapObjectsMap::PushHeapObjectsStats(OutputStream* stream,
static_cast<uint32_t>(entry_info - start_entry_info);
if (time_interval.count != entries_count ||
time_interval.size != entries_size) {
- stats_buffer.Add(v8::HeapStatsUpdate(
- time_interval_index,
- time_interval.count = entries_count,
- time_interval.size = entries_size));
- if (stats_buffer.length() >= prefered_chunk_size) {
+ stats_buffer.emplace_back(static_cast<uint32_t>(time_interval_index),
+ time_interval.count = entries_count,
+ time_interval.size = entries_size);
+ if (static_cast<int>(stats_buffer.size()) >= prefered_chunk_size) {
OutputStream::WriteResult result = stream->WriteHeapStatsChunk(
- &stats_buffer.first(), stats_buffer.length());
+ &stats_buffer.front(), static_cast<int>(stats_buffer.size()));
if (result == OutputStream::kAbort) return last_assigned_id();
- stats_buffer.Clear();
+ stats_buffer.clear();
}
}
}
DCHECK(entry_info == end_entry_info);
- if (!stats_buffer.is_empty()) {
+ if (!stats_buffer.empty()) {
OutputStream::WriteResult result = stream->WriteHeapStatsChunk(
- &stats_buffer.first(), stats_buffer.length());
+ &stats_buffer.front(), static_cast<int>(stats_buffer.size()));
if (result == OutputStream::kAbort) return last_assigned_id();
}
stream->EndOfStream();
if (timestamp_us) {
- *timestamp_us = (time_intervals_.last().timestamp -
- time_intervals_[0].timestamp).InMicroseconds();
+ *timestamp_us =
+ (time_intervals_.back().timestamp - time_intervals_.front().timestamp)
+ .InMicroseconds();
}
return last_assigned_id();
}
@@ -957,11 +952,6 @@ void V8HeapExplorer::ExtractJSObjectReferences(
TagCodeObject(js_fun->code());
SetInternalReference(js_fun, entry, "code", js_fun->code(),
JSFunction::kCodeOffset);
- // Ensure no new weak references appeared in JSFunction.
- STATIC_ASSERT(JSFunction::kNonWeakFieldsEndOffset ==
- JSFunction::kNextFunctionLinkOffset);
- STATIC_ASSERT(JSFunction::kNextFunctionLinkOffset + kPointerSize
- == JSFunction::kSize);
} else if (obj->IsJSGlobalObject()) {
JSGlobalObject* global_obj = JSGlobalObject::cast(obj);
SetInternalReference(global_obj, entry, "native_context",
@@ -1070,16 +1060,13 @@ void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
TagObject(context->normalized_map_cache(), "(context norm. map cache)");
TagObject(context->embedder_data(), "(context data)");
NATIVE_CONTEXT_FIELDS(EXTRACT_CONTEXT_FIELD)
- EXTRACT_CONTEXT_FIELD(OPTIMIZED_FUNCTIONS_LIST, unused,
- optimized_functions_list);
EXTRACT_CONTEXT_FIELD(OPTIMIZED_CODE_LIST, unused, optimized_code_list);
EXTRACT_CONTEXT_FIELD(DEOPTIMIZED_CODE_LIST, unused, deoptimized_code_list);
#undef EXTRACT_CONTEXT_FIELD
- STATIC_ASSERT(Context::OPTIMIZED_FUNCTIONS_LIST ==
- Context::FIRST_WEAK_SLOT);
+ STATIC_ASSERT(Context::OPTIMIZED_CODE_LIST == Context::FIRST_WEAK_SLOT);
STATIC_ASSERT(Context::NEXT_CONTEXT_LINK + 1 ==
Context::NATIVE_CONTEXT_SLOTS);
- STATIC_ASSERT(Context::FIRST_WEAK_SLOT + 4 ==
+ STATIC_ASSERT(Context::FIRST_WEAK_SLOT + 3 ==
Context::NATIVE_CONTEXT_SLOTS);
}
}
@@ -1115,8 +1102,6 @@ void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
TagObject(descriptors, "(map descriptors)");
SetInternalReference(map, entry, "descriptors", descriptors,
Map::kDescriptorsOffset);
- SetInternalReference(map, entry, "code_cache", map->code_cache(),
- Map::kCodeCacheOffset);
SetInternalReference(map, entry, "prototype", map->prototype(),
Map::kPrototypeOffset);
#if V8_DOUBLE_FIELDS_UNBOXING
@@ -1277,11 +1262,6 @@ void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) {
SetInternalReference(code, entry, "source_position_table",
code->source_position_table(),
Code::kSourcePositionTableOffset);
- if (code->kind() == Code::FUNCTION) {
- SetInternalReference(code, entry, "type_feedback_info",
- code->type_feedback_info(),
- Code::kTypeFeedbackInfoOffset);
- }
}
void V8HeapExplorer::ExtractCellReferences(int entry, Cell* cell) {
@@ -1528,9 +1508,9 @@ HeapEntry* V8HeapExplorer::GetEntry(Object* obj) {
class RootsReferencesExtractor : public RootVisitor {
private:
struct IndexTag {
- IndexTag(int index, VisitorSynchronization::SyncTag tag)
- : index(index), tag(tag) { }
- int index;
+ IndexTag(size_t index, VisitorSynchronization::SyncTag tag)
+ : index(index), tag(tag) {}
+ size_t index;
VisitorSynchronization::SyncTag tag;
};
@@ -1543,22 +1523,24 @@ class RootsReferencesExtractor : public RootVisitor {
void VisitRootPointers(Root root, Object** start, Object** end) override {
if (collecting_all_references_) {
- for (Object** p = start; p < end; p++) all_references_.Add(*p);
+ for (Object** p = start; p < end; p++) all_references_.push_back(*p);
} else {
- for (Object** p = start; p < end; p++) strong_references_.Add(*p);
+ for (Object** p = start; p < end; p++) strong_references_.push_back(*p);
}
}
void SetCollectingAllReferences() { collecting_all_references_ = true; }
void FillReferences(V8HeapExplorer* explorer) {
- DCHECK(strong_references_.length() <= all_references_.length());
+ DCHECK_LE(strong_references_.size(), all_references_.size());
Builtins* builtins = heap_->isolate()->builtins();
USE(builtins);
- int strong_index = 0, all_index = 0, tags_index = 0, builtin_index = 0;
- while (all_index < all_references_.length()) {
- bool is_strong = strong_index < strong_references_.length()
- && strong_references_[strong_index] == all_references_[all_index];
+ size_t strong_index = 0, all_index = 0, tags_index = 0;
+ int builtin_index = 0;
+ while (all_index < all_references_.size()) {
+ bool is_strong =
+ strong_index < strong_references_.size() &&
+ strong_references_[strong_index] == all_references_[all_index];
explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
!is_strong,
all_references_[all_index]);
@@ -1573,23 +1555,23 @@ class RootsReferencesExtractor : public RootVisitor {
if (is_strong) ++strong_index;
if (reference_tags_[tags_index].index == all_index) ++tags_index;
}
- CHECK_EQ(strong_index, strong_references_.length());
+ CHECK_EQ(strong_index, strong_references_.size());
}
void Synchronize(VisitorSynchronization::SyncTag tag) override {
if (collecting_all_references_ &&
- previous_reference_count_ != all_references_.length()) {
- previous_reference_count_ = all_references_.length();
- reference_tags_.Add(IndexTag(previous_reference_count_, tag));
+ previous_reference_count_ != all_references_.size()) {
+ previous_reference_count_ = all_references_.size();
+ reference_tags_.emplace_back(previous_reference_count_, tag);
}
}
private:
bool collecting_all_references_;
- List<Object*> strong_references_;
- List<Object*> all_references_;
- int previous_reference_count_;
- List<IndexTag> reference_tags_;
+ std::vector<Object*> strong_references_;
+ std::vector<Object*> all_references_;
+ size_t previous_reference_count_;
+ std::vector<IndexTag> reference_tags_;
Heap* heap_;
};
@@ -1684,9 +1666,6 @@ bool V8HeapExplorer::IsEssentialHiddenReference(Object* parent,
if (parent->IsAllocationSite() &&
field_offset == AllocationSite::kWeakNextOffset)
return false;
- if (parent->IsJSFunction() &&
- field_offset == JSFunction::kNextFunctionLinkOffset)
- return false;
if (parent->IsCode() && field_offset == Code::kNextCodeLinkOffset)
return false;
if (parent->IsContext() &&
@@ -2080,8 +2059,8 @@ NativeObjectsExplorer::~NativeObjectsExplorer() {
v8::RetainedObjectInfo* info =
reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
info->Dispose();
- List<HeapObject*>* objects =
- reinterpret_cast<List<HeapObject*>* >(p->value);
+ std::vector<HeapObject*>* objects =
+ reinterpret_cast<std::vector<HeapObject*>*>(p->value);
delete objects;
}
for (base::HashMap::Entry* p = native_groups_.Start(); p != NULL;
@@ -2107,7 +2086,7 @@ void NativeObjectsExplorer::FillRetainedObjects() {
v8::HeapProfiler::RetainerInfos infos =
snapshot_->profiler()->GetRetainerInfos(isolate_);
for (auto& pair : infos.groups) {
- List<HeapObject*>* list = GetListMaybeDisposeInfo(pair.first);
+ std::vector<HeapObject*>* info = GetVectorMaybeDisposeInfo(pair.first);
for (auto& persistent : pair.second) {
if (persistent->IsEmpty()) continue;
@@ -2115,7 +2094,7 @@ void NativeObjectsExplorer::FillRetainedObjects() {
*persistent->Get(reinterpret_cast<v8::Isolate*>(isolate_)));
DCHECK(!object.is_null());
HeapObject* heap_object = HeapObject::cast(*object);
- list->Add(heap_object);
+ info->push_back(heap_object);
in_groups_.Insert(heap_object);
}
}
@@ -2151,16 +2130,16 @@ void NativeObjectsExplorer::FillEdges() {
edges_.clear();
}
-List<HeapObject*>* NativeObjectsExplorer::GetListMaybeDisposeInfo(
+std::vector<HeapObject*>* NativeObjectsExplorer::GetVectorMaybeDisposeInfo(
v8::RetainedObjectInfo* info) {
base::HashMap::Entry* entry =
objects_by_info_.LookupOrInsert(info, InfoHash(info));
if (entry->value != NULL) {
info->Dispose();
} else {
- entry->value = new List<HeapObject*>(4);
+ entry->value = new std::vector<HeapObject*>();
}
- return reinterpret_cast<List<HeapObject*>* >(entry->value);
+ return reinterpret_cast<std::vector<HeapObject*>*>(entry->value);
}
@@ -2175,10 +2154,10 @@ bool NativeObjectsExplorer::IterateAndExtractReferences(
v8::RetainedObjectInfo* info =
reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
SetNativeRootReference(info);
- List<HeapObject*>* objects =
- reinterpret_cast<List<HeapObject*>* >(p->value);
- for (int i = 0; i < objects->length(); ++i) {
- SetWrapperNativeReferences(objects->at(i), info);
+ std::vector<HeapObject*>* objects =
+ reinterpret_cast<std::vector<HeapObject*>*>(p->value);
+ for (HeapObject* object : *objects) {
+ SetWrapperNativeReferences(object, info);
}
}
SetRootNativeRootsReference();
@@ -2289,7 +2268,7 @@ void NativeObjectsExplorer::VisitSubtreeWrapper(Object** p, uint16_t class_id) {
v8::RetainedObjectInfo* info =
isolate->heap_profiler()->ExecuteWrapperClassCallback(class_id, p);
if (info == NULL) return;
- GetListMaybeDisposeInfo(info)->Add(HeapObject::cast(*p));
+ GetVectorMaybeDisposeInfo(info)->push_back(HeapObject::cast(*p));
}
@@ -2750,7 +2729,7 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
uint32_t count = 0;
AllocationTracker* tracker = snapshot_->profiler()->allocation_tracker();
if (tracker) {
- count = tracker->function_info_list().length();
+ count = static_cast<uint32_t>(tracker->function_info_list().size());
}
writer_->AddNumber(count);
}
@@ -2793,12 +2772,12 @@ void HeapSnapshotJSONSerializer::SerializeTraceNode(AllocationTraceNode* node) {
buffer[buffer_pos++] = '\0';
writer_->AddString(buffer.start());
- Vector<AllocationTraceNode*> children = node->children();
- for (int i = 0; i < children.length(); i++) {
- if (i > 0) {
+ int i = 0;
+ for (AllocationTraceNode* child : node->children()) {
+ if (i++ > 0) {
writer_->AddCharacter(',');
}
- SerializeTraceNode(children[i]);
+ SerializeTraceNode(child);
}
writer_->AddCharacter(']');
}
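
Without List's integer indexing, the comma placement in the serializers switches to a small counter incremented as a side effect of the separator test. The pattern in isolation:

```cpp
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> node_ids = {10, 20, 30};
  int i = 0;
  std::printf("[");
  for (int id : node_ids) {
    if (i++ > 0) std::printf(",");  // separator before all but the first
    std::printf("%d", id);
  }
  std::printf("]\n");  // prints [10,20,30]
}
```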
@@ -2825,12 +2804,10 @@ void HeapSnapshotJSONSerializer::SerializeTraceNodeInfos() {
6 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
+ 6 + 1 + 1;
EmbeddedVector<char, kBufferSize> buffer;
- const List<AllocationTracker::FunctionInfo*>& list =
- tracker->function_info_list();
- for (int i = 0; i < list.length(); i++) {
- AllocationTracker::FunctionInfo* info = list[i];
+ int i = 0;
+ for (AllocationTracker::FunctionInfo* info : tracker->function_info_list()) {
int buffer_pos = 0;
- if (i > 0) {
+ if (i++ > 0) {
buffer[buffer_pos++] = ',';
}
buffer_pos = utoa(info->function_id, buffer, buffer_pos);
@@ -2854,9 +2831,9 @@ void HeapSnapshotJSONSerializer::SerializeTraceNodeInfos() {
void HeapSnapshotJSONSerializer::SerializeSamples() {
- const List<HeapObjectsMap::TimeInterval>& samples =
+ const std::vector<HeapObjectsMap::TimeInterval>& samples =
snapshot_->profiler()->heap_object_map()->samples();
- if (samples.is_empty()) return;
+ if (samples.empty()) return;
base::TimeTicks start_time = samples[0].timestamp;
// The buffer needs space for 2 unsigned ints, 2 commas, \n and \0
const int kBufferSize = MaxDecimalDigitsIn<sizeof(
@@ -2864,10 +2841,10 @@ void HeapSnapshotJSONSerializer::SerializeSamples() {
MaxDecimalDigitsIn<sizeof(samples[0].id)>::kUnsigned +
2 + 1 + 1;
EmbeddedVector<char, kBufferSize> buffer;
- for (int i = 0; i < samples.length(); i++) {
- HeapObjectsMap::TimeInterval& sample = samples[i];
+ int i = 0;
+ for (const HeapObjectsMap::TimeInterval& sample : samples) {
int buffer_pos = 0;
- if (i > 0) {
+ if (i++ > 0) {
buffer[buffer_pos++] = ',';
}
base::TimeDelta time_delta = sample.timestamp - start_time;
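
PushHeapObjectsStats keeps the same streaming shape after the vector change: buffer stat updates, flush a chunk whenever the buffer reaches the stream's preferred size, then flush the remainder before EndOfStream. A standalone sketch of that loop, where Update and Flush stand in for v8::HeapStatsUpdate and WriteHeapStatsChunk:

```cpp
#include <cstdio>
#include <vector>

struct Update { unsigned index, count, size; };  // models v8::HeapStatsUpdate

// Models stream->WriteHeapStatsChunk followed by clearing the buffer.
void Flush(std::vector<Update>& buffer) {
  std::printf("chunk of %zu updates\n", buffer.size());
  buffer.clear();
}

int main() {
  const int preferred_chunk_size = 4;  // models stream->GetChunkSize()
  std::vector<Update> stats_buffer;
  for (unsigned i = 0; i < 10; ++i) {
    stats_buffer.push_back({i, i * 2, i * 16});
    if (static_cast<int>(stats_buffer.size()) >= preferred_chunk_size)
      Flush(stats_buffer);
  }
  if (!stats_buffer.empty()) Flush(stats_buffer);  // tail chunk
}
```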
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index b40400aa7d..ec2460b922 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -7,6 +7,7 @@
#include <deque>
#include <unordered_map>
+#include <vector>
#include "include/v8-profiler.h"
#include "src/base/platform/time.h"
@@ -233,7 +234,7 @@ class HeapObjectsMap {
void StopHeapObjectsTracking();
SnapshotObjectId PushHeapObjectsStats(OutputStream* stream,
int64_t* timestamp_us);
- const List<TimeInterval>& samples() const { return time_intervals_; }
+ const std::vector<TimeInterval>& samples() const { return time_intervals_; }
SnapshotObjectId GenerateId(v8::RetainedObjectInfo* info);
@@ -260,7 +261,7 @@ class HeapObjectsMap {
SnapshotObjectId next_id_;
base::HashMap entries_map_;
std::vector<EntryInfo> entries_;
- List<TimeInterval> time_intervals_;
+ std::vector<TimeInterval> time_intervals_;
Heap* heap_;
DISALLOW_COPY_AND_ASSIGN(HeapObjectsMap);
@@ -489,7 +490,8 @@ class NativeObjectsExplorer {
private:
void FillRetainedObjects();
void FillEdges();
- List<HeapObject*>* GetListMaybeDisposeInfo(v8::RetainedObjectInfo* info);
+ std::vector<HeapObject*>* GetVectorMaybeDisposeInfo(
+ v8::RetainedObjectInfo* info);
void SetNativeRootReference(v8::RetainedObjectInfo* info);
void SetRootNativeRootsReference();
void SetWrapperNativeReferences(HeapObject* wrapper,
@@ -516,7 +518,7 @@ class NativeObjectsExplorer {
StringsStorage* names_;
bool embedder_queried_;
HeapObjectsSet in_groups_;
- // RetainedObjectInfo* -> List<HeapObject*>*
+ // RetainedObjectInfo* -> std::vector<HeapObject*>*
base::CustomMatcherHashMap objects_by_info_;
base::CustomMatcherHashMap native_groups_;
HeapEntriesAllocator* synthetic_entries_allocator_;
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index 029b6826ec..c84f3662c8 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -361,22 +361,22 @@ class Position {
// Non-recursive implementation of a depth-first post-order tree traversal.
template <typename Callback>
void ProfileTree::TraverseDepthFirst(Callback* callback) {
- List<Position> stack(10);
- stack.Add(Position(root_));
- while (stack.length() > 0) {
- Position& current = stack.last();
+ std::vector<Position> stack;
+ stack.emplace_back(root_);
+ while (stack.size() > 0) {
+ Position& current = stack.back();
if (current.has_current_child()) {
callback->BeforeTraversingChild(current.node, current.current_child());
- stack.Add(Position(current.current_child()));
+ stack.emplace_back(current.current_child());
} else {
callback->AfterAllChildrenTraversed(current.node);
- if (stack.length() > 1) {
- Position& parent = stack[stack.length() - 2];
+ if (stack.size() > 1) {
+ Position& parent = stack[stack.size() - 2];
callback->AfterChildTraversed(parent.node, current.node);
parent.next_child();
}
// Remove child from the stack.
- stack.RemoveLast();
+ stack.pop_back();
}
}
}
@@ -404,12 +404,12 @@ void CpuProfile::AddPath(base::TimeTicks timestamp,
ProfileNode* top_frame_node =
top_down_.AddPathFromEnd(path, src_line, update_stats);
if (record_samples_ && !timestamp.IsNull()) {
- timestamps_.Add(timestamp);
- samples_.Add(top_frame_node);
+ timestamps_.push_back(timestamp);
+ samples_.push_back(top_frame_node);
}
const int kSamplesFlushCount = 100;
const int kNodesFlushCount = 10;
- if (samples_.length() - streaming_next_sample_ >= kSamplesFlushCount ||
+ if (samples_.size() - streaming_next_sample_ >= kSamplesFlushCount ||
top_down_.pending_nodes_count() >= kNodesFlushCount) {
StreamPendingTraceEvents();
}
@@ -446,10 +446,10 @@ void BuildNodeValue(const ProfileNode* node, TracedValue* value) {
void CpuProfile::StreamPendingTraceEvents() {
std::vector<const ProfileNode*> pending_nodes = top_down_.TakePendingNodes();
- if (pending_nodes.empty() && !samples_.length()) return;
+ if (pending_nodes.empty() && samples_.empty()) return;
auto value = TracedValue::Create();
- if (!pending_nodes.empty() || streaming_next_sample_ != samples_.length()) {
+ if (!pending_nodes.empty() || streaming_next_sample_ != samples_.size()) {
value->BeginDictionary("cpuProfile");
if (!pending_nodes.empty()) {
value->BeginArray("nodes");
@@ -460,28 +460,28 @@ void CpuProfile::StreamPendingTraceEvents() {
}
value->EndArray();
}
- if (streaming_next_sample_ != samples_.length()) {
+ if (streaming_next_sample_ != samples_.size()) {
value->BeginArray("samples");
- for (int i = streaming_next_sample_; i < samples_.length(); ++i) {
+ for (size_t i = streaming_next_sample_; i < samples_.size(); ++i) {
value->AppendInteger(samples_[i]->id());
}
value->EndArray();
}
value->EndDictionary();
}
- if (streaming_next_sample_ != samples_.length()) {
+ if (streaming_next_sample_ != samples_.size()) {
value->BeginArray("timeDeltas");
base::TimeTicks lastTimestamp =
streaming_next_sample_ ? timestamps_[streaming_next_sample_ - 1]
: start_time();
- for (int i = streaming_next_sample_; i < timestamps_.length(); ++i) {
+ for (size_t i = streaming_next_sample_; i < timestamps_.size(); ++i) {
value->AppendInteger(
static_cast<int>((timestamps_[i] - lastTimestamp).InMicroseconds()));
lastTimestamp = timestamps_[i];
}
value->EndArray();
- DCHECK(samples_.length() == timestamps_.length());
- streaming_next_sample_ = samples_.length();
+ DCHECK_EQ(samples_.size(), timestamps_.size());
+ streaming_next_sample_ = samples_.size();
}
TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
@@ -547,33 +547,28 @@ CpuProfilesCollection::CpuProfilesCollection(Isolate* isolate)
profiler_(nullptr),
current_profiles_semaphore_(1) {}
-static void DeleteCpuProfile(CpuProfile** profile_ptr) {
- delete *profile_ptr;
-}
-
-
CpuProfilesCollection::~CpuProfilesCollection() {
- finished_profiles_.Iterate(DeleteCpuProfile);
- current_profiles_.Iterate(DeleteCpuProfile);
+ for (CpuProfile* profile : finished_profiles_) delete profile;
+ for (CpuProfile* profile : current_profiles_) delete profile;
}
bool CpuProfilesCollection::StartProfiling(const char* title,
bool record_samples) {
current_profiles_semaphore_.Wait();
- if (current_profiles_.length() >= kMaxSimultaneousProfiles) {
+ if (static_cast<int>(current_profiles_.size()) >= kMaxSimultaneousProfiles) {
current_profiles_semaphore_.Signal();
return false;
}
- for (int i = 0; i < current_profiles_.length(); ++i) {
- if (strcmp(current_profiles_[i]->title(), title) == 0) {
+ for (CpuProfile* profile : current_profiles_) {
+ if (strcmp(profile->title(), title) == 0) {
// Ignore attempts to start profile with the same title...
current_profiles_semaphore_.Signal();
// ... though return true to force it collect a sample.
return true;
}
}
- current_profiles_.Add(new CpuProfile(profiler_, title, record_samples));
+ current_profiles_.push_back(new CpuProfile(profiler_, title, record_samples));
current_profiles_semaphore_.Signal();
return true;
}
@@ -583,9 +578,11 @@ CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
const int title_len = StrLength(title);
CpuProfile* profile = nullptr;
current_profiles_semaphore_.Wait();
- for (int i = current_profiles_.length() - 1; i >= 0; --i) {
- if (title_len == 0 || strcmp(current_profiles_[i]->title(), title) == 0) {
- profile = current_profiles_.Remove(i);
+ for (size_t i = current_profiles_.size(); i != 0; --i) {
+ CpuProfile* current_profile = current_profiles_[i - 1];
+ if (title_len == 0 || strcmp(current_profile->title(), title) == 0) {
+ profile = current_profile;
+ current_profiles_.erase(current_profiles_.begin() + i - 1);
break;
}
}
@@ -593,7 +590,7 @@ CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
if (!profile) return nullptr;
profile->FinishProfile();
- finished_profiles_.Add(profile);
+ finished_profiles_.push_back(profile);
return profile;
}
@@ -601,7 +598,7 @@ CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
bool CpuProfilesCollection::IsLastProfile(const char* title) {
// Called from VM thread, and only it can mutate the list,
// so no locking is needed here.
- if (current_profiles_.length() != 1) return false;
+ if (current_profiles_.size() != 1) return false;
return StrLength(title) == 0
|| strcmp(current_profiles_[0]->title(), title) == 0;
}
@@ -609,13 +606,10 @@ bool CpuProfilesCollection::IsLastProfile(const char* title) {
void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
// Called from VM thread for a completed profile.
- for (int i = 0; i < finished_profiles_.length(); i++) {
- if (profile == finished_profiles_[i]) {
- finished_profiles_.Remove(i);
- return;
- }
- }
- UNREACHABLE();
+ auto pos =
+ std::find(finished_profiles_.begin(), finished_profiles_.end(), profile);
+ DCHECK(pos != finished_profiles_.end());
+ finished_profiles_.erase(pos);
}
void CpuProfilesCollection::AddPathToCurrentProfiles(
@@ -625,8 +619,8 @@ void CpuProfilesCollection::AddPathToCurrentProfiles(
// method, we don't bother minimizing the duration of lock holding,
// e.g. copying contents of the list to a local vector.
current_profiles_semaphore_.Wait();
- for (int i = 0; i < current_profiles_.length(); ++i) {
- current_profiles_[i]->AddPath(timestamp, path, src_line, update_stats);
+ for (CpuProfile* profile : current_profiles_) {
+ profile->AddPath(timestamp, path, src_line, update_stats);
}
current_profiles_semaphore_.Signal();
}
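
TraverseDepthFirst is the most intricate consumer of the new containers: an explicit Position stack drives a non-recursive post-order walk, visiting a node only once its child cursor is exhausted. A self-contained version of the same control flow, minus the callbacks:

```cpp
#include <cstdio>
#include <vector>

struct Node {
  int id;
  std::vector<Node*> children;
};

// One stack slot: a node plus a cursor over its children.
struct Position {
  explicit Position(Node* n) : node(n), child(0) {}
  bool has_current_child() const { return child < node->children.size(); }
  Node* current_child() const { return node->children[child]; }
  Node* node;
  size_t child;
};

// Post-order: a node is emitted only after its child cursor is exhausted.
void TraversePostOrder(Node* root) {
  std::vector<Position> stack;
  stack.emplace_back(root);
  while (!stack.empty()) {
    if (stack.back().has_current_child()) {
      Node* child = stack.back().current_child();
      stack.emplace_back(child);  // descend
    } else {
      std::printf("%d ", stack.back().node->id);
      stack.pop_back();
      if (!stack.empty()) ++stack.back().child;  // advance parent's cursor
    }
  }
}

int main() {
  Node d{4, {}}, b{2, {&d}}, c{3, {}}, a{1, {&b, &c}};
  TraversePostOrder(&a);  // prints: 4 2 3 1
  std::printf("\n");
}
```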
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index ddd34b00a4..69a85a1422 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -6,6 +6,8 @@
#define V8_PROFILER_PROFILE_GENERATOR_H_
#include <map>
+#include <vector>
+
#include "src/allocation.h"
#include "src/base/hashmap.h"
#include "src/log.h"
@@ -278,7 +280,7 @@ class CpuProfile {
const char* title() const { return title_; }
const ProfileTree* top_down() const { return &top_down_; }
- int samples_count() const { return samples_.length(); }
+ int samples_count() const { return static_cast<int>(samples_.size()); }
ProfileNode* sample(int index) const { return samples_.at(index); }
base::TimeTicks sample_timestamp(int index) const {
return timestamps_.at(index);
@@ -299,11 +301,11 @@ class CpuProfile {
bool record_samples_;
base::TimeTicks start_time_;
base::TimeTicks end_time_;
- List<ProfileNode*> samples_;
- List<base::TimeTicks> timestamps_;
+ std::vector<ProfileNode*> samples_;
+ std::vector<base::TimeTicks> timestamps_;
ProfileTree top_down_;
CpuProfiler* const profiler_;
- int streaming_next_sample_;
+ size_t streaming_next_sample_;
DISALLOW_COPY_AND_ASSIGN(CpuProfile);
};
@@ -340,7 +342,7 @@ class CpuProfilesCollection {
void set_cpu_profiler(CpuProfiler* profiler) { profiler_ = profiler; }
bool StartProfiling(const char* title, bool record_samples);
CpuProfile* StopProfiling(const char* title);
- List<CpuProfile*>* profiles() { return &finished_profiles_; }
+ std::vector<CpuProfile*>* profiles() { return &finished_profiles_; }
const char* GetName(Name* name) { return resource_names_.GetName(name); }
bool IsLastProfile(const char* title);
void RemoveProfile(CpuProfile* profile);
@@ -355,11 +357,11 @@ class CpuProfilesCollection {
private:
StringsStorage resource_names_;
- List<CpuProfile*> finished_profiles_;
+ std::vector<CpuProfile*> finished_profiles_;
CpuProfiler* profiler_;
// Accessed by VM thread and profile generator thread.
- List<CpuProfile*> current_profiles_;
+ std::vector<CpuProfile*> current_profiles_;
base::Semaphore current_profiles_semaphore_;
DISALLOW_COPY_AND_ASSIGN(CpuProfilesCollection);
diff --git a/deps/v8/src/property-descriptor.cc b/deps/v8/src/property-descriptor.cc
index 70ddd5d521..1593ce5ed1 100644
--- a/deps/v8/src/property-descriptor.cc
+++ b/deps/v8/src/property-descriptor.cc
@@ -9,6 +9,7 @@
#include "src/isolate-inl.h"
#include "src/lookup.h"
#include "src/objects-inl.h"
+#include "src/objects/property-descriptor-object-inl.h"
namespace v8 {
namespace internal {
@@ -341,5 +342,33 @@ void PropertyDescriptor::CompletePropertyDescriptor(Isolate* isolate,
// 8. Return Desc.
}
+Handle<PropertyDescriptorObject> PropertyDescriptor::ToPropertyDescriptorObject(
+ Isolate* isolate) {
+ Handle<PropertyDescriptorObject> obj = Handle<PropertyDescriptorObject>::cast(
+ isolate->factory()->NewFixedArray(PropertyDescriptorObject::kLength));
+
+ int flags =
+ PropertyDescriptorObject::IsEnumerableBit::encode(enumerable_) |
+ PropertyDescriptorObject::HasEnumerableBit::encode(has_enumerable_) |
+ PropertyDescriptorObject::IsConfigurableBit::encode(configurable_) |
+ PropertyDescriptorObject::HasConfigurableBit::encode(has_configurable_) |
+ PropertyDescriptorObject::IsWritableBit::encode(writable_) |
+ PropertyDescriptorObject::HasWritableBit::encode(has_writable_) |
+ PropertyDescriptorObject::HasValueBit::encode(has_value()) |
+ PropertyDescriptorObject::HasGetBit::encode(has_get()) |
+ PropertyDescriptorObject::HasSetBit::encode(has_set());
+
+ obj->set(PropertyDescriptorObject::kFlagsIndex, Smi::FromInt(flags));
+
+ obj->set(PropertyDescriptorObject::kValueIndex,
+ has_value() ? *value_ : isolate->heap()->the_hole_value());
+ obj->set(PropertyDescriptorObject::kGetIndex,
+ has_get() ? *get_ : isolate->heap()->the_hole_value());
+ obj->set(PropertyDescriptorObject::kSetIndex,
+ has_set() ? *set_ : isolate->heap()->the_hole_value());
+
+ return obj;
+}
+
} // namespace internal
} // namespace v8
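
ToPropertyDescriptorObject packs the attribute booleans plus their presence bits into a single Smi-sized flags word via BitField-style encoders. A toy encoder showing the encode/decode symmetry; the bit layout here is invented for illustration, not PropertyDescriptorObject's actual one:

```cpp
#include <cstdio>

// BitField-style encoder: one bool per bit position.
template <int kShift>
struct BoolBit {
  static constexpr int encode(bool value) { return (value ? 1 : 0) << kShift; }
  static constexpr bool decode(int flags) {
    return ((flags >> kShift) & 1) != 0;
  }
};

using IsEnumerableBit = BoolBit<0>;
using HasEnumerableBit = BoolBit<1>;
using IsConfigurableBit = BoolBit<2>;

int main() {
  int flags = IsEnumerableBit::encode(true) |
              HasEnumerableBit::encode(true) |
              IsConfigurableBit::encode(false);
  std::printf("flags=%d enumerable=%d\n", flags,
              IsEnumerableBit::decode(flags));  // flags=3 enumerable=1
}
```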
diff --git a/deps/v8/src/property-descriptor.h b/deps/v8/src/property-descriptor.h
index cba43ed334..f5f5f7ad4a 100644
--- a/deps/v8/src/property-descriptor.h
+++ b/deps/v8/src/property-descriptor.h
@@ -15,6 +15,7 @@ namespace internal {
class Isolate;
class Object;
+class PropertyDescriptorObject;
class PropertyDescriptor {
public:
@@ -44,6 +45,8 @@ class PropertyDescriptor {
// ES6 6.2.4.4
Handle<Object> ToObject(Isolate* isolate);
+ Handle<PropertyDescriptorObject> ToPropertyDescriptorObject(Isolate* isolate);
+
// ES6 6.2.4.5
static bool ToPropertyDescriptor(Isolate* isolate, Handle<Object> obj,
PropertyDescriptor* desc);
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index 11a6bade88..146312905c 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -139,8 +139,8 @@ void RegExpMacroAssemblerARM::AdvanceCurrentPosition(int by) {
void RegExpMacroAssemblerARM::AdvanceRegister(int reg, int by) {
- DCHECK(reg >= 0);
- DCHECK(reg < num_registers_);
+ DCHECK_LE(0, reg);
+ DCHECK_GT(num_registers_, reg);
if (by != 0) {
__ ldr(r0, register_location(reg));
__ add(r0, r0, Operand(by));
@@ -448,7 +448,7 @@ void RegExpMacroAssemblerARM::CheckNotCharacterAfterMinusAnd(
uc16 minus,
uc16 mask,
Label* on_not_equal) {
- DCHECK(minus < String::kMaxUtf16CodeUnit);
+ DCHECK_GT(String::kMaxUtf16CodeUnit, minus);
__ sub(r0, current_character(), Operand(minus));
__ and_(r0, r0, Operand(mask));
__ cmp(r0, Operand(c));
@@ -887,8 +887,8 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_->GetCode(isolate(), &code_desc);
- Handle<Code> code = isolate()->factory()->NewCode(
- code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
+ Handle<Code> code = isolate()->factory()->NewCode(code_desc, Code::REGEXP,
+ masm_->CodeObject());
PROFILE(masm_->isolate(),
RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
return Handle<HeapObject>::cast(code);
@@ -1071,7 +1071,7 @@ void RegExpMacroAssemblerARM::CallCheckStackGuardState() {
// Drop the return address from the stack.
__ add(sp, sp, Operand(stack_alignment));
- DCHECK(stack_alignment != 0);
+ DCHECK_NE(0, stack_alignment);
__ ldr(sp, MemOperand(sp, 0));
__ mov(code_pointer(), Operand(masm_->CodeObject()));
@@ -1165,14 +1165,14 @@ void RegExpMacroAssemblerARM::SafeCallTarget(Label* name) {
void RegExpMacroAssemblerARM::Push(Register source) {
- DCHECK(!source.is(backtrack_stackpointer()));
+ DCHECK(source != backtrack_stackpointer());
__ str(source,
MemOperand(backtrack_stackpointer(), kPointerSize, NegPreIndex));
}
void RegExpMacroAssemblerARM::Pop(Register target) {
- DCHECK(!target.is(backtrack_stackpointer()));
+ DCHECK(target != backtrack_stackpointer());
__ ldr(target,
MemOperand(backtrack_stackpointer(), kPointerSize, PostIndex));
}
@@ -1212,7 +1212,7 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
// If unaligned load/stores are not supported then this function must only
// be used to load a single character at a time.
if (!CanReadUnaligned()) {
- DCHECK(characters == 1);
+ DCHECK_EQ(1, characters);
}
if (mode_ == LATIN1) {
@@ -1221,7 +1221,7 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
} else if (characters == 2) {
__ ldrh(current_character(), MemOperand(end_of_input_address(), offset));
} else {
- DCHECK(characters == 1);
+ DCHECK_EQ(1, characters);
__ ldrb(current_character(), MemOperand(end_of_input_address(), offset));
}
} else {
@@ -1229,7 +1229,7 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
if (characters == 2) {
__ ldr(current_character(), MemOperand(end_of_input_address(), offset));
} else {
- DCHECK(characters == 1);
+ DCHECK_EQ(1, characters);
__ ldrh(current_character(), MemOperand(end_of_input_address(), offset));
}
}
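
The sweep from DCHECK(a op b) to the two-argument DCHECK_EQ/NE/LT/LE/GT/GE forms buys better failure output: the comparison macros can stringify and print both operands. A rough model of what such a macro does, not V8's actual base/logging implementation:

```cpp
#include <cstdio>
#include <cstdlib>

// Two-argument check: on failure it prints both operand values,
// which a bare DCHECK(a > b) cannot.
#define MY_DCHECK_GT(lhs, rhs)                                       \
  do {                                                               \
    if (!((lhs) > (rhs))) {                                          \
      std::fprintf(stderr, "Check failed: %s > %s (%lld vs %lld)\n", \
                   #lhs, #rhs, static_cast<long long>(lhs),          \
                   static_cast<long long>(rhs));                     \
      std::abort();                                                  \
    }                                                                \
  } while (0)

int main() {
  int num_registers = 8;
  int reg = 3;
  MY_DCHECK_GT(num_registers, reg);  // passes; on failure both values print
  std::printf("ok\n");
}
```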
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index e8887b2694..5e3f37588f 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -154,7 +154,6 @@ void RegExpMacroAssemblerARM64::AdvanceCurrentPosition(int by) {
void RegExpMacroAssemblerARM64::AdvanceRegister(int reg, int by) {
DCHECK((reg >= 0) && (reg < num_registers_));
if (by != 0) {
- Register to_advance;
RegisterState register_state = GetRegisterState(reg);
switch (register_state) {
case STACKED:
@@ -162,15 +161,17 @@ void RegExpMacroAssemblerARM64::AdvanceRegister(int reg, int by) {
__ Add(w10, w10, by);
__ Str(w10, register_location(reg));
break;
- case CACHED_LSW:
- to_advance = GetCachedRegister(reg);
+ case CACHED_LSW: {
+ Register to_advance = GetCachedRegister(reg);
__ Add(to_advance, to_advance, by);
break;
- case CACHED_MSW:
- to_advance = GetCachedRegister(reg);
+ }
+ case CACHED_MSW: {
+ Register to_advance = GetCachedRegister(reg);
__ Add(to_advance, to_advance,
static_cast<int64_t>(by) << kWRegSizeInBits);
break;
+ }
default:
UNREACHABLE();
break;
@@ -247,7 +248,7 @@ void RegExpMacroAssemblerARM64::CheckCharacters(Vector<const uc16> str,
for (int i = 0; i < str.length(); i++) {
if (mode_ == LATIN1) {
__ Ldrb(w10, MemOperand(characters_address, 1, PostIndex));
- DCHECK(str[i] <= String::kMaxOneByteCharCode);
+ DCHECK_GE(String::kMaxOneByteCharCode, str[i]);
} else {
__ Ldrh(w10, MemOperand(characters_address, 2, PostIndex));
}
@@ -277,7 +278,7 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
DCHECK(kCalleeSaved.IncludesAliasOf(capture_length));
// Find length of back-referenced capture.
- DCHECK((start_reg % 2) == 0);
+ DCHECK_EQ(0, start_reg % 2);
if (start_reg < kNumCachedRegisters) {
__ Mov(capture_start_offset.X(), GetCachedRegister(start_reg));
__ Lsr(x11, GetCachedRegister(start_reg), kWRegSizeInBits);
@@ -373,7 +374,7 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
// The cached registers need to be retained.
CPURegList cached_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 7);
- DCHECK((cached_registers.Count() * 2) == kNumCachedRegisters);
+ DCHECK_EQ(kNumCachedRegisters, cached_registers.Count() * 2);
__ PushCPURegList(cached_registers);
// Put arguments into arguments registers.
@@ -438,7 +439,7 @@ void RegExpMacroAssemblerARM64::CheckNotBackReference(int start_reg,
Register capture_length = w15;
// Find length of back-referenced capture.
- DCHECK((start_reg % 2) == 0);
+ DCHECK_EQ(0, start_reg % 2);
if (start_reg < kNumCachedRegisters) {
__ Mov(x10, GetCachedRegister(start_reg));
__ Lsr(x11, GetCachedRegister(start_reg), kWRegSizeInBits);
@@ -535,7 +536,7 @@ void RegExpMacroAssemblerARM64::CheckNotCharacterAfterMinusAnd(
uc16 minus,
uc16 mask,
Label* on_not_equal) {
- DCHECK(minus < String::kMaxUtf16CodeUnit);
+ DCHECK_GT(String::kMaxUtf16CodeUnit, minus);
__ Sub(w10, current_character(), minus);
__ And(w10, w10, mask);
CompareAndBranchOrBacktrack(w10, c, ne, on_not_equal);
@@ -717,7 +718,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
CPURegList argument_registers(x0, x5, x6, x7);
CPURegList registers_to_retain = kCalleeSaved;
- DCHECK(kCalleeSaved.Count() == 11);
+ DCHECK_EQ(11, kCalleeSaved.Count());
registers_to_retain.Combine(lr);
DCHECK(csp.Is(__ StackPointer()));
@@ -1029,7 +1030,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// Registers x0 to x7 are used to store the first captures, they need to be
// retained over calls to C++ code.
CPURegList cached_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 7);
- DCHECK((cached_registers.Count() * 2) == kNumCachedRegisters);
+ DCHECK_EQ(kNumCachedRegisters, cached_registers.Count() * 2);
if (check_preempt_label_.is_linked()) {
__ Bind(&check_preempt_label_);
@@ -1079,8 +1080,8 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_->GetCode(isolate(), &code_desc);
- Handle<Code> code = isolate()->factory()->NewCode(
- code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
+ Handle<Code> code = isolate()->factory()->NewCode(code_desc, Code::REGEXP,
+ masm_->CodeObject());
PROFILE(masm_->isolate(),
RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
return Handle<HeapObject>::cast(code);
@@ -1179,19 +1180,17 @@ void RegExpMacroAssemblerARM64::PushRegister(int register_index,
void RegExpMacroAssemblerARM64::ReadCurrentPositionFromRegister(int reg) {
- Register cached_register;
RegisterState register_state = GetRegisterState(reg);
switch (register_state) {
case STACKED:
__ Ldr(current_input_offset(), register_location(reg));
break;
case CACHED_LSW:
- cached_register = GetCachedRegister(reg);
- __ Mov(current_input_offset(), cached_register.W());
+ __ Mov(current_input_offset(), GetCachedRegister(reg).W());
break;
case CACHED_MSW:
- cached_register = GetCachedRegister(reg);
- __ Lsr(current_input_offset().X(), cached_register, kWRegSizeInBits);
+ __ Lsr(current_input_offset().X(), GetCachedRegister(reg),
+ kWRegSizeInBits);
break;
default:
UNREACHABLE();
@@ -1276,7 +1275,7 @@ void RegExpMacroAssemblerARM64::ClearRegisters(int reg_from, int reg_to) {
if (num_registers > 0) {
// If there are some remaining registers, they are stored on the stack.
- DCHECK(reg_from >= kNumCachedRegisters);
+ DCHECK_LE(kNumCachedRegisters, reg_from);
// Move down the indexes of the registers on stack to get the correct offset
// in memory.
@@ -1483,7 +1482,7 @@ void RegExpMacroAssemblerARM64::Pop(Register target) {
Register RegExpMacroAssemblerARM64::GetCachedRegister(int register_index) {
- DCHECK(register_index < kNumCachedRegisters);
+ DCHECK_GT(kNumCachedRegisters, register_index);
return Register::Create(register_index / 2, kXRegSizeInBits);
}
@@ -1491,11 +1490,11 @@ Register RegExpMacroAssemblerARM64::GetCachedRegister(int register_index) {
Register RegExpMacroAssemblerARM64::GetRegister(int register_index,
Register maybe_result) {
DCHECK(maybe_result.Is32Bits());
- DCHECK(register_index >= 0);
+ DCHECK_LE(0, register_index);
if (num_registers_ <= register_index) {
num_registers_ = register_index + 1;
}
- Register result;
+ Register result = NoReg;
RegisterState register_state = GetRegisterState(register_index);
switch (register_state) {
case STACKED:
@@ -1522,27 +1521,28 @@ Register RegExpMacroAssemblerARM64::GetRegister(int register_index,
void RegExpMacroAssemblerARM64::StoreRegister(int register_index,
Register source) {
DCHECK(source.Is32Bits());
- DCHECK(register_index >= 0);
+ DCHECK_LE(0, register_index);
if (num_registers_ <= register_index) {
num_registers_ = register_index + 1;
}
- Register cached_register;
RegisterState register_state = GetRegisterState(register_index);
switch (register_state) {
case STACKED:
__ Str(source, register_location(register_index));
break;
- case CACHED_LSW:
- cached_register = GetCachedRegister(register_index);
+ case CACHED_LSW: {
+ Register cached_register = GetCachedRegister(register_index);
if (!source.Is(cached_register.W())) {
__ Bfi(cached_register, source.X(), 0, kWRegSizeInBits);
}
break;
- case CACHED_MSW:
- cached_register = GetCachedRegister(register_index);
+ }
+ case CACHED_MSW: {
+ Register cached_register = GetCachedRegister(register_index);
__ Bfi(cached_register, source.X(), kWRegSizeInBits, kWRegSizeInBits);
break;
+ }
default:
UNREACHABLE();
break;
@@ -1574,7 +1574,7 @@ void RegExpMacroAssemblerARM64::SaveLinkRegister() {
MemOperand RegExpMacroAssemblerARM64::register_location(int register_index) {
DCHECK(register_index < (1<<30));
- DCHECK(register_index >= kNumCachedRegisters);
+ DCHECK_LE(kNumCachedRegisters, register_index);
if (num_registers_ <= register_index) {
num_registers_ = register_index + 1;
}
@@ -1587,7 +1587,7 @@ MemOperand RegExpMacroAssemblerARM64::capture_location(int register_index,
Register scratch) {
DCHECK(register_index < (1<<30));
DCHECK(register_index < num_saved_registers_);
- DCHECK(register_index >= kNumCachedRegisters);
+ DCHECK_LE(kNumCachedRegisters, register_index);
DCHECK_EQ(register_index % 2, 0);
register_index -= kNumCachedRegisters;
int offset = kFirstCaptureOnStack - register_index * kWRegSize;
@@ -1614,7 +1614,7 @@ void RegExpMacroAssemblerARM64::LoadCurrentCharacterUnchecked(int cp_offset,
// disable it.
// TODO(pielan): See whether or not we should disable unaligned accesses.
if (!CanReadUnaligned()) {
- DCHECK(characters == 1);
+ DCHECK_EQ(1, characters);
}
if (cp_offset != 0) {
@@ -1636,7 +1636,7 @@ void RegExpMacroAssemblerARM64::LoadCurrentCharacterUnchecked(int cp_offset,
} else if (characters == 2) {
__ Ldrh(current_character(), MemOperand(input_end(), offset, SXTW));
} else {
- DCHECK(characters == 1);
+ DCHECK_EQ(1, characters);
__ Ldrb(current_character(), MemOperand(input_end(), offset, SXTW));
}
} else {
@@ -1644,7 +1644,7 @@ void RegExpMacroAssemblerARM64::LoadCurrentCharacterUnchecked(int cp_offset,
if (characters == 2) {
__ Ldr(current_character(), MemOperand(input_end(), offset, SXTW));
} else {
- DCHECK(characters == 1);
+ DCHECK_EQ(1, characters);
__ Ldrh(current_character(), MemOperand(input_end(), offset, SXTW));
}
}
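The bulk of the hunks above mechanically convert DCHECK(a OP b) into the typed comparison macros (DCHECK_EQ, DCHECK_LE, DCHECK_GT, ...). The practical win is diagnostics: the typed macros can report both operand values when the check fails, while a plain DCHECK can only say the whole expression was false. A minimal stand-in sketch, roughly analogous to what V8's base logging macros do (illustrative only, not the real definition):

#include <cstdlib>
#include <iostream>

// Illustrative stand-in; V8's real DCHECK_EQ also captures file/line
// and is compiled out in release builds.
#define MY_DCHECK_EQ(lhs, rhs)                                      \
  do {                                                              \
    if (!((lhs) == (rhs))) {                                        \
      std::cerr << "Check failed: " #lhs " == " #rhs " ("           \
                << (lhs) << " vs. " << (rhs) << ")\n";              \
      std::abort();                                                 \
    }                                                               \
  } while (0)

int main() {
  MY_DCHECK_EQ(2 + 2, 4);  // passes silently
  MY_DCHECK_EQ(1, 2);      // prints both operands, then aborts
}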
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
index 614be624a9..87ccf2aa8b 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
@@ -232,7 +232,7 @@ class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
};
RegisterState GetRegisterState(int register_index) {
- DCHECK(register_index >= 0);
+ DCHECK_LE(0, register_index);
if (register_index >= kNumCachedRegisters) {
return STACKED;
} else {
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index 35008b7b8a..28dab0b357 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -6,8 +6,10 @@
#include "src/regexp/ia32/regexp-macro-assembler-ia32.h"
+#include "src/assembler-inl.h"
#include "src/log.h"
#include "src/macro-assembler.h"
+#include "src/objects-inl.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
#include "src/unicode.h"
@@ -123,8 +125,8 @@ void RegExpMacroAssemblerIA32::AdvanceCurrentPosition(int by) {
void RegExpMacroAssemblerIA32::AdvanceRegister(int reg, int by) {
- DCHECK(reg >= 0);
- DCHECK(reg < num_registers_);
+ DCHECK_LE(0, reg);
+ DCHECK_GT(num_registers_, reg);
if (by != 0) {
__ add(register_location(reg), Immediate(by));
}
@@ -469,7 +471,7 @@ void RegExpMacroAssemblerIA32::CheckNotCharacterAfterMinusAnd(
uc16 minus,
uc16 mask,
Label* on_not_equal) {
- DCHECK(minus < String::kMaxUtf16CodeUnit);
+ DCHECK_GT(String::kMaxUtf16CodeUnit, minus);
__ lea(eax, Operand(current_character(), -minus));
if (c == 0) {
__ test(eax, Immediate(mask));
@@ -933,10 +935,8 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_->GetCode(masm_->isolate(), &code_desc);
- Handle<Code> code =
- isolate()->factory()->NewCode(code_desc,
- Code::ComputeFlags(Code::REGEXP),
- masm_->CodeObject());
+ Handle<Code> code = isolate()->factory()->NewCode(code_desc, Code::REGEXP,
+ masm_->CodeObject());
PROFILE(masm_->isolate(),
RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
return Handle<HeapObject>::cast(code);
@@ -1190,7 +1190,7 @@ void RegExpMacroAssemblerIA32::SafeCallTarget(Label* name) {
void RegExpMacroAssemblerIA32::Push(Register source) {
- DCHECK(!source.is(backtrack_stackpointer()));
+ DCHECK(source != backtrack_stackpointer());
// Notice: This updates flags, unlike normal Push.
__ sub(backtrack_stackpointer(), Immediate(kPointerSize));
__ mov(Operand(backtrack_stackpointer(), 0), source);
@@ -1205,7 +1205,7 @@ void RegExpMacroAssemblerIA32::Push(Immediate value) {
void RegExpMacroAssemblerIA32::Pop(Register target) {
- DCHECK(!target.is(backtrack_stackpointer()));
+ DCHECK(target != backtrack_stackpointer());
__ mov(target, Operand(backtrack_stackpointer(), 0));
// Notice: This updates flags, unlike normal Pop.
__ add(backtrack_stackpointer(), Immediate(kPointerSize));
@@ -1247,7 +1247,7 @@ void RegExpMacroAssemblerIA32::LoadCurrentCharacterUnchecked(int cp_offset,
} else if (characters == 2) {
__ movzx_w(current_character(), Operand(esi, edi, times_1, cp_offset));
} else {
- DCHECK(characters == 1);
+ DCHECK_EQ(1, characters);
__ movzx_b(current_character(), Operand(esi, edi, times_1, cp_offset));
}
} else {
@@ -1256,7 +1256,7 @@ void RegExpMacroAssemblerIA32::LoadCurrentCharacterUnchecked(int cp_offset,
__ mov(current_character(),
Operand(esi, edi, times_1, cp_offset * sizeof(uc16)));
} else {
- DCHECK(characters == 1);
+ DCHECK_EQ(1, characters);
__ movzx_w(current_character(),
Operand(esi, edi, times_1, cp_offset * sizeof(uc16)));
}
diff --git a/deps/v8/src/regexp/interpreter-irregexp.cc b/deps/v8/src/regexp/interpreter-irregexp.cc
index 83dca70804..7ba028020b 100644
--- a/deps/v8/src/regexp/interpreter-irregexp.cc
+++ b/deps/v8/src/regexp/interpreter-irregexp.cc
@@ -114,13 +114,13 @@ static void TraceInterpreter(const byte* code_base,
static int32_t Load32Aligned(const byte* pc) {
- DCHECK((reinterpret_cast<intptr_t>(pc) & 3) == 0);
+ DCHECK_EQ(0, reinterpret_cast<intptr_t>(pc) & 3);
return *reinterpret_cast<const int32_t *>(pc);
}
static int32_t Load16Aligned(const byte* pc) {
- DCHECK((reinterpret_cast<intptr_t>(pc) & 1) == 0);
+ DCHECK_EQ(0, reinterpret_cast<intptr_t>(pc) & 1);
return *reinterpret_cast<const uint16_t *>(pc);
}
@@ -300,7 +300,7 @@ static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
break;
}
BYTECODE(LOAD_4_CURRENT_CHARS) {
- DCHECK(sizeof(Char) == 1);
+ DCHECK_EQ(1, sizeof(Char));
int pos = current + (insn >> BYTECODE_SHIFT);
if (pos + 4 > subject.length() || pos < 0) {
pc = code_base + Load32Aligned(pc + 4);
@@ -317,7 +317,7 @@ static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
break;
}
BYTECODE(LOAD_4_CURRENT_CHARS_UNCHECKED) {
- DCHECK(sizeof(Char) == 1);
+ DCHECK_EQ(1, sizeof(Char));
int pos = current + (insn >> BYTECODE_SHIFT);
Char next1 = subject[pos + 1];
Char next2 = subject[pos + 2];
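The Load32Aligned/Load16Aligned helpers asserted on earlier in this file check pointer alignment by masking the low bits, which is what the `& 3` and `& 1` expressions in the new DCHECK_EQ forms compute. A self-contained sketch of the trick (the helper name here is hypothetical, not a V8 function):

#include <cassert>
#include <cstdint>

// A pointer is N-byte aligned (N a power of two) exactly when its
// low log2(N) bits are all zero.
static bool IsAligned4(const void* p) {
  return (reinterpret_cast<std::uintptr_t>(p) & 3) == 0;
}

int main() {
  std::int32_t x = 0;
  assert(IsAligned4(&x));  // int32_t storage is at least 4-byte aligned
}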
diff --git a/deps/v8/src/regexp/jsregexp.cc b/deps/v8/src/regexp/jsregexp.cc
index 5751764592..52ed47cf53 100644
--- a/deps/v8/src/regexp/jsregexp.cc
+++ b/deps/v8/src/regexp/jsregexp.cc
@@ -5,6 +5,7 @@
#include "src/regexp/jsregexp.h"
#include <memory>
+#include <vector>
#include "src/base/platform/platform.h"
#include "src/compilation-cache.h"
@@ -25,6 +26,7 @@
#include "src/splay-tree-inl.h"
#include "src/string-search.h"
#include "src/unicode-decoder.h"
+#include "src/unicode-inl.h"
#ifdef V8_INTL_SUPPORT
#include "unicode/uniset.h"
@@ -77,8 +79,8 @@ ContainedInLattice AddRange(ContainedInLattice containment,
const int* ranges,
int ranges_length,
Interval new_range) {
- DCHECK((ranges_length & 1) == 1);
- DCHECK(ranges[ranges_length - 1] == String::kMaxCodePoint + 1);
+ DCHECK_EQ(1, ranges_length & 1);
+ DCHECK_EQ(String::kMaxCodePoint + 1, ranges[ranges_length - 1]);
if (containment == kLatticeUnknown) return containment;
bool inside = false;
int last = 0;
@@ -208,8 +210,8 @@ int RegExpImpl::AtomExecRaw(Handle<JSRegExp> regexp,
int output_size) {
Isolate* isolate = regexp->GetIsolate();
- DCHECK(0 <= index);
- DCHECK(index <= subject->length());
+ DCHECK_LE(0, index);
+ DCHECK_LE(index, subject->length());
subject = String::Flatten(subject);
DisallowHeapAllocation no_gc; // ensure vectors stay valid
@@ -430,8 +432,8 @@ int RegExpImpl::IrregexpExecRaw(Handle<JSRegExp> regexp,
Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()), isolate);
- DCHECK(index >= 0);
- DCHECK(index <= subject->length());
+ DCHECK_LE(0, index);
+ DCHECK_LE(index, subject->length());
DCHECK(subject->IsFlat());
bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
@@ -640,7 +642,7 @@ RegExpImpl::GlobalCache::GlobalCache(Handle<JSRegExp> regexp,
// to the compiled regexp.
current_match_index_ = max_matches_ - 1;
num_matches_ = max_matches_;
- DCHECK(registers_per_match_ >= 2); // Each match has at least one capture.
+ DCHECK_LE(2, registers_per_match_); // Each match has at least one capture.
DCHECK_GE(register_array_size_, registers_per_match_);
int32_t* last_match =
&register_array_[current_match_index_ * registers_per_match_];
@@ -946,7 +948,7 @@ class RegExpCompiler {
inline void AddWork(RegExpNode* node) {
if (!node->on_work_list() && !node->label()->is_bound()) {
node->set_on_work_list(true);
- work_list_->Add(node);
+ work_list_->push_back(node);
}
}
@@ -997,7 +999,7 @@ class RegExpCompiler {
int next_register_;
int unicode_lookaround_stack_register_;
int unicode_lookaround_position_register_;
- List<RegExpNode*>* work_list_;
+ std::vector<RegExpNode*>* work_list_;
int recursion_depth_;
RegExpMacroAssembler* macro_assembler_;
JSRegExp::Flags flags_;
@@ -1049,7 +1051,7 @@ RegExpCompiler::RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
isolate_(isolate),
zone_(zone) {
accept_ = new(zone) EndNode(EndNode::ACCEPT, zone);
- DCHECK(next_register_ - 1 <= RegExpMacroAssembler::kMaxRegister);
+ DCHECK_GE(RegExpMacroAssembler::kMaxRegister, next_register_ - 1);
}
@@ -1067,7 +1069,7 @@ RegExpEngine::CompilationResult RegExpCompiler::Assemble(
#endif
macro_assembler_ = macro_assembler;
- List <RegExpNode*> work_list(0);
+ std::vector<RegExpNode*> work_list;
work_list_ = &work_list;
Label fail;
macro_assembler_->PushBacktrack(&fail);
@@ -1075,8 +1077,9 @@ RegExpEngine::CompilationResult RegExpCompiler::Assemble(
start->Emit(this, &new_trace);
macro_assembler_->Bind(&fail);
macro_assembler_->Fail();
- while (!work_list.is_empty()) {
- RegExpNode* node = work_list.RemoveLast();
+ while (!work_list.empty()) {
+ RegExpNode* node = work_list.back();
+ work_list.pop_back();
node->set_on_work_list(false);
if (!node->label()->is_bound()) node->Emit(this, &new_trace);
}
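The List→std::vector conversion above keeps the same LIFO discipline for the compiler's work list; List::RemoveLast() simply becomes a back()/pop_back() pair. A generic sketch of the drain loop, under the assumption that processing an item may push further work:

#include <iostream>
#include <vector>

// Drain a LIFO work list; `process` may append more work while running.
template <typename T, typename F>
void DrainWorkList(std::vector<T>* work_list, F process) {
  while (!work_list->empty()) {
    T item = work_list->back();
    work_list->pop_back();
    process(item);
  }
}

int main() {
  std::vector<int> work = {1, 2, 3};
  DrainWorkList(&work, [](int n) { std::cout << n << '\n'; });  // 3 2 1
}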
@@ -1705,7 +1708,7 @@ static inline bool EmitAtomLetter(Isolate* isolate,
macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check);
}
Label ok;
- DCHECK(unibrow::Ecma262UnCanonicalize::kMaxWidth == 4);
+ DCHECK_EQ(4, unibrow::Ecma262UnCanonicalize::kMaxWidth);
switch (length) {
case 2: {
if (ShortCutEmitCharacterPair(macro_assembler, one_byte, chars[0],
@@ -2925,7 +2928,7 @@ void ChoiceNode::GetQuickCheckDetails(QuickCheckDetails* details,
bool not_at_start) {
not_at_start = (not_at_start || not_at_start_);
int choice_count = alternatives_->length();
- DCHECK(choice_count > 0);
+ DCHECK_LT(0, choice_count);
alternatives_->at(0).node()->GetQuickCheckDetails(details,
compiler,
characters_filled_in,
@@ -3253,7 +3256,7 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
int TextNode::Length() {
TextElement elm = elements()->last();
- DCHECK(elm.cp_offset() >= 0);
+ DCHECK_LE(0, elm.cp_offset());
return elm.cp_offset() + elm.length();
}
@@ -3472,7 +3475,7 @@ void LoopChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
// Back edge of greedy optimized loop node graph.
int text_length =
GreedyLoopTextLengthForAlternative(&(alternatives_->at(0)));
- DCHECK(text_length != kNodeIsTooComplexForGreedyLoops);
+ DCHECK_NE(kNodeIsTooComplexForGreedyLoops, text_length);
// Update the counter-based backtracking info on the stack. This is an
// optimization for greedy loops (see below).
DCHECK(trace->cp_offset() == text_length);
@@ -3793,7 +3796,7 @@ void BoyerMooreLookahead::EmitSkipInstructions(RegExpMacroAssembler* masm) {
Handle<ByteArray> boolean_skip_table = factory->NewByteArray(kSize, TENURED);
int skip_distance = GetSkipTable(
min_lookahead, max_lookahead, boolean_skip_table);
- DCHECK(skip_distance != 0);
+ DCHECK_NE(0, skip_distance);
Label cont, again;
masm->Bind(&again);
@@ -4748,10 +4751,10 @@ static bool CompareInverseRanges(ZoneList<CharacterRange>* ranges,
const int* special_class,
int length) {
length--; // Remove final marker.
- DCHECK(special_class[length] == kRangeEndMarker);
- DCHECK(ranges->length() != 0);
- DCHECK(length != 0);
- DCHECK(special_class[0] != 0);
+ DCHECK_EQ(kRangeEndMarker, special_class[length]);
+ DCHECK_NE(0, ranges->length());
+ DCHECK_NE(0, length);
+ DCHECK_NE(0, special_class[0]);
if (ranges->length() != (length >> 1) + 1) {
return false;
}
@@ -4779,7 +4782,7 @@ static bool CompareRanges(ZoneList<CharacterRange>* ranges,
const int* special_class,
int length) {
length--; // Remove final marker.
- DCHECK(special_class[length] == kRangeEndMarker);
+ DCHECK_EQ(kRangeEndMarker, special_class[length]);
if (ranges->length() * 2 != length) {
return false;
}
@@ -5416,7 +5419,7 @@ class RegExpExpansionLimiter {
: compiler_(compiler),
saved_expansion_factor_(compiler->current_expansion_factor()),
ok_to_expand_(saved_expansion_factor_ <= kMaxExpansionFactor) {
- DCHECK(factor > 0);
+ DCHECK_LT(0, factor);
if (ok_to_expand_) {
if (factor > kMaxExpansionFactor) {
// Avoid integer overflow of the current expansion factor.
@@ -5505,7 +5508,7 @@ RegExpNode* RegExpQuantifier::ToNode(int min,
}
}
if (max <= kMaxUnrolledMaxMatches && min == 0) {
- DCHECK(max > 0); // Due to the 'if' above.
+ DCHECK_LT(0, max); // Due to the 'if' above.
RegExpExpansionLimiter limiter(compiler, max);
if (limiter.ok_to_expand()) {
// Unroll the optional matches up to max.
@@ -5802,7 +5805,7 @@ static void AddClass(const int* elmv,
ZoneList<CharacterRange>* ranges,
Zone* zone) {
elmc--;
- DCHECK(elmv[elmc] == kRangeEndMarker);
+ DCHECK_EQ(kRangeEndMarker, elmv[elmc]);
for (int i = 0; i < elmc; i += 2) {
DCHECK(elmv[i] < elmv[i + 1]);
ranges->Add(CharacterRange::Range(elmv[i], elmv[i + 1] - 1), zone);
@@ -5815,9 +5818,9 @@ static void AddClassNegated(const int *elmv,
ZoneList<CharacterRange>* ranges,
Zone* zone) {
elmc--;
- DCHECK(elmv[elmc] == kRangeEndMarker);
- DCHECK(elmv[0] != 0x0000);
- DCHECK(elmv[elmc - 1] != String::kMaxCodePoint);
+ DCHECK_EQ(kRangeEndMarker, elmv[elmc]);
+ DCHECK_NE(0x0000, elmv[0]);
+ DCHECK_NE(String::kMaxCodePoint, elmv[elmc - 1]);
uc16 last = 0x0000;
for (int i = 0; i < elmc; i += 2) {
DCHECK(last <= elmv[i] - 1);
@@ -5902,7 +5905,7 @@ Vector<const int> CharacterRange::GetWordBounds() {
return Vector<const int>(kWordRanges, kWordRangeCount - 1);
}
-
+// static
void CharacterRange::AddCaseEquivalents(Isolate* isolate, Zone* zone,
ZoneList<CharacterRange>* ranges,
bool is_one_byte) {
@@ -5911,12 +5914,12 @@ void CharacterRange::AddCaseEquivalents(Isolate* isolate, Zone* zone,
for (int i = 0; i < range_count; i++) {
CharacterRange range = ranges->at(i);
uc32 bottom = range.from();
- if (bottom > String::kMaxUtf16CodeUnit) return;
+ if (bottom > String::kMaxUtf16CodeUnit) continue;
uc32 top = Min(range.to(), String::kMaxUtf16CodeUnit);
// Nothing to be done for surrogates.
- if (bottom >= kLeadSurrogateStart && top <= kTrailSurrogateEnd) return;
+ if (bottom >= kLeadSurrogateStart && top <= kTrailSurrogateEnd) continue;
if (is_one_byte && !RangeContainsLatin1Equivalents(range)) {
- if (bottom > String::kMaxOneByteCharCode) return;
+ if (bottom > String::kMaxOneByteCharCode) continue;
if (top > String::kMaxOneByteCharCode) top = String::kMaxOneByteCharCode;
}
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
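The return→continue changes in AddCaseEquivalents above are a behavioral fix, not a cleanup: `return` abandoned every remaining range as soon as one range fell outside the representable limits, whereas `continue` skips only the offending range. Schematically (the types and helper below are hypothetical stand-ins for the loop shape, not V8 code):

#include <vector>

struct Range { int from, to; };

void AddEquivalents(const std::vector<Range>& ranges, int max_code_unit) {
  for (const Range& r : ranges) {
    if (r.from > max_code_unit) continue;  // was `return`: dropped the rest
    // ... compute and add case equivalents for r ...
  }
}

int main() { AddEquivalents({{0x41, 0x5A}, {0x20000, 0x20010}}, 0xFFFF); }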
diff --git a/deps/v8/src/regexp/jsregexp.h b/deps/v8/src/regexp/jsregexp.h
index 77d61ae17e..23dc8fac4b 100644
--- a/deps/v8/src/regexp/jsregexp.h
+++ b/deps/v8/src/regexp/jsregexp.h
@@ -406,8 +406,8 @@ class QuickCheckDetails {
int characters() { return characters_; }
void set_characters(int characters) { characters_ = characters; }
Position* positions(int index) {
- DCHECK(index >= 0);
- DCHECK(index < characters_);
+ DCHECK_LE(0, index);
+ DCHECK_GT(characters_, index);
return positions_ + index;
}
uint32_t mask() { return mask_; }
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
index 1a8f2c8d8e..03ceb0ee75 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -6,9 +6,11 @@
#include "src/regexp/mips/regexp-macro-assembler-mips.h"
+#include "src/assembler-inl.h"
#include "src/code-stubs.h"
#include "src/log.h"
#include "src/macro-assembler.h"
+#include "src/objects-inl.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
#include "src/unicode.h"
@@ -143,8 +145,8 @@ void RegExpMacroAssemblerMIPS::AdvanceCurrentPosition(int by) {
void RegExpMacroAssemblerMIPS::AdvanceRegister(int reg, int by) {
- DCHECK(reg >= 0);
- DCHECK(reg < num_registers_);
+ DCHECK_LE(0, reg);
+ DCHECK_GT(num_registers_, reg);
if (by != 0) {
__ lw(a0, register_location(reg));
__ Addu(a0, a0, Operand(by));
@@ -289,7 +291,7 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ Subu(current_input_offset(), current_input_offset(), Operand(t5));
}
} else {
- DCHECK(mode_ == UC16);
+ DCHECK_EQ(UC16, mode_);
// Put regexp engine registers on stack.
RegList regexp_registers_to_retain = current_input_offset().bit() |
current_character().bit() | backtrack_stackpointer().bit();
@@ -453,7 +455,7 @@ void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterMinusAnd(
uc16 minus,
uc16 mask,
Label* on_not_equal) {
- DCHECK(minus < String::kMaxUtf16CodeUnit);
+ DCHECK_GT(String::kMaxUtf16CodeUnit, minus);
__ Subu(a0, current_character(), Operand(minus));
__ And(a0, a0, Operand(mask));
BranchOrBacktrack(on_not_equal, ne, a0, Operand(c));
@@ -899,8 +901,8 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_->GetCode(isolate(), &code_desc);
- Handle<Code> code = isolate()->factory()->NewCode(
- code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
+ Handle<Code> code = isolate()->factory()->NewCode(code_desc, Code::REGEXP,
+ masm_->CodeObject());
LOG(masm_->isolate(),
RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
return Handle<HeapObject>::cast(code);
@@ -1231,7 +1233,7 @@ void RegExpMacroAssemblerMIPS::SafeCallTarget(Label* name) {
void RegExpMacroAssemblerMIPS::Push(Register source) {
- DCHECK(!source.is(backtrack_stackpointer()));
+ DCHECK(source != backtrack_stackpointer());
__ Addu(backtrack_stackpointer(),
backtrack_stackpointer(),
Operand(-kPointerSize));
@@ -1240,7 +1242,7 @@ void RegExpMacroAssemblerMIPS::Push(Register source) {
void RegExpMacroAssemblerMIPS::Pop(Register target) {
- DCHECK(!target.is(backtrack_stackpointer()));
+ DCHECK(target != backtrack_stackpointer());
__ lw(target, MemOperand(backtrack_stackpointer()));
__ Addu(backtrack_stackpointer(), backtrack_stackpointer(), kPointerSize);
}
@@ -1276,12 +1278,12 @@ void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
}
// We assume that we cannot do unaligned loads on MIPS, so this function
// must only be used to load a single character at a time.
- DCHECK(characters == 1);
+ DCHECK_EQ(1, characters);
__ Addu(t5, end_of_input_address(), Operand(offset));
if (mode_ == LATIN1) {
__ lbu(current_character(), MemOperand(t5, 0));
} else {
- DCHECK(mode_ == UC16);
+ DCHECK_EQ(UC16, mode_);
__ lhu(current_character(), MemOperand(t5, 0));
}
}
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index 651e3007fe..3b73f0bb56 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -6,9 +6,11 @@
#include "src/regexp/mips64/regexp-macro-assembler-mips64.h"
+#include "src/assembler-inl.h"
#include "src/code-stubs.h"
#include "src/log.h"
#include "src/macro-assembler.h"
+#include "src/objects-inl.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
#include "src/unicode.h"
@@ -180,8 +182,8 @@ void RegExpMacroAssemblerMIPS::AdvanceCurrentPosition(int by) {
void RegExpMacroAssemblerMIPS::AdvanceRegister(int reg, int by) {
- DCHECK(reg >= 0);
- DCHECK(reg < num_registers_);
+ DCHECK_LE(0, reg);
+ DCHECK_GT(num_registers_, reg);
if (by != 0) {
__ Ld(a0, register_location(reg));
__ Daddu(a0, a0, Operand(by));
@@ -484,7 +486,7 @@ void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterMinusAnd(
uc16 minus,
uc16 mask,
Label* on_not_equal) {
- DCHECK(minus < String::kMaxUtf16CodeUnit);
+ DCHECK_GT(String::kMaxUtf16CodeUnit, minus);
__ Dsubu(a0, current_character(), Operand(minus));
__ And(a0, a0, Operand(mask));
BranchOrBacktrack(on_not_equal, ne, a0, Operand(c));
@@ -937,8 +939,8 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_->GetCode(isolate(), &code_desc);
- Handle<Code> code = isolate()->factory()->NewCode(
- code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
+ Handle<Code> code = isolate()->factory()->NewCode(code_desc, Code::REGEXP,
+ masm_->CodeObject());
LOG(masm_->isolate(),
RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
return Handle<HeapObject>::cast(code);
@@ -1269,7 +1271,7 @@ void RegExpMacroAssemblerMIPS::SafeCallTarget(Label* name) {
void RegExpMacroAssemblerMIPS::Push(Register source) {
- DCHECK(!source.is(backtrack_stackpointer()));
+ DCHECK(source != backtrack_stackpointer());
__ Daddu(backtrack_stackpointer(),
backtrack_stackpointer(),
Operand(-kIntSize));
@@ -1278,7 +1280,7 @@ void RegExpMacroAssemblerMIPS::Push(Register source) {
void RegExpMacroAssemblerMIPS::Pop(Register target) {
- DCHECK(!target.is(backtrack_stackpointer()));
+ DCHECK(target != backtrack_stackpointer());
__ Lw(target, MemOperand(backtrack_stackpointer()));
__ Daddu(backtrack_stackpointer(), backtrack_stackpointer(), kIntSize);
}
@@ -1314,7 +1316,7 @@ void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
}
// We assume that we cannot do unaligned loads on MIPS, so this function
// must only be used to load a single character at a time.
- DCHECK(characters == 1);
+ DCHECK_EQ(1, characters);
__ Daddu(t1, end_of_input_address(), Operand(offset));
if (mode_ == LATIN1) {
__ Lbu(current_character(), MemOperand(t1, 0));
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index a1425b4372..f8f5a0d2a3 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -6,6 +6,7 @@
#include "src/regexp/ppc/regexp-macro-assembler-ppc.h"
+#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/code-stubs.h"
#include "src/log.h"
@@ -149,8 +150,8 @@ void RegExpMacroAssemblerPPC::AdvanceCurrentPosition(int by) {
void RegExpMacroAssemblerPPC::AdvanceRegister(int reg, int by) {
- DCHECK(reg >= 0);
- DCHECK(reg < num_registers_);
+ DCHECK_LE(0, reg);
+ DCHECK_GT(num_registers_, reg);
if (by != 0) {
__ LoadP(r3, register_location(reg), r0);
__ mov(r0, Operand(by));
@@ -466,7 +467,7 @@ void RegExpMacroAssemblerPPC::CheckNotCharacterAfterAnd(unsigned c,
void RegExpMacroAssemblerPPC::CheckNotCharacterAfterMinusAnd(
uc16 c, uc16 minus, uc16 mask, Label* on_not_equal) {
- DCHECK(minus < String::kMaxUtf16CodeUnit);
+ DCHECK_GT(String::kMaxUtf16CodeUnit, minus);
__ subi(r3, current_character(), Operand(minus));
__ mov(r0, Operand(mask));
__ and_(r3, r3, r0);
@@ -933,8 +934,8 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_->GetCode(isolate(), &code_desc);
- Handle<Code> code = isolate()->factory()->NewCode(
- code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
+ Handle<Code> code = isolate()->factory()->NewCode(code_desc, Code::REGEXP,
+ masm_->CodeObject());
PROFILE(masm_->isolate(),
RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
return Handle<HeapObject>::cast(code);
@@ -1231,13 +1232,13 @@ void RegExpMacroAssemblerPPC::SafeCallTarget(Label* name) {
void RegExpMacroAssemblerPPC::Push(Register source) {
- DCHECK(!source.is(backtrack_stackpointer()));
+ DCHECK(source != backtrack_stackpointer());
__ StorePU(source, MemOperand(backtrack_stackpointer(), -kPointerSize));
}
void RegExpMacroAssemblerPPC::Pop(Register target) {
- DCHECK(!target.is(backtrack_stackpointer()));
+ DCHECK(target != backtrack_stackpointer());
__ LoadP(target, MemOperand(backtrack_stackpointer()));
__ addi(backtrack_stackpointer(), backtrack_stackpointer(),
Operand(kPointerSize));
@@ -1286,7 +1287,7 @@ void RegExpMacroAssemblerPPC::LoadCurrentCharacterUnchecked(int cp_offset,
} else if (characters == 2) {
__ lhz(current_character(), MemOperand(current_character()));
} else {
- DCHECK(characters == 1);
+ DCHECK_EQ(1, characters);
__ lbz(current_character(), MemOperand(current_character()));
}
} else {
@@ -1294,7 +1295,7 @@ void RegExpMacroAssemblerPPC::LoadCurrentCharacterUnchecked(int cp_offset,
if (characters == 2) {
__ lwz(current_character(), MemOperand(current_character()));
} else {
- DCHECK(characters == 1);
+ DCHECK_EQ(1, characters);
__ lhz(current_character(), MemOperand(current_character()));
}
}
@@ -1305,7 +1306,7 @@ void RegExpMacroAssemblerPPC::LoadCurrentCharacterUnchecked(int cp_offset,
} else if (characters == 2) {
__ lhbrx(current_character(), MemOperand(r0, current_character()));
} else {
- DCHECK(characters == 1);
+ DCHECK_EQ(1, characters);
__ lbz(current_character(), MemOperand(current_character()));
}
} else {
@@ -1314,7 +1315,7 @@ void RegExpMacroAssemblerPPC::LoadCurrentCharacterUnchecked(int cp_offset,
__ lwz(current_character(), MemOperand(current_character()));
__ rlwinm(current_character(), current_character(), 16, 0, 31);
} else {
- DCHECK(characters == 1);
+ DCHECK_EQ(1, characters);
__ lhz(current_character(), MemOperand(current_character()));
}
}
diff --git a/deps/v8/src/regexp/regexp-ast.cc b/deps/v8/src/regexp/regexp-ast.cc
index 85babb1f74..7755593fbf 100644
--- a/deps/v8/src/regexp/regexp-ast.cc
+++ b/deps/v8/src/regexp/regexp-ast.cc
@@ -303,7 +303,7 @@ std::ostream& RegExpTree::Print(std::ostream& os, Zone* zone) { // NOLINT
RegExpDisjunction::RegExpDisjunction(ZoneList<RegExpTree*>* alternatives)
: alternatives_(alternatives) {
- DCHECK(alternatives->length() > 1);
+ DCHECK_LT(1, alternatives->length());
RegExpTree* first_alternative = alternatives->at(0);
min_match_ = first_alternative->min_match();
max_match_ = first_alternative->max_match();
@@ -326,7 +326,7 @@ static int IncreaseBy(int previous, int increase) {
RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
: nodes_(nodes) {
- DCHECK(nodes->length() > 1);
+ DCHECK_LT(1, nodes->length());
min_match_ = 0;
max_match_ = 0;
for (int i = 0; i < nodes->length(); i++) {
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc
index 3316c33229..749393b782 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc
@@ -69,8 +69,8 @@ void RegExpMacroAssemblerIrregexp::EmitOrLink(Label* l) {
void RegExpMacroAssemblerIrregexp::PopRegister(int register_index) {
- DCHECK(register_index >= 0);
- DCHECK(register_index <= kMaxRegister);
+ DCHECK_LE(0, register_index);
+ DCHECK_GE(kMaxRegister, register_index);
Emit(BC_POP_REGISTER, register_index);
}
@@ -78,16 +78,16 @@ void RegExpMacroAssemblerIrregexp::PopRegister(int register_index) {
void RegExpMacroAssemblerIrregexp::PushRegister(
int register_index,
StackCheckFlag check_stack_limit) {
- DCHECK(register_index >= 0);
- DCHECK(register_index <= kMaxRegister);
+ DCHECK_LE(0, register_index);
+ DCHECK_GE(kMaxRegister, register_index);
Emit(BC_PUSH_REGISTER, register_index);
}
void RegExpMacroAssemblerIrregexp::WriteCurrentPositionToRegister(
int register_index, int cp_offset) {
- DCHECK(register_index >= 0);
- DCHECK(register_index <= kMaxRegister);
+ DCHECK_LE(0, register_index);
+ DCHECK_GE(kMaxRegister, register_index);
Emit(BC_SET_REGISTER_TO_CP, register_index);
Emit32(cp_offset); // Current position offset.
}
@@ -103,24 +103,24 @@ void RegExpMacroAssemblerIrregexp::ClearRegisters(int reg_from, int reg_to) {
void RegExpMacroAssemblerIrregexp::ReadCurrentPositionFromRegister(
int register_index) {
- DCHECK(register_index >= 0);
- DCHECK(register_index <= kMaxRegister);
+ DCHECK_LE(0, register_index);
+ DCHECK_GE(kMaxRegister, register_index);
Emit(BC_SET_CP_TO_REGISTER, register_index);
}
void RegExpMacroAssemblerIrregexp::WriteStackPointerToRegister(
int register_index) {
- DCHECK(register_index >= 0);
- DCHECK(register_index <= kMaxRegister);
+ DCHECK_LE(0, register_index);
+ DCHECK_GE(kMaxRegister, register_index);
Emit(BC_SET_REGISTER_TO_SP, register_index);
}
void RegExpMacroAssemblerIrregexp::ReadStackPointerFromRegister(
int register_index) {
- DCHECK(register_index >= 0);
- DCHECK(register_index <= kMaxRegister);
+ DCHECK_LE(0, register_index);
+ DCHECK_GE(kMaxRegister, register_index);
Emit(BC_SET_SP_TO_REGISTER, register_index);
}
@@ -132,16 +132,16 @@ void RegExpMacroAssemblerIrregexp::SetCurrentPositionFromEnd(int by) {
void RegExpMacroAssemblerIrregexp::SetRegister(int register_index, int to) {
- DCHECK(register_index >= 0);
- DCHECK(register_index <= kMaxRegister);
+ DCHECK_LE(0, register_index);
+ DCHECK_GE(kMaxRegister, register_index);
Emit(BC_SET_REGISTER, register_index);
Emit32(to);
}
void RegExpMacroAssemblerIrregexp::AdvanceRegister(int register_index, int by) {
- DCHECK(register_index >= 0);
- DCHECK(register_index <= kMaxRegister);
+ DCHECK_LE(0, register_index);
+ DCHECK_GE(kMaxRegister, register_index);
Emit(BC_ADVANCE_REGISTER, register_index);
Emit32(by);
}
@@ -195,8 +195,8 @@ void RegExpMacroAssemblerIrregexp::Fail() {
void RegExpMacroAssemblerIrregexp::AdvanceCurrentPosition(int by) {
- DCHECK(by >= kMinCPOffset);
- DCHECK(by <= kMaxCPOffset);
+ DCHECK_LE(kMinCPOffset, by);
+ DCHECK_GE(kMaxCPOffset, by);
advance_current_start_ = pc_;
advance_current_offset_ = by;
Emit(BC_ADVANCE_CP, by);
@@ -215,8 +215,8 @@ void RegExpMacroAssemblerIrregexp::LoadCurrentCharacter(int cp_offset,
Label* on_failure,
bool check_bounds,
int characters) {
- DCHECK(cp_offset >= kMinCPOffset);
- DCHECK(cp_offset <= kMaxCPOffset);
+ DCHECK_LE(kMinCPOffset, cp_offset);
+ DCHECK_GE(kMaxCPOffset, cp_offset);
int bytecode;
if (check_bounds) {
if (characters == 4) {
@@ -224,7 +224,7 @@ void RegExpMacroAssemblerIrregexp::LoadCurrentCharacter(int cp_offset,
} else if (characters == 2) {
bytecode = BC_LOAD_2_CURRENT_CHARS;
} else {
- DCHECK(characters == 1);
+ DCHECK_EQ(1, characters);
bytecode = BC_LOAD_CURRENT_CHAR;
}
} else {
@@ -233,7 +233,7 @@ void RegExpMacroAssemblerIrregexp::LoadCurrentCharacter(int cp_offset,
} else if (characters == 2) {
bytecode = BC_LOAD_2_CURRENT_CHARS_UNCHECKED;
} else {
- DCHECK(characters == 1);
+ DCHECK_EQ(1, characters);
bytecode = BC_LOAD_CURRENT_CHAR_UNCHECKED;
}
}
@@ -373,8 +373,8 @@ void RegExpMacroAssemblerIrregexp::CheckBitInTable(
void RegExpMacroAssemblerIrregexp::CheckNotBackReference(int start_reg,
bool read_backward,
Label* on_not_equal) {
- DCHECK(start_reg >= 0);
- DCHECK(start_reg <= kMaxRegister);
+ DCHECK_LE(0, start_reg);
+ DCHECK_GE(kMaxRegister, start_reg);
Emit(read_backward ? BC_CHECK_NOT_BACK_REF_BACKWARD : BC_CHECK_NOT_BACK_REF,
start_reg);
EmitOrLink(on_not_equal);
@@ -383,8 +383,8 @@ void RegExpMacroAssemblerIrregexp::CheckNotBackReference(int start_reg,
void RegExpMacroAssemblerIrregexp::CheckNotBackReferenceIgnoreCase(
int start_reg, bool read_backward, bool unicode, Label* on_not_equal) {
- DCHECK(start_reg >= 0);
- DCHECK(start_reg <= kMaxRegister);
+ DCHECK_LE(0, start_reg);
+ DCHECK_GE(kMaxRegister, start_reg);
Emit(read_backward ? (unicode ? BC_CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD
: BC_CHECK_NOT_BACK_REF_NO_CASE_BACKWARD)
: (unicode ? BC_CHECK_NOT_BACK_REF_NO_CASE_UNICODE
@@ -397,8 +397,8 @@ void RegExpMacroAssemblerIrregexp::CheckNotBackReferenceIgnoreCase(
void RegExpMacroAssemblerIrregexp::IfRegisterLT(int register_index,
int comparand,
Label* on_less_than) {
- DCHECK(register_index >= 0);
- DCHECK(register_index <= kMaxRegister);
+ DCHECK_LE(0, register_index);
+ DCHECK_GE(kMaxRegister, register_index);
Emit(BC_CHECK_REGISTER_LT, register_index);
Emit32(comparand);
EmitOrLink(on_less_than);
@@ -408,8 +408,8 @@ void RegExpMacroAssemblerIrregexp::IfRegisterLT(int register_index,
void RegExpMacroAssemblerIrregexp::IfRegisterGE(int register_index,
int comparand,
Label* on_greater_or_equal) {
- DCHECK(register_index >= 0);
- DCHECK(register_index <= kMaxRegister);
+ DCHECK_LE(0, register_index);
+ DCHECK_GE(kMaxRegister, register_index);
Emit(BC_CHECK_REGISTER_GE, register_index);
Emit32(comparand);
EmitOrLink(on_greater_or_equal);
@@ -418,8 +418,8 @@ void RegExpMacroAssemblerIrregexp::IfRegisterGE(int register_index,
void RegExpMacroAssemblerIrregexp::IfRegisterEqPos(int register_index,
Label* on_eq) {
- DCHECK(register_index >= 0);
- DCHECK(register_index <= kMaxRegister);
+ DCHECK_LE(0, register_index);
+ DCHECK_GE(kMaxRegister, register_index);
Emit(BC_CHECK_REGISTER_EQ_POS, register_index);
EmitOrLink(on_eq);
}
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index 681acc1325..cb2199bf94 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -8,6 +8,7 @@
#include "src/isolate-inl.h"
#include "src/regexp/regexp-stack.h"
#include "src/simulator.h"
+#include "src/unicode-inl.h"
#ifdef V8_INTL_SUPPORT
#include "unicode/uchar.h"
@@ -36,7 +37,7 @@ int RegExpMacroAssembler::CaseInsensitiveCompareUC16(Address byte_offset1,
// This function is not allowed to cause a garbage collection.
// A GC might move the calling generated code and invalidate the
// return address on the stack.
- DCHECK(byte_length % 2 == 0);
+ DCHECK_EQ(0, byte_length % 2);
uc16* substring1 = reinterpret_cast<uc16*>(byte_offset1);
uc16* substring2 = reinterpret_cast<uc16*>(byte_offset2);
size_t length = byte_length >> 1;
@@ -137,8 +138,8 @@ const byte* NativeRegExpMacroAssembler::StringCharacterPosition(
if (subject->IsThinString()) {
subject = ThinString::cast(subject)->actual();
}
- DCHECK(start_index >= 0);
- DCHECK(start_index <= subject->length());
+ DCHECK_LE(0, start_index);
+ DCHECK_LE(start_index, subject->length());
if (subject->IsSeqOneByteString()) {
return reinterpret_cast<const byte*>(
SeqOneByteString::cast(subject)->GetChars() + start_index);
@@ -223,8 +224,8 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
Isolate* isolate) {
DCHECK(subject->IsFlat());
- DCHECK(previous_index >= 0);
- DCHECK(previous_index <= subject->length());
+ DCHECK_LE(0, previous_index);
+ DCHECK_LE(previous_index, subject->length());
// No allocations before calling the regexp, but we can't use
// DisallowHeapAllocation, since regexps might be preempted, and another
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index 6954b1f828..a7da50de20 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -4,6 +4,8 @@
#include "src/regexp/regexp-parser.h"
+#include <vector>
+
#include "src/char-predicates-inl.h"
#include "src/factory.h"
#include "src/isolate.h"
@@ -830,7 +832,7 @@ bool RegExpParser::CreateNamedCaptureAtIndex(const ZoneVector<uc16>* name,
}
RegExpCapture* capture = GetCapture(index);
- DCHECK(capture->name() == nullptr);
+ DCHECK_NULL(capture->name());
capture->set_name(name);
named_captures_->Add(capture, zone());
@@ -1276,30 +1278,30 @@ bool RegExpParser::ParsePropertyClass(ZoneList<CharacterRange>* result,
// and 'value' is interpreted as one of the available property value names.
// - Aliases in PropertyAlias.txt and PropertyValueAlias.txt can be used.
// - Loose matching is not applied.
- List<char> first_part;
- List<char> second_part;
+ std::vector<char> first_part;
+ std::vector<char> second_part;
if (current() == '{') {
// Parse \p{[PropertyName=]PropertyNameValue}
for (Advance(); current() != '}' && current() != '='; Advance()) {
if (!has_next()) return false;
- first_part.Add(static_cast<char>(current()));
+ first_part.push_back(static_cast<char>(current()));
}
if (current() == '=') {
for (Advance(); current() != '}'; Advance()) {
if (!has_next()) return false;
- second_part.Add(static_cast<char>(current()));
+ second_part.push_back(static_cast<char>(current()));
}
- second_part.Add(0); // null-terminate string.
+ second_part.push_back(0); // null-terminate string.
}
} else {
return false;
}
Advance();
- first_part.Add(0); // null-terminate string.
+ first_part.push_back(0); // null-terminate string.
- if (second_part.is_empty()) {
+ if (second_part.empty()) {
// First attempt to interpret as general category property value name.
- const char* name = first_part.ToConstVector().start();
+ const char* name = first_part.data();
if (LookupPropertyValueName(UCHAR_GENERAL_CATEGORY_MASK, name, negate,
result, zone())) {
return true;
@@ -1317,8 +1319,8 @@ bool RegExpParser::ParsePropertyClass(ZoneList<CharacterRange>* result,
} else {
// Both property name and value name are specified. Attempt to interpret
// the property name as enumerated property.
- const char* property_name = first_part.ToConstVector().start();
- const char* value_name = second_part.ToConstVector().start();
+ const char* property_name = first_part.data();
+ const char* value_name = second_part.data();
UProperty property = u_getPropertyEnum(property_name);
if (!IsExactPropertyAlias(property_name, property)) return false;
if (property == UCHAR_GENERAL_CATEGORY) {
@@ -1362,7 +1364,7 @@ bool RegExpParser::ParseUnlimitedLengthHexNumber(int max_value, uc32* value) {
uc32 RegExpParser::ParseClassCharacterEscape() {
- DCHECK(current() == '\\');
+ DCHECK_EQ('\\', current());
DCHECK(has_next() && !IsSpecialClassEscape(Next()));
Advance();
switch (current()) {
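ParsePropertyClass above now collects property names into std::vector<char> and hands out C strings via data(); the explicit push_back(0) is what makes data() a valid null-terminated string. A minimal sketch of that pattern:

#include <cstdio>
#include <string>
#include <vector>

// Accumulate characters, null-terminate, then use data() as a C string.
int main() {
  std::string input = "General_Category";
  std::vector<char> buf(input.begin(), input.end());
  buf.push_back('\0');  // without this, data() is not a C string
  std::printf("%s\n", buf.data());
}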
diff --git a/deps/v8/src/regexp/regexp-stack.h b/deps/v8/src/regexp/regexp-stack.h
index aea46cf673..03df6cd15e 100644
--- a/deps/v8/src/regexp/regexp-stack.h
+++ b/deps/v8/src/regexp/regexp-stack.h
@@ -45,7 +45,7 @@ class RegExpStack {
// Gives the top of the memory used as stack.
Address stack_base() {
- DCHECK(thread_local_.memory_size_ != 0);
+ DCHECK_NE(0, thread_local_.memory_size_);
return thread_local_.memory_ + thread_local_.memory_size_;
}
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
index f31b217acf..8449c631d3 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -6,6 +6,7 @@
#if V8_TARGET_ARCH_S390
+#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/code-stubs.h"
#include "src/log.h"
@@ -143,8 +144,8 @@ void RegExpMacroAssemblerS390::AdvanceCurrentPosition(int by) {
}
void RegExpMacroAssemblerS390::AdvanceRegister(int reg, int by) {
- DCHECK(reg >= 0);
- DCHECK(reg < num_registers_);
+ DCHECK_LE(0, reg);
+ DCHECK_GT(num_registers_, reg);
if (by != 0) {
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_int8(by)) {
__ AddMI(register_location(reg), Operand(by));
@@ -443,7 +444,7 @@ void RegExpMacroAssemblerS390::CheckNotCharacterAfterAnd(unsigned c,
void RegExpMacroAssemblerS390::CheckNotCharacterAfterMinusAnd(
uc16 c, uc16 minus, uc16 mask, Label* on_not_equal) {
- DCHECK(minus < String::kMaxUtf16CodeUnit);
+ DCHECK_GT(String::kMaxUtf16CodeUnit, minus);
__ lay(r2, MemOperand(current_character(), -minus));
__ And(r2, Operand(mask));
if (c != 0) {
@@ -929,8 +930,8 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_->GetCode(isolate(), &code_desc);
- Handle<Code> code = isolate()->factory()->NewCode(
- code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
+ Handle<Code> code = isolate()->factory()->NewCode(code_desc, Code::REGEXP,
+ masm_->CodeObject());
PROFILE(masm_->isolate(),
RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
return Handle<HeapObject>::cast(code);
@@ -1085,7 +1086,7 @@ void RegExpMacroAssemblerS390::CallCheckStackGuardState(Register scratch) {
// Helper function for reading a value out of a stack frame.
template <typename T>
static T& frame_entry(Address re_frame, int frame_offset) {
- DCHECK(sizeof(T) == kPointerSize);
+ DCHECK_EQ(kPointerSize, sizeof(T));
#ifdef V8_TARGET_ARCH_S390X
return reinterpret_cast<T&>(Memory::uint64_at(re_frame + frame_offset));
#else
@@ -1174,14 +1175,14 @@ void RegExpMacroAssemblerS390::SafeCallTarget(Label* name) {
}
void RegExpMacroAssemblerS390::Push(Register source) {
- DCHECK(!source.is(backtrack_stackpointer()));
+ DCHECK(source != backtrack_stackpointer());
__ lay(backtrack_stackpointer(),
MemOperand(backtrack_stackpointer(), -kPointerSize));
__ StoreP(source, MemOperand(backtrack_stackpointer()));
}
void RegExpMacroAssemblerS390::Pop(Register target) {
- DCHECK(!target.is(backtrack_stackpointer()));
+ DCHECK(target != backtrack_stackpointer());
__ LoadP(target, MemOperand(backtrack_stackpointer()));
__ la(backtrack_stackpointer(),
MemOperand(backtrack_stackpointer(), kPointerSize));
@@ -1207,7 +1208,7 @@ void RegExpMacroAssemblerS390::CheckStackLimit() {
void RegExpMacroAssemblerS390::CallCFunctionUsingStub(
ExternalReference function, int num_arguments) {
// Must pass all arguments in registers. The stub pushes on the stack.
- DCHECK(num_arguments <= 8);
+ DCHECK_GE(8, num_arguments);
__ mov(code_pointer(), Operand(function));
Label ret;
__ larl(r14, &ret);
@@ -1248,7 +1249,7 @@ void RegExpMacroAssemblerS390::LoadCurrentCharacterUnchecked(int cp_offset,
cp_offset * char_size()));
#endif
} else {
- DCHECK(characters == 1);
+ DCHECK_EQ(1, characters);
__ LoadlB(current_character(),
MemOperand(current_input_offset(), end_of_input_address(),
cp_offset * char_size()));
@@ -1264,7 +1265,7 @@ void RegExpMacroAssemblerS390::LoadCurrentCharacterUnchecked(int cp_offset,
__ rll(current_character(), current_character(), Operand(16));
#endif
} else {
- DCHECK(characters == 1);
+ DCHECK_EQ(1, characters);
__ LoadLogicalHalfWordP(
current_character(),
MemOperand(current_input_offset(), end_of_input_address(),
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index 9b0352d863..5c03f65e3d 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -139,8 +139,8 @@ void RegExpMacroAssemblerX64::AdvanceCurrentPosition(int by) {
void RegExpMacroAssemblerX64::AdvanceRegister(int reg, int by) {
- DCHECK(reg >= 0);
- DCHECK(reg < num_registers_);
+ DCHECK_LE(0, reg);
+ DCHECK_GT(num_registers_, reg);
if (by != 0) {
__ addp(register_location(reg), Immediate(by));
}
@@ -312,8 +312,8 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// size_t byte_length - length of capture in bytes(!)
// Isolate* isolate or 0 if unicode flag.
#ifdef _WIN64
- DCHECK(rcx.is(arg_reg_1));
- DCHECK(rdx.is(arg_reg_2));
+ DCHECK(rcx == arg_reg_1);
+ DCHECK(rdx == arg_reg_2);
// Compute and set byte_offset1 (start of capture).
__ leap(rcx, Operand(rsi, rdx, times_1, 0));
// Set byte_offset2.
@@ -322,8 +322,8 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
__ subq(rdx, rbx);
}
#else // AMD64 calling convention
- DCHECK(rdi.is(arg_reg_1));
- DCHECK(rsi.is(arg_reg_2));
+ DCHECK(rdi == arg_reg_1);
+ DCHECK(rsi == arg_reg_2);
// Compute byte_offset2 (current position = rsi+rdi).
__ leap(rax, Operand(rsi, rdi, times_1, 0));
// Compute and set byte_offset1 (start of capture).
@@ -493,7 +493,7 @@ void RegExpMacroAssemblerX64::CheckNotCharacterAfterMinusAnd(
uc16 minus,
uc16 mask,
Label* on_not_equal) {
- DCHECK(minus < String::kMaxUtf16CodeUnit);
+ DCHECK_GT(String::kMaxUtf16CodeUnit, minus);
__ leap(rax, Operand(current_character(), -minus));
__ andp(rax, Immediate(mask));
__ cmpl(rax, Immediate(c));
@@ -1007,9 +1007,8 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
CodeDesc code_desc;
Isolate* isolate = this->isolate();
masm_.GetCode(isolate, &code_desc);
- Handle<Code> code = isolate->factory()->NewCode(
- code_desc, Code::ComputeFlags(Code::REGEXP),
- masm_.CodeObject());
+ Handle<Code> code =
+ isolate->factory()->NewCode(code_desc, Code::REGEXP, masm_.CodeObject());
PROFILE(isolate, RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
return Handle<HeapObject>::cast(code);
}
@@ -1290,7 +1289,7 @@ void RegExpMacroAssemblerX64::SafeReturn() {
void RegExpMacroAssemblerX64::Push(Register source) {
- DCHECK(!source.is(backtrack_stackpointer()));
+ DCHECK(source != backtrack_stackpointer());
// Notice: This updates flags, unlike normal Push.
__ subp(backtrack_stackpointer(), Immediate(kIntSize));
__ movl(Operand(backtrack_stackpointer(), 0), source);
@@ -1330,7 +1329,7 @@ void RegExpMacroAssemblerX64::Push(Label* backtrack_target) {
void RegExpMacroAssemblerX64::Pop(Register target) {
- DCHECK(!target.is(backtrack_stackpointer()));
+ DCHECK(target != backtrack_stackpointer());
__ movsxlq(target, Operand(backtrack_stackpointer(), 0));
// Notice: This updates flags, unlike normal Pop.
__ addp(backtrack_stackpointer(), Immediate(kIntSize));
@@ -1379,7 +1378,7 @@ void RegExpMacroAssemblerX64::LoadCurrentCharacterUnchecked(int cp_offset,
} else if (characters == 2) {
__ movzxwl(current_character(), Operand(rsi, rdi, times_1, cp_offset));
} else {
- DCHECK(characters == 1);
+ DCHECK_EQ(1, characters);
__ movzxbl(current_character(), Operand(rsi, rdi, times_1, cp_offset));
}
} else {
@@ -1388,7 +1387,7 @@ void RegExpMacroAssemblerX64::LoadCurrentCharacterUnchecked(int cp_offset,
__ movl(current_character(),
Operand(rsi, rdi, times_1, cp_offset * sizeof(uc16)));
} else {
- DCHECK(characters == 1);
+ DCHECK_EQ(1, characters);
__ movzxwl(current_character(),
Operand(rsi, rdi, times_1, cp_offset * sizeof(uc16)));
}
diff --git a/deps/v8/src/register-configuration.cc b/deps/v8/src/register-configuration.cc
index c265704071..9c8869b1c2 100644
--- a/deps/v8/src/register-configuration.cc
+++ b/deps/v8/src/register-configuration.cc
@@ -18,11 +18,11 @@ static const int kMaxAllocatableDoubleRegisterCount =
ALLOCATABLE_DOUBLE_REGISTERS(REGISTER_COUNT)0;
static const int kAllocatableGeneralCodes[] = {
-#define REGISTER_CODE(R) Register::kCode_##R,
+#define REGISTER_CODE(R) kRegCode_##R,
ALLOCATABLE_GENERAL_REGISTERS(REGISTER_CODE)};
#undef REGISTER_CODE
-#define REGISTER_CODE(R) DoubleRegister::kCode_##R,
+#define REGISTER_CODE(R) kDoubleCode_##R,
static const int kAllocatableDoubleCodes[] = {
ALLOCATABLE_DOUBLE_REGISTERS(REGISTER_CODE)};
#if V8_TARGET_ARCH_ARM
@@ -58,11 +58,11 @@ static const char* const kSimd128RegisterNames[] = {
STATIC_ASSERT(RegisterConfiguration::kMaxGeneralRegisters >=
Register::kNumRegisters);
STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
- FloatRegister::kMaxNumRegisters);
+ FloatRegister::kNumRegisters);
STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
- DoubleRegister::kMaxNumRegisters);
+ DoubleRegister::kNumRegisters);
STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
- Simd128Register::kMaxNumRegisters);
+ Simd128Register::kNumRegisters);
static int get_num_allocatable_general_registers() {
return
@@ -126,7 +126,7 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
public:
ArchDefaultRegisterConfiguration()
: RegisterConfiguration(
- Register::kNumRegisters, DoubleRegister::kMaxNumRegisters,
+ Register::kNumRegisters, DoubleRegister::kNumRegisters,
get_num_allocatable_general_registers(),
get_num_allocatable_double_registers(), kAllocatableGeneralCodes,
get_allocatable_double_codes(),
@@ -155,7 +155,7 @@ class RestrictedRegisterConfiguration : public RegisterConfiguration {
std::unique_ptr<int[]> allocatable_general_register_codes,
std::unique_ptr<char const* []> allocatable_general_register_names)
: RegisterConfiguration(
- Register::kNumRegisters, DoubleRegister::kMaxNumRegisters,
+ Register::kNumRegisters, DoubleRegister::kNumRegisters,
num_allocatable_general_registers,
get_num_allocatable_double_registers(),
allocatable_general_register_codes.get(),
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index b125f67778..815153a98a 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -21,46 +21,27 @@ namespace internal {
// Number of times a function has to be seen on the stack before it is
// optimized.
static const int kProfilerTicksBeforeOptimization = 2;
-// If the function optimization was disabled due to high deoptimization count,
-// but the function is hot and has been seen on the stack this number of times,
-// then we try to reenable optimization for this function.
-static const int kProfilerTicksBeforeReenablingOptimization = 250;
-// If a function does not have enough type info (according to
-// FLAG_type_info_threshold), but has seen a huge number of ticks,
-// optimize it as it is.
-static const int kTicksWhenNotEnoughTypeInfo = 100;
-// We only have one byte to store the number of ticks.
-STATIC_ASSERT(kProfilerTicksBeforeOptimization < 256);
-STATIC_ASSERT(kProfilerTicksBeforeReenablingOptimization < 256);
-STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256);
// The number of ticks required for optimizing a function increases with
// the size of the bytecode. This is in addition to the
// kProfilerTicksBeforeOptimization required for any function.
-static const int kCodeSizeAllowancePerTick =
- 50 * interpreter::Interpreter::kCodeSizeMultiplier;
+static const int kBytecodeSizeAllowancePerTick = 1200;
// Maximum size in bytes of generate code for a function to allow OSR.
-static const int kOSRCodeSizeAllowanceBase =
- 10 * interpreter::Interpreter::kCodeSizeMultiplier;
+static const int kOSRBytecodeSizeAllowanceBase = 180;
-static const int kOSRCodeSizeAllowancePerTick =
- 2 * interpreter::Interpreter::kCodeSizeMultiplier;
+static const int kOSRBytecodeSizeAllowancePerTick = 48;
// Maximum size in bytes of generated code for a function to be optimized
// the very first time it is seen on the stack.
-static const int kMaxSizeEarlyOpt =
- 5 * interpreter::Interpreter::kCodeSizeMultiplier;
+static const int kMaxBytecodeSizeForEarlyOpt = 90;
// Certain functions are simply too big to be worth optimizing.
-// We aren't using the code size multiplier here because there is no
-// "kMaxSizeOpt" with which we would need to normalize.
-static const int kMaxSizeOpt = 60 * KB;
+static const int kMaxBytecodeSizeForOpt = 60 * KB;
#define OPTIMIZATION_REASON_LIST(V) \
V(DoNotOptimize, "do not optimize") \
V(HotAndStable, "hot and stable") \
- V(HotWithoutMuchTypeInfo, "not much type info but very hot") \
V(SmallFunction, "small function")
enum class OptimizationReason : uint8_t {
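The renamed constants above also change the tiering arithmetic from multiplier-scaled code size to raw bytecode length. With the patch's values, the tick threshold works out as in this sketch (constants copied from the hunk; the function name is illustrative):

#include <cstdio>

static const int kProfilerTicksBeforeOptimization = 2;
static const int kBytecodeSizeAllowancePerTick = 1200;

// Larger functions must be seen on the stack more often before they
// are optimized: e.g. 6000 bytes of bytecode -> 2 + 6000/1200 = 7 ticks.
int TicksForOptimization(int bytecode_length) {
  return kProfilerTicksBeforeOptimization +
         bytecode_length / kBytecodeSizeAllowancePerTick;
}

int main() { std::printf("%d\n", TicksForOptimization(6000)); }  // 7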
@@ -92,29 +73,10 @@ RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
static void GetICCounts(JSFunction* function, int* ic_with_type_info_count,
int* ic_generic_count, int* ic_total_count,
int* type_info_percentage, int* generic_percentage) {
- *ic_total_count = 0;
- *ic_generic_count = 0;
- *ic_with_type_info_count = 0;
- if (function->code()->kind() == Code::FUNCTION) {
- Code* shared_code = function->shared()->code();
- Object* raw_info = shared_code->type_feedback_info();
- if (raw_info->IsTypeFeedbackInfo()) {
- TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
- *ic_with_type_info_count = info->ic_with_type_info_count();
- *ic_generic_count = info->ic_generic_count();
- *ic_total_count = info->ic_total_count();
- }
- }
-
- // Harvest vector-ics as well
+ // Harvest vector-ics.
FeedbackVector* vector = function->feedback_vector();
- int with = 0, gen = 0, type_vector_ic_count = 0;
- const bool is_interpreted = function->shared()->IsInterpreted();
-
- vector->ComputeCounts(&with, &gen, &type_vector_ic_count, is_interpreted);
- *ic_total_count += type_vector_ic_count;
- *ic_with_type_info_count += with;
- *ic_generic_count += gen;
+ vector->ComputeCounts(ic_with_type_info_count, ic_generic_count,
+ ic_total_count);
if (*ic_total_count > 0) {
*type_info_percentage = 100 * *ic_with_type_info_count / *ic_total_count;
@@ -225,9 +187,9 @@ bool RuntimeProfiler::MaybeOSR(JSFunction* function, JavaScriptFrame* frame) {
// Attempt OSR if we are still running interpreted code even though the
// function has long been marked or even already been optimized.
int64_t allowance =
- kOSRCodeSizeAllowanceBase +
- static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTick;
- if (shared->bytecode_array()->Size() <= allowance) {
+ kOSRBytecodeSizeAllowanceBase +
+ static_cast<int64_t>(ticks) * kOSRBytecodeSizeAllowancePerTick;
+ if (shared->bytecode_array()->length() <= allowance) {
AttemptOnStackReplacement(frame);
}
return true;
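Likewise for OSR: the allowance is now a straight linear function of profiler ticks compared against bytecode length. A sketch of the eligibility test using the patch's constants:

#include <cstdint>
#include <cstdio>

static const int kOSRBytecodeSizeAllowanceBase = 180;
static const int kOSRBytecodeSizeAllowancePerTick = 48;

// A function becomes OSR-eligible once its bytecode fits the allowance.
bool OsrEligible(int bytecode_length, int ticks) {
  std::int64_t allowance =
      kOSRBytecodeSizeAllowanceBase +
      static_cast<std::int64_t>(ticks) * kOSRBytecodeSizeAllowancePerTick;
  return bytecode_length <= allowance;
}

int main() {
  std::printf("%d\n", OsrEligible(400, 5));  // 400 <= 180 + 240 -> 1 (true)
}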
@@ -240,50 +202,19 @@ OptimizationReason RuntimeProfiler::ShouldOptimize(JSFunction* function,
SharedFunctionInfo* shared = function->shared();
int ticks = function->feedback_vector()->profiler_ticks();
- if (shared->bytecode_array()->Size() > kMaxSizeOpt) {
+ if (shared->bytecode_array()->length() > kMaxBytecodeSizeForOpt) {
return OptimizationReason::kDoNotOptimize;
}
int ticks_for_optimization =
kProfilerTicksBeforeOptimization +
- (shared->bytecode_array()->Size() / kCodeSizeAllowancePerTick);
+ (shared->bytecode_array()->length() / kBytecodeSizeAllowancePerTick);
if (ticks >= ticks_for_optimization) {
- int typeinfo, generic, total, type_percentage, generic_percentage;
- GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
- &generic_percentage);
- if (type_percentage >= FLAG_type_info_threshold) {
- // If this particular function hasn't had any ICs patched for enough
- // ticks, optimize it now.
- return OptimizationReason::kHotAndStable;
- } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
- return OptimizationReason::kHotWithoutMuchTypeInfo;
- } else {
- if (FLAG_trace_opt_verbose) {
- PrintF("[not yet optimizing ");
- function->PrintName();
- PrintF(", not enough type info: %d/%d (%d%%)]\n", typeinfo, total,
- type_percentage);
- }
- return OptimizationReason::kDoNotOptimize;
- }
+ return OptimizationReason::kHotAndStable;
} else if (!any_ic_changed_ &&
- shared->bytecode_array()->Size() < kMaxSizeEarlyOpt) {
+ shared->bytecode_array()->length() < kMaxBytecodeSizeForEarlyOpt) {
// If no IC was patched since the last tick and this function is very
// small, optimistically optimize it now.
- int typeinfo, generic, total, type_percentage, generic_percentage;
- GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
- &generic_percentage);
- if (type_percentage < FLAG_type_info_threshold) {
- if (FLAG_trace_opt_verbose) {
- PrintF("[not yet optimizing ");
- function->PrintName();
- PrintF(
- ", not enough type info for small function optimization: %d/%d "
- "(%d%%)]\n",
- typeinfo, total, type_percentage);
- }
- return OptimizationReason::kDoNotOptimize;
- }
return OptimizationReason::kSmallFunction;
} else if (FLAG_trace_opt_verbose) {
PrintF("[not yet optimizing ");
@@ -294,7 +225,7 @@ OptimizationReason RuntimeProfiler::ShouldOptimize(JSFunction* function,
PrintF("ICs changed]\n");
} else {
PrintF(" too large for small function optimization: %d/%d]\n",
- shared->bytecode_array()->Size(), kMaxSizeEarlyOpt);
+ shared->bytecode_array()->length(), kMaxBytecodeSizeForEarlyOpt);
}
}
return OptimizationReason::kDoNotOptimize;
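// With the type-info checks gone, the whole heuristic fits in a few lines.
// A sketch of the decision order implemented above, with the tracing elided:
OptimizationReason Decide(int length, int ticks, bool any_ic_changed) {
  if (length > kMaxBytecodeSizeForOpt)
    return OptimizationReason::kDoNotOptimize;
  int hot = kProfilerTicksBeforeOptimization +
            length / kBytecodeSizeAllowancePerTick;  // larger code waits longer
  if (ticks >= hot) return OptimizationReason::kHotAndStable;
  if (!any_ic_changed && length < kMaxBytecodeSizeForEarlyOpt)
    return OptimizationReason::kSmallFunction;  // small and quiet: go early
  return OptimizationReason::kDoNotOptimize;
}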
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index f11b47fb76..782acc72c5 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -354,15 +354,12 @@ RUNTIME_FUNCTION(Runtime_GetArrayKeys) {
ALL_PROPERTIES);
for (PrototypeIterator iter(isolate, array, kStartAtReceiver);
!iter.IsAtEnd(); iter.Advance()) {
- if (PrototypeIterator::GetCurrent(iter)->IsJSProxy() ||
- PrototypeIterator::GetCurrent<JSObject>(iter)
- ->HasIndexedInterceptor()) {
- // Bail out if we find a proxy or interceptor, likely not worth
- // collecting keys in that case.
+ Handle<JSReceiver> current(PrototypeIterator::GetCurrent<JSReceiver>(iter));
+ if (current->HasComplexElements()) {
return *isolate->factory()->NewNumberFromUint(length);
}
- Handle<JSObject> current = PrototypeIterator::GetCurrent<JSObject>(iter);
- accumulator.CollectOwnElementIndices(array, current);
+ accumulator.CollectOwnElementIndices(array,
+ Handle<JSObject>::cast(current));
}
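// HasComplexElements() subsumes the separate proxy and interceptor checks
// deleted above. Judging from the checks it replaces in the
// Runtime_HasComplexElements hunk further down, it covers roughly this
// predicate (an inferred sketch, not a quoted definition):
bool IsComplex(Handle<JSReceiver> receiver) {
  if (receiver->IsJSProxy()) return true;
  Handle<JSObject> object = Handle<JSObject>::cast(receiver);
  if (object->HasIndexedInterceptor()) return true;
  return object->HasDictionaryElements() &&
         object->element_dictionary()->HasComplexElements();
}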
// Erase any keys >= length.
Handle<FixedArray> keys =
@@ -463,13 +460,25 @@ RUNTIME_FUNCTION(Runtime_NewArray) {
ElementsKind old_kind = array->GetElementsKind();
RETURN_FAILURE_ON_EXCEPTION(isolate,
ArrayConstructInitializeElements(array, &argv));
- if (!site.is_null() &&
- (old_kind != array->GetElementsKind() || !can_use_type_feedback ||
- !can_inline_array_constructor)) {
- // The arguments passed in caused a transition. This kind of complexity
- // can't be dealt with in the inlined hydrogen array constructor case.
- // We must mark the allocationsite as un-inlinable.
- site->SetDoNotInlineCall();
+ if (!site.is_null()) {
+ if ((old_kind != array->GetElementsKind() || !can_use_type_feedback ||
+ !can_inline_array_constructor)) {
+ // The arguments passed in caused a transition. This kind of complexity
+ // can't be dealt with in the inlined hydrogen array constructor case.
+      // We must mark the AllocationSite as un-inlinable.
+ site->SetDoNotInlineCall();
+ }
+ } else {
+ if (old_kind != array->GetElementsKind() || !can_inline_array_constructor) {
+ // We don't have an AllocationSite for this Array constructor invocation,
+      // i.e. it might be a call from Array#map or from an Array subclass, so
+      // we just flip the bit on the global protector cell instead.
+ // just flip the bit on the global protector cell instead.
+ // TODO(bmeurer): Find a better way to mark this. Global protectors
+ // tend to back-fire over time...
+ if (isolate->IsArrayConstructorIntact()) {
+ isolate->InvalidateArrayConstructorProtector();
+ }
+ }
}
return *array;
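// The protector branch above follows the usual one-way idiom, so the cell
// flips at most once, and optimized code that took a dependency on it is
// deoptimized exactly when the flip happens (sketch of the guard):
if (isolate->IsArrayConstructorIntact()) {
  isolate->InvalidateArrayConstructorProtector();
}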
@@ -517,15 +526,7 @@ RUNTIME_FUNCTION(Runtime_HasComplexElements) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
for (PrototypeIterator iter(isolate, array, kStartAtReceiver);
!iter.IsAtEnd(); iter.Advance()) {
- if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
- return isolate->heap()->true_value();
- }
- Handle<JSObject> current = PrototypeIterator::GetCurrent<JSObject>(iter);
- if (current->HasIndexedInterceptor()) {
- return isolate->heap()->true_value();
- }
- if (!current->HasDictionaryElements()) continue;
- if (current->element_dictionary()->HasComplexElements()) {
+ if (PrototypeIterator::GetCurrent<JSReceiver>(iter)->HasComplexElements()) {
return isolate->heap()->true_value();
}
}
diff --git a/deps/v8/src/runtime/runtime-bigint.cc b/deps/v8/src/runtime/runtime-bigint.cc
new file mode 100644
index 0000000000..d6b7dfb550
--- /dev/null
+++ b/deps/v8/src/runtime/runtime-bigint.cc
@@ -0,0 +1,70 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/runtime/runtime-utils.h"
+
+#include "src/arguments.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
+#include "src/objects/bigint.h"
+#include "src/parsing/token.h"
+
+namespace v8 {
+namespace internal {
+
+RUNTIME_FUNCTION(Runtime_BigIntEqual) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
+ bool result = lhs->IsBigInt() && rhs->IsBigInt() &&
+ BigInt::Equal(BigInt::cast(*lhs), BigInt::cast(*rhs));
+ return *isolate->factory()->ToBoolean(result);
+}
+
+RUNTIME_FUNCTION(Runtime_BigIntToBoolean) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(BigInt, bigint, 0);
+ return *isolate->factory()->ToBoolean(bigint->ToBoolean());
+}
+
+RUNTIME_FUNCTION(Runtime_BigIntBinaryOp) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, left_obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, right_obj, 1);
+ CONVERT_SMI_ARG_CHECKED(opcode, 2);
+
+ if (!left_obj->IsBigInt() || !right_obj->IsBigInt()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kBigIntMixedTypes));
+ }
+ Handle<BigInt> left(Handle<BigInt>::cast(left_obj));
+ Handle<BigInt> right(Handle<BigInt>::cast(right_obj));
+ MaybeHandle<BigInt> result;
+ switch (opcode) {
+ case Token::ADD:
+ result = BigInt::Add(left, right);
+ break;
+ case Token::SUB:
+ result = BigInt::Subtract(left, right);
+ break;
+ case Token::MUL:
+ result = BigInt::Multiply(left, right);
+ break;
+ case Token::DIV:
+ result = BigInt::Divide(left, right);
+ break;
+ case Token::MOD:
+ result = BigInt::Remainder(left, right);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ RETURN_RESULT_OR_FAILURE(isolate, result);
+}
+
+} // namespace internal
+} // namespace v8
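// Extending the dispatch above is mechanical: one case per operator. A sketch
// of what an exponentiation case could look like (Token::EXP is a real token,
// but BigInt::Exponentiate is a hypothetical entry point used purely for
// illustration; this patch does not add it):
case Token::EXP:
  result = BigInt::Exponentiate(left, right);  // hypothetical helper
  break;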
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 4b57593227..1cc00f5b7e 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -119,104 +119,66 @@ RUNTIME_FUNCTION(Runtime_InstantiateAsmJs) {
function->shared()->set_is_asm_wasm_broken(true);
DCHECK(function->code() ==
isolate->builtins()->builtin(Builtins::kInstantiateAsmJs));
- function->ReplaceCode(isolate->builtins()->builtin(Builtins::kCompileLazy));
+ function->set_code(isolate->builtins()->builtin(Builtins::kCompileLazy));
if (function->shared()->code() ==
isolate->builtins()->builtin(Builtins::kInstantiateAsmJs)) {
- function->shared()->ReplaceCode(
+ function->shared()->set_code(
isolate->builtins()->builtin(Builtins::kCompileLazy));
}
return Smi::kZero;
}
-RUNTIME_FUNCTION(Runtime_NotifyStubFailure) {
- HandleScope scope(isolate);
- DCHECK_EQ(0, args.length());
- Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
- DCHECK(AllowHeapAllocation::IsAllowed());
- delete deoptimizer;
- return isolate->heap()->undefined_value();
-}
-
-class ActivationsFinder : public ThreadVisitor {
- public:
- Code* code_;
- bool has_code_activations_;
-
- explicit ActivationsFinder(Code* code)
- : code_(code), has_code_activations_(false) {}
-
- void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
- JavaScriptFrameIterator it(isolate, top);
- VisitFrames(&it);
- }
-
- void VisitFrames(JavaScriptFrameIterator* it) {
- for (; !it->done(); it->Advance()) {
- JavaScriptFrame* frame = it->frame();
- if (code_->contains(frame->pc())) has_code_activations_ = true;
- }
- }
-};
-
+namespace {
-RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_SMI_ARG_CHECKED(type_arg, 0);
- Deoptimizer::BailoutType type =
- static_cast<Deoptimizer::BailoutType>(type_arg);
- Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
+void MaterializeHeapObjectsAndDeleteDeoptimizer(Isolate* isolate,
+ Deoptimizer* deoptimizer) {
DCHECK(AllowHeapAllocation::IsAllowed());
- TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
- TRACE_EVENT0("v8", "V8.DeoptimizeCode");
-
- Handle<JSFunction> function = deoptimizer->function();
- Handle<Code> optimized_code = deoptimizer->compiled_code();
-
- DCHECK(optimized_code->kind() == Code::OPTIMIZED_FUNCTION);
- DCHECK(optimized_code->is_turbofanned());
- DCHECK(type == deoptimizer->bailout_type());
DCHECK_NULL(isolate->context());
-
// TODO(turbofan): We currently need the native context to materialize
// the arguments object, but only to get to its map.
- isolate->set_context(function->native_context());
+ isolate->set_context(deoptimizer->function()->native_context());
// Make sure to materialize objects before causing any allocation.
- JavaScriptFrameIterator it(isolate);
- deoptimizer->MaterializeHeapObjects(&it);
+ deoptimizer->MaterializeHeapObjects();
delete deoptimizer;
// Ensure the context register is updated for materialized objects.
JavaScriptFrameIterator top_it(isolate);
JavaScriptFrame* top_frame = top_it.frame();
isolate->set_context(Context::cast(top_frame->context()));
+}
- if (type == Deoptimizer::LAZY) {
- return isolate->heap()->undefined_value();
- }
+} // namespace
- // Search for other activations of the same optimized code.
- // At this point {it} is at the topmost frame of all the frames materialized
- // by the deoptimizer. Note that this frame does not necessarily represent
- // an activation of {function} because of potential inlined tail-calls.
- ActivationsFinder activations_finder(*optimized_code);
- activations_finder.VisitFrames(&it);
- isolate->thread_manager()->IterateArchivedThreads(&activations_finder);
-
- if (!activations_finder.has_code_activations_) {
- Deoptimizer::UnlinkOptimizedCode(*optimized_code,
- function->context()->native_context());
-
- // Evict optimized code for this function from the cache so that it
- // doesn't get used for new closures.
- if (function->feedback_vector()->optimized_code() == *optimized_code) {
- function->ClearOptimizedCodeSlot("notify deoptimized");
- }
- } else {
- // TODO(titzer): we should probably do DeoptimizeCodeList(code)
- // unconditionally if the code is not already marked for deoptimization.
- // If there is an index by shared function info, all the better.
+RUNTIME_FUNCTION(Runtime_NotifyStubFailure) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+ Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
+ DCHECK(deoptimizer->compiled_code()->kind() == Code::OPTIMIZED_FUNCTION);
+ DCHECK(deoptimizer->compiled_code()->is_turbofanned());
+ MaterializeHeapObjectsAndDeleteDeoptimizer(isolate, deoptimizer);
+ return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+ Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
+ DCHECK(deoptimizer->compiled_code()->kind() == Code::OPTIMIZED_FUNCTION);
+ DCHECK(deoptimizer->compiled_code()->is_turbofanned());
+
+ TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
+ TRACE_EVENT0("v8", "V8.DeoptimizeCode");
+ Handle<JSFunction> function = deoptimizer->function();
+ Deoptimizer::BailoutType type = deoptimizer->bailout_type();
+
+ MaterializeHeapObjectsAndDeleteDeoptimizer(isolate, deoptimizer);
+
+  // TODO(mstarzinger): Marking the function for deoptimization is by now the
+  // only difference from {Runtime_NotifyStubFailure}, and we should also do
+  // this if the top-most frame is a builtin stub, to avoid deoptimization
+  // loops. That would also unify the two runtime functions.
+ if (type != Deoptimizer::LAZY) {
Deoptimizer::DeoptimizeFunction(*function);
}
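// After this refactoring the two notification paths share everything except
// the final marking step. A condensed sketch assembled from the code above:
Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
Deoptimizer::BailoutType type = deoptimizer->bailout_type();  // read first
Handle<JSFunction> function = deoptimizer->function();
// Materializes escaped objects, deletes the deoptimizer, and restores the
// context register:
MaterializeHeapObjectsAndDeleteDeoptimizer(isolate, deoptimizer);
// Only eager (non-LAZY) bailouts also mark the function, invalidating any
// remaining activations of the optimized code:
if (type != Deoptimizer::LAZY) Deoptimizer::DeoptimizeFunction(*function);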
@@ -335,7 +297,7 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
}
if (!function->IsOptimized()) {
- function->ReplaceCode(function->shared()->code());
+ function->set_code(function->shared()->code());
}
return NULL;
}
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index 5d8852d943..9251fa3a7f 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -4,6 +4,8 @@
#include "src/runtime/runtime-utils.h"
+#include <vector>
+
#include "src/arguments.h"
#include "src/compiler.h"
#include "src/debug/debug-coverage.h"
@@ -19,8 +21,7 @@
#include "src/isolate-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/runtime/runtime.h"
-#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
namespace internal {
@@ -586,13 +587,13 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
}
}
- List<Handle<Object>> locals;
+ std::vector<Handle<Object>> locals;
// Fill in the values of the locals.
int i = 0;
for (; i < scope_info->StackLocalCount(); ++i) {
// Use the value from the stack.
if (ScopeInfo::VariableIsSynthetic(scope_info->LocalName(i))) continue;
- locals.Add(Handle<String>(scope_info->LocalName(i), isolate));
+ locals.emplace_back(scope_info->LocalName(i), isolate);
Handle<Object> value =
frame_inspector.GetExpression(scope_info->StackLocalIndex(i));
// TODO(yangguo): We convert optimized out values to {undefined} when they
@@ -600,9 +601,9 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
if (value->IsOptimizedOut(isolate)) {
value = isolate->factory()->undefined_value();
}
- locals.Add(value);
+ locals.push_back(value);
}
- if (locals.length() < local_count * 2) {
+ if (static_cast<int>(locals.size()) < local_count * 2) {
// Get the context containing declarations.
DCHECK(maybe_context->IsContext());
Handle<Context> context(Context::cast(*maybe_context)->closure_context());
@@ -613,11 +614,11 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
VariableMode mode;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
- locals.Add(name);
+ locals.push_back(name);
int context_slot_index = ScopeInfo::ContextSlotIndex(
scope_info, name, &mode, &init_flag, &maybe_assigned_flag);
Object* value = context->get(context_slot_index);
- locals.Add(Handle<Object>(value, isolate));
+ locals.emplace_back(value, isolate);
}
}
@@ -641,7 +642,8 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
// information (except for what is collected above) is the same.
if ((inlined_frame_index == 0) &&
it.javascript_frame()->has_adapted_arguments()) {
- it.AdvanceToArgumentsFrame();
+ it.AdvanceOneFrame();
+ DCHECK(it.frame()->is_arguments_adaptor());
frame_inspector.SetArgumentsFrame(it.frame());
}
@@ -849,17 +851,18 @@ RUNTIME_FUNCTION(Runtime_GetAllScopesDetails) {
}
FrameInspector frame_inspector(frame, inlined_frame_index, isolate);
- List<Handle<JSObject>> result(4);
+ std::vector<Handle<JSObject>> result;
ScopeIterator it(isolate, &frame_inspector, option);
for (; !it.Done(); it.Next()) {
Handle<JSObject> details;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, details,
it.MaterializeScopeDetails());
- result.Add(details);
+ result.push_back(details);
}
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(result.length());
- for (int i = 0; i < result.length(); ++i) {
+ int result_size = static_cast<int>(result.size());
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(result_size);
+ for (int i = 0; i < result_size; ++i) {
array->set(i, *result[i]);
}
return *isolate->factory()->NewJSArrayWithElements(array);
@@ -1028,12 +1031,13 @@ RUNTIME_FUNCTION(Runtime_DebugPrintScopes) {
#ifdef DEBUG
// Print the scopes for the top frame.
- StackFrameLocator locator(isolate);
- JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
- FrameInspector frame_inspector(frame, 0, isolate);
-
- for (ScopeIterator it(isolate, &frame_inspector); !it.Done(); it.Next()) {
- it.DebugPrint();
+ JavaScriptFrameIterator it(isolate);
+ if (!it.done()) {
+ JavaScriptFrame* frame = it.frame();
+ FrameInspector frame_inspector(frame, 0, isolate);
+ for (ScopeIterator si(isolate, &frame_inspector); !si.Done(); si.Next()) {
+ si.DebugPrint();
+ }
}
#endif
return isolate->heap()->undefined_value();
@@ -1298,7 +1302,7 @@ RUNTIME_FUNCTION(Runtime_DebugReferencedBy) {
CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[2]);
CHECK(max_references >= 0);
- List<Handle<JSObject> > instances;
+ std::vector<Handle<JSObject>> instances;
Heap* heap = isolate->heap();
{
HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
@@ -1320,8 +1324,8 @@ RUNTIME_FUNCTION(Runtime_DebugReferencedBy) {
if (obj->IsJSGlobalObject()) {
obj = JSGlobalObject::cast(obj)->global_proxy();
}
- instances.Add(Handle<JSObject>(obj));
- if (instances.length() == max_references) break;
+ instances.emplace_back(obj);
+ if (static_cast<int32_t>(instances.size()) == max_references) break;
}
// Iterate the rest of the heap to satisfy HeapIterator constraints.
while (iterator.next()) {
@@ -1329,15 +1333,16 @@ RUNTIME_FUNCTION(Runtime_DebugReferencedBy) {
}
Handle<FixedArray> result;
- if (instances.length() == 1 && instances.last().is_identical_to(target)) {
+ if (instances.size() == 1 && instances.back().is_identical_to(target)) {
// Check for circular reference only. This can happen when the object is
// only referenced from mirrors and has a circular reference in which case
// the object is not really alive and would have been garbage collected if
// not referenced from the mirror.
result = isolate->factory()->empty_fixed_array();
} else {
- result = isolate->factory()->NewFixedArray(instances.length());
- for (int i = 0; i < instances.length(); ++i) result->set(i, *instances[i]);
+ int instances_size = static_cast<int>(instances.size());
+ result = isolate->factory()->NewFixedArray(instances_size);
+ for (int i = 0; i < instances_size; ++i) result->set(i, *instances[i]);
}
return *isolate->factory()->NewJSArrayWithElements(result);
}
@@ -1353,7 +1358,7 @@ RUNTIME_FUNCTION(Runtime_DebugConstructedBy) {
CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[1]);
CHECK(max_references >= 0);
- List<Handle<JSObject> > instances;
+ std::vector<Handle<JSObject>> instances;
Heap* heap = isolate->heap();
{
HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
@@ -1362,17 +1367,17 @@ RUNTIME_FUNCTION(Runtime_DebugConstructedBy) {
if (!heap_obj->IsJSObject()) continue;
JSObject* obj = JSObject::cast(heap_obj);
if (obj->map()->GetConstructor() != *constructor) continue;
- instances.Add(Handle<JSObject>(obj));
- if (instances.length() == max_references) break;
+ instances.emplace_back(obj);
+ if (static_cast<int32_t>(instances.size()) == max_references) break;
}
// Iterate the rest of the heap to satisfy HeapIterator constraints.
while (iterator.next()) {
}
}
- Handle<FixedArray> result =
- isolate->factory()->NewFixedArray(instances.length());
- for (int i = 0; i < instances.length(); ++i) result->set(i, *instances[i]);
+ int instances_size = static_cast<int>(instances.size());
+ Handle<FixedArray> result = isolate->factory()->NewFixedArray(instances_size);
+ for (int i = 0; i < instances_size; ++i) result->set(i, *instances[i]);
return *isolate->factory()->NewJSArrayWithElements(result);
}
@@ -1919,9 +1924,9 @@ RUNTIME_FUNCTION(Runtime_DebugCollectCoverage) {
// Collect coverage data.
std::unique_ptr<Coverage> coverage;
if (isolate->is_best_effort_code_coverage()) {
- coverage.reset(Coverage::CollectBestEffort(isolate));
+ coverage = Coverage::CollectBestEffort(isolate);
} else {
- coverage.reset(Coverage::CollectPrecise(isolate));
+ coverage = Coverage::CollectPrecise(isolate);
}
Factory* factory = isolate->factory();
// Turn the returned data structure into JavaScript.
@@ -1984,8 +1989,6 @@ RUNTIME_FUNCTION(Runtime_IncBlockCounter) {
CONVERT_ARG_CHECKED(JSFunction, function, 0);
CONVERT_SMI_ARG_CHECKED(coverage_array_slot_index, 1);
- DCHECK(FLAG_block_coverage);
-
// It's quite possible that a function contains IncBlockCounter bytecodes, but
// no coverage info exists. This happens e.g. when the best-effort coverage
// collection mode is selected, which triggers deletion of all coverage infos in
diff --git a/deps/v8/src/runtime/runtime-forin.cc b/deps/v8/src/runtime/runtime-forin.cc
index 08f6d86819..5c67372801 100644
--- a/deps/v8/src/runtime/runtime-forin.cc
+++ b/deps/v8/src/runtime/runtime-forin.cc
@@ -111,35 +111,6 @@ RUNTIME_FUNCTION(Runtime_ForInEnumerate) {
}
-RUNTIME_FUNCTION_RETURN_TRIPLE(Runtime_ForInPrepare) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- Handle<JSReceiver> receiver = args.at<JSReceiver>(0);
- Handle<Object> cache_type;
- if (!Enumerate(receiver).ToHandle(&cache_type)) {
- return MakeTriple(isolate->heap()->exception(), nullptr, nullptr);
- }
- Handle<FixedArray> cache_array;
- int cache_length;
- if (cache_type->IsMap()) {
- Handle<Map> cache_map = Handle<Map>::cast(cache_type);
- Handle<DescriptorArray> descriptors(cache_map->instance_descriptors(),
- isolate);
- cache_length = cache_map->EnumLength();
- if (cache_length && descriptors->HasEnumCache()) {
- cache_array = handle(descriptors->GetEnumCache(), isolate);
- } else {
- cache_array = isolate->factory()->empty_fixed_array();
- cache_length = 0;
- }
- } else {
- cache_array = Handle<FixedArray>::cast(cache_type);
- cache_length = cache_array->length();
- cache_type = handle(Smi::FromInt(1), isolate);
- }
- return MakeTriple(*cache_type, *cache_array, Smi::FromInt(cache_length));
-}
-
RUNTIME_FUNCTION(Runtime_ForInHasProperty) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -151,14 +122,5 @@ RUNTIME_FUNCTION(Runtime_ForInHasProperty) {
return isolate->heap()->ToBoolean(!result->IsUndefined(isolate));
}
-RUNTIME_FUNCTION(Runtime_ForInFilter) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- RETURN_RESULT_OR_FAILURE(isolate,
- HasEnumerableProperty(isolate, receiver, key));
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index 1869584e44..bbb54404a1 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -9,7 +9,6 @@
#include "src/compiler.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
-#include "src/wasm/wasm-module.h"
namespace v8 {
namespace internal {
@@ -136,7 +135,7 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
// Set the code, scope info, formal parameter count, and the length
// of the target shared function info.
- target_shared->ReplaceCode(source_shared->code());
+ target_shared->set_code(source_shared->code());
if (source_shared->HasBytecodeArray()) {
target_shared->set_bytecode_array(source_shared->bytecode_array());
}
@@ -162,8 +161,7 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
SharedFunctionInfo::SetScript(target_shared, source_script);
// Set the code of the target function.
- target->ReplaceCode(source_shared->code());
- DCHECK(target->next_function_link()->IsUndefined(isolate));
+ target->set_code(source_shared->code());
Handle<Context> context(source->context());
target->set_context(*context);
@@ -206,18 +204,6 @@ RUNTIME_FUNCTION(Runtime_IsConstructor) {
return isolate->heap()->ToBoolean(object->IsConstructor());
}
-RUNTIME_FUNCTION(Runtime_SetForceInlineFlag) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, object, 0);
-
- if (object->IsJSFunction()) {
- JSFunction* func = JSFunction::cast(object);
- func->shared()->set_force_inline(true);
- }
- return isolate->heap()->undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_Call) {
HandleScope scope(isolate);
diff --git a/deps/v8/src/runtime/runtime-generator.cc b/deps/v8/src/runtime/runtime-generator.cc
index 4e1645887c..9323d236bc 100644
--- a/deps/v8/src/runtime/runtime-generator.cc
+++ b/deps/v8/src/runtime/runtime-generator.cc
@@ -20,7 +20,6 @@ RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) {
// Underlying function needs to have bytecode available.
DCHECK(function->shared()->HasBytecodeArray());
- DCHECK(!function->shared()->HasBaselineCode());
int size = function->shared()->bytecode_array()->register_count();
Handle<FixedArray> register_file = isolate->factory()->NewFixedArray(size);
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 9c92a54718..813a774611 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -17,7 +17,7 @@
#include "src/messages.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"
-#include "src/wasm/wasm-module.h"
+#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
@@ -503,6 +503,42 @@ RUNTIME_FUNCTION(Runtime_CreateListFromArrayLike) {
isolate, object, ElementTypes::kAll));
}
+RUNTIME_FUNCTION(Runtime_DeserializeLazy) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+
+ DCHECK(FLAG_lazy_deserialization);
+
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
+ int builtin_id = shared->lazy_deserialization_builtin_id();
+
+ // At this point, the builtins table should definitely have DeserializeLazy
+ // set at the position of the target builtin. Also, we should never lazily
+ // deserialize DeserializeLazy.
+
+ DCHECK_NE(Builtins::kDeserializeLazy, builtin_id);
+ DCHECK_EQ(Builtins::kDeserializeLazy,
+ isolate->builtins()->builtin(builtin_id)->builtin_index());
+
+ // The DeserializeLazy builtin tail-calls the deserialized builtin. This only
+ // works with JS-linkage.
+ DCHECK(Builtins::IsLazy(builtin_id));
+ DCHECK_EQ(Builtins::TFJ, Builtins::KindOf(builtin_id));
+
+ if (FLAG_trace_lazy_deserialization) {
+ PrintF("Lazy-deserializing %s\n", Builtins::name(builtin_id));
+ }
+
+ Code* code = Snapshot::DeserializeBuiltin(isolate, builtin_id);
+ DCHECK_EQ(builtin_id, code->builtin_index());
+ DCHECK_EQ(code, isolate->builtins()->builtin(builtin_id));
+ shared->set_code(code);
+ function->set_code(code);
+
+ return code;
+}
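// Stripped of its DCHECKs, Runtime_DeserializeLazy performs three steps
// (a sketch of the code above):
int id = shared->lazy_deserialization_builtin_id();
Code* code = Snapshot::DeserializeBuiltin(isolate, id);  // fills builtins table
shared->set_code(code);    // future closures start on the real builtin
function->set_code(code);  // patch this closure; the tail-call then runs it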
+
RUNTIME_FUNCTION(Runtime_IncrementUseCounter) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -606,5 +642,14 @@ RUNTIME_FUNCTION(Runtime_CreateAsyncFromSyncIterator) {
Handle<JSReceiver>::cast(sync_iterator));
}
+RUNTIME_FUNCTION(Runtime_GetTemplateObject) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(TemplateObjectDescription, description, 0);
+
+ return *TemplateObjectDescription::GetTemplateObject(
+ description, isolate->native_context());
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index e384c4872d..0dc15793a9 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -451,10 +451,9 @@ Handle<Object> InnerCreateBoilerplate(Isolate* isolate,
template <typename Boilerplate>
MaybeHandle<JSObject> CreateLiteral(Isolate* isolate,
- Handle<JSFunction> closure,
+ Handle<FeedbackVector> vector,
int literals_index,
Handle<HeapObject> description, int flags) {
- Handle<FeedbackVector> vector(closure->feedback_vector(), isolate);
FeedbackSlot literals_slot(FeedbackVector::ToSlot(literals_index));
CHECK(literals_slot.ToInt() < vector->length());
Handle<Object> literal_site(vector->Get(literals_slot), isolate);
@@ -521,36 +520,35 @@ MaybeHandle<JSObject> CreateLiteral(Isolate* isolate,
RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 0);
+ CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, vector, 0);
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(BoilerplateDescription, description, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
RETURN_RESULT_OR_FAILURE(
- isolate, CreateLiteral<ObjectBoilerplate>(
- isolate, closure, literals_index, description, flags));
+ isolate, CreateLiteral<ObjectBoilerplate>(isolate, vector, literals_index,
+ description, flags));
}
RUNTIME_FUNCTION(Runtime_CreateArrayLiteral) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 0);
+ CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, vector, 0);
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(ConstantElementsPair, elements, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
RETURN_RESULT_OR_FAILURE(
- isolate, CreateLiteral<ArrayBoilerplate>(isolate, closure, literals_index,
+ isolate, CreateLiteral<ArrayBoilerplate>(isolate, vector, literals_index,
elements, flags));
}
RUNTIME_FUNCTION(Runtime_CreateRegExpLiteral) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 0);
+ CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, vector, 0);
CONVERT_SMI_ARG_CHECKED(index, 1);
CONVERT_ARG_HANDLE_CHECKED(String, pattern, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
- Handle<FeedbackVector> vector(closure->feedback_vector(), isolate);
FeedbackSlot literal_slot(FeedbackVector::ToSlot(index));
// Check if boilerplate exists. If not, create it first.
diff --git a/deps/v8/src/runtime/runtime-numbers.cc b/deps/v8/src/runtime/runtime-numbers.cc
index 72d2403c17..e8c1d00573 100644
--- a/deps/v8/src/runtime/runtime-numbers.cc
+++ b/deps/v8/src/runtime/runtime-numbers.cc
@@ -52,20 +52,7 @@ RUNTIME_FUNCTION(Runtime_StringParseInt) {
return isolate->heap()->nan_value();
}
- double result;
- {
- DisallowHeapAllocation no_gc;
- String::FlatContent flat = subject->GetFlatContent();
-
- if (flat.IsOneByte()) {
- result = StringToInt(isolate->unicode_cache(), flat.ToOneByteVector(),
- radix32);
- } else {
- result =
- StringToInt(isolate->unicode_cache(), flat.ToUC16Vector(), radix32);
- }
- }
-
+ double result = StringToInt(isolate, subject, radix32);
return *isolate->factory()->NewNumber(result);
}
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 3f4a104d44..4c8805eb25 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -9,6 +9,7 @@
#include "src/debug/debug.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
+#include "src/objects/property-descriptor-object.h"
#include "src/property-descriptor.h"
#include "src/runtime/runtime.h"
@@ -162,16 +163,23 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
if (details.location() == kField) {
isolate->heap()->NotifyObjectLayoutChange(*receiver, map->instance_size(),
no_allocation);
- Object* filler = isolate->heap()->one_pointer_filler_map();
FieldIndex index = FieldIndex::ForPropertyIndex(map, details.field_index());
- JSObject::cast(*receiver)->RawFastPropertyAtPut(index, filler);
- // We must clear any recorded slot for the deleted property, because
- // subsequent object modifications might put a raw double there.
- // Slot clearing is the reason why this entire function cannot currently
- // be implemented in the DeleteProperty stub.
- if (index.is_inobject() && !map->IsUnboxedDoubleField(index)) {
- isolate->heap()->ClearRecordedSlot(
- *receiver, HeapObject::RawField(*receiver, index.offset()));
+    // Special-case deleting the last out-of-object property.
+ if (!index.is_inobject() && index.outobject_array_index() == 0) {
+ DCHECK(!Map::cast(backpointer)->HasOutOfObjectProperties());
+ // Clear out the properties backing store.
+ receiver->SetProperties(isolate->heap()->empty_fixed_array());
+ } else {
+ Object* filler = isolate->heap()->one_pointer_filler_map();
+ JSObject::cast(*receiver)->RawFastPropertyAtPut(index, filler);
+ // We must clear any recorded slot for the deleted property, because
+ // subsequent object modifications might put a raw double there.
+ // Slot clearing is the reason why this entire function cannot currently
+ // be implemented in the DeleteProperty stub.
+ if (index.is_inobject() && !map->IsUnboxedDoubleField(index)) {
+ isolate->heap()->ClearRecordedSlot(
+ *receiver, HeapObject::RawField(*receiver, index.offset()));
+ }
}
}
// If the map was marked stable before, then there could be optimized code
@@ -181,6 +189,10 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
map->NotifyLeafMapLayoutChange();
// Finally, perform the map rollback.
receiver->synchronized_set_map(Map::cast(backpointer));
+#if VERIFY_HEAP
+ receiver->HeapObjectVerify();
+ receiver->property_array()->PropertyArrayVerify();
+#endif
return true;
}
@@ -467,7 +479,7 @@ RUNTIME_FUNCTION(Runtime_AddNamedProperty) {
LookupIterator it(object, name, object, LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
if (!maybe.IsJust()) return isolate->heap()->exception();
- CHECK(!it.IsFound());
+ DCHECK(!it.IsFound());
#endif
RETURN_RESULT_OR_FAILURE(isolate, JSObject::SetOwnPropertyIgnoreAttributes(
@@ -493,11 +505,11 @@ RUNTIME_FUNCTION(Runtime_AddElement) {
LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
if (!maybe.IsJust()) return isolate->heap()->exception();
- CHECK(!it.IsFound());
+ DCHECK(!it.IsFound());
if (object->IsJSArray()) {
Handle<JSArray> array = Handle<JSArray>::cast(object);
- CHECK(!JSArray::WouldChangeReadOnlyLength(array, index));
+ DCHECK(!JSArray::WouldChangeReadOnlyLength(array, index));
}
#endif
@@ -811,12 +823,14 @@ RUNTIME_FUNCTION(Runtime_CollectTypeProfile) {
CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, vector, 2);
- DCHECK(FLAG_type_profile);
-
Handle<String> type = Object::TypeOf(isolate, value);
if (value->IsJSReceiver()) {
Handle<JSReceiver> object = Handle<JSReceiver>::cast(value);
type = JSReceiver::GetConstructorName(object);
+ } else if (value->IsNull(isolate)) {
+    // typeof null evaluates to "object", but it's more user-friendly to
+    // annotate null as type "null".
+ type = Handle<String>(isolate->heap()->null_string());
}
DCHECK(vector->metadata()->HasTypeProfileSlot());
@@ -1198,5 +1212,21 @@ RUNTIME_FUNCTION(Runtime_IterableToListCanBeElided) {
return isolate->heap()->ToBoolean(!obj->IterationHasObservableEffects());
}
+RUNTIME_FUNCTION(Runtime_GetOwnPropertyDescriptor) {
+ HandleScope scope(isolate);
+
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
+
+ PropertyDescriptor desc;
+ Maybe<bool> found =
+ JSReceiver::GetOwnPropertyDescriptor(isolate, object, name, &desc);
+ MAYBE_RETURN(found, isolate->heap()->exception());
+
+ if (!found.FromJust()) return isolate->heap()->undefined_value();
+ return *desc.ToPropertyDescriptorObject(isolate);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-promise.cc b/deps/v8/src/runtime/runtime-promise.cc
index 7f8419940a..855f5360fe 100644
--- a/deps/v8/src/runtime/runtime-promise.cc
+++ b/deps/v8/src/runtime/runtime-promise.cc
@@ -26,7 +26,7 @@ void PromiseRejectEvent(Isolate* isolate, Handle<JSPromise> promise,
// Report only if we don't actually have a handler.
if (!promise->has_handler()) {
- isolate->ReportPromiseReject(Handle<JSObject>::cast(promise), value,
+ isolate->ReportPromiseReject(promise, value,
v8::kPromiseRejectWithNoHandler);
}
}
@@ -58,8 +58,7 @@ RUNTIME_FUNCTION(Runtime_ReportPromiseReject) {
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
- isolate->ReportPromiseReject(Handle<JSObject>::cast(promise), value,
- v8::kPromiseRejectWithNoHandler);
+ isolate->ReportPromiseReject(promise, value, v8::kPromiseRejectWithNoHandler);
return isolate->heap()->undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime-proxy.cc b/deps/v8/src/runtime/runtime-proxy.cc
index 62c2e4070f..472cbdf79d 100644
--- a/deps/v8/src/runtime/runtime-proxy.cc
+++ b/deps/v8/src/runtime/runtime-proxy.cc
@@ -51,11 +51,11 @@ RUNTIME_FUNCTION(Runtime_GetPropertyWithReceiver) {
DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 2);
- bool success;
- LookupIterator it = LookupIterator::PropertyOrElement(isolate, receiver, name,
+ bool success = false;
+ LookupIterator it = LookupIterator::PropertyOrElement(isolate, receiver, key,
&success, holder);
if (!success) {
DCHECK(isolate->has_pending_exception());
@@ -64,16 +64,41 @@ RUNTIME_FUNCTION(Runtime_GetPropertyWithReceiver) {
RETURN_RESULT_OR_FAILURE(isolate, Object::GetProperty(&it));
}
-RUNTIME_FUNCTION(Runtime_CheckProxyGetTrapResult) {
+RUNTIME_FUNCTION(Runtime_SetPropertyWithReceiver) {
HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
+ DCHECK_EQ(5, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 3);
+ CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 4);
+
+ bool success = false;
+ LookupIterator it = LookupIterator::PropertyOrElement(isolate, receiver, key,
+ &success, holder);
+ if (!success) {
+ DCHECK(isolate->has_pending_exception());
+ return isolate->heap()->exception();
+ }
+ Maybe<bool> result = Object::SetSuperProperty(
+ &it, value, language_mode, Object::MAY_BE_STORE_FROM_KEYED);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return *isolate->factory()->ToBoolean(result.FromJust());
+}
+
+RUNTIME_FUNCTION(Runtime_CheckProxyGetSetTrapResult) {
+ HandleScope scope(isolate);
+
+ DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(Name, name, 0);
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, target, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, trap_result, 2);
+ CONVERT_NUMBER_CHECKED(int64_t, access_kind, Int64, args[3]);
- RETURN_RESULT_OR_FAILURE(
- isolate, JSProxy::CheckGetTrapResult(isolate, name, target, trap_result));
+ RETURN_RESULT_OR_FAILURE(isolate, JSProxy::CheckGetSetTrapResult(
+ isolate, name, target, trap_result,
+ JSProxy::AccessKind(access_kind)));
}
RUNTIME_FUNCTION(Runtime_CheckProxyHasTrap) {
@@ -83,10 +108,9 @@ RUNTIME_FUNCTION(Runtime_CheckProxyHasTrap) {
CONVERT_ARG_HANDLE_CHECKED(Name, name, 0);
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, target, 1);
- RETURN_ON_SCHEDULED_EXCEPTION_VALUE(
- isolate, JSProxy::CheckHasTrap(isolate, name, target),
- *isolate->factory()->undefined_value());
- return *isolate->factory()->undefined_value();
+ Maybe<bool> result = JSProxy::CheckHasTrap(isolate, name, target);
+ if (!result.IsJust()) return isolate->heap()->exception();
+ return isolate->heap()->ToBoolean(result.FromJust());
}
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index 0ca8b5694f..bb4a6457c8 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -53,11 +53,9 @@ class CompiledReplacement {
explicit CompiledReplacement(Zone* zone)
: parts_(1, zone), replacement_substrings_(0, zone), zone_(zone) {}
- // Return whether the replacement is simple. Can also fail and return Nothing
- // if the given replacement string is invalid (and requires throwing a
- // SyntaxError).
- Maybe<bool> Compile(Handle<JSRegExp> regexp, Handle<String> replacement,
- int capture_count, int subject_length);
+ // Return whether the replacement is simple.
+ bool Compile(Handle<JSRegExp> regexp, Handle<String> replacement,
+ int capture_count, int subject_length);
// Use Apply only if Compile returned false.
void Apply(ReplacementStringBuilder* builder, int match_from, int match_to,
@@ -75,6 +73,7 @@ class CompiledReplacement {
SUBJECT_CAPTURE,
REPLACEMENT_SUBSTRING,
REPLACEMENT_STRING,
+ EMPTY_REPLACEMENT,
NUMBER_OF_PART_TYPES
};
@@ -94,9 +93,12 @@ class CompiledReplacement {
static inline ReplacementPart ReplacementString() {
return ReplacementPart(REPLACEMENT_STRING, 0);
}
+ static inline ReplacementPart EmptyReplacement() {
+ return ReplacementPart(EMPTY_REPLACEMENT, 0);
+ }
static inline ReplacementPart ReplacementSubString(int from, int to) {
- DCHECK(from >= 0);
- DCHECK(to > from);
+ DCHECK_LE(0, from);
+ DCHECK_GT(to, from);
return ReplacementPart(-from, to);
}
@@ -116,6 +118,7 @@ class CompiledReplacement {
// tag == REPLACEMENT_SUBSTRING ||
// tag == REPLACEMENT_STRING: data is index into array of substrings
// of the replacement string.
+ // tag == EMPTY_REPLACEMENT: data is unused.
// tag <= 0: Temporary representation of the substring of the replacement
// string ranging over -tag .. data.
// Is replaced by REPLACEMENT_{SUB,}STRING when we create the
@@ -124,11 +127,10 @@ class CompiledReplacement {
};
template <typename Char>
- Maybe<bool> ParseReplacementPattern(ZoneList<ReplacementPart>* parts,
- Vector<Char> characters,
- FixedArray* capture_name_map,
- int capture_count, int subject_length,
- Zone* zone) {
+ bool ParseReplacementPattern(ZoneList<ReplacementPart>* parts,
+ Vector<Char> characters,
+ FixedArray* capture_name_map, int capture_count,
+ int subject_length, Zone* zone) {
// Equivalent to String::GetSubstitution, except that this method converts
// the replacement string into an internal representation that avoids
// re-parsing it each time it is applied.
@@ -228,8 +230,8 @@ class CompiledReplacement {
break;
}
- // Scan until the next '>', throwing a SyntaxError exception if one
- // is not found, and let the enclosed substring be groupName.
+ // Scan until the next '>', and let the enclosed substring be the
+ // groupName.
const int name_start_index = next_index + 1;
int closing_bracket_index = -1;
@@ -240,8 +242,12 @@ class CompiledReplacement {
}
}
- // Throw a SyntaxError for invalid replacement strings.
- if (closing_bracket_index == -1) return Nothing<bool>();
+ // If no closing bracket is found, '$<' is treated as a string
+ // literal.
+ if (closing_bracket_index == -1) {
+ i = next_index;
+ break;
+ }
Vector<Char> requested_name =
characters.SubVector(name_start_index, closing_bracket_index);
@@ -254,21 +260,21 @@ class CompiledReplacement {
},
capture_name_map);
- // If ? HasProperty(_namedCaptures_, _groupName_) is *false*, throw
- // a *SyntaxError* exception.
- if (capture_index == -1) return Nothing<bool>();
-
- // If capture is undefined, replace the text through the following
- // '>' with the empty string.
+ // If capture is undefined or does not exist, replace the text
+ // through the following '>' with the empty string.
// Otherwise, replace the text through the following '>' with
// ? ToString(capture).
- DCHECK(1 <= capture_index && capture_index <= capture_count);
+ DCHECK(capture_index == -1 ||
+ (1 <= capture_index && capture_index <= capture_count));
if (i > last) {
parts->Add(ReplacementPart::ReplacementSubString(last, i), zone);
}
- parts->Add(ReplacementPart::SubjectCapture(capture_index), zone);
+ parts->Add((capture_index == -1)
+ ? ReplacementPart::EmptyReplacement()
+ : ReplacementPart::SubjectCapture(capture_index),
+ zone);
last = closing_bracket_index + 1;
i = closing_bracket_index;
break;
@@ -282,12 +288,12 @@ class CompiledReplacement {
if (length > last) {
if (last == 0) {
// Replacement is simple. Do not use Apply to do the replacement.
- return Just(true);
+ return true;
} else {
parts->Add(ReplacementPart::ReplacementSubString(last, length), zone);
}
}
- return Just(false);
+ return false;
}
ZoneList<ReplacementPart> parts_;
@@ -295,10 +301,9 @@ class CompiledReplacement {
Zone* zone_;
};
-Maybe<bool> CompiledReplacement::Compile(Handle<JSRegExp> regexp,
- Handle<String> replacement,
- int capture_count,
- int subject_length) {
+bool CompiledReplacement::Compile(Handle<JSRegExp> regexp,
+ Handle<String> replacement, int capture_count,
+ int subject_length) {
{
DisallowHeapAllocation no_gc;
String::FlatContent content = replacement->GetFlatContent();
@@ -314,7 +319,7 @@ Maybe<bool> CompiledReplacement::Compile(Handle<JSRegExp> regexp,
}
}
- Maybe<bool> simple = Nothing<bool>();
+ bool simple;
if (content.IsOneByte()) {
simple = ParseReplacementPattern(&parts_, content.ToOneByteVector(),
capture_name_map, capture_count,
@@ -325,7 +330,7 @@ Maybe<bool> CompiledReplacement::Compile(Handle<JSRegExp> regexp,
capture_name_map, capture_count,
subject_length, zone());
}
- if (simple.IsNothing() || simple.FromJust()) return simple;
+ if (simple) return true;
}
Isolate* isolate = replacement->GetIsolate();
@@ -347,7 +352,7 @@ Maybe<bool> CompiledReplacement::Compile(Handle<JSRegExp> regexp,
substring_index++;
}
}
- return Just(false);
+ return false;
}
@@ -380,6 +385,8 @@ void CompiledReplacement::Apply(ReplacementStringBuilder* builder,
case REPLACEMENT_STRING:
builder->AddString(replacement_substrings_[part.data]);
break;
+ case EMPTY_REPLACEMENT:
+ break;
default:
UNREACHABLE();
}
@@ -388,7 +395,7 @@ void CompiledReplacement::Apply(ReplacementStringBuilder* builder,
void FindOneByteStringIndices(Vector<const uint8_t> subject, uint8_t pattern,
std::vector<int>* indices, unsigned int limit) {
- DCHECK(limit > 0);
+ DCHECK_LT(0, limit);
// Collect indices of pattern in subject using memchr.
// Stop after finding at most limit values.
const uint8_t* subject_start = subject.start();
@@ -406,7 +413,7 @@ void FindOneByteStringIndices(Vector<const uint8_t> subject, uint8_t pattern,
void FindTwoByteStringIndices(const Vector<const uc16> subject, uc16 pattern,
std::vector<int>* indices, unsigned int limit) {
- DCHECK(limit > 0);
+ DCHECK_LT(0, limit);
const uc16* subject_start = subject.start();
const uc16* subject_end = subject_start + subject.length();
for (const uc16* pos = subject_start; pos < subject_end && limit > 0; pos++) {
@@ -421,7 +428,7 @@ template <typename SubjectChar, typename PatternChar>
void FindStringIndices(Isolate* isolate, Vector<const SubjectChar> subject,
Vector<const PatternChar> pattern,
std::vector<int>* indices, unsigned int limit) {
- DCHECK(limit > 0);
+ DCHECK_LT(0, limit);
// Collect indices of pattern in subject.
// Stop after finding at most limit values.
int pattern_length = pattern.length();
@@ -608,15 +615,8 @@ MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithString(
// CompiledReplacement uses zone allocation.
Zone zone(isolate->allocator(), ZONE_NAME);
CompiledReplacement compiled_replacement(&zone);
- Maybe<bool> maybe_simple_replace = compiled_replacement.Compile(
+ const bool simple_replace = compiled_replacement.Compile(
regexp, replacement, capture_count, subject_length);
- if (maybe_simple_replace.IsNothing()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewSyntaxError(MessageTemplate::kRegExpInvalidReplaceString,
- replacement));
- }
-
- const bool simple_replace = maybe_simple_replace.FromJust();
// Shortcut for simple non-regexp global replacements
if (typeTag == JSRegExp::ATOM && simple_replace) {
@@ -827,11 +827,11 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[2]);
- CHECK(limit > 0);
+ CHECK_LT(0, limit);
int subject_length = subject->length();
int pattern_length = pattern->length();
- CHECK(pattern_length > 0);
+ CHECK_LT(0, pattern_length);
if (limit == 0xffffffffu) {
FixedArray* last_match_cache_unused;
@@ -938,8 +938,8 @@ RUNTIME_FUNCTION(Runtime_RegExpExec) {
CONVERT_ARG_HANDLE_CHECKED(RegExpMatchInfo, last_match_info, 3);
// Due to the way the JS calls are constructed this must be less than the
// length of a string, i.e. it is always a Smi. We check anyway for security.
- CHECK(index >= 0);
- CHECK(index <= subject->length());
+ CHECK_LE(0, index);
+ CHECK_GE(subject->length(), index);
isolate->counters()->regexp_entry_runtime()->Increment();
RETURN_RESULT_OR_FAILURE(
isolate, RegExpImpl::Exec(regexp, subject, index, last_match_info));
@@ -1262,7 +1262,7 @@ static Object* SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
isolate->factory()->NewSubString(subject, start, end);
elements->set(cursor++, *substring);
} else {
- DCHECK(current_match[i * 2 + 1] < 0);
+ DCHECK_GT(0, current_match[i * 2 + 1]);
elements->set(cursor++, isolate->heap()->undefined_value());
}
}
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 007dd712c4..d4bfceb257 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -387,7 +387,10 @@ std::unique_ptr<Handle<Object>[]> GetCallerArguments(Isolate* isolate,
return param_data;
} else {
- it.AdvanceToArgumentsFrame();
+ if (it.frame()->has_adapted_arguments()) {
+ it.AdvanceOneFrame();
+ DCHECK(it.frame()->is_arguments_adaptor());
+ }
frame = it.frame();
int args_count = frame->ComputeParametersCount();
@@ -607,15 +610,20 @@ RUNTIME_FUNCTION(Runtime_NewSloppyArguments) {
RUNTIME_FUNCTION(Runtime_NewArgumentsElements) {
HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
+ DCHECK_EQ(3, args.length());
Object** frame = reinterpret_cast<Object**>(args[0]);
CONVERT_SMI_ARG_CHECKED(length, 1);
+ CONVERT_SMI_ARG_CHECKED(mapped_count, 2);
Handle<FixedArray> result =
isolate->factory()->NewUninitializedFixedArray(length);
int const offset = length + 1;
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
- for (int index = 0; index < length; ++index) {
+ int number_of_holes = Min(mapped_count, length);
+ for (int index = 0; index < number_of_holes; ++index) {
+ result->set_the_hole(isolate, index);
+ }
+ for (int index = number_of_holes; index < length; ++index) {
result->set(index, frame[offset - index], mode);
}
return *result;
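// The caller frame stores arguments in reverse order, hence the
// frame[offset - index] access with offset == length + 1. Worked example,
// assuming length == 3 and mapped_count == 1 (illustrative values):
//   number_of_holes == 1, offset == 4
//   index 0 -> the_hole          (presumably aliased by the mapped parameter)
//   index 1 -> frame[4 - 1] == frame[3]
//   index 2 -> frame[4 - 2] == frame[2]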
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index b5c09b94a9..1382362cce 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -45,8 +45,6 @@ RUNTIME_FUNCTION(Runtime_GetSubstitution) {
MaybeHandle<String> GetNamedCapture(Handle<String> name,
CaptureState* state) override {
UNREACHABLE();
- *state = INVALID;
- return MaybeHandle<String>();
}
private:
@@ -137,6 +135,15 @@ RUNTIME_FUNCTION(Runtime_StringReplaceOneCharWithString) {
return isolate->StackOverflow();
}
+RUNTIME_FUNCTION(Runtime_StringTrim) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ Handle<String> string = args.at<String>(0);
+ CONVERT_SMI_ARG_CHECKED(mode, 1);
+ String::TrimMode trim_mode = static_cast<String::TrimMode>(mode);
+ return *String::Trim(string, trim_mode);
+}
+
// ES6 #sec-string.prototype.includes
// String.prototype.includes(searchString [, position])
RUNTIME_FUNCTION(Runtime_StringIncludes) {
@@ -258,7 +265,7 @@ RUNTIME_FUNCTION(Runtime_InternalizeString) {
return *isolate->factory()->InternalizeString(string);
}
-RUNTIME_FUNCTION(Runtime_StringCharCodeAtRT) {
+RUNTIME_FUNCTION(Runtime_StringCharCodeAt) {
HandleScope handle_scope(isolate);
DCHECK_EQ(2, args.length());
@@ -771,15 +778,6 @@ RUNTIME_FUNCTION(Runtime_StringCharFromCode) {
return isolate->heap()->empty_string();
}
-RUNTIME_FUNCTION(Runtime_StringCharCodeAt) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(2, args.length());
- if (!args[0]->IsString()) return isolate->heap()->undefined_value();
- if (!args[1]->IsNumber()) return isolate->heap()->undefined_value();
- if (std::isinf(args.number_at(1))) return isolate->heap()->nan_value();
- return __RT_impl_Runtime_StringCharCodeAtRT(args, isolate);
-}
-
RUNTIME_FUNCTION(Runtime_StringMaxLength) {
SealHandleScope shs(isolate);
return Smi::FromInt(String::kMaxLength);
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 145059c37e..19a4af50d1 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -6,6 +6,7 @@
#include <memory>
+#include "src/api.h"
#include "src/arguments.h"
#include "src/assembler-inl.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
@@ -16,8 +17,9 @@
#include "src/runtime-profiler.h"
#include "src/snapshot/code-serializer.h"
#include "src/snapshot/natives.h"
+#include "src/wasm/memory-tracing.h"
#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-objects-inl.h"
namespace {
struct WasmCompileControls {
@@ -185,10 +187,6 @@ RUNTIME_FUNCTION(Runtime_TypeProfile) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- if (!FLAG_type_profile) {
- return isolate->heap()->undefined_value();
- }
-
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
if (function->has_feedback_vector()) {
FeedbackVector* vector = function->feedback_vector();
@@ -217,11 +215,10 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
}
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
- // The following condition was lifted from the DCHECK inside
+ // The following conditions were lifted (in part) from the DCHECK inside
// JSFunction::MarkForOptimization().
- if (!(function->shared()->allows_lazy_compilation() ||
- (function->code()->kind() == Code::FUNCTION &&
- !function->shared()->optimization_disabled()))) {
+
+ if (!function->shared()->allows_lazy_compilation()) {
return isolate->heap()->undefined_value();
}
@@ -240,7 +237,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
if (function->HasOptimizedCode()) {
if (!function->IsInterpreted()) {
// For non I+TF path, install a shim which checks the optimization marker.
- function->ReplaceCode(
+ function->set_code(
isolate->builtins()->builtin(Builtins::kCheckOptimizationMarker));
}
DCHECK(function->ChecksOptimizationMarker());
@@ -301,8 +298,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
}
// Make the profiler arm all back edges in unoptimized code.
- if (it.frame()->type() == StackFrame::JAVA_SCRIPT ||
- it.frame()->type() == StackFrame::INTERPRETED) {
+ if (it.frame()->type() == StackFrame::INTERPRETED) {
isolate->runtime_profiler()->AttemptOnStackReplacement(
it.frame(), AbstractCode::kMaxLoopNestingMarker);
}
@@ -856,10 +852,15 @@ bool DisallowCodegenFromStringsCallback(v8::Local<v8::Context> context,
RUNTIME_FUNCTION(Runtime_DisallowCodegenFromStrings) {
SealHandleScope shs(isolate);
- DCHECK_EQ(0, args.length());
+ DCHECK_EQ(1, args.length());
+ CONVERT_BOOLEAN_ARG_CHECKED(flag, 0);
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- v8_isolate->SetAllowCodeGenerationFromStringsCallback(
- DisallowCodegenFromStringsCallback);
+ if (flag) {
+ v8_isolate->SetAllowCodeGenerationFromStringsCallback(
+ DisallowCodegenFromStringsCallback);
+ } else {
+ v8_isolate->SetAllowCodeGenerationFromStringsCallback(nullptr);
+ }
return isolate->heap()->undefined_value();
}
@@ -871,12 +872,27 @@ RUNTIME_FUNCTION(Runtime_IsWasmCode) {
return isolate->heap()->ToBoolean(is_js_to_wasm);
}
+RUNTIME_FUNCTION(Runtime_IsWasmTrapHandlerEnabled) {
+ DisallowHeapAllocation no_gc;
+ DCHECK_EQ(0, args.length());
+ bool is_enabled = trap_handler::UseTrapHandler();
+ return isolate->heap()->ToBoolean(is_enabled);
+}
+
+RUNTIME_FUNCTION(Runtime_GetWasmRecoveredTrapCount) {
+ HandleScope shs(isolate);
+ DCHECK_EQ(0, args.length());
+ size_t trap_count = trap_handler::GetRecoveredTrapCount();
+ return *isolate->factory()->NewNumberFromSize(trap_count);
+}
+
#define ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(Name) \
RUNTIME_FUNCTION(Runtime_Has##Name) { \
CONVERT_ARG_CHECKED(JSObject, obj, 0); \
return isolate->heap()->ToBoolean(obj->Has##Name()); \
}
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(SmiElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ObjectElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(SmiOrObjectElements)
@@ -967,8 +983,8 @@ RUNTIME_FUNCTION(Runtime_ValidateWasmInstancesChain) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmModuleObject, module_obj, 0);
CONVERT_ARG_HANDLE_CHECKED(Smi, instance_count, 1);
- wasm::testing::ValidateInstancesChain(isolate, module_obj,
- instance_count->value());
+ WasmInstanceObject::ValidateInstancesChainForTesting(isolate, module_obj,
+ instance_count->value());
return isolate->heap()->ToBoolean(true);
}
@@ -976,7 +992,7 @@ RUNTIME_FUNCTION(Runtime_ValidateWasmModuleState) {
HandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmModuleObject, module_obj, 0);
- wasm::testing::ValidateModuleState(isolate, module_obj);
+ WasmModuleObject::ValidateStateForTesting(isolate, module_obj);
return isolate->heap()->ToBoolean(true);
}
@@ -984,7 +1000,7 @@ RUNTIME_FUNCTION(Runtime_ValidateWasmOrphanedInstance) {
HandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- wasm::testing::ValidateOrphanedInstance(isolate, instance);
+ WasmInstanceObject::ValidateOrphanedInstanceForTesting(isolate, instance);
return isolate->heap()->ToBoolean(true);
}
@@ -1026,5 +1042,45 @@ RUNTIME_FUNCTION(Runtime_RedirectToWasmInterpreter) {
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_WasmTraceMemory) {
+ HandleScope hs(isolate);
+ DCHECK_EQ(4, args.length());
+ CONVERT_SMI_ARG_CHECKED(is_store, 0);
+ CONVERT_SMI_ARG_CHECKED(mem_rep, 1);
+ CONVERT_SMI_ARG_CHECKED(addr_low, 2);
+ CONVERT_SMI_ARG_CHECKED(addr_high, 3);
+
+ // Find the caller wasm frame.
+ StackTraceFrameIterator it(isolate);
+ DCHECK(!it.done());
+ DCHECK(it.is_wasm());
+ WasmCompiledFrame* frame = WasmCompiledFrame::cast(it.frame());
+
+ uint32_t addr = (static_cast<uint32_t>(addr_low) & 0xffff) |
+ (static_cast<uint32_t>(addr_high) << 16);
+ uint8_t* mem_start = reinterpret_cast<uint8_t*>(
+ frame->wasm_instance()->memory_buffer()->allocation_base());
+ int func_index = frame->function_index();
+ int pos = frame->position();
+ // TODO(titzer): eliminate dependency on WasmModule definition here.
+ int func_start =
+ frame->wasm_instance()->module()->functions[func_index].code.offset();
+ tracing::TraceMemoryOperation(tracing::kWasmCompiled, is_store,
+ MachineRepresentation(mem_rep), addr,
+ func_index, pos - func_start, mem_start);
+ return isolate->heap()->undefined_value();
+}
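The traced address arrives as two untagged Smi arguments because a full 32-bit address may not fit in a Smi on 32-bit targets. The reassembly above is the inverse of a 16-bit split along these lines (hypothetical helper names, shown only to make the bit layout explicit):

    #include <cstdint>

    // Hypothetical split performed by the code that emits the runtime call.
    void SplitAddress(uint32_t addr, int* addr_low, int* addr_high) {
      *addr_low = static_cast<int>(addr & 0xffff);  // low 16 bits
      *addr_high = static_cast<int>(addr >> 16);    // high 16 bits
    }

    // Mirror of the reassembly in Runtime_WasmTraceMemory above.
    uint32_t JoinAddress(int addr_low, int addr_high) {
      return (static_cast<uint32_t>(addr_low) & 0xffff) |
             (static_cast<uint32_t>(addr_high) << 16);
    }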
+
+RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTracking) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ object->map()->CompleteInobjectSlackTracking();
+
+ return isolate->heap()->undefined_value();
+}
+
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index 63a83383bf..8dfa8f166c 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -55,17 +55,15 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferNeuter) {
RUNTIME_FUNCTION(Runtime_TypedArrayCopyElements) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, destination, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, target, 0);
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, source, 1);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(length_obj, 2);
size_t length;
CHECK(TryNumberToSize(*length_obj, &length));
- Handle<JSTypedArray> destination_ta = Handle<JSTypedArray>::cast(destination);
-
- ElementsAccessor* accessor = destination_ta->GetElementsAccessor();
- return accessor->CopyElements(source, destination, length);
+ ElementsAccessor* accessor = target->GetElementsAccessor();
+ return accessor->CopyElements(source, target, length);
}
#define BUFFER_VIEW_GETTER(Type, getter, accessor) \
@@ -96,196 +94,6 @@ RUNTIME_FUNCTION(Runtime_TypedArrayGetBuffer) {
}
-// Return codes for Runtime_TypedArraySetFastCases.
-// Should be synchronized with typedarray.js natives.
-enum TypedArraySetResultCodes {
- // Set from typed array of the same type.
- // This is processed by TypedArraySetFastCases
- TYPED_ARRAY_SET_TYPED_ARRAY_SAME_TYPE = 0,
- // Set from typed array of the different type, overlapping in memory.
- TYPED_ARRAY_SET_TYPED_ARRAY_OVERLAPPING = 1,
- // Set from typed array of the different type, non-overlapping.
- TYPED_ARRAY_SET_TYPED_ARRAY_NONOVERLAPPING = 2,
- // Set from non-typed array.
- TYPED_ARRAY_SET_NON_TYPED_ARRAY = 3
-};
-
-// TypedArraySetFromArrayLike(target, source, source_length, offset);
-RUNTIME_FUNCTION(Runtime_TypedArraySetFromArrayLike) {
- HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, target, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, source, 1);
-
- CONVERT_INT32_ARG_CHECKED(source_length, 2);
- DCHECK_GE(source_length, 0);
-
- CONVERT_INT32_ARG_CHECKED(offset, 3);
- DCHECK_GE(offset, 0);
-
- for (int i = 0; i < source_length; i++) {
- Handle<Object> value;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
- Object::GetElement(isolate, source, i));
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, value,
- Object::SetElement(isolate, target, offset + i, value,
- LanguageMode::STRICT));
- }
-
- return *target;
-}
-
-// TypedArraySetFromOverlappingTypedArray(target, source, offset);
-RUNTIME_FUNCTION(Runtime_TypedArraySetFromOverlapping) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, target, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, source, 1);
-
- CONVERT_INT32_ARG_CHECKED(offset, 2);
- DCHECK_GE(offset, 0);
-
- size_t sourceElementSize = source->element_size();
- size_t targetElementSize = target->element_size();
-
- uint32_t source_length = source->length_value();
- if (source_length == 0) return *target;
-
- // Copy left part.
-
- // First un-mutated byte after the next write
- uint32_t target_ptr = 0;
- CHECK(target->byte_offset()->ToUint32(&target_ptr));
- target_ptr += (offset + 1) * targetElementSize;
-
- // Next read at sourcePtr. We do not care for memory changing before
- // sourcePtr - we have already copied it.
- uint32_t source_ptr = 0;
- CHECK(source->byte_offset()->ToUint32(&source_ptr));
-
- uint32_t left_index;
- for (left_index = 0; left_index < source_length && target_ptr <= source_ptr;
- left_index++) {
- Handle<Object> value;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, value, Object::GetElement(isolate, source, left_index));
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, value,
- Object::SetElement(isolate, target, offset + left_index, value,
- LanguageMode::STRICT));
-
- target_ptr += targetElementSize;
- source_ptr += sourceElementSize;
- }
-
- // Copy right part;
- // First unmutated byte before the next write
- CHECK(target->byte_offset()->ToUint32(&target_ptr));
- target_ptr += (offset + source_length - 1) * targetElementSize;
-
- // Next read before sourcePtr. We do not care for memory changing after
- // sourcePtr - we have already copied it.
- CHECK(target->byte_offset()->ToUint32(&source_ptr));
- source_ptr += source_length * sourceElementSize;
-
- uint32_t right_index;
- DCHECK_GE(source_length, 1);
- for (right_index = source_length - 1;
- right_index > left_index && target_ptr >= source_ptr; right_index--) {
- Handle<Object> value;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, value, Object::GetElement(isolate, source, right_index));
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, value,
- Object::SetElement(isolate, target, offset + right_index, value,
- LanguageMode::STRICT));
-
- target_ptr -= targetElementSize;
- source_ptr -= sourceElementSize;
- }
-
- std::vector<Handle<Object>> temp(right_index + 1 - left_index);
-
- for (uint32_t i = left_index; i <= right_index; i++) {
- Handle<Object> value;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
- Object::GetElement(isolate, source, i));
- temp[i - left_index] = value;
- }
-
- for (uint32_t i = left_index; i <= right_index; i++) {
- Handle<Object> value;
-
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, value,
- Object::SetElement(isolate, target, offset + i, temp[i - left_index],
- LanguageMode::STRICT));
- }
-
- return *target;
-}
-
-RUNTIME_FUNCTION(Runtime_TypedArraySetFastCases) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- if (!args[0]->IsJSTypedArray()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kNotTypedArray));
- }
-
- if (!args[1]->IsJSTypedArray()) {
- return Smi::FromInt(TYPED_ARRAY_SET_NON_TYPED_ARRAY);
- }
-
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, target, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, source, 1);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(offset_obj, 2);
-
- size_t offset = 0;
- CHECK(TryNumberToSize(*offset_obj, &offset));
- size_t target_length = target->length_value();
- size_t source_length = source->length_value();
- size_t target_byte_length = NumberToSize(target->byte_length());
- size_t source_byte_length = NumberToSize(source->byte_length());
- if (offset > target_length || offset + source_length > target_length ||
- offset + source_length < offset) { // overflow
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kTypedArraySetSourceTooLarge));
- }
-
- size_t target_offset = NumberToSize(target->byte_offset());
- size_t source_offset = NumberToSize(source->byte_offset());
- uint8_t* target_base =
- static_cast<uint8_t*>(target->GetBuffer()->backing_store()) +
- target_offset;
- uint8_t* source_base =
- static_cast<uint8_t*>(source->GetBuffer()->backing_store()) +
- source_offset;
-
- // Typed arrays of the same type: use memmove.
- if (target->type() == source->type()) {
- memmove(target_base + offset * target->element_size(), source_base,
- source_byte_length);
- return Smi::FromInt(TYPED_ARRAY_SET_TYPED_ARRAY_SAME_TYPE);
- }
-
- // Typed arrays of different types over the same backing store
- if ((source_base <= target_base &&
- source_base + source_byte_length > target_base) ||
- (target_base <= source_base &&
- target_base + target_byte_length > source_base)) {
- // We do not support overlapping ArrayBuffers
- DCHECK(target->GetBuffer()->backing_store() ==
- source->GetBuffer()->backing_store());
- return Smi::FromInt(TYPED_ARRAY_SET_TYPED_ARRAY_OVERLAPPING);
- } else { // Non-overlapping typed arrays
- return Smi::FromInt(TYPED_ARRAY_SET_TYPED_ARRAY_NONOVERLAPPING);
- }
-}
-
namespace {
template <typename T>
@@ -348,14 +156,6 @@ RUNTIME_FUNCTION(Runtime_TypedArraySortFast) {
return *array;
}
-RUNTIME_FUNCTION(Runtime_TypedArrayMaxSizeInHeap) {
- DCHECK_EQ(0, args.length());
- DCHECK_OBJECT_SIZE(FLAG_typed_array_max_size_in_heap +
- FixedTypedArrayBase::kDataOffset);
- return Smi::FromInt(FLAG_typed_array_max_size_in_heap);
-}
-
-
RUNTIME_FUNCTION(Runtime_IsTypedArray) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -414,5 +214,148 @@ RUNTIME_FUNCTION(Runtime_TypedArraySpeciesCreateByLength) {
return *result_array;
}
+namespace {
+
+Object* TypedArraySetFromOverlapping(Isolate* isolate,
+ Handle<JSTypedArray> target,
+ Handle<JSTypedArray> source,
+ uint32_t offset) {
+#ifdef DEBUG
+ Handle<FixedTypedArrayBase> source_elements(
+ FixedTypedArrayBase::cast(source->elements()));
+ Handle<FixedTypedArrayBase> target_elements(
+ FixedTypedArrayBase::cast(target->elements()));
+ uint8_t* source_data = static_cast<uint8_t*>(source_elements->DataPtr());
+ uint8_t* target_data = static_cast<uint8_t*>(target_elements->DataPtr());
+ size_t source_byte_length = NumberToSize(source->byte_length());
+ size_t target_byte_length = NumberToSize(target->byte_length());
+
+ CHECK_LE(offset + source->length(), target->length());
+ CHECK_GE(target->length(), source->length());
+ CHECK(source->length()->IsSmi());
+
+ CHECK(!target->WasNeutered());
+ CHECK(!source->WasNeutered());
+
+ // Assert that target and source are in fact overlapping.
+ CHECK(target_data + target_byte_length > source_data &&
+ source_data + source_byte_length > target_data);
+#endif
+
+ size_t sourceElementSize = source->element_size();
+ size_t targetElementSize = target->element_size();
+
+ uint32_t source_length = source->length_value();
+ if (source_length == 0) return isolate->heap()->undefined_value();
+
+ // Copy left part.
+
+ // First unmutated byte after the next write.
+ uint32_t target_ptr = 0;
+ CHECK(target->byte_offset()->ToUint32(&target_ptr));
+ target_ptr += (offset + 1) * targetElementSize;
+
+ // Next read at sourcePtr. We do not care about memory changing before
+ // sourcePtr - we have already copied it.
+ uint32_t source_ptr = 0;
+ CHECK(source->byte_offset()->ToUint32(&source_ptr));
+
+ ElementsAccessor* source_accessor = source->GetElementsAccessor();
+ ElementsAccessor* target_accessor = target->GetElementsAccessor();
+
+ uint32_t left_index;
+ for (left_index = 0; left_index < source_length && target_ptr <= source_ptr;
+ left_index++) {
+ Handle<Object> value = source_accessor->Get(source, left_index);
+ target_accessor->Set(target, offset + left_index, *value);
+
+ target_ptr += targetElementSize;
+ source_ptr += sourceElementSize;
+ }
+
+ // Copy right part.
+ // First unmutated byte before the next write.
+ CHECK(target->byte_offset()->ToUint32(&target_ptr));
+ target_ptr += (offset + source_length - 1) * targetElementSize;
+
+ // Next read before sourcePtr. We do not care about memory changing after
+ // sourcePtr - we have already copied it.
+ CHECK(target->byte_offset()->ToUint32(&source_ptr));
+ source_ptr += source_length * sourceElementSize;
+
+ uint32_t right_index;
+ DCHECK_GE(source_length, 1);
+ for (right_index = source_length - 1;
+ right_index > left_index && target_ptr >= source_ptr; right_index--) {
+ Handle<Object> value = source_accessor->Get(source, right_index);
+ target_accessor->Set(target, offset + right_index, *value);
+
+ target_ptr -= targetElementSize;
+ source_ptr -= sourceElementSize;
+ }
+
+ std::vector<Handle<Object>> temp(right_index + 1 - left_index);
+
+ for (uint32_t i = left_index; i <= right_index; i++) {
+ temp[i - left_index] = source_accessor->Get(source, i);
+ }
+
+ for (uint32_t i = left_index; i <= right_index; i++) {
+ target_accessor->Set(target, offset + i, *temp[i - left_index]);
+ }
+
+ return isolate->heap()->undefined_value();
+}
+
+} // namespace
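The copy order is easier to see away from the accessor machinery. A minimal sketch of the same three-phase strategy on raw overlapping buffers, assuming equal element sizes (with differing sizes, as above, the phase boundaries move but the ordering logic is unchanged): copy forward while each write ends at or before the next read, copy backward while each write starts at or after the end of the next read, and buffer whatever is left in the middle.

    #include <cstdint>
    #include <vector>

    // Assumes target and source overlap the same underlying buffer, as in
    // the caller, so the pointer comparisons below are meaningful.
    void OverlappingSet(int* target, const int* source, uint32_t source_length,
                        uint32_t offset) {
      // Phase 1: forward copy while the write ends at/before the next read.
      uint32_t left = 0;
      while (left < source_length &&
             target + offset + left + 1 <= source + left) {
        target[offset + left] = source[left];
        ++left;
      }
      // Phase 2: backward copy while the write starts at/after the read end.
      uint32_t right = source_length;
      while (right > left && target + offset + right - 1 >= source + right) {
        --right;
        target[offset + right] = source[right];
      }
      // Phase 3: buffer the unsafe middle region, then write it out.
      std::vector<int> temp(source + left, source + right);
      for (uint32_t i = left; i < right; ++i) {
        target[offset + i] = temp[i - left];
      }
    }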
+
+// 22.2.3.23 %TypedArray%.prototype.set ( overloaded [ , offset ] )
+RUNTIME_FUNCTION(Runtime_TypedArraySet) {
+ HandleScope scope(isolate);
+ Handle<JSTypedArray> target = args.at<JSTypedArray>(0);
+ Handle<Object> obj = args.at(1);
+ Handle<Smi> offset = args.at<Smi>(2);
+
+ DCHECK(!target->WasNeutered()); // Checked in TypedArrayPrototypeSet.
+ DCHECK_LE(0, offset->value());
+
+ const uint32_t uint_offset = static_cast<uint32_t>(offset->value());
+
+ if (obj->IsNumber()) {
+ // If the first argument is a number, throw a TypeError instead of
+ // silently ignoring the call, so that users know they did something
+ // wrong. (Consistent with Firefox and Blink/WebKit.)
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kInvalidArgument));
+ } else if (obj->IsJSTypedArray()) {
+ // The non-overlapping case is handled in CSA.
+ Handle<JSTypedArray> source = Handle<JSTypedArray>::cast(obj);
+ return TypedArraySetFromOverlapping(isolate, target, source, uint_offset);
+ }
+
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, obj,
+ Object::ToObject(isolate, obj));
+
+ Handle<Object> len;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, len,
+ Object::GetProperty(obj, isolate->factory()->length_string()));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, len,
+ Object::ToLength(isolate, len));
+
+ if (uint_offset + len->Number() > target->length_value()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kTypedArraySetSourceTooLarge));
+ }
+
+ uint32_t int_l;
+ CHECK(DoubleToUint32IfEqualToSelf(len->Number(), &int_l));
+
+ Handle<JSReceiver> source = Handle<JSReceiver>::cast(obj);
+ ElementsAccessor* accessor = target->GetElementsAccessor();
+ return accessor->CopyElements(source, target, int_l, uint_offset);
+}
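Note that the bounds check above adds offset and length as doubles before comparing; a pure uint32 sum could wrap and let an oversized source through. A small illustration of the failure mode the double arithmetic avoids:

    #include <cstdint>

    bool SourceTooLarge(uint32_t offset, uint32_t len, uint32_t target_len) {
      // Wrong: as uint32_t, 0xFFFFFFF0 + 0x20 wraps to 0x10 and slips past
      // the comparison.
      //   return offset + len > target_len;
      // Right: every uint32_t is exactly representable in a double, so the
      // sum cannot wrap.
      return static_cast<double>(offset) + static_cast<double>(len) >
             static_cast<double>(target_len);
    }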
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-utils.h b/deps/v8/src/runtime/runtime-utils.h
index 8c7714a0f6..4218510a26 100644
--- a/deps/v8/src/runtime/runtime-utils.h
+++ b/deps/v8/src/runtime/runtime-utils.h
@@ -152,22 +152,6 @@ static inline ObjectPair MakePair(Object* x, Object* y) {
}
#endif
-
-// A mechanism to return a triple of Object pointers. In all calling
-// conventions, a struct of two pointers is returned in memory,
-// allocated by the caller, and passed as a pointer in a hidden first parameter.
-struct ObjectTriple {
- Object* x;
- Object* y;
- Object* z;
-};
-
-static inline ObjectTriple MakeTriple(Object* x, Object* y, Object* z) {
- ObjectTriple result = {x, y, z};
- // ObjectTriple is assigned to a hidden first argument.
- return result;
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index d959523eb4..8ed4e7c57d 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -15,7 +15,7 @@
#include "src/objects/frame-array-inl.h"
#include "src/trap-handler/trap-handler.h"
#include "src/v8memory.h"
-#include "src/wasm/wasm-module.h"
+#include "src/wasm/module-compiler.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-opcodes.h"
@@ -24,8 +24,6 @@ namespace internal {
namespace {
-constexpr int kInvalidExceptionTag = -1;
-
WasmInstanceObject* GetWasmInstanceOnStackTop(Isolate* isolate) {
DisallowHeapAllocation no_allocation;
const Address entry = Isolate::c_entry_fp(isolate->thread_local_top());
@@ -33,7 +31,8 @@ WasmInstanceObject* GetWasmInstanceOnStackTop(Isolate* isolate) {
Memory::Address_at(entry + StandardFrameConstants::kCallerPCOffset);
Code* code = isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code;
DCHECK_EQ(Code::WASM_FUNCTION, code->kind());
- WasmInstanceObject* owning_instance = wasm::GetOwningWasmInstance(code);
+ WasmInstanceObject* owning_instance =
+ WasmInstanceObject::GetOwningInstance(code);
CHECK_NOT_NULL(owning_instance);
return owning_instance;
}
@@ -44,14 +43,6 @@ Context* GetWasmContextOnStackTop(Isolate* isolate) {
}
} // namespace
-RUNTIME_FUNCTION(Runtime_WasmMemorySize) {
- HandleScope scope(isolate);
- DCHECK_EQ(0, args.length());
-
- int32_t mem_size = GetWasmInstanceOnStackTop(isolate)->GetMemorySize();
- return *isolate->factory()->NewNumberFromInt(mem_size);
-}
-
RUNTIME_FUNCTION(Runtime_WasmGrowMemory) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -94,7 +85,7 @@ Object* ThrowRuntimeError(Isolate* isolate, int message_id, int byte_offset,
Handle<FrameArray> stack_elements(
FrameArray::cast(JSArray::cast(*stack_trace_obj)->elements()));
DCHECK(stack_elements->Code(0)->kind() == AbstractCode::WASM_FUNCTION);
- DCHECK(stack_elements->Offset(0)->value() >= 0);
+ DCHECK_LE(0, stack_elements->Offset(0)->value());
stack_elements->SetOffset(0, Smi::FromInt(-1 - byte_offset));
}
@@ -144,64 +135,120 @@ RUNTIME_FUNCTION(Runtime_WasmThrowTypeError) {
isolate, NewTypeError(MessageTemplate::kWasmTrapTypeError));
}
-RUNTIME_FUNCTION(Runtime_WasmThrow) {
+RUNTIME_FUNCTION(Runtime_WasmThrowCreate) {
+ // TODO(kschimpf): Can this be replaced with equivalent TurboFan code/calls?
HandleScope scope(isolate);
DCHECK_NULL(isolate->context());
isolate->set_context(GetWasmContextOnStackTop(isolate));
-
- DCHECK_EQ(1, args.length());
- Handle<Object> tag = args.at(0);
- Handle<Object> except = isolate->factory()->NewWasmRuntimeError(
+ DCHECK_EQ(2, args.length());
+ Handle<Object> exception = isolate->factory()->NewWasmRuntimeError(
static_cast<MessageTemplate::Template>(
MessageTemplate::kWasmExceptionError));
- DCHECK(tag->IsSmi());
- CHECK(!JSReceiver::SetProperty(
- except, isolate->factory()->WasmExceptionTag_string(), tag, STRICT)
+ isolate->set_wasm_caught_exception(*exception);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, id, 0);
+ CHECK(!JSReceiver::SetProperty(exception,
+ isolate->factory()->InternalizeUtf8String(
+ wasm::WasmException::kRuntimeIdStr),
+ id, STRICT)
+ .is_null());
+ CONVERT_SMI_ARG_CHECKED(size, 1);
+ Handle<JSTypedArray> values =
+ isolate->factory()->NewJSTypedArray(ElementsKind::UINT16_ELEMENTS, size);
+ CHECK(!JSReceiver::SetProperty(exception,
+ isolate->factory()->InternalizeUtf8String(
+ wasm::WasmException::kRuntimeValuesStr),
+ values, STRICT)
.is_null());
- return isolate->Throw(*except);
+ return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(Runtime_WasmRethrow) {
+RUNTIME_FUNCTION(Runtime_WasmThrow) {
+ // TODO(kschimpf): Can this be replaced with equivalent TurboFan code/calls?
HandleScope scope(isolate);
+ DCHECK_NULL(isolate->context());
+ isolate->set_context(GetWasmContextOnStackTop(isolate));
DCHECK_EQ(0, args.length());
- Object* exception = isolate->get_wasm_caught_exception();
+ Handle<Object> exception(isolate->get_wasm_caught_exception(), isolate);
+ CHECK(!exception.is_null());
isolate->clear_wasm_caught_exception();
- return isolate->Throw(exception);
+ return isolate->Throw(*exception);
}
-RUNTIME_FUNCTION(Runtime_WasmGetExceptionTag) {
+RUNTIME_FUNCTION(Runtime_WasmGetExceptionRuntimeId) {
+ // TODO(kschimpf): Can this be replaced with equivalent TurboFan code/calls?
HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- Object* exception = args[0];
- DCHECK(isolate->is_catchable_by_wasm(exception));
- Handle<Object> exception_handle(exception, isolate);
- Handle<Object> tag_handle;
- if (JSReceiver::GetProperty(Handle<JSReceiver>::cast(exception_handle),
- isolate->factory()->WasmExceptionTag_string())
- .ToHandle(&tag_handle)) {
- if (tag_handle->IsSmi()) return *tag_handle;
+ DCHECK_NULL(isolate->context());
+ isolate->set_context(GetWasmContextOnStackTop(isolate));
+ Handle<Object> except_obj(isolate->get_wasm_caught_exception(), isolate);
+ if (!except_obj.is_null() && except_obj->IsJSReceiver()) {
+ Handle<JSReceiver> exception(JSReceiver::cast(*except_obj));
+ Handle<Object> tag;
+ if (JSReceiver::GetProperty(exception,
+ isolate->factory()->InternalizeUtf8String(
+ wasm::WasmException::kRuntimeIdStr))
+ .ToHandle(&tag)) {
+ if (tag->IsSmi()) {
+ return *tag;
+ }
+ }
}
- return Smi::FromInt(kInvalidExceptionTag);
+ return Smi::FromInt(wasm::WasmModule::kInvalidExceptionTag);
}
-RUNTIME_FUNCTION(Runtime_WasmSetCaughtExceptionValue) {
- // TODO(kschimpf): Implement stack of caught exceptions, rather than
- // just innermost.
+RUNTIME_FUNCTION(Runtime_WasmExceptionGetElement) {
+ // TODO(kschimpf): Can this be replaced with equivalent TurboFan code/calls?
HandleScope scope(isolate);
+ DCHECK_NULL(isolate->context());
+ isolate->set_context(GetWasmContextOnStackTop(isolate));
DCHECK_EQ(1, args.length());
- Object* exception = args[0];
- DCHECK(isolate->is_catchable_by_wasm(exception));
- isolate->set_wasm_caught_exception(exception);
- return exception;
-}
-
-RUNTIME_FUNCTION(Runtime_SetThreadInWasm) {
- trap_handler::SetThreadInWasm();
- return isolate->heap()->undefined_value();
+ Handle<Object> except_obj(isolate->get_wasm_caught_exception(), isolate);
+ if (!except_obj.is_null() && except_obj->IsJSReceiver()) {
+ Handle<JSReceiver> exception(JSReceiver::cast(*except_obj));
+ Handle<Object> values_obj;
+ if (JSReceiver::GetProperty(exception,
+ isolate->factory()->InternalizeUtf8String(
+ wasm::WasmException::kRuntimeValuesStr))
+ .ToHandle(&values_obj)) {
+ if (values_obj->IsJSTypedArray()) {
+ Handle<JSTypedArray> values = Handle<JSTypedArray>::cast(values_obj);
+ CHECK_EQ(values->type(), kExternalUint16Array);
+ CONVERT_SMI_ARG_CHECKED(index, 0);
+ CHECK_LT(index, Smi::ToInt(values->length()));
+ auto* vals =
+ reinterpret_cast<uint16_t*>(values->GetBuffer()->allocation_base());
+ return Smi::FromInt(vals[index]);
+ }
+ }
+ }
+ return Smi::FromInt(0);
}
-RUNTIME_FUNCTION(Runtime_ClearThreadInWasm) {
- trap_handler::ClearThreadInWasm();
+RUNTIME_FUNCTION(Runtime_WasmExceptionSetElement) {
+ // TODO(kschimpf): Can this be replaced with equivalent TurboFan code/calls?
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ DCHECK_NULL(isolate->context());
+ isolate->set_context(GetWasmContextOnStackTop(isolate));
+ Handle<Object> except_obj(isolate->get_wasm_caught_exception(), isolate);
+ if (!except_obj.is_null() && except_obj->IsJSReceiver()) {
+ Handle<JSReceiver> exception(JSReceiver::cast(*except_obj));
+ Handle<Object> values_obj;
+ if (JSReceiver::GetProperty(exception,
+ isolate->factory()->InternalizeUtf8String(
+ wasm::WasmException::kRuntimeValuesStr))
+ .ToHandle(&values_obj)) {
+ if (values_obj->IsJSTypedArray()) {
+ Handle<JSTypedArray> values = Handle<JSTypedArray>::cast(values_obj);
+ CHECK_EQ(values->type(), kExternalUint16Array);
+ CONVERT_SMI_ARG_CHECKED(index, 0);
+ CHECK_LT(index, Smi::ToInt(values->length()));
+ CONVERT_SMI_ARG_CHECKED(value, 1);
+ auto* vals =
+ reinterpret_cast<uint16_t*>(values->GetBuffer()->allocation_base());
+ vals[index] = static_cast<uint16_t>(value);
+ }
+ }
+ }
return isolate->heap()->undefined_value();
}
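The values array is typed uint16, so wider wasm values have to be written 16 bits at a time through this setter; the actual layout is decided by the generated code that emits these runtime calls, but a hypothetical packing of an i32 into two slots illustrates the granularity:

    #include <cstdint>

    // Hypothetical layout: high half first, then low half.
    void PackI32(uint16_t* vals, int index, uint32_t value) {
      vals[index] = static_cast<uint16_t>(value >> 16);
      vals[index + 1] = static_cast<uint16_t>(value & 0xFFFF);
    }

    uint32_t UnpackI32(const uint16_t* vals, int index) {
      return (static_cast<uint32_t>(vals[index]) << 16) |
             static_cast<uint32_t>(vals[index + 1]);
    }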
@@ -268,7 +315,7 @@ RUNTIME_FUNCTION(Runtime_WasmStackGuard) {
}
RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
HandleScope scope(isolate);
return *wasm::CompileLazy(isolate);
diff --git a/deps/v8/src/runtime/runtime.cc b/deps/v8/src/runtime/runtime.cc
index 38f1805656..e6fa5a19cf 100644
--- a/deps/v8/src/runtime/runtime.cc
+++ b/deps/v8/src/runtime/runtime.cc
@@ -29,13 +29,6 @@ FOR_EACH_INTRINSIC_RETURN_OBJECT(F)
FOR_EACH_INTRINSIC_RETURN_PAIR(P)
#undef P
-#define T(name, number_of_args, result_size) \
- ObjectTriple Runtime_##name(int args_length, Object** args_object, \
- Isolate* isolate);
-FOR_EACH_INTRINSIC_RETURN_TRIPLE(T)
-#undef T
-
-
#define F(name, number_of_args, result_size) \
{ \
Runtime::k##name, Runtime::RUNTIME, #name, FUNCTION_ADDR(Runtime_##name), \
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index a78966f226..e7084a8cca 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -68,6 +68,11 @@ namespace internal {
F(AtomicsNumWaitersForTesting, 2, 1) \
F(SetAllowAtomicsWait, 1, 1)
+#define FOR_EACH_INTRINSIC_BIGINT(F) \
+ F(BigIntEqual, 2, 1) \
+ F(BigIntToBoolean, 1, 1) \
+ F(BigIntBinaryOp, 3, 1)
+
#define FOR_EACH_INTRINSIC_CLASSES(F) \
F(ThrowUnsupportedSuperError, 0, 1) \
F(ThrowConstructorNonCallableError, 1, 1) \
@@ -113,7 +118,7 @@ namespace internal {
F(CompileOptimized_NotConcurrent, 1, 1) \
F(EvictOptimizedCodeSlot, 1, 1) \
F(NotifyStubFailure, 0, 1) \
- F(NotifyDeoptimized, 1, 1) \
+ F(NotifyDeoptimized, 0, 1) \
F(CompileForOnStackReplacement, 1, 1) \
F(TryInstallOptimizedCode, 1, 1) \
F(ResolvePossiblyDirectEval, 6, 1) \
@@ -195,7 +200,6 @@ namespace internal {
#define FOR_EACH_INTRINSIC_FORIN(F) \
F(ForInEnumerate, 1, 1) \
- F(ForInFilter, 2, 1) \
F(ForInHasProperty, 2, 1)
#ifdef V8_TRACE_IGNITION
@@ -223,7 +227,6 @@ namespace internal {
F(SetCode, 2, 1) \
F(SetNativeFlag, 1, 1) \
F(IsConstructor, 1, 1) \
- F(SetForceInlineFlag, 1, 1) \
F(Call, -1 /* >= 2 */, 1) \
F(ConvertReceiver, 1, 1) \
F(IsFunction, 1, 1) \
@@ -284,6 +287,7 @@ namespace internal {
F(CheckIsBootstrapping, 0, 1) \
F(CreateAsyncFromSyncIterator, 1, 1) \
F(CreateListFromArrayLike, 1, 1) \
+ F(DeserializeLazy, 1, 1) \
F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1) \
F(ExportFromRuntime, 1, 1) \
F(IncrementUseCounter, 1, 1) \
@@ -325,7 +329,8 @@ namespace internal {
F(ThrowUndefinedOrNullToObject, 1, 1) \
F(Typeof, 1, 1) \
F(UnwindAndFindExceptionHandler, 0, 1) \
- F(AllowDynamicFunction, 1, 1)
+ F(AllowDynamicFunction, 1, 1) \
+ F(GetTemplateObject, 1, 1)
#define FOR_EACH_INTRINSIC_LITERALS(F) \
F(CreateRegExpLiteral, 4, 1) \
@@ -422,7 +427,8 @@ namespace internal {
F(HasInPrototypeChain, 2, 1) \
F(CreateIterResultObject, 2, 1) \
F(CreateDataProperty, 3, 1) \
- F(IterableToListCanBeElided, 1, 1)
+ F(IterableToListCanBeElided, 1, 1) \
+ F(GetOwnPropertyDescriptor, 2, 1)
#define FOR_EACH_INTRINSIC_OPERATORS(F) \
F(Multiply, 2, 1) \
@@ -467,8 +473,9 @@ namespace internal {
F(JSProxyGetHandler, 1, 1) \
F(JSProxyRevoke, 1, 1) \
F(GetPropertyWithReceiver, 2, 1) \
- F(CheckProxyGetTrapResult, 2, 1) \
- F(CheckProxyHasTrap, 2, 1)
+ F(CheckProxyHasTrap, 2, 1) \
+ F(SetPropertyWithReceiver, 5, 1) \
+ F(CheckProxyGetSetTrapResult, 2, 1)
#define FOR_EACH_INTRINSIC_REGEXP(F) \
F(IsRegExp, 1, 1) \
@@ -494,7 +501,7 @@ namespace internal {
F(NewStrictArguments, 1, 1) \
F(NewRestParameter, 1, 1) \
F(NewSloppyArguments, 3, 1) \
- F(NewArgumentsElements, 2, 1) \
+ F(NewArgumentsElements, 3, 1) \
F(NewClosure, 3, 1) \
F(NewClosure_Tenured, 3, 1) \
F(NewScriptContext, 2, 1) \
@@ -514,13 +521,14 @@ namespace internal {
F(GetSubstitution, 5, 1) \
F(StringReplaceOneCharWithString, 3, 1) \
F(StringIncludes, 3, 1) \
+ F(StringTrim, 2, 1) \
F(StringIndexOf, 3, 1) \
F(StringIndexOfUnchecked, 3, 1) \
F(StringLastIndexOf, 2, 1) \
F(SubString, 3, 1) \
F(StringAdd, 2, 1) \
F(InternalizeString, 1, 1) \
- F(StringCharCodeAtRT, 2, 1) \
+ F(StringCharCodeAt, 2, 1) \
F(StringCompare, 2, 1) \
F(StringBuilderConcat, 3, 1) \
F(StringBuilderJoin, 3, 1) \
@@ -534,7 +542,6 @@ namespace internal {
F(StringNotEqual, 2, 1) \
F(FlattenString, 1, 1) \
F(StringCharFromCode, 1, 1) \
- F(StringCharCodeAt, 2, 1) \
F(StringMaxLength, 0, 1)
#define FOR_EACH_INTRINSIC_SYMBOL(F) \
@@ -580,6 +587,7 @@ namespace internal {
F(TraceExit, 1, 1) \
F(HaveSameMap, 2, 1) \
F(InNewSpace, 1, 1) \
+ F(HasFastElements, 1, 1) \
F(HasSmiElements, 1, 1) \
F(HasObjectElements, 1, 1) \
F(HasSmiOrObjectElements, 1, 1) \
@@ -603,7 +611,9 @@ namespace internal {
F(DeserializeWasmModule, 2, 1) \
F(IsAsmWasmCode, 1, 1) \
F(IsWasmCode, 1, 1) \
- F(DisallowCodegenFromStrings, 0, 1) \
+ F(IsWasmTrapHandlerEnabled, 0, 1) \
+ F(GetWasmRecoveredTrapCount, 0, 1) \
+ F(DisallowCodegenFromStrings, 1, 1) \
F(ValidateWasmInstancesChain, 2, 1) \
F(ValidateWasmModuleState, 1, 1) \
F(ValidateWasmOrphanedInstance, 1, 1) \
@@ -611,7 +621,9 @@ namespace internal {
F(SetWasmInstantiateControls, 0, 1) \
F(HeapObjectVerify, 1, 1) \
F(WasmNumInterpretedCalls, 1, 1) \
- F(RedirectToWasmInterpreter, 2, 1)
+ F(RedirectToWasmInterpreter, 2, 1) \
+ F(WasmTraceMemory, 4, 1) \
+ F(CompleteInobjectSlackTracking, 1, 1)
#define FOR_EACH_INTRINSIC_TYPEDARRAY(F) \
F(ArrayBufferGetByteLength, 1, 1) \
@@ -622,40 +634,32 @@ namespace internal {
F(ArrayBufferViewWasNeutered, 1, 1) \
F(TypedArrayGetLength, 1, 1) \
F(TypedArrayGetBuffer, 1, 1) \
- F(TypedArraySetFromArrayLike, 4, 1) \
- F(TypedArraySetFromOverlapping, 3, 1) \
- F(TypedArraySetFastCases, 3, 1) \
F(TypedArraySortFast, 1, 1) \
- F(TypedArrayMaxSizeInHeap, 0, 1) \
+ F(TypedArraySet, 2, 1) \
F(IsTypedArray, 1, 1) \
F(IsSharedTypedArray, 1, 1) \
F(IsSharedIntegerTypedArray, 1, 1) \
F(IsSharedInteger32TypedArray, 1, 1) \
F(TypedArraySpeciesCreateByLength, 2, 1)
-#define FOR_EACH_INTRINSIC_WASM(F) \
- F(WasmGrowMemory, 1, 1) \
- F(WasmMemorySize, 0, 1) \
- F(ThrowWasmError, 2, 1) \
- F(ThrowWasmErrorFromTrapIf, 1, 1) \
- F(ThrowWasmStackOverflow, 0, 1) \
- F(WasmThrowTypeError, 0, 1) \
- F(WasmThrow, 1, 1) \
- F(WasmRethrow, 0, 1) \
- F(WasmGetExceptionTag, 1, 1) \
- F(WasmSetCaughtExceptionValue, 1, 1) \
- F(WasmRunInterpreter, 3, 1) \
- F(WasmStackGuard, 0, 1) \
- F(SetThreadInWasm, 0, 1) \
- F(ClearThreadInWasm, 0, 1) \
+#define FOR_EACH_INTRINSIC_WASM(F) \
+ F(WasmGrowMemory, 1, 1) \
+ F(ThrowWasmError, 2, 1) \
+ F(ThrowWasmErrorFromTrapIf, 1, 1) \
+ F(ThrowWasmStackOverflow, 0, 1) \
+ F(WasmThrowTypeError, 0, 1) \
+ F(WasmThrowCreate, 2, 1) \
+ F(WasmThrow, 0, 1) \
+ F(WasmGetExceptionRuntimeId, 0, 1) \
+ F(WasmExceptionSetElement, 2, 1) \
+ F(WasmExceptionGetElement, 1, 1) \
+ F(WasmRunInterpreter, 3, 1) \
+ F(WasmStackGuard, 0, 1) \
F(WasmCompileLazy, 0, 1)
#define FOR_EACH_INTRINSIC_RETURN_PAIR(F) \
F(LoadLookupSlotForCall, 1, 2)
-#define FOR_EACH_INTRINSIC_RETURN_TRIPLE(F) \
- F(ForInPrepare, 1, 3)
-
// Most intrinsics are implemented in the runtime/ directory, but ICs are
// implemented in ic.cc for now.
#define FOR_EACH_INTRINSIC_IC(F) \
@@ -677,6 +681,7 @@ namespace internal {
FOR_EACH_INTRINSIC_IC(F) \
FOR_EACH_INTRINSIC_ARRAY(F) \
FOR_EACH_INTRINSIC_ATOMICS(F) \
+ FOR_EACH_INTRINSIC_BIGINT(F) \
FOR_EACH_INTRINSIC_CLASSES(F) \
FOR_EACH_INTRINSIC_COLLECTIONS(F) \
FOR_EACH_INTRINSIC_COMPILER(F) \
@@ -709,7 +714,6 @@ namespace internal {
// FOR_EACH_INTRINSIC defines the list of all intrinsics, coming in 2 flavors,
// either returning an object or a pair.
#define FOR_EACH_INTRINSIC(F) \
- FOR_EACH_INTRINSIC_RETURN_TRIPLE(F) \
FOR_EACH_INTRINSIC_RETURN_PAIR(F) \
FOR_EACH_INTRINSIC_RETURN_OBJECT(F)
diff --git a/deps/v8/src/s390/assembler-s390-inl.h b/deps/v8/src/s390/assembler-s390-inl.h
index da6eee78af..bd364b33d5 100644
--- a/deps/v8/src/s390/assembler-s390-inl.h
+++ b/deps/v8/src/s390/assembler-s390-inl.h
@@ -233,105 +233,24 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
}
// Operand constructors
-Operand::Operand(intptr_t immediate, RelocInfo::Mode rmode) {
- rm_ = no_reg;
- value_.immediate = immediate;
- rmode_ = rmode;
-}
-
-Operand::Operand(const ExternalReference& f) {
- rm_ = no_reg;
- value_.immediate = reinterpret_cast<intptr_t>(f.address());
- rmode_ = RelocInfo::EXTERNAL_REFERENCE;
-}
-
-Operand::Operand(Smi* value) {
- rm_ = no_reg;
- value_.immediate = reinterpret_cast<intptr_t>(value);
- rmode_ = kRelocInfo_NONEPTR;
-}
-
-Operand::Operand(Register rm) {
- rm_ = rm;
- rmode_ = kRelocInfo_NONEPTR; // S390 -why doesn't ARM do this?
-}
-
-void Assembler::CheckBuffer() {
- if (buffer_space() <= kGap) {
- GrowBuffer();
- }
-}
+Operand::Operand(Register rm) : rm_(rm), rmode_(kRelocInfo_NONEPTR) {}
int32_t Assembler::emit_code_target(Handle<Code> target,
RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
RecordRelocInfo(rmode);
- int current = code_targets_.length();
+ size_t current = code_targets_.size();
if (current > 0 && !target.is_null() &&
- code_targets_.last().is_identical_to(target)) {
+ code_targets_.back().address() == target.address()) {
// Optimization if we keep jumping to the same code target.
current--;
} else {
- code_targets_.Add(target);
+ code_targets_.push_back(target);
}
return current;
}
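The std::vector port keeps the old List behavior: consecutive emissions of the same code target share a single table slot, and only the final entry is checked. The pattern in isolation, as a generic sketch:

    #include <cstddef>
    #include <vector>

    // Return an index into the table, reusing the last slot when the same
    // value is appended twice in a row.
    template <typename T>
    std::size_t InternLast(std::vector<T>* table, const T& value) {
      if (!table->empty() && table->back() == value) {
        return table->size() - 1;
      }
      table->push_back(value);
      return table->size() - 1;
    }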
-// Helper to emit the binary encoding of a 2 byte instruction
-void Assembler::emit2bytes(uint16_t x) {
- CheckBuffer();
-#if V8_TARGET_LITTLE_ENDIAN
- // We need to emit instructions in big endian format as disassembler /
- // simulator require the first byte of the instruction in order to decode
- // the instruction length. Swap the bytes.
- x = ((x & 0x00FF) << 8) | ((x & 0xFF00) >> 8);
-#endif
- *reinterpret_cast<uint16_t*>(pc_) = x;
- pc_ += 2;
-}
-
-// Helper to emit the binary encoding of a 4 byte instruction
-void Assembler::emit4bytes(uint32_t x) {
- CheckBuffer();
-#if V8_TARGET_LITTLE_ENDIAN
- // We need to emit instructions in big endian format as disassembler /
- // simulator require the first byte of the instruction in order to decode
- // the instruction length. Swap the bytes.
- x = ((x & 0x000000FF) << 24) | ((x & 0x0000FF00) << 8) |
- ((x & 0x00FF0000) >> 8) | ((x & 0xFF000000) >> 24);
-#endif
- *reinterpret_cast<uint32_t*>(pc_) = x;
- pc_ += 4;
-}
-
-// Helper to emit the binary encoding of a 6 byte instruction
-void Assembler::emit6bytes(uint64_t x) {
- CheckBuffer();
-#if V8_TARGET_LITTLE_ENDIAN
- // We need to emit instructions in big endian format as disassembler /
- // simulator require the first byte of the instruction in order to decode
- // the instruction length. Swap the bytes.
- x = (static_cast<uint64_t>(x & 0xFF) << 40) |
- (static_cast<uint64_t>((x >> 8) & 0xFF) << 32) |
- (static_cast<uint64_t>((x >> 16) & 0xFF) << 24) |
- (static_cast<uint64_t>((x >> 24) & 0xFF) << 16) |
- (static_cast<uint64_t>((x >> 32) & 0xFF) << 8) |
- (static_cast<uint64_t>((x >> 40) & 0xFF));
- x |= (*reinterpret_cast<uint64_t*>(pc_) >> 48) << 48;
-#else
- // We need to pad two bytes of zeros in order to get the 6-bytes
- // stored from low address.
- x = x << 16;
- x |= *reinterpret_cast<uint64_t*>(pc_) & 0xFFFF;
-#endif
- // It is safe to store 8-bytes, as CheckBuffer() guarantees we have kGap
- // space left over.
- *reinterpret_cast<uint64_t*>(pc_) = x;
- pc_ += 6;
-}
-
-bool Operand::is_reg() const { return rm_.is_valid(); }
// Fetch the 32bit value from the FIXED_SEQUENCE IIHF / IILF
Address Assembler::target_address_at(Address pc, Address constant_pool) {
diff --git a/deps/v8/src/s390/assembler-s390.cc b/deps/v8/src/s390/assembler-s390.cc
index 0bf1e3c4f5..d33fc7144a 100644
--- a/deps/v8/src/s390/assembler-s390.cc
+++ b/deps/v8/src/s390/assembler-s390.cc
@@ -310,17 +310,11 @@ Operand Operand::EmbeddedNumber(double value) {
return result;
}
-MemOperand::MemOperand(Register rn, int32_t offset) {
- baseRegister = rn;
- indexRegister = r0;
- offset_ = offset;
-}
+MemOperand::MemOperand(Register rn, int32_t offset)
+ : baseRegister(rn), indexRegister(r0), offset_(offset) {}
-MemOperand::MemOperand(Register rx, Register rb, int32_t offset) {
- baseRegister = rb;
- indexRegister = rx;
- offset_ = offset;
-}
+MemOperand::MemOperand(Register rx, Register rb, int32_t offset)
+ : baseRegister(rb), indexRegister(rx), offset_(offset) {}
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
for (auto& request : heap_object_requests_) {
@@ -349,9 +343,9 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
// Specific instructions, constants, and masks.
Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
- : AssemblerBase(isolate_data, buffer, buffer_size),
- code_targets_(100) {
+ : AssemblerBase(isolate_data, buffer, buffer_size) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
+ code_targets_.reserve(100);
last_bound_pos_ = 0;
relocations_.reserve(128);
@@ -1636,7 +1630,7 @@ void Assembler::EnsureSpaceFor(int space_needed) {
// Rotate Left Single Logical (32)
void Assembler::rll(Register r1, Register r3, Register opnd) {
- DCHECK(!opnd.is(r0));
+ DCHECK(opnd != r0);
rsy_form(RLL, r1, r3, opnd, 0);
}
@@ -1653,7 +1647,7 @@ void Assembler::rll(Register r1, Register r3, Register r2,
// Rotate Left Single Logical (64)
void Assembler::rllg(Register r1, Register r3, Register opnd) {
- DCHECK(!opnd.is(r0));
+ DCHECK(opnd != r0);
rsy_form(RLLG, r1, r3, opnd, 0);
}
@@ -1670,7 +1664,7 @@ void Assembler::rllg(Register r1, Register r3, Register r2,
// Shift Left Single Logical (32)
void Assembler::sll(Register r1, Register opnd) {
- DCHECK(!opnd.is(r0));
+ DCHECK(opnd != r0);
rs_form(SLL, r1, r0, opnd, 0);
}
@@ -1681,7 +1675,7 @@ void Assembler::sll(Register r1, const Operand& opnd) {
// Shift Left Single Logical (32)
void Assembler::sllk(Register r1, Register r3, Register opnd) {
- DCHECK(!opnd.is(r0));
+ DCHECK(opnd != r0);
rsy_form(SLLK, r1, r3, opnd, 0);
}
@@ -1692,7 +1686,7 @@ void Assembler::sllk(Register r1, Register r3, const Operand& opnd) {
// Shift Left Single Logical (64)
void Assembler::sllg(Register r1, Register r3, Register opnd) {
- DCHECK(!opnd.is(r0));
+ DCHECK(opnd != r0);
rsy_form(SLLG, r1, r3, opnd, 0);
}
@@ -1709,7 +1703,7 @@ void Assembler::sldl(Register r1, Register b2, const Operand& opnd) {
// Shift Right Single Logical (32)
void Assembler::srl(Register r1, Register opnd) {
- DCHECK(!opnd.is(r0));
+ DCHECK(opnd != r0);
rs_form(SRL, r1, r0, opnd, 0);
}
@@ -1732,7 +1726,7 @@ void Assembler::srl(Register r1, const Operand& opnd) {
// Shift Right Single Logical (32)
void Assembler::srlk(Register r1, Register r3, Register opnd) {
- DCHECK(!opnd.is(r0));
+ DCHECK(opnd != r0);
rsy_form(SRLK, r1, r3, opnd, 0);
}
@@ -1743,7 +1737,7 @@ void Assembler::srlk(Register r1, Register r3, const Operand& opnd) {
// Shift Right Single Logical (64)
void Assembler::srlg(Register r1, Register r3, Register opnd) {
- DCHECK(!opnd.is(r0));
+ DCHECK(opnd != r0);
rsy_form(SRLG, r1, r3, opnd, 0);
}
@@ -1754,7 +1748,7 @@ void Assembler::srlg(Register r1, Register r3, const Operand& opnd) {
// Shift Left Single (32)
void Assembler::sla(Register r1, Register opnd) {
- DCHECK(!opnd.is(r0));
+ DCHECK(opnd != r0);
rs_form(SLA, r1, r0, opnd, 0);
}
@@ -1765,7 +1759,7 @@ void Assembler::sla(Register r1, const Operand& opnd) {
// Shift Left Single (32)
void Assembler::slak(Register r1, Register r3, Register opnd) {
- DCHECK(!opnd.is(r0));
+ DCHECK(opnd != r0);
rsy_form(SLAK, r1, r3, opnd, 0);
}
@@ -1776,7 +1770,7 @@ void Assembler::slak(Register r1, Register r3, const Operand& opnd) {
// Shift Left Single (64)
void Assembler::slag(Register r1, Register r3, Register opnd) {
- DCHECK(!opnd.is(r0));
+ DCHECK(opnd != r0);
rsy_form(SLAG, r1, r3, opnd, 0);
}
@@ -1787,7 +1781,7 @@ void Assembler::slag(Register r1, Register r3, const Operand& opnd) {
// Shift Right Single (32)
void Assembler::sra(Register r1, Register opnd) {
- DCHECK(!opnd.is(r0));
+ DCHECK(opnd != r0);
rs_form(SRA, r1, r0, opnd, 0);
}
@@ -1798,7 +1792,7 @@ void Assembler::sra(Register r1, const Operand& opnd) {
// Shift Right Single (32)
void Assembler::srak(Register r1, Register r3, Register opnd) {
- DCHECK(!opnd.is(r0));
+ DCHECK(opnd != r0);
rsy_form(SRAK, r1, r3, opnd, 0);
}
@@ -1809,7 +1803,7 @@ void Assembler::srak(Register r1, Register r3, const Operand& opnd) {
// Shift Right Single (64)
void Assembler::srag(Register r1, Register r3, Register opnd) {
- DCHECK(!opnd.is(r0));
+ DCHECK(opnd != r0);
rsy_form(SRAG, r1, r3, opnd, 0);
}
diff --git a/deps/v8/src/s390/assembler-s390.h b/deps/v8/src/s390/assembler-s390.h
index 584816a869..f022b41072 100644
--- a/deps/v8/src/s390/assembler-s390.h
+++ b/deps/v8/src/s390/assembler-s390.h
@@ -48,6 +48,8 @@
#include <fcntl.h>
#include <unistd.h>
+#include <vector>
+
#include "src/assembler.h"
#include "src/s390/constants-s390.h"
@@ -96,6 +98,10 @@ namespace internal {
#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
V(d8) V(d9) V(d10) V(d11) V(d12) V(d15) V(d0)
+
+#define C_REGISTERS(V) \
+ V(cr0) V(cr1) V(cr2) V(cr3) V(cr4) V(cr5) V(cr6) V(cr7) \
+ V(cr8) V(cr9) V(cr10) V(cr11) V(cr12) V(cr15)
// clang-format on
// Register list in load/store instructions
@@ -185,8 +191,8 @@ const int kNumSafepointRegisters = 16;
// Define the list of registers actually saved at safepoints.
// Note that the number of saved registers may be smaller than the reserved
// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-// const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-// const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
+const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
// The following constants describe the stack frame linkage area as
// defined by the ABI.
@@ -233,70 +239,15 @@ const int kCalleeRegisterSaveAreaSize = 96;
const int kCalleeRegisterSaveAreaSize = 0;
#endif
-// CPU Registers.
-//
-// 1) We would prefer to use an enum, but enum values are assignment-
-// compatible with int, which has caused code-generation bugs.
-//
-// 2) We would prefer to use a class instead of a struct but we don't like
-// the register initialization to depend on the particular initialization
-// order (which appears to be different on OS X, Linux, and Windows for the
-// installed versions of C++ we tried). Using a struct permits C-style
-// "initialization". Also, the Register objects cannot be const as this
-// forces initialization stubs in MSVC, making us dependent on initialization
-// order.
-//
-// 3) By not using an enum, we are possibly preventing the compiler from
-// doing certain constant folds, which may significantly reduce the
-// code generated for some assembly instructions (because they boil down
-// to a few constants). If this is a problem, we could change the code
-// such that we use an enum in optimized mode, and the struct in debug
-// mode. This way we get the compile-time error checking in debug mode
-// and best performance in optimized code.
-
-struct Register {
- enum Code {
-#define REGISTER_CODE(R) kCode_##R,
- GENERAL_REGISTERS(REGISTER_CODE)
+enum RegisterCode {
+#define REGISTER_CODE(R) kRegCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
- kAfterLast,
- kCode_no_reg = -1
- };
- static constexpr int kNumRegisters = Code::kAfterLast;
-
-#define REGISTER_COUNT(R) 1 +
- static constexpr int kNumAllocatable =
- ALLOCATABLE_GENERAL_REGISTERS(REGISTER_COUNT) 0;
-#undef REGISTER_COUNT
-
-#define REGISTER_BIT(R) 1 << kCode_##R |
- static constexpr RegList kAllocatable =
- ALLOCATABLE_GENERAL_REGISTERS(REGISTER_BIT) 0;
-#undef REGISTER_BIT
-
- static Register from_code(int code) {
- DCHECK(code >= 0);
- DCHECK(code < kNumRegisters);
- Register r = {code};
- return r;
- }
-
- bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
- bool is(Register reg) const { return reg_code == reg.reg_code; }
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
- int bit() const {
- DCHECK(is_valid());
- return 1 << reg_code;
- }
-
- void set_code(int code) {
- reg_code = code;
- DCHECK(is_valid());
- }
+ kRegAfterLast
+};
+class Register : public RegisterBase<Register, kRegAfterLast> {
+ public:
#if V8_TARGET_LITTLE_ENDIAN
static constexpr int kMantissaOffset = 0;
static constexpr int kExponentOffset = 4;
@@ -305,16 +256,20 @@ struct Register {
static constexpr int kExponentOffset = 0;
#endif
- // Unfortunately we can't make this private in a struct.
- int reg_code;
+ private:
+ friend class RegisterBase;
+ explicit constexpr Register(int code) : RegisterBase(code) {}
};
-typedef struct Register Register;
+static_assert(IS_TRIVIALLY_COPYABLE(Register) &&
+ sizeof(Register) == sizeof(int),
+ "Register can efficiently be passed by value");
-#define DEFINE_REGISTER(R) constexpr Register R = {Register::kCode_##R};
+#define DEFINE_REGISTER(R) \
+ constexpr Register R = Register::from_code<kRegCode_##R>();
GENERAL_REGISTERS(DEFINE_REGISTER)
#undef DEFINE_REGISTER
-constexpr Register no_reg = {Register::kCode_no_reg};
+constexpr Register no_reg = Register::no_reg();
// Register aliases
constexpr Register kLithiumScratch = r1; // lithium scratch.
@@ -324,83 +279,67 @@ constexpr Register cp = r13; // JavaScript context pointer.
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
-// Double word FP register.
-struct DoubleRegister {
- enum Code {
-#define REGISTER_CODE(R) kCode_##R,
- DOUBLE_REGISTERS(REGISTER_CODE)
+enum DoubleRegisterCode {
+#define REGISTER_CODE(R) kDoubleCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
- kAfterLast,
- kCode_no_reg = -1
- };
-
- static constexpr int kNumRegisters = Code::kAfterLast;
- static constexpr int kMaxNumRegisters = kNumRegisters;
-
- bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
- bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
-
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
+ kDoubleAfterLast
+};
- int bit() const {
- DCHECK(is_valid());
- return 1 << reg_code;
- }
+// Double word FP register.
+class DoubleRegister : public RegisterBase<DoubleRegister, kDoubleAfterLast> {
+ public:
+ // A few double registers are reserved: d14 holds the constant 0.0 and d13
+ // serves as a scratch register (see kDoubleRegZero and kScratchDoubleReg
+ // below).
+ static constexpr int kSizeInBytes = 8;
+ inline static int NumRegisters();
- static DoubleRegister from_code(int code) {
- DoubleRegister r = {code};
- return r;
- }
+ private:
+ friend class RegisterBase;
- int reg_code;
+ explicit constexpr DoubleRegister(int code) : RegisterBase(code) {}
};
+static_assert(IS_TRIVIALLY_COPYABLE(DoubleRegister) &&
+ sizeof(DoubleRegister) == sizeof(int),
+ "DoubleRegister can efficiently be passed by value");
+
typedef DoubleRegister FloatRegister;
// TODO(john.yan) Define SIMD registers.
typedef DoubleRegister Simd128Register;
#define DEFINE_REGISTER(R) \
- constexpr DoubleRegister R = {DoubleRegister::kCode_##R};
+ constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
DOUBLE_REGISTERS(DEFINE_REGISTER)
#undef DEFINE_REGISTER
-constexpr Register no_dreg = {Register::kCode_no_reg};
+constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
constexpr DoubleRegister kDoubleRegZero = d14;
constexpr DoubleRegister kScratchDoubleReg = d13;
Register ToRegister(int num);
-// Coprocessor register
-struct CRegister {
- bool is_valid() const { return 0 <= reg_code && reg_code < 8; }
- bool is(CRegister creg) const { return reg_code == creg.reg_code; }
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
- int bit() const {
- DCHECK(is_valid());
- return 1 << reg_code;
- }
-
- // Unfortunately we can't make this private in a struct.
- int reg_code;
+enum CRegisterCode {
+#define REGISTER_CODE(R) kCCode_##R,
+ C_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kCAfterLast
};
-constexpr CRegister no_creg = {-1};
+// Coprocessor register
+class CRegister : public RegisterBase<CRegister, kCAfterLast> {
+ friend class RegisterBase;
+ explicit constexpr CRegister(int code) : RegisterBase(code) {}
+};
-constexpr CRegister cr0 = {0};
-constexpr CRegister cr1 = {1};
-constexpr CRegister cr2 = {2};
-constexpr CRegister cr3 = {3};
-constexpr CRegister cr4 = {4};
-constexpr CRegister cr5 = {5};
-constexpr CRegister cr6 = {6};
-constexpr CRegister cr7 = {7};
+constexpr CRegister no_creg = CRegister::no_reg();
+#define DECLARE_C_REGISTER(R) \
+ constexpr CRegister R = CRegister::from_code<kCCode_##R>();
+C_REGISTERS(DECLARE_C_REGISTER)
+#undef DECLARE_C_REGISTER
// -----------------------------------------------------------------------------
// Machine instruction Operands
@@ -429,11 +368,19 @@ class Operand BASE_EMBEDDED {
public:
// immediate
INLINE(explicit Operand(intptr_t immediate,
- RelocInfo::Mode rmode = kRelocInfo_NONEPTR));
+ RelocInfo::Mode rmode = kRelocInfo_NONEPTR)
+ : rmode_(rmode)) {
+ value_.immediate = immediate;
+ }
INLINE(static Operand Zero()) { return Operand(static_cast<intptr_t>(0)); }
- INLINE(explicit Operand(const ExternalReference& f));
+ INLINE(explicit Operand(const ExternalReference& f)
+ : rmode_(RelocInfo::EXTERNAL_REFERENCE)) {
+ value_.immediate = reinterpret_cast<intptr_t>(f.address());
+ }
explicit Operand(Handle<HeapObject> handle);
- INLINE(explicit Operand(Smi* value));
+ INLINE(explicit Operand(Smi* value) : rmode_(kRelocInfo_NONEPTR)) {
+ value_.immediate = reinterpret_cast<intptr_t>(value);
+ }
// rm
INLINE(explicit Operand(Register rm));
@@ -441,7 +388,7 @@ class Operand BASE_EMBEDDED {
static Operand EmbeddedNumber(double value); // Smi or HeapNumber
// Return true if this is a register operand.
- INLINE(bool is_reg() const);
+ INLINE(bool is_reg() const) { return rm_.is_valid(); }
bool must_output_reloc_info(const Assembler* assembler) const;
@@ -474,7 +421,7 @@ class Operand BASE_EMBEDDED {
RelocInfo::Mode rmode() const { return rmode_; }
private:
- Register rm_;
+ Register rm_ = no_reg;
union Value {
Value() {}
HeapObjectRequest heap_object_request; // if is_heap_object_request_
@@ -505,7 +452,7 @@ class MemOperand BASE_EMBEDDED {
// Base register
Register rb() const {
- DCHECK(!baseRegister.is(no_reg));
+ DCHECK(baseRegister != no_reg);
return baseRegister;
}
@@ -513,7 +460,7 @@ class MemOperand BASE_EMBEDDED {
// Index Register
Register rx() const {
- DCHECK(!indexRegister.is(no_reg));
+ DCHECK(indexRegister != no_reg);
return indexRegister;
}
Register getIndexRegister() const { return rx(); }
@@ -1499,7 +1446,11 @@ class Assembler : public AssemblerBase {
int last_bound_pos_;
// Code emission
- inline void CheckBuffer();
+ void CheckBuffer() {
+ if (buffer_space() <= kGap) {
+ GrowBuffer();
+ }
+ }
void GrowBuffer(int needed = 0);
inline void TrackBranch();
inline void UntrackBranch();
@@ -1507,10 +1458,58 @@ class Assembler : public AssemblerBase {
inline int32_t emit_code_target(
Handle<Code> target, RelocInfo::Mode rmode);
- // Helpers to emit binary encoding of 2/4/6 byte instructions.
- inline void emit2bytes(uint16_t x);
- inline void emit4bytes(uint32_t x);
- inline void emit6bytes(uint64_t x);
+ // Helper to emit the binary encoding of a 2 byte instruction
+ void emit2bytes(uint16_t x) {
+ CheckBuffer();
+#if V8_TARGET_LITTLE_ENDIAN
+ // We need to emit instructions in big endian format as disassembler /
+ // simulator require the first byte of the instruction in order to decode
+ // the instruction length. Swap the bytes.
+ x = ((x & 0x00FF) << 8) | ((x & 0xFF00) >> 8);
+#endif
+ *reinterpret_cast<uint16_t*>(pc_) = x;
+ pc_ += 2;
+ }
+
+ // Helper to emit the binary encoding of a 4 byte instruction
+ void emit4bytes(uint32_t x) {
+ CheckBuffer();
+#if V8_TARGET_LITTLE_ENDIAN
+ // We need to emit instructions in big endian format as disassembler /
+ // simulator require the first byte of the instruction in order to decode
+ // the instruction length. Swap the bytes.
+ x = ((x & 0x000000FF) << 24) | ((x & 0x0000FF00) << 8) |
+ ((x & 0x00FF0000) >> 8) | ((x & 0xFF000000) >> 24);
+#endif
+ *reinterpret_cast<uint32_t*>(pc_) = x;
+ pc_ += 4;
+ }
+
+ // Helper to emit the binary encoding of a 6 byte instruction
+ void emit6bytes(uint64_t x) {
+ CheckBuffer();
+#if V8_TARGET_LITTLE_ENDIAN
+ // We need to emit instructions in big endian format as disassembler /
+ // simulator require the first byte of the instruction in order to decode
+ // the instruction length. Swap the bytes.
+ x = (static_cast<uint64_t>(x & 0xFF) << 40) |
+ (static_cast<uint64_t>((x >> 8) & 0xFF) << 32) |
+ (static_cast<uint64_t>((x >> 16) & 0xFF) << 24) |
+ (static_cast<uint64_t>((x >> 24) & 0xFF) << 16) |
+ (static_cast<uint64_t>((x >> 32) & 0xFF) << 8) |
+ (static_cast<uint64_t>((x >> 40) & 0xFF));
+ x |= (*reinterpret_cast<uint64_t*>(pc_) >> 48) << 48;
+#else
+ // We need to pad two bytes of zeros in order to get the 6-bytes
+ // stored from low address.
+ x = x << 16;
+ x |= *reinterpret_cast<uint64_t*>(pc_) & 0xFFFF;
+#endif
+ // It is safe to store 8-bytes, as CheckBuffer() guarantees we have kGap
+ // space left over.
+ *reinterpret_cast<uint64_t*>(pc_) = x;
+ pc_ += 6;
+ }
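On a little-endian host these helpers byte-swap so that the emitted instruction stream still reads big-endian in memory, which is what the s390 disassembler and simulator expect. A standalone check of the 4-byte swap (assumes a little-endian host such as x86):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      uint32_t x = 0xAABBCCDDu;  // instruction, most significant byte first
      x = ((x & 0x000000FF) << 24) | ((x & 0x0000FF00) << 8) |
          ((x & 0x00FF0000) >> 8) | ((x & 0xFF000000) >> 24);
      uint8_t bytes[4];
      std::memcpy(bytes, &x, sizeof(x));
      // Prints "AA BB CC DD": the bytes land big-endian in memory.
      std::printf("%02X %02X %02X %02X\n", bytes[0], bytes[1], bytes[2],
                  bytes[3]);
      return 0;
    }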
// Helpers to emit binary encoding for various instruction formats.
@@ -1595,7 +1594,7 @@ class Assembler : public AssemblerBase {
friend class RelocInfo;
friend class CodePatcher;
- List<Handle<Code> > code_targets_;
+ std::vector<Handle<Code>> code_targets_;
friend class EnsureSpace;
};
diff --git a/deps/v8/src/s390/code-stubs-s390.cc b/deps/v8/src/s390/code-stubs-s390.cc
index 20c95a92cd..753dd2b77a 100644
--- a/deps/v8/src/s390/code-stubs-s390.cc
+++ b/deps/v8/src/s390/code-stubs-s390.cc
@@ -5,6 +5,7 @@
#if V8_TARGET_ARCH_S390
#include "src/api-arguments.h"
+#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
@@ -53,7 +54,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ push(scratch);
// Account for saved regs if input is sp.
- if (input_reg.is(sp)) double_offset += kPointerSize;
+ if (input_reg == sp) double_offset += kPointerSize;
if (!skip_fastpath()) {
// Load double input.
@@ -69,7 +70,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ Push(scratch_high, scratch_low);
// Account for saved regs if input is sp.
- if (input_reg.is(sp)) double_offset += 2 * kPointerSize;
+ if (input_reg == sp) double_offset += 2 * kPointerSize;
__ LoadlW(scratch_high,
MemOperand(input_reg, double_offset + Register::kExponentOffset));
@@ -186,7 +187,7 @@ void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
void MathPowStub::Generate(MacroAssembler* masm) {
const Register exponent = MathPowTaggedDescriptor::exponent();
- DCHECK(exponent.is(r4));
+ DCHECK(exponent == r4);
const DoubleRegister double_base = d1;
const DoubleRegister double_exponent = d2;
const DoubleRegister double_result = d3;
@@ -356,8 +357,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Pass buffer for return value on stack if necessary
bool needs_return_buffer =
- result_size() > 2 ||
- (result_size() == 2 && !ABI_RETURNS_OBJECTPAIR_IN_REGS);
+ result_size() == 2 && !ABI_RETURNS_OBJECTPAIR_IN_REGS;
if (needs_return_buffer) {
arg_stack_space += result_size();
}
@@ -415,7 +415,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// If return value is on the stack, pop it to registers.
if (needs_return_buffer) {
- if (result_size() > 2) __ LoadP(r4, MemOperand(r2, 2 * kPointerSize));
__ LoadP(r3, MemOperand(r2, kPointerSize));
__ LoadP(r2, MemOperand(r2));
}
@@ -444,14 +443,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// r2:r3: result
// sp: stack pointer
// fp: frame pointer
- Register argc;
- if (argv_in_register()) {
- // We don't want to pop arguments so set argc to no_reg.
- argc = no_reg;
- } else {
- // r6: still holds argc (callee-saved).
- argc = r6;
- }
+ Register argc = argv_in_register()
+ // We don't want to pop arguments so set argc to no_reg.
+ ? no_reg
+ // r6: still holds argc (callee-saved).
+ : r6;
__ LeaveExitFrame(save_doubles(), argc, true);
__ b(r14);
@@ -845,7 +841,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(
__ AddP(tmp, properties, ip);
__ LoadP(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
- DCHECK(!tmp.is(entity_name));
+ DCHECK(tmp != entity_name);
__ CompareRoot(entity_name, Heap::kUndefinedValueRootIndex);
__ beq(done);
@@ -989,6 +985,64 @@ void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
stub2.GetCode();
}
+RecordWriteStub::Mode RecordWriteStub::GetMode(Code* stub) {
+ int32_t first_instr_length =
+ Instruction::InstructionLength(stub->instruction_start());
+ int32_t second_instr_length = Instruction::InstructionLength(
+ stub->instruction_start() + first_instr_length);
+
+ uint64_t first_instr = Assembler::instr_at(stub->instruction_start());
+ uint64_t second_instr =
+ Assembler::instr_at(stub->instruction_start() + first_instr_length);
+
+ DCHECK(first_instr_length == 4 || first_instr_length == 6);
+ DCHECK(second_instr_length == 4 || second_instr_length == 6);
+
+ bool isFirstInstrNOP = isBranchNop(first_instr, first_instr_length);
+ bool isSecondInstrNOP = isBranchNop(second_instr, second_instr_length);
+
+  // STORE_BUFFER_ONLY has NOP on both branches.
+ if (isSecondInstrNOP && isFirstInstrNOP) return STORE_BUFFER_ONLY;
+ // INCREMENTAL_COMPACTION has NOP on second branch.
+ else if (isFirstInstrNOP && !isSecondInstrNOP)
+ return INCREMENTAL_COMPACTION;
+ // INCREMENTAL has NOP on first branch.
+ else if (!isFirstInstrNOP && isSecondInstrNOP)
+ return INCREMENTAL;
+
+ DCHECK(false);
+ return STORE_BUFFER_ONLY;
+}
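The mode is encoded entirely in which of the two leading branches are NOPs. A host-side model of the decoding, with the same truth table (names are illustrative):

    #include <cstdlib>

    enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

    // first NOP + second NOP  -> STORE_BUFFER_ONLY
    // first NOP + second live -> INCREMENTAL_COMPACTION
    // first live + second NOP -> INCREMENTAL
    Mode DecodeMode(bool first_is_nop, bool second_is_nop) {
      if (first_is_nop && second_is_nop) return STORE_BUFFER_ONLY;
      if (first_is_nop) return INCREMENTAL_COMPACTION;
      if (second_is_nop) return INCREMENTAL;
      std::abort();  // both branches live is not a valid encoding
    }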
+
+void RecordWriteStub::Patch(Code* stub, Mode mode) {
+ MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
+ stub->instruction_size(), CodeObjectRequired::kNo);
+
+  // Get the instruction lengths of the two branches.
+ int32_t first_instr_length = masm.instr_length_at(0);
+ int32_t second_instr_length = masm.instr_length_at(first_instr_length);
+
+ switch (mode) {
+ case STORE_BUFFER_ONLY:
+ DCHECK(GetMode(stub) == INCREMENTAL ||
+ GetMode(stub) == INCREMENTAL_COMPACTION);
+
+ PatchBranchCondMask(&masm, 0, CC_NOP);
+ PatchBranchCondMask(&masm, first_instr_length, CC_NOP);
+ break;
+ case INCREMENTAL:
+ DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
+ PatchBranchCondMask(&masm, 0, CC_ALWAYS);
+ break;
+ case INCREMENTAL_COMPACTION:
+ DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
+ PatchBranchCondMask(&masm, first_instr_length, CC_ALWAYS);
+ break;
+ }
+ DCHECK(GetMode(stub) == mode);
+ Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(),
+ first_instr_length + second_instr_length);
+}
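The DCHECKs in Patch() only admit transitions between STORE_BUFFER_ONLY and the two incremental modes. A hypothetical driver showing the legal flips (not code from this patch):

    void StartIncrementalMarking(Code* stub, bool compacting) {
      // Only valid from the quiescent mode.
      DCHECK(RecordWriteStub::GetMode(stub) ==
             RecordWriteStub::STORE_BUFFER_ONLY);
      RecordWriteStub::Patch(
          stub, compacting ? RecordWriteStub::INCREMENTAL_COMPACTION
                           : RecordWriteStub::INCREMENTAL);
    }

    void StopIncrementalMarking(Code* stub) {
      RecordWriteStub::Patch(stub, RecordWriteStub::STORE_BUFFER_ONLY);
    }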
// Takes the input in 3 registers: address_, value_, and object_. A pointer
// to the value has just been written into the object; now this stub makes sure
@@ -1009,8 +1063,7 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
__ b(CC_NOP, &skip_to_incremental_compacting);
if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
}
__ Ret();
@@ -1044,8 +1097,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
InformIncrementalMarker(masm);
regs_.Restore(masm);
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
__ bind(&dont_need_remembered_set);
}
@@ -1061,10 +1113,9 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
- Register address =
- r2.is(regs_.address()) ? regs_.scratch0() : regs_.address();
- DCHECK(!address.is(regs_.object()));
- DCHECK(!address.is(r2));
+ Register address = r2 == regs_.address() ? regs_.scratch0() : regs_.address();
+ DCHECK(address != regs_.object());
+ DCHECK(address != r2);
__ LoadRR(address, regs_.address());
__ LoadRR(r2, regs_.object());
__ LoadRR(r3, address);
@@ -1077,6 +1128,10 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}
+void RecordWriteStub::Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+}
+
void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode) {
@@ -1091,8 +1146,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
} else {
__ Ret();
}
@@ -1131,8 +1185,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
} else {
__ Ret();
}
@@ -1554,7 +1607,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
ExternalReference::handle_scope_level_address(isolate), next_address);
// The additional parameter is the address of the actual callback.
- DCHECK(function_address.is(r3) || function_address.is(r4));
+ DCHECK(function_address == r3 || function_address == r4);
Register scratch = r5;
__ mov(scratch, Operand(ExternalReference::is_profiling_address(isolate)));
@@ -1773,7 +1826,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
- DCHECK(!api_function_address.is(r2) && !scratch.is(r2));
+ DCHECK(api_function_address != r2 && scratch != r2);
// r2 = FunctionCallbackInfo&
// The arguments area is after the return address.
__ AddP(r2, sp, Operand(kFunctionCallbackInfoOffset));
diff --git a/deps/v8/src/s390/code-stubs-s390.h b/deps/v8/src/s390/code-stubs-s390.h
index 94df2b9418..b40991d6e9 100644
--- a/deps/v8/src/s390/code-stubs-s390.h
+++ b/deps/v8/src/s390/code-stubs-s390.h
@@ -108,64 +108,9 @@ class RecordWriteStub : public PlatformCodeStub {
return false;
}
- static Mode GetMode(Code* stub) {
- int32_t first_instr_length =
- Instruction::InstructionLength(stub->instruction_start());
- int32_t second_instr_length = Instruction::InstructionLength(
- stub->instruction_start() + first_instr_length);
-
- uint64_t first_instr = Assembler::instr_at(stub->instruction_start());
- uint64_t second_instr =
- Assembler::instr_at(stub->instruction_start() + first_instr_length);
-
- DCHECK(first_instr_length == 4 || first_instr_length == 6);
- DCHECK(second_instr_length == 4 || second_instr_length == 6);
-
- bool isFirstInstrNOP = isBranchNop(first_instr, first_instr_length);
- bool isSecondInstrNOP = isBranchNop(second_instr, second_instr_length);
-
- // STORE_BUFFER_ONLY has NOP on both branches
- if (isSecondInstrNOP && isFirstInstrNOP) return STORE_BUFFER_ONLY;
- // INCREMENTAL_COMPACTION has NOP on second branch.
- else if (isFirstInstrNOP && !isSecondInstrNOP)
- return INCREMENTAL_COMPACTION;
- // INCREMENTAL has NOP on first branch.
- else if (!isFirstInstrNOP && isSecondInstrNOP)
- return INCREMENTAL;
-
- DCHECK(false);
- return STORE_BUFFER_ONLY;
- }
+ static Mode GetMode(Code* stub);
- static void Patch(Code* stub, Mode mode) {
- MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
- stub->instruction_size(), CodeObjectRequired::kNo);
-
- // Get instruction lengths of two branches
- int32_t first_instr_length = masm.instr_length_at(0);
- int32_t second_instr_length = masm.instr_length_at(first_instr_length);
-
- switch (mode) {
- case STORE_BUFFER_ONLY:
- DCHECK(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
-
- PatchBranchCondMask(&masm, 0, CC_NOP);
- PatchBranchCondMask(&masm, first_instr_length, CC_NOP);
- break;
- case INCREMENTAL:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchBranchCondMask(&masm, 0, CC_ALWAYS);
- break;
- case INCREMENTAL_COMPACTION:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchBranchCondMask(&masm, first_instr_length, CC_ALWAYS);
- break;
- }
- DCHECK(GetMode(stub) == mode);
- Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(),
- first_instr_length + second_instr_length);
- }
+ static void Patch(Code* stub, Mode mode);
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
@@ -176,7 +121,10 @@ class RecordWriteStub : public PlatformCodeStub {
class RegisterAllocation {
public:
RegisterAllocation(Register object, Register address, Register scratch0)
- : object_(object), address_(address), scratch0_(scratch0) {
+ : object_(object),
+ address_(address),
+ scratch0_(scratch0),
+ scratch1_(no_reg) {
DCHECK(!AreAliased(scratch0, object, address, no_reg));
scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
}
@@ -240,9 +188,7 @@ class RecordWriteStub : public PlatformCodeStub {
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
- void Activate(Code* code) override {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
- }
+ void Activate(Code* code) override;
Register object() const {
return Register::from_code(ObjectBits::decode(minor_key_));
diff --git a/deps/v8/src/s390/deoptimizer-s390.cc b/deps/v8/src/s390/deoptimizer-s390.cc
index 40bc686b8b..e4f9c773e1 100644
--- a/deps/v8/src/s390/deoptimizer-s390.cc
+++ b/deps/v8/src/s390/deoptimizer-s390.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/assembler-inl.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/register-configuration.h"
@@ -210,9 +211,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ ld(dreg, MemOperand(r3, src_offset));
}
- // Push state, pc, and continuation from the last output frame.
- __ LoadP(r8, MemOperand(r4, FrameDescription::state_offset()));
- __ push(r8);
+ // Push pc and continuation from the last output frame.
__ LoadP(r8, MemOperand(r4, FrameDescription::pc_offset()));
__ push(r8);
__ LoadP(r8, MemOperand(r4, FrameDescription::continuation_offset()));
@@ -253,6 +252,8 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ StoreP(ip, MemOperand(sp));
}
+bool Deoptimizer::PadTopOfStackRegister() { return false; }
+
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
diff --git a/deps/v8/src/s390/frame-constants-s390.cc b/deps/v8/src/s390/frame-constants-s390.cc
index d8aae11197..c087c5d85e 100644
--- a/deps/v8/src/s390/frame-constants-s390.cc
+++ b/deps/v8/src/s390/frame-constants-s390.cc
@@ -20,6 +20,10 @@ Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); }
+int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
+ return register_count;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/s390/interface-descriptors-s390.cc b/deps/v8/src/s390/interface-descriptors-s390.cc
index 3b1725960e..5e27f59226 100644
--- a/deps/v8/src/s390/interface-descriptors-s390.cc
+++ b/deps/v8/src/s390/interface-descriptors-s390.cc
@@ -22,9 +22,14 @@ void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
void RecordWriteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // TODO(albertnetymk): Use default for now; should call
- // RestrictAllocatableRegisters like src/x64/interface-descriptors-x64.cc
- DefaultInitializePlatformSpecific(data, kParameterCount);
+ const Register default_stub_registers[] = {r2, r3, r4, r5, r6};
+
+ data->RestrictAllocatableRegisters(default_stub_registers,
+ arraysize(default_stub_registers));
+
+ CHECK_LE(static_cast<size_t>(kParameterCount),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
}
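The CHECK_LE guards the pairing of parameters to pinned registers. With the counts written out (assuming kParameterCount is 5 here, matching kObject, kSlot, kIsolate, kRememberedSet, and kFPMode from the call sites above), the invariant is simply:

    // Sketch of the invariant the CHECK_LE above enforces at runtime.
    constexpr int kParams = 5;      // object, slot, isolate, rs, fp mode
    constexpr int kPinnedRegs = 5;  // r2, r3, r4, r5, r6
    static_assert(kParams <= kPinnedRegs,
                  "every RecordWrite parameter needs a pinned register");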
const Register FastNewFunctionContextDescriptor::FunctionRegister() {
@@ -81,24 +86,6 @@ void TypeofDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastCloneRegExpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r5, r4, r3, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r5, r4, r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r5, r4, r3, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3};
diff --git a/deps/v8/src/s390/macro-assembler-s390.cc b/deps/v8/src/s390/macro-assembler-s390.cc
index 931233b2d5..ab95f503a8 100644
--- a/deps/v8/src/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/s390/macro-assembler-s390.cc
@@ -10,6 +10,7 @@
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
+#include "src/callable.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
@@ -26,45 +27,91 @@ MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: TurboAssembler(isolate, buffer, size, create_code_object) {}
-void TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
- Register exclusion1, Register exclusion2,
- Register exclusion3) {
+TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
+ CodeObjectRequired create_code_object)
+ : Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
+ if (create_code_object == CodeObjectRequired::kYes) {
+ code_object_ =
+ Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
+ }
+}
+
+int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1,
+ Register exclusion2,
+ Register exclusion3) const {
+ int bytes = 0;
RegList exclusions = 0;
- if (!exclusion1.is(no_reg)) {
+ if (exclusion1 != no_reg) {
exclusions |= exclusion1.bit();
- if (!exclusion2.is(no_reg)) {
+ if (exclusion2 != no_reg) {
exclusions |= exclusion2.bit();
- if (!exclusion3.is(no_reg)) {
+ if (exclusion3 != no_reg) {
exclusions |= exclusion3.bit();
}
}
}
- MultiPush(kJSCallerSaved & ~exclusions);
+ RegList list = kJSCallerSaved & ~exclusions;
+ bytes += NumRegs(list) * kPointerSize;
if (fp_mode == kSaveFPRegs) {
- MultiPushDoubles(kCallerSavedDoubles);
+ bytes += NumRegs(kCallerSavedDoubles) * kDoubleSize;
}
+
+ return bytes;
}
-void TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
+ int bytes = 0;
+ RegList exclusions = 0;
+ if (exclusion1 != no_reg) {
+ exclusions |= exclusion1.bit();
+ if (exclusion2 != no_reg) {
+ exclusions |= exclusion2.bit();
+ if (exclusion3 != no_reg) {
+ exclusions |= exclusion3.bit();
+ }
+ }
+ }
+
+ RegList list = kJSCallerSaved & ~exclusions;
+ MultiPush(list);
+ bytes += NumRegs(list) * kPointerSize;
+
+ if (fp_mode == kSaveFPRegs) {
+ MultiPushDoubles(kCallerSavedDoubles);
+ bytes += NumRegs(kCallerSavedDoubles) * kDoubleSize;
+ }
+
+ return bytes;
+}
+
+int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+ Register exclusion2, Register exclusion3) {
+ int bytes = 0;
if (fp_mode == kSaveFPRegs) {
MultiPopDoubles(kCallerSavedDoubles);
+ bytes += NumRegs(kCallerSavedDoubles) * kDoubleSize;
}
RegList exclusions = 0;
- if (!exclusion1.is(no_reg)) {
+ if (exclusion1 != no_reg) {
exclusions |= exclusion1.bit();
- if (!exclusion2.is(no_reg)) {
+ if (exclusion2 != no_reg) {
exclusions |= exclusion2.bit();
- if (!exclusion3.is(no_reg)) {
+ if (exclusion3 != no_reg) {
exclusions |= exclusion3.bit();
}
}
}
- MultiPop(kJSCallerSaved & ~exclusions);
+ RegList list = kJSCallerSaved & ~exclusions;
+ MultiPop(list);
+ bytes += NumRegs(list) * kPointerSize;
+
+ return bytes;
}
void TurboAssembler::Jump(Register target) { b(target); }
@@ -113,7 +160,7 @@ void TurboAssembler::Call(Register target) {
}
void MacroAssembler::CallJSEntry(Register target) {
- DCHECK(target.is(ip));
+ DCHECK(target == ip);
Call(target);
}
@@ -215,13 +262,13 @@ void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
}
void TurboAssembler::Move(Register dst, Register src, Condition cond) {
- if (!dst.is(src)) {
+ if (dst != src) {
LoadRR(dst, src);
}
}
void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) {
- if (!dst.is(src)) {
+ if (dst != src) {
ldr(dst, src);
}
}
@@ -289,11 +336,12 @@ void MacroAssembler::InNewSpace(Register object, Register scratch,
CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cond, branch);
}
-void MacroAssembler::RecordWriteField(
- Register object, int offset, Register value, Register dst,
- LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action, SmiCheck smi_check,
- PointersToHereCheck pointers_to_here_check_for_value) {
+void MacroAssembler::RecordWriteField(Register object, int offset,
+ Register value, Register dst,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -317,7 +365,7 @@ void MacroAssembler::RecordWriteField(
}
RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
- OMIT_SMI_CHECK, pointers_to_here_check_for_value);
+ OMIT_SMI_CHECK);
bind(&done);
@@ -329,80 +377,76 @@ void MacroAssembler::RecordWriteField(
}
}
-// Will clobber 4 registers: object, map, dst, ip. The
-// register 'object' contains a heap object pointer.
-void MacroAssembler::RecordWriteForMap(Register object, Register map,
- Register dst,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode fp_mode) {
- if (emit_debug_code()) {
- LoadP(dst, FieldMemOperand(map, HeapObject::kMapOffset));
- CmpP(dst, Operand(isolate()->factory()->meta_map()));
- Check(eq, kWrongAddressOrValuePassedToRecordWrite);
- }
-
- if (!FLAG_incremental_marking) {
- return;
+void TurboAssembler::SaveRegisters(RegList registers) {
+ DCHECK(NumRegs(registers) > 0);
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
}
+ MultiPush(regs);
+}
- if (emit_debug_code()) {
- CmpP(map, FieldMemOperand(object, HeapObject::kMapOffset));
- Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+void TurboAssembler::RestoreRegisters(RegList registers) {
+ DCHECK(NumRegs(registers) > 0);
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
}
+ MultiPop(regs);
+}
- Label done;
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
+ // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
+  // i.e. we always emit the remembered set code and save FP registers in
+  // RecordWriteStub. If a large performance regression is observed, we should
+  // use these values to avoid unnecessary work.
- // A single check of the map's pages interesting flag suffices, since it is
- // only set during incremental collection, and then it's also guaranteed that
- // the from object's page's interesting flag is also set. This optimization
- // relies on the fact that maps can never be in new space.
- CheckPageFlag(map,
- map, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
+ RegList registers = callable.descriptor().allocatable_registers();
- lay(dst, MemOperand(object, HeapObject::kMapOffset - kHeapObjectTag));
- if (emit_debug_code()) {
- Label ok;
- AndP(r0, dst, Operand(kPointerSize - 1));
- beq(&ok, Label::kNear);
- stop("Unaligned cell in write barrier");
- bind(&ok);
- }
+ SaveRegisters(registers);
+ Register object_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kObject));
+ Register slot_parameter(
+ callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
+ Register isolate_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kIsolate));
+ Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kRememberedSet));
+ Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kFPMode));
- // Record the actual write.
- if (lr_status == kLRHasNotBeenSaved) {
- push(r14);
- }
- RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
- fp_mode);
- CallStub(&stub);
- if (lr_status == kLRHasNotBeenSaved) {
- pop(r14);
- }
+ Push(object);
+ Push(address);
- bind(&done);
+ Pop(slot_parameter);
+ Pop(object_parameter);
- // Count number of write barriers in generated code.
- isolate()->counters()->write_barriers_static()->Increment();
- IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);
+ mov(isolate_parameter,
+ Operand(ExternalReference::isolate_address(isolate())));
+ Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
+ Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
+ Call(callable.code(), RelocInfo::CODE_TARGET);
- // Clobber clobbered registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 12)));
- mov(map, Operand(bit_cast<intptr_t>(kZapValue + 16)));
- }
+ RestoreRegisters(registers);
}
// Will clobber 4 registers: object, address, scratch, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
-void MacroAssembler::RecordWrite(
- Register object, Register address, Register value,
- LinkRegisterStatus lr_status, SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action, SmiCheck smi_check,
- PointersToHereCheck pointers_to_here_check_for_value) {
- DCHECK(!object.is(value));
+void MacroAssembler::RecordWrite(Register object, Register address,
+ Register value, LinkRegisterStatus lr_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ DCHECK(object != value);
if (emit_debug_code()) {
CmpP(value, MemOperand(address));
Check(eq, kWrongAddressOrValuePassedToRecordWrite);
@@ -420,11 +464,9 @@ void MacroAssembler::RecordWrite(
JumpIfSmi(value, &done);
}
- if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
- }
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
CheckPageFlag(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
@@ -433,9 +475,13 @@ void MacroAssembler::RecordWrite(
if (lr_status == kLRHasNotBeenSaved) {
push(r14);
}
+#ifdef V8_CSA_WRITE_BARRIER
+ CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
+#else
RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
fp_mode);
CallStub(&stub);
+#endif
if (lr_status == kLRHasNotBeenSaved) {
pop(r14);
}
@@ -457,8 +503,7 @@ void MacroAssembler::RecordWrite(
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Register address, Register scratch,
- SaveFPRegsMode fp_mode,
- RememberedSetFinalAction and_then) {
+ SaveFPRegsMode fp_mode) {
Label done;
if (emit_debug_code()) {
Label ok;
@@ -480,20 +525,13 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
// Check for end of buffer.
AndP(scratch, Operand(StoreBuffer::kStoreBufferMask));
- if (and_then == kFallThroughAtEnd) {
- bne(&done, Label::kNear);
- } else {
- DCHECK(and_then == kReturnAtEnd);
- bne(&done, Label::kNear);
- }
+ bne(&done, Label::kNear);
push(r14);
StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
CallStub(&store_buffer_overflow);
pop(r14);
bind(&done);
- if (and_then == kReturnAtEnd) {
- Ret();
- }
+ Ret();
}
void TurboAssembler::PushCommonFrame(Register marker_reg) {
@@ -541,10 +579,6 @@ void TurboAssembler::RestoreFrameStateForTailCall() {
LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
}
-const RegList MacroAssembler::kSafepointSavedRegisters = Register::kAllocatable;
-const int MacroAssembler::kNumSafepointSavedRegisters =
- Register::kNumAllocatable;
-
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of kNumSafepointRegisters values on the
@@ -585,7 +619,7 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
// Turn potential sNaN into qNaN
- if (!dst.is(src)) ldr(dst, src);
+ if (dst != src) ldr(dst, src);
lzdr(kDoubleRegZero);
sdbr(dst, kDoubleRegZero);
}
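Subtracting zero works because IEEE-754 arithmetic on a signaling NaN yields a quiet NaN while leaving ordinary values unchanged. A host-side illustration:

    #include <cassert>
    #include <cmath>
    #include <limits>

    int main() {
      double snan = std::numeric_limits<double>::signaling_NaN();
      double quieted = snan - 0.0;  // any arithmetic op quiets the NaN
      assert(std::isnan(quieted));
      double x = 1.5;
      assert(x - 0.0 == 1.5);       // ordinary values pass through
    }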
@@ -907,7 +941,7 @@ void TurboAssembler::StubPrologue(StackFrame::Type type, Register base,
}
void TurboAssembler::Prologue(Register base, int prologue_offset) {
- DCHECK(!base.is(no_reg));
+ DCHECK(base != no_reg);
PushStandardFrame(r3);
}
@@ -1192,8 +1226,8 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// passed in registers.
// ARM has some sanity checks as per below; consider adding them for S390.
- // DCHECK(actual.is_immediate() || actual.reg().is(r2));
- // DCHECK(expected.is_immediate() || expected.reg().is(r4));
+ // DCHECK(actual.is_immediate() || actual.reg() == r2);
+ // DCHECK(expected.is_immediate() || expected.reg() == r4);
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
@@ -1287,8 +1321,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- DCHECK(function.is(r3));
- DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r5));
+ DCHECK(function == r3);
+ DCHECK_IMPLIES(new_target.is_valid(), new_target == r5);
// On function call, call into the debugger if necessary.
CheckDebugHook(function, new_target, expected, actual);
@@ -1328,7 +1362,7 @@ void MacroAssembler::InvokeFunction(Register fun, Register new_target,
DCHECK(flag == JUMP_FUNCTION || has_frame());
// Contract with called JS functions requires that function is passed in r3.
- DCHECK(fun.is(r3));
+ DCHECK(fun == r3);
Register expected_reg = r4;
Register temp_reg = r6;
@@ -1350,7 +1384,7 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK(flag == JUMP_FUNCTION || has_frame());
// Contract with called JS functions requires that function is passed in r3.
- DCHECK(function.is(r3));
+ DCHECK(function == r3);
// Get the function and setup the context.
LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
@@ -1408,98 +1442,9 @@ void MacroAssembler::PopStackHandler() {
StoreP(r3, MemOperand(ip));
}
-void MacroAssembler::Allocate(int object_size, Register result,
- Register scratch1, Register scratch2,
- Label* gc_required, AllocationFlags flags) {
- DCHECK(object_size <= kMaxRegularHeapObjectSize);
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- LoadImmP(result, Operand(0x7091));
- LoadImmP(scratch1, Operand(0x7191));
- LoadImmP(scratch2, Operand(0x7291));
- }
- b(gc_required);
- return;
- }
-
- DCHECK(!AreAliased(result, scratch1, scratch2, ip));
-
- // Make object size into bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- object_size *= kPointerSize;
- }
- DCHECK_EQ(0, static_cast<int>(object_size & kObjectAlignmentMask));
-
- // Check relative positions of allocation top and limit addresses.
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
- ExternalReference allocation_limit =
- AllocationUtils::GetAllocationLimitReference(isolate(), flags);
-
- intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
- intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
- DCHECK((limit - top) == kPointerSize);
-
- // Set up allocation top address register.
- Register top_address = scratch1;
- Register result_end = scratch2;
- mov(top_address, Operand(allocation_top));
-
- if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into ip.
- LoadP(result, MemOperand(top_address));
- } else {
- if (emit_debug_code()) {
- // Assert that result actually contains top on entry.
- CmpP(result, MemOperand(top_address));
- Check(eq, kUnexpectedAllocationTop);
- }
- }
-
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
-// Align the next allocation. Storing the filler map without checking top is
-// safe in new-space because the limit of the heap is aligned there.
-#if V8_TARGET_ARCH_S390X
- STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
-#else
- STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
- AndP(result_end, result, Operand(kDoubleAlignmentMask));
- Label aligned;
- beq(&aligned, Label::kNear);
- if ((flags & PRETENURE) != 0) {
- CmpLogicalP(result, MemOperand(top_address, limit - top));
- bge(gc_required);
- }
- mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
- StoreW(result_end, MemOperand(result));
- AddP(result, result, Operand(kDoubleSize / 2));
- bind(&aligned);
-#endif
- }
-
- AddP(result_end, result, Operand(object_size));
-
- // Compare with allocation limit.
- CmpLogicalP(result_end, MemOperand(top_address, limit - top));
- bge(gc_required);
-
- StoreP(result_end, MemOperand(top_address));
-
- if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
- // Prefetch the allocation_top's next cache line in advance to
- // help alleviate potential cache misses.
- // Mode 2 - Prefetch the data into a cache line for store access.
- pfd(static_cast<Condition>(2), MemOperand(result, 256));
- }
-
- // Tag object.
- la(result, MemOperand(result, kHeapObjectTag));
-}
-
void MacroAssembler::CompareObjectType(Register object, Register map,
Register type_reg, InstanceType type) {
- const Register temp = type_reg.is(no_reg) ? r0 : type_reg;
+ const Register temp = type_reg == no_reg ? r0 : type_reg;
LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
CompareInstanceType(map, temp, type);
@@ -1522,41 +1467,6 @@ void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
ConvertIntToDouble(value, ip);
}
-void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map,
- Label* early_success) {
- LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- CompareMap(obj, map, early_success);
-}
-
-void MacroAssembler::CompareMap(Register obj_map, Handle<Map> map,
- Label* early_success) {
- mov(r0, Operand(map));
- CmpP(r0, FieldMemOperand(obj_map, HeapObject::kMapOffset));
-}
-
-void MacroAssembler::CheckMap(Register obj, Register scratch, Handle<Map> map,
- Label* fail, SmiCheckType smi_check_type) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
-
- Label success;
- CompareMap(obj, scratch, map, &success);
- bne(fail);
- bind(&success);
-}
-
-void MacroAssembler::CheckMap(Register obj, Register scratch,
- Heap::RootListIndex index, Label* fail,
- SmiCheckType smi_check_type) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
- LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- CompareRoot(scratch, index);
- bne(fail);
-}
-
void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
mov(value, Operand(cell));
LoadP(value, FieldMemOperand(value, WeakCell::kValueOffset));
@@ -1604,7 +1514,7 @@ void MacroAssembler::TryDoubleToInt32Exact(Register result,
Register scratch,
DoubleRegister double_scratch) {
Label done;
- DCHECK(!double_input.is(double_scratch));
+ DCHECK(double_input != double_scratch);
ConvertDoubleToInt64(result, double_input);
@@ -1703,15 +1613,6 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-void MacroAssembler::SetCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
- if (FLAG_native_code_counters && counter->Enabled()) {
- mov(scratch1, Operand(value));
- mov(scratch2, Operand(ExternalReference(counter)));
- StoreW(scratch1, MemOperand(scratch2));
- }
-}
-
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK(value > 0 && is_int8(value));
@@ -1784,22 +1685,6 @@ void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
LoadP(dst, ContextMemOperand(dst, index));
}
-void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
- Register map,
- Register scratch) {
- // Load the initial map. The global functions all have initial maps.
- LoadP(map,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (emit_debug_code()) {
- Label ok, fail;
- CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
- b(&ok);
- bind(&fail);
- Abort(kGlobalFunctionsMustHaveInitialMap);
- bind(&ok);
- }
-}
-
void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
Label* smi_case) {
STATIC_ASSERT(kSmiTag == 0);
@@ -1909,20 +1794,6 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
}
-void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
- Register first, Register second, Register scratch1, Register scratch2,
- Label* failure) {
- // Test that both first and second are sequential one-byte strings.
- // Assume that they are non-smis.
- LoadP(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
- LoadP(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
- LoadlB(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- LoadlB(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
-
- JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
- scratch2, failure);
-}
-
void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
Label* not_unique_name) {
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
@@ -1935,46 +1806,6 @@ void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
bind(&succeed);
}
-void MacroAssembler::AllocateJSValue(Register result, Register constructor,
- Register value, Register scratch1,
- Register scratch2, Label* gc_required) {
- DCHECK(!result.is(constructor));
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!result.is(value));
-
- // Allocate JSValue in new space.
- Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Initialize the JSValue.
- LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
- StoreP(scratch1, FieldMemOperand(result, HeapObject::kMapOffset), r0);
- LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
- StoreP(scratch1, FieldMemOperand(result, JSObject::kPropertiesOrHashOffset),
- r0);
- StoreP(scratch1, FieldMemOperand(result, JSObject::kElementsOffset), r0);
- StoreP(value, FieldMemOperand(result, JSValue::kValueOffset), r0);
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
-}
-
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
- Register first, Register second, Register scratch1, Register scratch2,
- Label* failure) {
- const int kFlatOneByteStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatOneByteStringTag =
- kStringTag | kOneByteStringTag | kSeqStringTag;
- if (!scratch1.is(first)) LoadRR(scratch1, first);
- if (!scratch2.is(second)) LoadRR(scratch2, second);
- nilf(scratch1, Operand(kFlatOneByteStringMask));
- CmpP(scratch1, Operand(kFlatOneByteStringTag));
- bne(failure);
- nilf(scratch2, Operand(kFlatOneByteStringMask));
- CmpP(scratch2, Operand(kFlatOneByteStringTag));
- bne(failure);
-}
-
static const int kRegisterPassedArguments = 5;
int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
@@ -2023,8 +1854,8 @@ void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(d0, src); }
void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
DoubleRegister src2) {
- if (src2.is(d0)) {
- DCHECK(!src1.is(d2));
+ if (src2 == d0) {
+ DCHECK(src1 != d2);
Move(d2, src2);
Move(d0, src1);
} else {
@@ -2347,7 +2178,7 @@ void TurboAssembler::MulHigh32(Register dst, Register src1,
}
void TurboAssembler::MulHigh32(Register dst, Register src1, Register src2) {
- if (dst.is(src2)) {
+ if (dst == src2) {
std::swap(src1, src2);
}
Generate_MulHigh32(msgfr);
@@ -2396,14 +2227,14 @@ void TurboAssembler::MulHighU32(Register dst, Register src1,
void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
const MemOperand& src2) {
Register result = dst;
- if (src2.rx().is(dst) || src2.rb().is(dst)) dst = r0;
+ if (src2.rx() == dst || src2.rb() == dst) dst = r0;
Generate_Mul32WithOverflowIfCCUnequal(msgf);
- if (!result.is(dst)) llgfr(result, dst);
+ if (result != dst) llgfr(result, dst);
}
void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
Register src2) {
- if (dst.is(src2)) {
+ if (dst == src2) {
std::swap(src1, src2);
}
Generate_Mul32WithOverflowIfCCUnequal(msgfr);
@@ -2434,9 +2265,9 @@ void TurboAssembler::Mul(Register dst, Register src1, Register src2) {
if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
MulPWithCondition(dst, src1, src2);
} else {
- if (dst.is(src2)) {
+ if (dst == src2) {
MulP(dst, src1);
- } else if (dst.is(src1)) {
+ } else if (dst == src1) {
MulP(dst, src2);
} else {
Move(dst, src1);
@@ -2690,7 +2521,7 @@ void TurboAssembler::AddP(Register dst, const Operand& opnd) {
// Add 32-bit (Register dst = Register src + Immediate opnd)
void TurboAssembler::Add32(Register dst, Register src, const Operand& opnd) {
- if (!dst.is(src)) {
+ if (dst != src) {
if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
ahik(dst, src, opnd);
return;
@@ -2709,7 +2540,7 @@ void TurboAssembler::Add32_RRI(Register dst, Register src,
// Add Pointer Size (Register dst = Register src + Immediate opnd)
void TurboAssembler::AddP(Register dst, Register src, const Operand& opnd) {
- if (!dst.is(src)) {
+ if (dst != src) {
if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
AddPImm_RRI(dst, src, opnd);
return;
@@ -2739,7 +2570,7 @@ void TurboAssembler::AddP_ExtendSrc(Register dst, Register src) {
// Add 32-bit (Register dst = Register src1 + Register src2)
void TurboAssembler::Add32(Register dst, Register src1, Register src2) {
- if (!dst.is(src1) && !dst.is(src2)) {
+ if (dst != src1 && dst != src2) {
// We prefer to generate AR/AGR over the non-clobbering ARK/AGRK,
// as AR is a smaller instruction.
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
@@ -2748,7 +2579,7 @@ void TurboAssembler::Add32(Register dst, Register src1, Register src2) {
} else {
lr(dst, src1);
}
- } else if (dst.is(src2)) {
+ } else if (dst == src2) {
src2 = src1;
}
ar(dst, src2);
@@ -2756,7 +2587,7 @@ void TurboAssembler::Add32(Register dst, Register src1, Register src2) {
// Add Pointer Size (Register dst = Register src1 + Register src2)
void TurboAssembler::AddP(Register dst, Register src1, Register src2) {
- if (!dst.is(src1) && !dst.is(src2)) {
+ if (dst != src1 && dst != src2) {
// We prefer to generate AR/AGR over the non-clobbering ARK/AGRK,
// as AR is a smaller instruction.
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
@@ -2765,7 +2596,7 @@ void TurboAssembler::AddP(Register dst, Register src1, Register src2) {
} else {
LoadRR(dst, src1);
}
- } else if (dst.is(src2)) {
+ } else if (dst == src2) {
src2 = src1;
}
AddRR(dst, src2);
@@ -2779,12 +2610,12 @@ void TurboAssembler::AddP(Register dst, Register src1, Register src2) {
void TurboAssembler::AddP_ExtendSrc(Register dst, Register src1,
Register src2) {
#if V8_TARGET_ARCH_S390X
- if (dst.is(src2)) {
+ if (dst == src2) {
// The source we need to sign extend is the same as result.
lgfr(dst, src2);
agr(dst, src1);
} else {
- if (!dst.is(src1)) LoadRR(dst, src1);
+ if (dst != src1) LoadRR(dst, src1);
agfr(dst, src2);
}
#else
@@ -2851,32 +2682,32 @@ void TurboAssembler::AddP(const MemOperand& opnd, const Operand& imm) {
// Add Logical With Carry 32-bit (Register dst = Register src1 + Register src2)
void TurboAssembler::AddLogicalWithCarry32(Register dst, Register src1,
Register src2) {
- if (!dst.is(src2) && !dst.is(src1)) {
+ if (dst != src2 && dst != src1) {
lr(dst, src1);
alcr(dst, src2);
- } else if (!dst.is(src2)) {
+ } else if (dst != src2) {
// dst == src1
- DCHECK(dst.is(src1));
+ DCHECK(dst == src1);
alcr(dst, src2);
} else {
// dst == src2
- DCHECK(dst.is(src2));
+ DCHECK(dst == src2);
alcr(dst, src1);
}
}
// Add Logical 32-bit (Register dst = Register src1 + Register src2)
void TurboAssembler::AddLogical32(Register dst, Register src1, Register src2) {
- if (!dst.is(src2) && !dst.is(src1)) {
+ if (dst != src2 && dst != src1) {
lr(dst, src1);
alr(dst, src2);
- } else if (!dst.is(src2)) {
+ } else if (dst != src2) {
// dst == src1
- DCHECK(dst.is(src1));
+ DCHECK(dst == src1);
alr(dst, src2);
} else {
// dst == src2
- DCHECK(dst.is(src2));
+ DCHECK(dst == src2);
alr(dst, src1);
}
}
@@ -2922,16 +2753,16 @@ void TurboAssembler::AddLogicalP(Register dst, const MemOperand& opnd) {
// src2)
void TurboAssembler::SubLogicalWithBorrow32(Register dst, Register src1,
Register src2) {
- if (!dst.is(src2) && !dst.is(src1)) {
+ if (dst != src2 && dst != src1) {
lr(dst, src1);
slbr(dst, src2);
- } else if (!dst.is(src2)) {
+ } else if (dst != src2) {
// dst == src1
- DCHECK(dst.is(src1));
+ DCHECK(dst == src1);
slbr(dst, src2);
} else {
// dst == src2
- DCHECK(dst.is(src2));
+ DCHECK(dst == src2);
lr(r0, dst);
SubLogicalWithBorrow32(dst, src1, r0);
}
@@ -2939,16 +2770,16 @@ void TurboAssembler::SubLogicalWithBorrow32(Register dst, Register src1,
// Subtract Logical 32-bit (Register dst = Register src1 - Register src2)
void TurboAssembler::SubLogical32(Register dst, Register src1, Register src2) {
- if (!dst.is(src2) && !dst.is(src1)) {
+ if (dst != src2 && dst != src1) {
lr(dst, src1);
slr(dst, src2);
- } else if (!dst.is(src2)) {
+ } else if (dst != src2) {
// dst == src1
- DCHECK(dst.is(src1));
+ DCHECK(dst == src1);
slr(dst, src2);
} else {
// dst == src2
- DCHECK(dst.is(src2));
+ DCHECK(dst == src2);
lr(r0, dst);
SubLogical32(dst, src1, r0);
}
@@ -2999,9 +2830,9 @@ void TurboAssembler::Sub32(Register dst, Register src1, Register src2) {
srk(dst, src1, src2);
return;
}
- if (!dst.is(src1) && !dst.is(src2)) lr(dst, src1);
+ if (dst != src1 && dst != src2) lr(dst, src1);
// In the scenario where dst = src - dst, we need to swap and negate.
- if (!dst.is(src1) && dst.is(src2)) {
+ if (dst != src1 && dst == src2) {
Label done;
lcr(dst, dst); // dst = -dst
b(overflow, &done);
@@ -3019,9 +2850,9 @@ void TurboAssembler::SubP(Register dst, Register src1, Register src2) {
SubP_RRR(dst, src1, src2);
return;
}
- if (!dst.is(src1) && !dst.is(src2)) LoadRR(dst, src1);
+ if (dst != src1 && dst != src2) LoadRR(dst, src1);
// In the scenario where dst = src - dst, we need to swap and negate.
- if (!dst.is(src1) && dst.is(src2)) {
+ if (dst != src1 && dst == src2) {
Label done;
LoadComplementRR(dst, dst); // dst = -dst
b(overflow, &done);
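When dst aliases src2, the helper rewrites src1 - dst as (-dst) + src1, with a branch to patch up the one case where the negation itself overflows (dst == INT_MIN). A sketch of the identity in host arithmetic (64-bit, so the overflow case does not arise here):

    #include <cassert>
    #include <cstdint>

    int64_t SubViaNegate(int64_t src1, int64_t dst) {
      dst = -dst;         // LoadComplementRR: dst = -dst
      return dst + src1;  // AddP: -dst + src1 == src1 - old dst
    }

    int main() {
      assert(SubViaNegate(10, 3) == 7);
      assert(SubViaNegate(-5, -2) == -3);
    }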
@@ -3039,10 +2870,10 @@ void TurboAssembler::SubP(Register dst, Register src1, Register src2) {
void TurboAssembler::SubP_ExtendSrc(Register dst, Register src1,
Register src2) {
#if V8_TARGET_ARCH_S390X
- if (!dst.is(src1) && !dst.is(src2)) LoadRR(dst, src1);
+ if (dst != src1 && dst != src2) LoadRR(dst, src1);
// In the scenario where dst = src - dst, we need to swap and negate.
- if (!dst.is(src1) && dst.is(src2)) {
+ if (dst != src1 && dst == src2) {
lgfr(dst, dst); // Sign extend this operand first.
LoadComplementRR(dst, dst); // dst = -dst
AddP(dst, src1); // dst = -dst + src
@@ -3140,7 +2971,7 @@ void TurboAssembler::AndP(Register dst, Register src) { AndRR(dst, src); }
// Non-clobbering AND 32-bit - dst = src1 & src2
void TurboAssembler::And(Register dst, Register src1, Register src2) {
- if (!dst.is(src1) && !dst.is(src2)) {
+ if (dst != src1 && dst != src2) {
// We prefer to generate NR/NGR over the non-clobbering NRK/NGRK,
// as NR is a smaller instruction.
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
@@ -3149,7 +2980,7 @@ void TurboAssembler::And(Register dst, Register src1, Register src2) {
} else {
lr(dst, src1);
}
- } else if (dst.is(src2)) {
+ } else if (dst == src2) {
src2 = src1;
}
And(dst, src2);
@@ -3157,7 +2988,7 @@ void TurboAssembler::And(Register dst, Register src1, Register src2) {
// Non-clobbering AND pointer size - dst = src1 & src2
void TurboAssembler::AndP(Register dst, Register src1, Register src2) {
- if (!dst.is(src1) && !dst.is(src2)) {
+ if (dst != src1 && dst != src2) {
// We prefer to generate NR/NGR over the non-clobbering NRK/NGRK,
// as NR is a smaller instruction.
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
@@ -3166,7 +2997,7 @@ void TurboAssembler::AndP(Register dst, Register src1, Register src2) {
} else {
LoadRR(dst, src1);
}
- } else if (dst.is(src2)) {
+ } else if (dst == src2) {
src2 = src1;
}
AndP(dst, src2);
@@ -3210,7 +3041,7 @@ void TurboAssembler::AndP(Register dst, const Operand& opnd) {
// AND 32-bit - dst = src & imm
void TurboAssembler::And(Register dst, Register src, const Operand& opnd) {
- if (!dst.is(src)) lr(dst, src);
+ if (dst != src) lr(dst, src);
nilf(dst, opnd);
}
@@ -3250,7 +3081,7 @@ void TurboAssembler::AndP(Register dst, Register src, const Operand& opnd) {
}
// If we are AND'ing with zero, dst will be zeroed anyway, so skip the copy.
- if (!dst.is(src) && (0 != value)) LoadRR(dst, src);
+ if (dst != src && (0 != value)) LoadRR(dst, src);
AndP(dst, opnd);
}
@@ -3262,7 +3093,7 @@ void TurboAssembler::OrP(Register dst, Register src) { OrRR(dst, src); }
// Non-clobbering OR 32-bit - dst = src1 | src2
void TurboAssembler::Or(Register dst, Register src1, Register src2) {
- if (!dst.is(src1) && !dst.is(src2)) {
+ if (dst != src1 && dst != src2) {
// We prefer to generate OR/OGR over the non-clobbering ORK/OGRK,
// as OR is a smaller instruction.
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
@@ -3271,7 +3102,7 @@ void TurboAssembler::Or(Register dst, Register src1, Register src2) {
} else {
lr(dst, src1);
}
- } else if (dst.is(src2)) {
+ } else if (dst == src2) {
src2 = src1;
}
Or(dst, src2);
@@ -3279,7 +3110,7 @@ void TurboAssembler::Or(Register dst, Register src1, Register src2) {
// Non-clobbering OR pointer size - dst = src1 | src2
void TurboAssembler::OrP(Register dst, Register src1, Register src2) {
- if (!dst.is(src1) && !dst.is(src2)) {
+ if (dst != src1 && dst != src2) {
// We prefer to generate OR/OGR over the non-clobbering ORK/OGRK,
// as OR is a smaller instruction.
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
@@ -3288,7 +3119,7 @@ void TurboAssembler::OrP(Register dst, Register src1, Register src2) {
} else {
LoadRR(dst, src1);
}
- } else if (dst.is(src2)) {
+ } else if (dst == src2) {
src2 = src1;
}
OrP(dst, src2);
@@ -3332,13 +3163,13 @@ void TurboAssembler::OrP(Register dst, const Operand& opnd) {
// OR 32-bit - dst = src | imm
void TurboAssembler::Or(Register dst, Register src, const Operand& opnd) {
- if (!dst.is(src)) lr(dst, src);
+ if (dst != src) lr(dst, src);
oilf(dst, opnd);
}
// OR Pointer Size - dst = src | imm
void TurboAssembler::OrP(Register dst, Register src, const Operand& opnd) {
- if (!dst.is(src)) LoadRR(dst, src);
+ if (dst != src) LoadRR(dst, src);
OrP(dst, opnd);
}
@@ -3350,7 +3181,7 @@ void TurboAssembler::XorP(Register dst, Register src) { XorRR(dst, src); }
// Non-clobbering XOR 32-bit - dst = src1 ^ src2
void TurboAssembler::Xor(Register dst, Register src1, Register src2) {
- if (!dst.is(src1) && !dst.is(src2)) {
+ if (dst != src1 && dst != src2) {
// We prefer to generate XR/XGR over the non-clobbering XRK/XGRK,
// as XR is a smaller instruction.
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
@@ -3359,7 +3190,7 @@ void TurboAssembler::Xor(Register dst, Register src1, Register src2) {
} else {
lr(dst, src1);
}
- } else if (dst.is(src2)) {
+ } else if (dst == src2) {
src2 = src1;
}
Xor(dst, src2);
@@ -3367,7 +3198,7 @@ void TurboAssembler::Xor(Register dst, Register src1, Register src2) {
// Non-clobbering XOR pointer size - dst = src1 ^ src2
void TurboAssembler::XorP(Register dst, Register src1, Register src2) {
- if (!dst.is(src1) && !dst.is(src2)) {
+ if (dst != src1 && dst != src2) {
// We prefer to generate XR/XGR over the non-clobbering XRK/XGRK,
// as XR is a smaller instruction.
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
@@ -3376,7 +3207,7 @@ void TurboAssembler::XorP(Register dst, Register src1, Register src2) {
} else {
LoadRR(dst, src1);
}
- } else if (dst.is(src2)) {
+ } else if (dst == src2) {
src2 = src1;
}
XorP(dst, src2);
@@ -3417,23 +3248,23 @@ void TurboAssembler::XorP(Register dst, const Operand& opnd) {
// XOR 32-bit - dst = src ^ imm
void TurboAssembler::Xor(Register dst, Register src, const Operand& opnd) {
- if (!dst.is(src)) lr(dst, src);
+ if (dst != src) lr(dst, src);
xilf(dst, opnd);
}
// XOR Pointer Size - dst = src ^ imm
void TurboAssembler::XorP(Register dst, Register src, const Operand& opnd) {
- if (!dst.is(src)) LoadRR(dst, src);
+ if (dst != src) LoadRR(dst, src);
XorP(dst, opnd);
}
void TurboAssembler::Not32(Register dst, Register src) {
- if (!src.is(no_reg) && !src.is(dst)) lr(dst, src);
+ if (src != no_reg && src != dst) lr(dst, src);
xilf(dst, Operand(0xFFFFFFFF));
}
void TurboAssembler::Not64(Register dst, Register src) {
- if (!src.is(no_reg) && !src.is(dst)) lgr(dst, src);
+ if (src != no_reg && src != dst) lgr(dst, src);
xihf(dst, Operand(0xFFFFFFFF));
xilf(dst, Operand(0xFFFFFFFF));
}
@@ -3733,7 +3564,7 @@ void TurboAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
Register scratch) {
#if V8_TARGET_ARCH_S390X
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
- if (!dst.is(src)) LoadRR(dst, src);
+ if (dst != src) LoadRR(dst, src);
aih(dst, Operand(reinterpret_cast<intptr_t>(smi) >> 32));
} else {
LoadSmiLiteral(scratch, smi);
@@ -3748,7 +3579,7 @@ void TurboAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
Register scratch) {
#if V8_TARGET_ARCH_S390X
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
- if (!dst.is(src)) LoadRR(dst, src);
+ if (dst != src) LoadRR(dst, src);
aih(dst, Operand((-reinterpret_cast<intptr_t>(smi)) >> 32));
} else {
LoadSmiLiteral(scratch, smi);
@@ -3760,7 +3591,7 @@ void TurboAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
}
void TurboAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi) {
- if (!dst.is(src)) LoadRR(dst, src);
+ if (dst != src) LoadRR(dst, src);
#if V8_TARGET_ARCH_S390X
DCHECK((reinterpret_cast<intptr_t>(smi) & 0xffffffff) == 0);
int value = static_cast<int>(reinterpret_cast<intptr_t>(smi) >> 32);
@@ -3775,7 +3606,7 @@ void TurboAssembler::LoadP(Register dst, const MemOperand& mem,
Register scratch) {
int offset = mem.offset();
- if (!scratch.is(no_reg) && !is_int20(offset)) {
+ if (scratch != no_reg && !is_int20(offset)) {
/* cannot use d-form */
LoadIntLiteral(scratch, offset);
#if V8_TARGET_ARCH_S390X
@@ -3800,8 +3631,8 @@ void TurboAssembler::LoadP(Register dst, const MemOperand& mem,
void TurboAssembler::StoreP(Register src, const MemOperand& mem,
Register scratch) {
if (!is_int20(mem.offset())) {
- DCHECK(!scratch.is(no_reg));
- DCHECK(!scratch.is(r0));
+ DCHECK(scratch != no_reg);
+ DCHECK(scratch != r0);
LoadIntLiteral(scratch, mem.offset());
#if V8_TARGET_ARCH_S390X
stg(src, MemOperand(mem.rb(), scratch));
@@ -3827,7 +3658,7 @@ void TurboAssembler::StoreP(const MemOperand& mem, const Operand& opnd,
// Try to use MVGHI/MVHI
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_uint12(mem.offset()) &&
- mem.getIndexRegister().is(r0) && is_int16(opnd.immediate())) {
+ mem.getIndexRegister() == r0 && is_int16(opnd.immediate())) {
#if V8_TARGET_ARCH_S390X
mvghi(mem, opnd);
#else
@@ -3894,7 +3725,7 @@ void TurboAssembler::LoadW(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
lgfr(dst, src);
#else
- if (!dst.is(src)) lr(dst, src);
+ if (dst != src) lr(dst, src);
#endif
}
@@ -3904,7 +3735,7 @@ void TurboAssembler::LoadW(Register dst, const MemOperand& mem,
int offset = mem.offset();
if (!is_int20(offset)) {
- DCHECK(!scratch.is(no_reg));
+ DCHECK(scratch != no_reg);
LoadIntLiteral(scratch, offset);
#if V8_TARGET_ARCH_S390X
lgf(dst, MemOperand(mem.rb(), scratch));
@@ -3929,7 +3760,7 @@ void TurboAssembler::LoadlW(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
llgfr(dst, src);
#else
- if (!dst.is(src)) lr(dst, src);
+ if (dst != src) lr(dst, src);
#endif
}
@@ -3943,7 +3774,7 @@ void TurboAssembler::LoadlW(Register dst, const MemOperand& mem,
#if V8_TARGET_ARCH_S390X
if (is_int20(offset)) {
llgf(dst, mem);
- } else if (!scratch.is(no_reg)) {
+ } else if (scratch != no_reg) {
// Materialize offset into scratch register.
LoadIntLiteral(scratch, offset);
llgf(dst, MemOperand(base, scratch));
@@ -3959,7 +3790,7 @@ void TurboAssembler::LoadlW(Register dst, const MemOperand& mem,
} else if (is_int20(offset)) {
// RXY-format supports signed 20-bits offset.
use_RXYform = true;
- } else if (!scratch.is(no_reg)) {
+ } else if (scratch != no_reg) {
// Materialize offset into scratch register.
LoadIntLiteral(scratch, offset);
} else {
@@ -4249,7 +4080,7 @@ void TurboAssembler::StoreW(Register src, const MemOperand& mem,
} else if (is_int20(offset)) {
// RXY-format supports signed 20-bits offset.
use_RXYform = true;
- } else if (!scratch.is(no_reg)) {
+ } else if (scratch != no_reg) {
// Materialize offset into scratch register.
LoadIntLiteral(scratch, offset);
} else {
@@ -4274,7 +4105,7 @@ void TurboAssembler::LoadHalfWordP(Register dst, const MemOperand& mem,
int offset = mem.offset();
if (!is_int20(offset)) {
- DCHECK(!scratch.is(no_reg));
+ DCHECK(scratch != no_reg);
LoadIntLiteral(scratch, offset);
#if V8_TARGET_ARCH_S390X
lgh(dst, MemOperand(base, scratch));
@@ -4306,7 +4137,7 @@ void TurboAssembler::StoreHalfWord(Register src, const MemOperand& mem,
} else if (is_int20(offset)) {
sthy(src, mem);
} else {
- DCHECK(!scratch.is(no_reg));
+ DCHECK(scratch != no_reg);
LoadIntLiteral(scratch, offset);
sth(src, MemOperand(base, scratch));
}
@@ -4324,7 +4155,7 @@ void TurboAssembler::StoreByte(Register src, const MemOperand& mem,
} else if (is_int20(offset)) {
stcy(src, mem);
} else {
- DCHECK(!scratch.is(no_reg));
+ DCHECK(scratch != no_reg);
LoadIntLiteral(scratch, offset);
stc(src, MemOperand(base, scratch));
}
@@ -4332,7 +4163,7 @@ void TurboAssembler::StoreByte(Register src, const MemOperand& mem,
// Shift left logical for 32-bit integer types.
void TurboAssembler::ShiftLeft(Register dst, Register src, const Operand& val) {
- if (dst.is(src)) {
+ if (dst == src) {
sll(dst, val);
} else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
sllk(dst, src, val);
@@ -4344,12 +4175,12 @@ void TurboAssembler::ShiftLeft(Register dst, Register src, const Operand& val) {
// Shift left logical for 32-bit integer types.
void TurboAssembler::ShiftLeft(Register dst, Register src, Register val) {
- if (dst.is(src)) {
+ if (dst == src) {
sll(dst, val);
} else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
sllk(dst, src, val);
} else {
- DCHECK(!dst.is(val)); // The lr/sll path clobbers val.
+ DCHECK(dst != val); // The lr/sll path clobbers val.
lr(dst, src);
sll(dst, val);
}
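All of the shift helpers here follow the same three-way dispatch: shift in place when dst aliases src, use the three-operand DISTINCT_OPS form when the facility is present, and otherwise copy then shift, which is why dst must not alias the shift-amount register on that last path. A host-side model of the selection:

    #include <cassert>
    #include <string>

    std::string PickShiftLeftOp(bool dst_is_src, bool distinct_ops,
                                bool dst_is_val) {
      if (dst_is_src) return "sll";     // shift in place
      if (distinct_ops) return "sllk";  // non-clobbering 3-operand form
      assert(!dst_is_val);              // lr would clobber val first
      return "lr+sll";                  // copy, then shift
    }

    int main() {
      assert(PickShiftLeftOp(true, false, false) == "sll");
      assert(PickShiftLeftOp(false, true, true) == "sllk");
      assert(PickShiftLeftOp(false, false, false) == "lr+sll");
    }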
@@ -4358,7 +4189,7 @@ void TurboAssembler::ShiftLeft(Register dst, Register src, Register val) {
// Shift right logical for 32-bit integer types.
void TurboAssembler::ShiftRight(Register dst, Register src,
const Operand& val) {
- if (dst.is(src)) {
+ if (dst == src) {
srl(dst, val);
} else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
srlk(dst, src, val);
@@ -4370,12 +4201,12 @@ void TurboAssembler::ShiftRight(Register dst, Register src,
// Shift right logical for 32-bit integer types.
void TurboAssembler::ShiftRight(Register dst, Register src, Register val) {
- if (dst.is(src)) {
+ if (dst == src) {
srl(dst, val);
} else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
srlk(dst, src, val);
} else {
- DCHECK(!dst.is(val)); // The lr/srl path clobbers val.
+ DCHECK(dst != val); // The lr/srl path clobbers val.
lr(dst, src);
srl(dst, val);
}
@@ -4384,7 +4215,7 @@ void TurboAssembler::ShiftRight(Register dst, Register src, Register val) {
// Shift left arithmetic for 32-bit integer types.
void TurboAssembler::ShiftLeftArith(Register dst, Register src,
const Operand& val) {
- if (dst.is(src)) {
+ if (dst == src) {
sla(dst, val);
} else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
slak(dst, src, val);
@@ -4396,12 +4227,12 @@ void TurboAssembler::ShiftLeftArith(Register dst, Register src,
// Shift left arithmetic for 32-bit integer types.
void TurboAssembler::ShiftLeftArith(Register dst, Register src, Register val) {
- if (dst.is(src)) {
+ if (dst == src) {
sla(dst, val);
} else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
slak(dst, src, val);
} else {
- DCHECK(!dst.is(val)); // The lr/sla path clobbers val.
+ DCHECK(dst != val); // The lr/sla path clobbers val.
lr(dst, src);
sla(dst, val);
}
@@ -4410,7 +4241,7 @@ void TurboAssembler::ShiftLeftArith(Register dst, Register src, Register val) {
// Shift right arithmetic for 32-bit integer types.
void TurboAssembler::ShiftRightArith(Register dst, Register src,
const Operand& val) {
- if (dst.is(src)) {
+ if (dst == src) {
sra(dst, val);
} else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
srak(dst, src, val);
@@ -4422,12 +4253,12 @@ void TurboAssembler::ShiftRightArith(Register dst, Register src,
// Shift right arithmetic for 32-bit integer types.
void TurboAssembler::ShiftRightArith(Register dst, Register src, Register val) {
- if (dst.is(src)) {
+ if (dst == src) {
sra(dst, val);
} else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
srak(dst, src, val);
} else {
- DCHECK(!dst.is(val)); // The lr/sra path clobbers val.
+ DCHECK(dst != val); // The lr/sra path clobbers val.
lr(dst, src);
sra(dst, val);
}
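All four shift helpers above share one dispatch shape: use the two-operand form when dst aliases src, prefer the three-operand form when the distinct-operands facility is available, and otherwise copy then shift, which is why the DCHECKs forbid dst aliasing the shift amount. A compilable sketch of that selection, with plain ints standing in for registers:

#include <cassert>

enum class ShiftLowering { kTwoOperand, kDistinctOps, kCopyThenShift };

// has_distinct_ops stands in for CpuFeatures::IsSupported(DISTINCT_OPS).
ShiftLowering SelectShiftLowering(int dst, int src, int val,
                                  bool has_distinct_ops) {
  if (dst == src) return ShiftLowering::kTwoOperand;         // sll/srl/sla/sra
  if (has_distinct_ops) return ShiftLowering::kDistinctOps;  // sllk/srlk/...
  assert(dst != val);  // lr(dst, src) would clobber the shift amount
  return ShiftLowering::kCopyThenShift;                      // lr + shift
}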
@@ -4448,7 +4279,7 @@ void TurboAssembler::ClearRightImm(Register dst, Register src,
uint64_t hexMask = ~((1L << numBitsToClear) - 1);
// The S390 AND instruction clobbers the source. Make a copy if necessary.
- if (!dst.is(src)) LoadRR(dst, src);
+ if (dst != src) LoadRR(dst, src);
if (numBitsToClear <= 16) {
nill(dst, Operand(static_cast<uint16_t>(hexMask)));
@@ -4461,8 +4292,8 @@ void TurboAssembler::ClearRightImm(Register dst, Register src,
}
void TurboAssembler::Popcnt32(Register dst, Register src) {
- DCHECK(!src.is(r0));
- DCHECK(!dst.is(r0));
+ DCHECK(src != r0);
+ DCHECK(dst != r0);
popcnt(dst, src);
ShiftRight(r0, dst, Operand(16));
@@ -4474,8 +4305,8 @@ void TurboAssembler::Popcnt32(Register dst, Register src) {
#ifdef V8_TARGET_ARCH_S390X
void TurboAssembler::Popcnt64(Register dst, Register src) {
- DCHECK(!src.is(r0));
- DCHECK(!dst.is(r0));
+ DCHECK(src != r0);
+ DCHECK(dst != r0);
popcnt(dst, src);
ShiftRightP(r0, dst, Operand(32));
diff --git a/deps/v8/src/s390/macro-assembler-s390.h b/deps/v8/src/s390/macro-assembler-s390.h
index ecc22deee5..707b21f1bb 100644
--- a/deps/v8/src/s390/macro-assembler-s390.h
+++ b/deps/v8/src/s390/macro-assembler-s390.h
@@ -8,25 +8,26 @@
#include "src/assembler.h"
#include "src/bailout-reason.h"
#include "src/globals.h"
+#include "src/s390/assembler-s390.h"
namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
-const Register kReturnRegister0 = {Register::kCode_r2};
-const Register kReturnRegister1 = {Register::kCode_r3};
-const Register kReturnRegister2 = {Register::kCode_r4};
-const Register kJSFunctionRegister = {Register::kCode_r3};
-const Register kContextRegister = {Register::kCode_r13};
-const Register kAllocateSizeRegister = {Register::kCode_r3};
-const Register kInterpreterAccumulatorRegister = {Register::kCode_r2};
-const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r6};
-const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r7};
-const Register kInterpreterDispatchTableRegister = {Register::kCode_r8};
-const Register kJavaScriptCallArgCountRegister = {Register::kCode_r2};
-const Register kJavaScriptCallNewTargetRegister = {Register::kCode_r5};
-const Register kRuntimeCallFunctionRegister = {Register::kCode_r3};
-const Register kRuntimeCallArgCountRegister = {Register::kCode_r2};
+const Register kReturnRegister0 = r2;
+const Register kReturnRegister1 = r3;
+const Register kReturnRegister2 = r4;
+const Register kJSFunctionRegister = r3;
+const Register kContextRegister = r13;
+const Register kAllocateSizeRegister = r3;
+const Register kInterpreterAccumulatorRegister = r2;
+const Register kInterpreterBytecodeOffsetRegister = r6;
+const Register kInterpreterBytecodeArrayRegister = r7;
+const Register kInterpreterDispatchTableRegister = r8;
+const Register kJavaScriptCallArgCountRegister = r2;
+const Register kJavaScriptCallNewTargetRegister = r5;
+const Register kRuntimeCallFunctionRegister = r3;
+const Register kRuntimeCallArgCountRegister = r2;
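The switch from brace-initialized {Register::kCode_r2} structs to plain aliases reflects Register becoming a real class (see the safepoint-table.h hunk below, which changes struct Register to class Register), which is also what enables the ==/!= comparisons replacing .is() throughout this commit. A hypothetical, much-reduced model of the idea:

// Sketch only: a constexpr value class, so the aliases below are plain
// copies and compare by register code.
class Register {
 public:
  constexpr explicit Register(int code) : code_(code) {}
  constexpr bool operator==(Register other) const { return code_ == other.code_; }
  constexpr bool operator!=(Register other) const { return code_ != other.code_; }
 private:
  int code_;
};

constexpr Register r2 = Register(2);
constexpr Register kReturnRegister0 = r2;
constexpr Register kInterpreterAccumulatorRegister = r2;
static_assert(kReturnRegister0 == kInterpreterAccumulatorRegister,
              "the calling conventions deliberately overlap on r2");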
// ----------------------------------------------------------------------------
// Static helper functions
@@ -56,10 +57,6 @@ enum TaggingMode {
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-enum PointersToHereCheck {
- kPointersToHereMaybeInteresting,
- kPointersToHereAreAlwaysInteresting
-};
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
@@ -164,13 +161,7 @@ bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
class TurboAssembler : public Assembler {
public:
TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
- CodeObjectRequired create_code_object)
- : Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
- if (create_code_object == CodeObjectRequired::kYes) {
- code_object_ =
- Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
- }
- }
+ CodeObjectRequired create_code_object);
Isolate* isolate() const { return isolate_; }
@@ -225,18 +216,36 @@ class TurboAssembler : public Assembler {
void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src);
+ void SaveRegisters(RegList registers);
+ void RestoreRegisters(RegList registers);
+
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode);
+
void MultiPush(RegList regs, Register location = sp);
void MultiPop(RegList regs, Register location = sp);
void MultiPushDoubles(RegList dregs, Register location = sp);
void MultiPopDoubles(RegList dregs, Register location = sp);
- void PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
- Register exclusion2 = no_reg,
- Register exclusion3 = no_reg);
- void PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ // Calculate how much stack space (in bytes) is required to store caller
+ // registers, excluding those specified in the arguments.
+ int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg) const;
+
+ // Push caller-saved registers on the stack, and return the number of bytes
+ // by which the stack pointer is adjusted.
+ int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
+ // Restore caller-saved registers from the stack, and return the number of
+ // bytes by which the stack pointer is adjusted.
+ int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
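A rough sketch of how a calculation like RequiredStackSizeForCallerSaved can be derived from a register bit set. The mask and RegList layout here are hypothetical, and the real method additionally accounts for FP registers when fp_mode == kSaveFPRegs:

#include <cstdint>

using RegList = uint32_t;        // bit i set => register ri (simplified)
constexpr int kPointerSize = 8;  // 64-bit s390x
constexpr RegList kCallerSavedGP = 0x000001FC;  // hypothetical r2..r8 mask

static int CountRegs(RegList regs) {
  int n = 0;
  for (; regs != 0; regs &= regs - 1) n++;  // clear lowest set bit
  return n;
}

// Byte size needed to spill all caller-saved GP registers minus exclusions.
int RequiredStackSizeForCallerSaved(RegList exclusions) {
  return CountRegs(kCallerSavedGP & ~exclusions) * kPointerSize;
}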
// Load an object from the root table.
void LoadRoot(Register destination, Heap::RootListIndex index,
@@ -556,16 +565,16 @@ class TurboAssembler : public Assembler {
// Push five registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4,
Register src5) {
- DCHECK(!src1.is(src2));
- DCHECK(!src1.is(src3));
- DCHECK(!src2.is(src3));
- DCHECK(!src1.is(src4));
- DCHECK(!src2.is(src4));
- DCHECK(!src3.is(src4));
- DCHECK(!src1.is(src5));
- DCHECK(!src2.is(src5));
- DCHECK(!src3.is(src5));
- DCHECK(!src4.is(src5));
+ DCHECK(src1 != src2);
+ DCHECK(src1 != src3);
+ DCHECK(src2 != src3);
+ DCHECK(src1 != src4);
+ DCHECK(src2 != src4);
+ DCHECK(src3 != src4);
+ DCHECK(src1 != src5);
+ DCHECK(src2 != src5);
+ DCHECK(src3 != src5);
+ DCHECK(src4 != src5);
lay(sp, MemOperand(sp, -kPointerSize * 5));
StoreP(src1, MemOperand(sp, kPointerSize * 4));
@@ -878,7 +887,7 @@ class TurboAssembler : public Assembler {
} else {
if (rangeEnd > 0) // Don't need to shift if rangeEnd is zero.
ShiftRightP(dst, src, Operand(rangeEnd));
- else if (!dst.is(src)) // If we didn't shift, we might need to copy
+ else if (dst != src) // If we didn't shift, we might need to copy
LoadRR(dst, src);
int width = rangeStart - rangeEnd + 1;
#if V8_TARGET_ARCH_S390X
@@ -1057,27 +1066,6 @@ class MacroAssembler : public TurboAssembler {
// sets the flags and leaves the object type in the type_reg register.
void CompareInstanceType(Register map, Register type_reg, InstanceType type);
- // Compare an object's map with the specified map and its transitioned
- // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
- // set with result of map compare. If multiple map compares are required, the
- // compare sequence branches to early_success.
- void CompareMap(Register obj, Register scratch, Handle<Map> map,
- Label* early_success);
-
- // As above, but the map of the object is already loaded into the register
- // which is preserved by the code generated.
- void CompareMap(Register obj_map, Handle<Map> map, Label* early_success);
-
- // Check if the map of an object is equal to a specified map and branch to
- // label if not. Skip the smi check if not required (object is known to be a
- // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
- // against maps that are ElementsKind transition maps of the specified map.
- void CheckMap(Register obj, Register scratch, Handle<Map> map, Label* fail,
- SmiCheckType smi_check_type);
-
- void CheckMap(Register obj, Register scratch, Heap::RootListIndex index,
- Label* fail, SmiCheckType smi_check_type);
-
void GetWeakValue(Register value, Handle<WeakCell> cell);
// Load the value of the weak cell in the value register. Branch to the given
@@ -1122,8 +1110,6 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// StatsCounter support
- void SetCounter(StatsCounter* counter, int value, Register scratch1,
- Register scratch2);
void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
Register scratch2);
void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
@@ -1192,11 +1178,6 @@ class MacroAssembler : public TurboAssembler {
void EnterBuiltinFrame(Register context, Register target, Register argc);
void LeaveBuiltinFrame(Register context, Register target, Register argc);
- // Load the global object from the current context.
- void LoadGlobalObject(Register dst) {
- LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
- }
-
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst) {
LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
@@ -1204,30 +1185,6 @@ class MacroAssembler : public TurboAssembler {
void LoadNativeContextSlot(int index, Register dst);
- // Load the initial map from the global function. The registers
- // function and map can be the same, function is then overwritten.
- void LoadGlobalFunctionInitialMap(Register function, Register map,
- Register scratch);
-
- // ---------------------------------------------------------------------------
- // Allocation support
-
- // Allocate an object in new space or old pointer space. The object_size is
- // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
- // is passed. If the space is exhausted control continues at the gc_required
- // label. The allocated object is returned in result. If the flag
- // tag_allocated_object is true the result is tagged as a heap object.
- // All registers are clobbered also when control continues at the gc_required
- // label.
- void Allocate(int object_size, Register result, Register scratch1,
- Register scratch2, Label* gc_required, AllocationFlags flags);
-
- // Allocate and initialize a JSValue wrapper with the specified {constructor}
- // and {value}.
- void AllocateJSValue(Register result, Register constructor, Register value,
- Register scratch1, Register scratch2,
- Label* gc_required);
-
// ---------------------------------------------------------------------------
// Smi utilities
@@ -1295,20 +1252,6 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// String utilities
- // Checks if both objects are sequential one-byte strings and jumps to label
- // if either is not. Assumes that neither object is a smi.
- void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
- Register object2,
- Register scratch1,
- Register scratch2,
- Label* failure);
-
- // Checks if both instance types are sequential one-byte strings and jumps to
- // label if either is not.
- void JumpIfBothInstanceTypesAreNotSequentialOneByte(
- Register first_object_instance_type, Register second_object_instance_type,
- Register scratch1, Register scratch2, Label* failure);
-
void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
void LoadInstanceDescriptors(Register map, Register descriptors);
@@ -1331,15 +1274,12 @@ class MacroAssembler : public TurboAssembler {
void IncrementalMarkingRecordWriteHelper(Register object, Register value,
Register address);
- enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
-
// Record in the remembered set the fact that we have a pointer to new space
// at the address pointed to by the addr register. Only works if addr is not
// in new space.
void RememberedSetHelper(Register object, // Used for debug code.
Register addr, Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetFinalAction and_then);
+ SaveFPRegsMode save_fp);
void CallJSEntry(Register target);
static int CallSizeNotPredictableCodeSize(Address target,
@@ -1380,26 +1320,7 @@ class MacroAssembler : public TurboAssembler {
Register object, int offset, Register value, Register scratch,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting);
-
- // As above, but the offset has the tag presubtracted. For use with
- // MemOperand(reg, off).
- inline void RecordWriteContextSlot(
- Register context, int offset, Register value, Register scratch,
- LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting) {
- RecordWriteField(context, offset + kHeapObjectTag, value, scratch,
- lr_status, save_fp, remembered_set_action, smi_check,
- pointers_to_here_check_for_value);
- }
-
- void RecordWriteForMap(Register object, Register map, Register dst,
- LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);
+ SmiCheck smi_check = INLINE_SMI_CHECK);
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
@@ -1408,9 +1329,7 @@ class MacroAssembler : public TurboAssembler {
Register object, Register address, Register value,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting);
+ SmiCheck smi_check = INLINE_SMI_CHECK);
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
@@ -1440,9 +1359,6 @@ class MacroAssembler : public TurboAssembler {
inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
Register mask_reg);
- static const RegList kSafepointSavedRegisters;
- static const int kNumSafepointSavedRegisters;
-
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
diff --git a/deps/v8/src/s390/simulator-s390.cc b/deps/v8/src/s390/simulator-s390.cc
index 1900117502..5647e0f980 100644
--- a/deps/v8/src/s390/simulator-s390.cc
+++ b/deps/v8/src/s390/simulator-s390.cc
@@ -1942,10 +1942,6 @@ typedef intptr_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1,
typedef ObjectPair (*SimulatorRuntimePairCall)(intptr_t arg0, intptr_t arg1,
intptr_t arg2, intptr_t arg3,
intptr_t arg4, intptr_t arg5);
-typedef ObjectTriple (*SimulatorRuntimeTripleCall)(intptr_t arg0, intptr_t arg1,
- intptr_t arg2, intptr_t arg3,
- intptr_t arg4,
- intptr_t arg5);
// These prototypes handle the four types of FP calls.
typedef int (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
@@ -1980,9 +1976,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
int arg0_regnum = 2;
intptr_t result_buffer = 0;
bool uses_result_buffer =
- redirection->type() == ExternalReference::BUILTIN_CALL_TRIPLE ||
- (redirection->type() == ExternalReference::BUILTIN_CALL_PAIR &&
- !ABI_RETURNS_OBJECTPAIR_IN_REGS);
+ redirection->type() == ExternalReference::BUILTIN_CALL_PAIR &&
+ !ABI_RETURNS_OBJECTPAIR_IN_REGS;
if (uses_result_buffer) {
result_buffer = get_register(r2);
arg0_regnum++;
@@ -2188,52 +2183,35 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
- if (redirection->type() == ExternalReference::BUILTIN_CALL_TRIPLE) {
- SimulatorRuntimeTripleCall target =
- reinterpret_cast<SimulatorRuntimeTripleCall>(external);
- ObjectTriple result =
+ if (redirection->type() == ExternalReference::BUILTIN_CALL_PAIR) {
+ SimulatorRuntimePairCall target =
+ reinterpret_cast<SimulatorRuntimePairCall>(external);
+ ObjectPair result =
target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+ intptr_t x;
+ intptr_t y;
+ decodeObjectPair(&result, &x, &y);
if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned {%08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
- "}\n",
- reinterpret_cast<intptr_t>(result.x),
- reinterpret_cast<intptr_t>(result.y),
- reinterpret_cast<intptr_t>(result.z));
+ PrintF("Returned {%08" V8PRIxPTR ", %08" V8PRIxPTR "}\n", x, y);
}
- memcpy(reinterpret_cast<void*>(result_buffer), &result,
- sizeof(ObjectTriple));
- set_register(r2, result_buffer);
- } else {
- if (redirection->type() == ExternalReference::BUILTIN_CALL_PAIR) {
- SimulatorRuntimePairCall target =
- reinterpret_cast<SimulatorRuntimePairCall>(external);
- ObjectPair result =
- target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
- intptr_t x;
- intptr_t y;
- decodeObjectPair(&result, &x, &y);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned {%08" V8PRIxPTR ", %08" V8PRIxPTR "}\n", x, y);
- }
- if (ABI_RETURNS_OBJECTPAIR_IN_REGS) {
- set_register(r2, x);
- set_register(r3, y);
- } else {
- memcpy(reinterpret_cast<void*>(result_buffer), &result,
- sizeof(ObjectPair));
- set_register(r2, result_buffer);
- }
+ if (ABI_RETURNS_OBJECTPAIR_IN_REGS) {
+ set_register(r2, x);
+ set_register(r3, y);
} else {
- DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
- intptr_t result = target(arg[0], arg[1], arg[2], arg[3], arg[4],
- arg[5], arg[6], arg[7], arg[8]);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %08" V8PRIxPTR "\n", result);
- }
- set_register(r2, result);
+ memcpy(reinterpret_cast<void*>(result_buffer), &result,
+ sizeof(ObjectPair));
+ set_register(r2, result_buffer);
+ }
+ } else {
+ DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+ intptr_t result = target(arg[0], arg[1], arg[2], arg[3], arg[4],
+ arg[5], arg[6], arg[7], arg[8]);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %08" V8PRIxPTR "\n", result);
}
+ set_register(r2, result);
}
// #if !V8_TARGET_ARCH_S390X
// DCHECK(redirection->type() ==
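The surviving pair-call path distinguishes two return conventions: ABIs that return a two-pointer struct in the r2/r3 register pair, and ABIs that return it through a hidden result buffer whose address was passed in r2 and is echoed back as the return value. A compact sketch of that branch, using a simplified model rather than the simulator's actual types:

#include <cstdint>
#include <cstring>

struct ObjectPair {  // simplified model of a two-pointer runtime result
  intptr_t x;
  intptr_t y;
};

void ReturnObjectPair(const ObjectPair& result, bool pair_in_regs,
                      intptr_t* r2, intptr_t* r3, void* result_buffer) {
  if (pair_in_regs) {
    *r2 = result.x;  // set_register(r2, x)
    *r3 = result.y;  // set_register(r3, y)
  } else {
    // Copy the struct into the caller-provided buffer and return its address.
    std::memcpy(result_buffer, &result, sizeof(ObjectPair));
    *r2 = reinterpret_cast<intptr_t>(result_buffer);
  }
}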
diff --git a/deps/v8/src/safepoint-table.h b/deps/v8/src/safepoint-table.h
index 03459d5994..057d5d8c5e 100644
--- a/deps/v8/src/safepoint-table.h
+++ b/deps/v8/src/safepoint-table.h
@@ -14,7 +14,7 @@
namespace v8 {
namespace internal {
-struct Register;
+class Register;
class SafepointEntry BASE_EMBEDDED {
public:
diff --git a/deps/v8/src/setup-isolate-deserialize.cc b/deps/v8/src/setup-isolate-deserialize.cc
index eec5f60a33..a97b77fac6 100644
--- a/deps/v8/src/setup-isolate-deserialize.cc
+++ b/deps/v8/src/setup-isolate-deserialize.cc
@@ -12,14 +12,13 @@
namespace v8 {
namespace internal {
-void SetupIsolateDelegate::SetupBuiltins(Isolate* isolate,
- bool create_heap_objects) {
- DCHECK(!create_heap_objects);
+void SetupIsolateDelegate::SetupBuiltins(Isolate* isolate) {
+ DCHECK(!create_heap_objects_);
// No actual work to be done; builtins will be deserialized from the snapshot.
}
void SetupIsolateDelegate::SetupInterpreter(
- interpreter::Interpreter* interpreter, bool create_heap_objects) {
+ interpreter::Interpreter* interpreter) {
#if defined(V8_USE_SNAPSHOT) && !defined(V8_USE_SNAPSHOT_WITH_UNWINDING_INFO)
if (FLAG_perf_prof_unwinding_info) {
OFStream os(stdout);
@@ -31,5 +30,11 @@ void SetupIsolateDelegate::SetupInterpreter(
DCHECK(interpreter->IsDispatchTableInitialized());
}
+bool SetupIsolateDelegate::SetupHeap(Heap* heap) {
+ DCHECK(!create_heap_objects_);
+ // No actual work to be done; heap will be deserialized from the snapshot.
+ return true;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/setup-isolate-full.cc b/deps/v8/src/setup-isolate-full.cc
index 9dd7e64d29..14ef318b67 100644
--- a/deps/v8/src/setup-isolate-full.cc
+++ b/deps/v8/src/setup-isolate-full.cc
@@ -5,6 +5,7 @@
#include "src/setup-isolate.h"
#include "src/base/logging.h"
+#include "src/heap/heap-inl.h"
#include "src/interpreter/interpreter.h"
#include "src/interpreter/setup-interpreter.h"
#include "src/isolate.h"
@@ -12,9 +13,8 @@
namespace v8 {
namespace internal {
-void SetupIsolateDelegate::SetupBuiltins(Isolate* isolate,
- bool create_heap_objects) {
- if (create_heap_objects) {
+void SetupIsolateDelegate::SetupBuiltins(Isolate* isolate) {
+ if (create_heap_objects_) {
SetupBuiltinsInternal(isolate);
} else {
DCHECK(isolate->snapshot_available());
@@ -22,13 +22,22 @@ void SetupIsolateDelegate::SetupBuiltins(Isolate* isolate,
}
void SetupIsolateDelegate::SetupInterpreter(
- interpreter::Interpreter* interpreter, bool create_heap_objects) {
- if (create_heap_objects) {
+ interpreter::Interpreter* interpreter) {
+ if (create_heap_objects_) {
interpreter::SetupInterpreter::InstallBytecodeHandlers(interpreter);
} else {
DCHECK(interpreter->IsDispatchTableInitialized());
}
}
+bool SetupIsolateDelegate::SetupHeap(Heap* heap) {
+ if (create_heap_objects_) {
+ return SetupHeapInternal(heap);
+ } else {
+ DCHECK(heap->isolate()->snapshot_available());
+ return true;
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/setup-isolate.h b/deps/v8/src/setup-isolate.h
index 99bdd9bcfa..2003caeac9 100644
--- a/deps/v8/src/setup-isolate.h
+++ b/deps/v8/src/setup-isolate.h
@@ -10,6 +10,7 @@ namespace internal {
class Builtins;
class Code;
+class Heap;
class Isolate;
namespace interpreter {
@@ -31,19 +32,25 @@ class Interpreter;
// linked in by the latter two Delegate implementations.
class SetupIsolateDelegate {
public:
- SetupIsolateDelegate() {}
+ explicit SetupIsolateDelegate(bool create_heap_objects)
+ : create_heap_objects_(create_heap_objects) {}
virtual ~SetupIsolateDelegate() {}
- virtual void SetupBuiltins(Isolate* isolate, bool create_heap_objects);
+ virtual void SetupBuiltins(Isolate* isolate);
- virtual void SetupInterpreter(interpreter::Interpreter* interpreter,
- bool create_heap_objects);
+ virtual void SetupInterpreter(interpreter::Interpreter* interpreter);
+
+ virtual bool SetupHeap(Heap* heap);
protected:
static void SetupBuiltinsInternal(Isolate* isolate);
static void AddBuiltin(Builtins* builtins, int index, Code* code);
static void PopulateWithPlaceholders(Isolate* isolate);
static void ReplacePlaceholders(Isolate* isolate);
+
+ static bool SetupHeapInternal(Heap* heap);
+
+ const bool create_heap_objects_;
};
} // namespace internal
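The delegate now captures create_heap_objects_ once at construction instead of threading a bool through every Setup* call. A minimal stand-alone model of the new shape (hypothetical names, illustrative only):

#include <cstdio>

class SetupDelegateModel {
 public:
  explicit SetupDelegateModel(bool create_heap_objects)
      : create_heap_objects_(create_heap_objects) {}

  bool SetupHeap() {
    if (!create_heap_objects_) return true;  // deserialized from the snapshot
    std::puts("building heap objects from scratch");
    return true;
  }

 private:
  const bool create_heap_objects_;
};

int main() {
  SetupDelegateModel from_snapshot(/*create_heap_objects=*/false);
  return from_snapshot.SetupHeap() ? 0 : 1;
}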
diff --git a/deps/v8/src/small-pointer-list.h b/deps/v8/src/small-pointer-list.h
deleted file mode 100644
index ac5ecaae57..0000000000
--- a/deps/v8/src/small-pointer-list.h
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_SMALL_POINTER_LIST_H_
-#define V8_SMALL_POINTER_LIST_H_
-
-#include "src/base/logging.h"
-#include "src/globals.h"
-#include "src/zone/zone.h"
-
-namespace v8 {
-namespace internal {
-
-// SmallPointerList is a list optimized for storing no or just a
-// single value. When more values are given it falls back to ZoneList.
-//
-// The interface tries to be as close to List from list.h as possible.
-template <typename T>
-class SmallPointerList {
- public:
- SmallPointerList() : data_(kEmptyTag) {}
-
- SmallPointerList(int capacity, Zone* zone) : data_(kEmptyTag) {
- Reserve(capacity, zone);
- }
-
- void Reserve(int capacity, Zone* zone) {
- if (capacity < 2) return;
- if ((data_ & kTagMask) == kListTag) {
- if (list()->capacity() >= capacity) return;
- int old_length = list()->length();
- list()->AddBlock(NULL, capacity - list()->capacity(), zone);
- list()->Rewind(old_length);
- return;
- }
- PointerList* list = new(zone) PointerList(capacity, zone);
- if ((data_ & kTagMask) == kSingletonTag) {
- list->Add(single_value(), zone);
- }
- DCHECK(IsAligned(reinterpret_cast<intptr_t>(list), kPointerAlignment));
- data_ = reinterpret_cast<intptr_t>(list) | kListTag;
- }
-
- void Clear() {
- data_ = kEmptyTag;
- }
-
- void Sort() {
- if ((data_ & kTagMask) == kListTag) {
- list()->Sort(compare_value);
- }
- }
-
- bool is_empty() const { return length() == 0; }
-
- int length() const {
- if ((data_ & kTagMask) == kEmptyTag) return 0;
- if ((data_ & kTagMask) == kSingletonTag) return 1;
- return list()->length();
- }
-
- void Add(T* pointer, Zone* zone) {
- DCHECK(IsAligned(reinterpret_cast<intptr_t>(pointer), kPointerAlignment));
- if ((data_ & kTagMask) == kEmptyTag) {
- data_ = reinterpret_cast<intptr_t>(pointer) | kSingletonTag;
- return;
- }
- if ((data_ & kTagMask) == kSingletonTag) {
- PointerList* list = new(zone) PointerList(2, zone);
- list->Add(single_value(), zone);
- list->Add(pointer, zone);
- DCHECK(IsAligned(reinterpret_cast<intptr_t>(list), kPointerAlignment));
- data_ = reinterpret_cast<intptr_t>(list) | kListTag;
- return;
- }
- list()->Add(pointer, zone);
- }
-
- // Note: returns T* and not T*& (unlike List from list.h).
- // This makes the implementation simpler and more const correct.
- T* at(int i) const {
- DCHECK((data_ & kTagMask) != kEmptyTag);
- if ((data_ & kTagMask) == kSingletonTag) {
- DCHECK(i == 0);
- return single_value();
- }
- return list()->at(i);
- }
-
- // See the note above.
- T* operator[](int i) const { return at(i); }
-
- // Remove the given element from the list (if present).
- void RemoveElement(T* pointer) {
- if ((data_ & kTagMask) == kEmptyTag) return;
- if ((data_ & kTagMask) == kSingletonTag) {
- if (pointer == single_value()) {
- data_ = kEmptyTag;
- }
- return;
- }
- list()->RemoveElement(pointer);
- }
-
- T* RemoveLast() {
- DCHECK((data_ & kTagMask) != kEmptyTag);
- if ((data_ & kTagMask) == kSingletonTag) {
- T* result = single_value();
- data_ = kEmptyTag;
- return result;
- }
- return list()->RemoveLast();
- }
-
- void Rewind(int pos) {
- if ((data_ & kTagMask) == kEmptyTag) {
- DCHECK(pos == 0);
- return;
- }
- if ((data_ & kTagMask) == kSingletonTag) {
- DCHECK(pos == 0 || pos == 1);
- if (pos == 0) {
- data_ = kEmptyTag;
- }
- return;
- }
- list()->Rewind(pos);
- }
-
- int CountOccurrences(T* pointer, int start, int end) const {
- if ((data_ & kTagMask) == kEmptyTag) return 0;
- if ((data_ & kTagMask) == kSingletonTag) {
- if (start == 0 && end >= 0) {
- return (single_value() == pointer) ? 1 : 0;
- }
- return 0;
- }
- return list()->CountOccurrences(pointer, start, end);
- }
-
- private:
- typedef ZoneList<T*> PointerList;
-
- static int compare_value(T* const* a, T* const* b) {
- return Compare<T>(**a, **b);
- }
-
- static const intptr_t kEmptyTag = 1;
- static const intptr_t kSingletonTag = 0;
- static const intptr_t kListTag = 2;
- static const intptr_t kTagMask = 3;
- static const intptr_t kValueMask = ~kTagMask;
-
- STATIC_ASSERT(kTagMask + 1 <= kPointerAlignment);
-
- T* single_value() const {
- DCHECK((data_ & kTagMask) == kSingletonTag);
- STATIC_ASSERT(kSingletonTag == 0);
- return reinterpret_cast<T*>(data_);
- }
-
- PointerList* list() const {
- DCHECK((data_ & kTagMask) == kListTag);
- return reinterpret_cast<PointerList*>(data_ & kValueMask);
- }
-
- intptr_t data_;
-
- DISALLOW_COPY_AND_ASSIGN(SmallPointerList);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_SMALL_POINTER_LIST_H_
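The deleted SmallPointerList relied on pointer alignment to store a 2-bit state tag (empty, singleton, or list) in the low bits of a single intptr_t field. A minimal, self-contained sketch of that low-bit tagging trick:

#include <cassert>
#include <cstdint>

// Pointers aligned to at least 4 bytes leave two low bits free for a tag.
constexpr intptr_t kSingletonTag = 0;
constexpr intptr_t kEmptyTag = 1;
constexpr intptr_t kListTag = 2;
constexpr intptr_t kTagMask = 3;

intptr_t TagPointer(void* p, intptr_t tag) {
  intptr_t bits = reinterpret_cast<intptr_t>(p);
  assert((bits & kTagMask) == 0);  // relies on aligned allocation
  return bits | tag;
}

void* UntagPointer(intptr_t data) {
  return reinterpret_cast<void*>(data & ~kTagMask);
}

int main() {
  alignas(4) static int payload = 42;
  intptr_t data = TagPointer(&payload, kListTag);
  assert((data & kTagMask) == kListTag);
  assert(UntagPointer(data) == &payload);
  return 0;
}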
diff --git a/deps/v8/src/snapshot/builtin-deserializer.cc b/deps/v8/src/snapshot/builtin-deserializer.cc
new file mode 100644
index 0000000000..fb41a9fec9
--- /dev/null
+++ b/deps/v8/src/snapshot/builtin-deserializer.cc
@@ -0,0 +1,243 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/builtin-deserializer.h"
+
+#include "src/assembler-inl.h"
+#include "src/objects-inl.h"
+#include "src/snapshot/snapshot.h"
+
+namespace v8 {
+namespace internal {
+
+// Tracks the builtin currently being deserialized (required for allocation).
+class DeserializingBuiltinScope {
+ public:
+ DeserializingBuiltinScope(BuiltinDeserializer* builtin_deserializer,
+ int builtin_id)
+ : builtin_deserializer_(builtin_deserializer) {
+ DCHECK_EQ(BuiltinDeserializer::kNoBuiltinId,
+ builtin_deserializer->current_builtin_id_);
+ builtin_deserializer->current_builtin_id_ = builtin_id;
+ }
+
+ ~DeserializingBuiltinScope() {
+ builtin_deserializer_->current_builtin_id_ =
+ BuiltinDeserializer::kNoBuiltinId;
+ }
+
+ private:
+ BuiltinDeserializer* builtin_deserializer_;
+
+ DISALLOW_COPY_AND_ASSIGN(DeserializingBuiltinScope);
+};
+
+BuiltinDeserializer::BuiltinDeserializer(Isolate* isolate,
+ const BuiltinSnapshotData* data)
+ : Deserializer(data, false) {
+ // We may have to relax this at some point to pack reloc infos and handler
+ // tables into the builtin blob (instead of the partial snapshot cache).
+ DCHECK(ReservesOnlyCodeSpace());
+
+ builtin_offsets_ = data->BuiltinOffsets();
+ DCHECK_EQ(Builtins::builtin_count, builtin_offsets_.length());
+ DCHECK(std::is_sorted(builtin_offsets_.begin(), builtin_offsets_.end()));
+
+ Initialize(isolate);
+}
+
+void BuiltinDeserializer::DeserializeEagerBuiltins() {
+ DCHECK(!AllowHeapAllocation::IsAllowed());
+ DCHECK_EQ(0, source()->position());
+
+ Builtins* builtins = isolate()->builtins();
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ if (IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) {
+ // Do nothing. These builtins have been replaced by DeserializeLazy in
+ // InitializeBuiltinsTable.
+ DCHECK_EQ(builtins->builtin(Builtins::kDeserializeLazy),
+ builtins->builtin(i));
+ } else {
+ builtins->set_builtin(i, DeserializeBuiltin(i));
+ }
+ }
+
+#ifdef DEBUG
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ Object* o = builtins->builtin(i);
+ DCHECK(o->IsCode() && Code::cast(o)->is_builtin());
+ }
+#endif
+}
+
+Code* BuiltinDeserializer::DeserializeBuiltin(int builtin_id) {
+ DCHECK(!AllowHeapAllocation::IsAllowed());
+ DCHECK(Builtins::IsBuiltinId(builtin_id));
+
+ DeserializingBuiltinScope scope(this, builtin_id);
+
+ const int initial_position = source()->position();
+ SetPositionToBuiltin(builtin_id);
+
+ Object* o = ReadDataSingle();
+ DCHECK(o->IsCode() && Code::cast(o)->is_builtin());
+
+ // Rewind.
+ source()->set_position(initial_position);
+
+ // Flush the instruction cache.
+ Code* code = Code::cast(o);
+ Assembler::FlushICache(isolate(), code->instruction_start(),
+ code->instruction_size());
+
+ return code;
+}
+
+void BuiltinDeserializer::SetPositionToBuiltin(int builtin_id) {
+ DCHECK(Builtins::IsBuiltinId(builtin_id));
+
+ const uint32_t offset = builtin_offsets_[builtin_id];
+ source()->set_position(offset);
+
+ // Peek at the first bytecode to check for a chunk boundary.
+ byte data = source()->Get();
+
+ // The first bytecode can be either kNewObject, or kNextChunk if the current
+ // chunk has been exhausted. Since we do allocations differently here, we
+ // don't care about kNextChunk and can simply skip over it.
+ // TODO(jgruber): When refactoring (de)serializer allocations, ensure we don't
+ // generate kNextChunk bytecodes anymore for the builtins snapshot. In fact,
+ // the entire reservations mechanism is unused for the builtins snapshot.
+ if (data == kNextChunk) {
+ source()->Get(); // Skip over kNextChunk's {space} parameter.
+ } else {
+ source()->set_position(offset); // Rewind.
+ }
+}
+
+uint32_t BuiltinDeserializer::ExtractBuiltinSize(int builtin_id) {
+ DCHECK(Builtins::IsBuiltinId(builtin_id));
+
+ const int initial_position = source()->position();
+
+ // Grab the size of the code object.
+ SetPositionToBuiltin(builtin_id);
+ byte data = source()->Get();
+
+ USE(data);
+ DCHECK_EQ(kNewObject | kPlain | kStartOfObject | CODE_SPACE, data);
+ const uint32_t result = source()->GetInt() << kObjectAlignmentBits;
+
+ // Rewind.
+ source()->set_position(initial_position);
+
+ return result;
+}
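Both SetPositionToBuiltin and ExtractBuiltinSize treat the snapshot stream as random-access: save the cursor, seek to the builtin's recorded offset, read what is needed, and rewind. A simplified sketch of that save/seek/restore pattern, using a hypothetical ByteSource rather than V8's SnapshotByteSource:

#include <cstdint>
#include <utility>
#include <vector>

class ByteSource {  // hypothetical cursor-based byte stream
 public:
  explicit ByteSource(std::vector<uint8_t> data) : data_(std::move(data)) {}
  int position() const { return pos_; }
  void set_position(int pos) { pos_ = pos; }
  uint8_t Get() { return data_[pos_++]; }
 private:
  std::vector<uint8_t> data_;
  int pos_ = 0;
};

// Read one byte at `offset` without disturbing the current read position.
uint8_t PeekAt(ByteSource* source, int offset) {
  const int saved = source->position();
  source->set_position(offset);
  uint8_t b = source->Get();
  source->set_position(saved);  // rewind, as in ExtractBuiltinSize
  return b;
}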
+
+Heap::Reservation BuiltinDeserializer::CreateReservationsForEagerBuiltins() {
+ DCHECK(ReservesOnlyCodeSpace());
+
+ Heap::Reservation result;
+
+ // DeserializeLazy is always the first reservation (to simplify logic in
+ // InitializeBuiltinsTable).
+ {
+ DCHECK(!Builtins::IsLazy(Builtins::kDeserializeLazy));
+ uint32_t builtin_size = ExtractBuiltinSize(Builtins::kDeserializeLazy);
+ DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
+ result.push_back({builtin_size, nullptr, nullptr});
+ }
+
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ if (i == Builtins::kDeserializeLazy) continue;
+
+ // Skip lazy builtins. These will be replaced by the DeserializeLazy code
+ // object in InitializeBuiltinsTable and thus require no reserved space.
+ if (IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) continue;
+
+ uint32_t builtin_size = ExtractBuiltinSize(i);
+ DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
+ result.push_back({builtin_size, nullptr, nullptr});
+ }
+
+ return result;
+}
+
+void BuiltinDeserializer::InitializeBuiltinFromReservation(
+ const Heap::Chunk& chunk, int builtin_id) {
+ DCHECK_EQ(ExtractBuiltinSize(builtin_id), chunk.size);
+ DCHECK_EQ(chunk.size, chunk.end - chunk.start);
+
+ SkipList::Update(chunk.start, chunk.size);
+ isolate()->builtins()->set_builtin(builtin_id,
+ HeapObject::FromAddress(chunk.start));
+}
+
+void BuiltinDeserializer::InitializeBuiltinsTable(
+ const Heap::Reservation& reservation) {
+ DCHECK(!AllowHeapAllocation::IsAllowed());
+
+ Builtins* builtins = isolate()->builtins();
+ int reservation_index = 0;
+
+ // DeserializeLazy must not itself be lazy, since other builtins' table
+ // slots are initialized to it. It always occupies the first reservation
+ // slot.
+ {
+ DCHECK(!Builtins::IsLazy(Builtins::kDeserializeLazy));
+ InitializeBuiltinFromReservation(reservation[reservation_index],
+ Builtins::kDeserializeLazy);
+ reservation_index++;
+ }
+
+ Code* deserialize_lazy = builtins->builtin(Builtins::kDeserializeLazy);
+
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ if (i == Builtins::kDeserializeLazy) continue;
+
+ if (IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) {
+ builtins->set_builtin(i, deserialize_lazy);
+ } else {
+ InitializeBuiltinFromReservation(reservation[reservation_index], i);
+ reservation_index++;
+ }
+ }
+
+ DCHECK_EQ(reservation.size(), reservation_index);
+}
+
+void BuiltinDeserializer::ReserveAndInitializeBuiltinsTableForBuiltin(
+ int builtin_id) {
+ DCHECK(AllowHeapAllocation::IsAllowed());
+ DCHECK(isolate()->builtins()->is_initialized());
+ DCHECK(Builtins::IsBuiltinId(builtin_id));
+ DCHECK_NE(Builtins::kDeserializeLazy, builtin_id);
+ DCHECK_EQ(Builtins::kDeserializeLazy,
+ isolate()->builtins()->builtin(builtin_id)->builtin_index());
+
+ const uint32_t builtin_size = ExtractBuiltinSize(builtin_id);
+ DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
+
+ Handle<HeapObject> o =
+ isolate()->factory()->NewCodeForDeserialization(builtin_size);
+
+ // Note: After this point and until deserialization finishes, heap allocation
+ // is disallowed. We currently can't safely assert this since we'd need to
+ // pass the DisallowHeapAllocation scope out of this function.
+
+ // Write the allocated filler object into the builtins table. It will be
+ // returned by our custom Allocate method below once needed.
+
+ isolate()->builtins()->set_builtin(builtin_id, *o);
+}
+
+Address BuiltinDeserializer::Allocate(int space_index, int size) {
+ DCHECK_EQ(CODE_SPACE, space_index);
+ DCHECK_EQ(ExtractBuiltinSize(current_builtin_id_), size);
+ Object* obj = isolate()->builtins()->builtin(current_builtin_id_);
+ DCHECK(Internals::HasHeapObjectTag(obj));
+ return HeapObject::cast(obj)->address();
+}
+
+} // namespace internal
+} // namespace v8
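Lazy deserialization rests on the invariant set up in InitializeBuiltinsTable: every lazy builtin's table slot initially holds the shared DeserializeLazy code and is patched with the real code object on first use. A table-level sketch of that placeholder-then-patch scheme (a hypothetical miniature that deliberately ignores memory management):

#include <vector>

struct CodeStub { int id; };  // stand-in for a Code* entry

class BuiltinsTableModel {
 public:
  BuiltinsTableModel(int count, CodeStub* lazy_stub)
      : table_(count, lazy_stub), lazy_stub_(lazy_stub) {}

  CodeStub* Lookup(int id) {
    if (table_[id] == lazy_stub_) {
      table_[id] = DeserializeNow(id);  // patch the slot on first use
    }
    return table_[id];
  }

 private:
  CodeStub* DeserializeNow(int id) { return new CodeStub{id}; }  // sketch only
  std::vector<CodeStub*> table_;
  CodeStub* lazy_stub_;
};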
diff --git a/deps/v8/src/snapshot/builtin-deserializer.h b/deps/v8/src/snapshot/builtin-deserializer.h
new file mode 100644
index 0000000000..a73c68ed34
--- /dev/null
+++ b/deps/v8/src/snapshot/builtin-deserializer.h
@@ -0,0 +1,89 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_BUILTIN_DESERIALIZER_H_
+#define V8_SNAPSHOT_BUILTIN_DESERIALIZER_H_
+
+#include "src/heap/heap.h"
+#include "src/snapshot/deserializer.h"
+
+namespace v8 {
+namespace internal {
+
+class BuiltinSnapshotData;
+
+// Deserializes the builtins blob.
+class BuiltinDeserializer final : public Deserializer {
+ public:
+ BuiltinDeserializer(Isolate* isolate, const BuiltinSnapshotData* data);
+
+ // Builtins deserialization is tightly integrated with deserialization of the
+ // startup blob. In particular, we need to ensure that no GC can occur
+ // between startup- and builtins deserialization, as all builtins have been
+ // pre-allocated and their pointers may not be invalidated.
+ //
+ // After this, the instruction cache must be flushed by the caller (we don't
+ // do it ourselves since the startup deserializer batch-flushes all code
+ // pages).
+ void DeserializeEagerBuiltins();
+
+ // Deserializes the single given builtin. Assumes that reservations have
+ // already been allocated.
+ Code* DeserializeBuiltin(int builtin_id);
+
+ // These methods are used to pre-allocate builtin objects prior to
+ // deserialization.
+ // TODO(jgruber): Refactor reservation/allocation logic in deserializers to
+ // make this less messy.
+ Heap::Reservation CreateReservationsForEagerBuiltins();
+ void InitializeBuiltinsTable(const Heap::Reservation& reservation);
+
+ // Creates reservations and initializes the builtins table in preparation for
+ // lazily deserializing a single builtin.
+ void ReserveAndInitializeBuiltinsTableForBuiltin(int builtin_id);
+
+ private:
+ // TODO(jgruber): Remove once allocations have been refactored.
+ void SetPositionToBuiltin(int builtin_id);
+
+ // Extracts the size of the builtin Code object (baked into the snapshot).
+ uint32_t ExtractBuiltinSize(int builtin_id);
+
+ // Used after memory allocation prior to isolate initialization, to register
+ // the newly created object in code space and add it to the builtins table.
+ void InitializeBuiltinFromReservation(const Heap::Chunk& chunk,
+ int builtin_id);
+
+ // Allocation works differently here than in other deserializers. Instead of
+ // a statically-known memory area determined at serialization-time, our
+ // memory requirements here are determined at runtime. Another major
+ // difference is that we create builtin Code objects up-front (before
+ // deserialization) in order to avoid having to patch builtin references
+ // later on. See also the kBuiltin case in deserializer.cc.
+ //
+ // Allocate simply returns the pre-allocated object prepared by
+ // InitializeBuiltinsTable.
+ Address Allocate(int space_index, int size) override;
+
+ // BuiltinDeserializer implements its own builtin iteration logic. Make sure
+ // the RootVisitor API is not used accidentally.
+ void VisitRootPointers(Root root, Object** start, Object** end) override {
+ UNREACHABLE();
+ }
+
+ // Stores the builtin currently being deserialized. We need this to determine
+ // where to 'allocate' from during deserialization.
+ static const int kNoBuiltinId = -1;
+ int current_builtin_id_ = kNoBuiltinId;
+
+ // The offsets of each builtin within the serialized data. Equivalent to
+ // BuiltinSerializer::builtin_offsets_ but on the deserialization side.
+ Vector<const uint32_t> builtin_offsets_;
+
+ friend class DeserializingBuiltinScope;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_BUILTIN_DESERIALIZER_H_
diff --git a/deps/v8/src/snapshot/builtin-serializer.cc b/deps/v8/src/snapshot/builtin-serializer.cc
new file mode 100644
index 0000000000..6e90ea18be
--- /dev/null
+++ b/deps/v8/src/snapshot/builtin-serializer.cc
@@ -0,0 +1,90 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/builtin-serializer.h"
+
+#include "src/objects-inl.h"
+#include "src/snapshot/startup-serializer.h"
+
+namespace v8 {
+namespace internal {
+
+BuiltinSerializer::BuiltinSerializer(Isolate* isolate,
+ StartupSerializer* startup_serializer)
+ : Serializer(isolate), startup_serializer_(startup_serializer) {}
+
+BuiltinSerializer::~BuiltinSerializer() {
+ OutputStatistics("BuiltinSerializer");
+}
+
+void BuiltinSerializer::SerializeBuiltins() {
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ builtin_offsets_[i] = sink_.Position();
+ SerializeBuiltin(isolate()->builtins()->builtin(i));
+ }
+ Pad(); // Pad with kNop since GetInt() might read too far.
+
+ // Append the offset table. During deserialization, the offset table is
+ // extracted by BuiltinSnapshotData.
+ const byte* data = reinterpret_cast<const byte*>(&builtin_offsets_[0]);
+ int data_length = static_cast<int>(sizeof(builtin_offsets_));
+ sink_.PutRaw(data, data_length, "BuiltinOffsets");
+}
+
+void BuiltinSerializer::VisitRootPointers(Root root, Object** start,
+ Object** end) {
+ UNREACHABLE(); // We iterate manually in SerializeBuiltins.
+}
+
+void BuiltinSerializer::SerializeBuiltin(Code* code) {
+ DCHECK_GE(code->builtin_index(), 0);
+
+ // All builtins are serialized unconditionally when the respective builtin is
+ // reached while iterating the builtins list. A builtin seen at any other
+ // time (e.g. startup snapshot creation, or while iterating a builtin code
+ // object during builtin serialization) is serialized by reference - see
+ // BuiltinSerializer::SerializeObject below.
+ ObjectSerializer object_serializer(this, code, &sink_, kPlain,
+ kStartOfObject);
+ object_serializer.Serialize();
+}
+
+void BuiltinSerializer::SerializeObject(HeapObject* o, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip) {
+ DCHECK(!o->IsSmi());
+
+ // Roots can simply be serialized as root references.
+ int root_index = root_index_map()->Lookup(o);
+ if (root_index != RootIndexMap::kInvalidRootIndex) {
+ DCHECK(startup_serializer_->root_has_been_serialized(root_index));
+ PutRoot(root_index, o, how_to_code, where_to_point, skip);
+ return;
+ }
+
+ // Builtins are serialized using a dedicated bytecode. We only reach this
+ // point when encountering a builtin, e.g. while iterating the body of
+ // another builtin.
+ if (SerializeBuiltinReference(o, how_to_code, where_to_point, skip)) return;
+
+ // Embedded objects are serialized as part of the partial snapshot cache.
+ // Currently we expect to see:
+ // * Code: Jump targets.
+ // * ByteArrays: Relocation infos.
+ // * FixedArrays: Handler tables.
+ // * Strings: CSA_ASSERTs in debug builds, various other string constants.
+ // * HeapNumbers: Embedded constants.
+ // TODO(6624): Jump targets should never trigger content serialization, it
+ // should always result in a reference instead. Reloc infos and handler
+ // tables should not end up in the partial snapshot cache.
+
+ FlushSkip(skip);
+
+ int cache_index = startup_serializer_->PartialSnapshotCacheIndex(o);
+ sink_.Put(kPartialSnapshotCache + how_to_code + where_to_point,
+ "PartialSnapshotCache");
+ sink_.PutInt(cache_index, "partial_snapshot_cache_index");
+}
+
+} // namespace internal
+} // namespace v8
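SerializeBuiltins records the sink position of each builtin and then appends the raw offset table to the end of the blob, where BuiltinSnapshotData can slice it back off during deserialization. A self-contained sketch of that layout:

#include <cstdint>
#include <vector>

// Payloads back to back, then a fixed-size table of uint32_t start offsets.
std::vector<uint8_t> BuildBlob(
    const std::vector<std::vector<uint8_t>>& builtins) {
  std::vector<uint8_t> blob;
  std::vector<uint32_t> offsets;
  for (const auto& code : builtins) {
    offsets.push_back(static_cast<uint32_t>(blob.size()));
    blob.insert(blob.end(), code.begin(), code.end());
  }
  // Append the offset table verbatim; the deserializer reads it back off
  // the tail of the blob.
  const uint8_t* raw = reinterpret_cast<const uint8_t*>(offsets.data());
  blob.insert(blob.end(), raw, raw + offsets.size() * sizeof(uint32_t));
  return blob;
}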
diff --git a/deps/v8/src/snapshot/builtin-serializer.h b/deps/v8/src/snapshot/builtin-serializer.h
new file mode 100644
index 0000000000..85c59f84c0
--- /dev/null
+++ b/deps/v8/src/snapshot/builtin-serializer.h
@@ -0,0 +1,47 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_BUILTIN_SERIALIZER_H_
+#define V8_SNAPSHOT_BUILTIN_SERIALIZER_H_
+
+#include "src/snapshot/serializer.h"
+
+namespace v8 {
+namespace internal {
+
+class StartupSerializer;
+
+// Responsible for serializing all builtin objects during startup snapshot
+// creation. Builtins are serialized into a dedicated area of the snapshot.
+// See snapshot.h for documentation of the snapshot layout.
+class BuiltinSerializer : public Serializer<> {
+ public:
+ BuiltinSerializer(Isolate* isolate, StartupSerializer* startup_serializer);
+ ~BuiltinSerializer() override;
+
+ void SerializeBuiltins();
+
+ private:
+ void VisitRootPointers(Root root, Object** start, Object** end) override;
+
+ void SerializeBuiltin(Code* code);
+ void SerializeObject(HeapObject* o, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip) override;
+
+ // The startup serializer is needed for access to the partial snapshot cache,
+ // which is used to serialize things like embedded constants.
+ StartupSerializer* startup_serializer_;
+
+ // Stores the starting offset, within the serialized data, of each builtin.
+ // This is later packed into the builtin snapshot, and used by the builtin
+ // deserializer to deserialize individual builtins.
+ uint32_t builtin_offsets_[Builtins::builtin_count];
+
+ DISALLOW_COPY_AND_ASSIGN(BuiltinSerializer);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_BUILTIN_SERIALIZER_H_
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index 0af5d12dc4..29e1e783e4 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -16,7 +16,7 @@
#include "src/version.h"
#include "src/visitors.h"
#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
namespace internal {
@@ -55,7 +55,7 @@ ScriptData* CodeSerializer::Serialize(Handle<HeapObject> obj) {
SerializeDeferredObjects();
Pad();
- SerializedCodeData data(sink()->data(), this);
+ SerializedCodeData data(sink_.data(), this);
return data.GetScriptData();
}
@@ -64,7 +64,7 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
- int root_index = root_index_map_.Lookup(obj);
+ int root_index = root_index_map()->Lookup(obj);
if (root_index != RootIndexMap::kInvalidRootIndex) {
PutRoot(root_index, obj, how_to_code, where_to_point, skip);
return;
@@ -78,30 +78,21 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
Code* code_object = Code::cast(obj);
switch (code_object->kind()) {
case Code::OPTIMIZED_FUNCTION: // No optimized code compiled yet.
- case Code::HANDLER: // No handlers patched in yet.
case Code::REGEXP: // No regexp literals initialized yet.
case Code::NUMBER_OF_KINDS: // Pseudo enum value.
case Code::BYTECODE_HANDLER: // No direct references to handlers.
CHECK(false);
case Code::BUILTIN:
- SerializeBuiltin(code_object->builtin_index(), how_to_code,
- where_to_point);
+ SerializeBuiltinReference(code_object, how_to_code, where_to_point, 0);
return;
case Code::STUB:
-#define IC_KIND_CASE(KIND) case Code::KIND:
- IC_KIND_LIST(IC_KIND_CASE)
-#undef IC_KIND_CASE
if (code_object->builtin_index() == -1) {
SerializeCodeStub(code_object, how_to_code, where_to_point);
} else {
- SerializeBuiltin(code_object->builtin_index(), how_to_code,
- where_to_point);
+ SerializeBuiltinReference(code_object, how_to_code, where_to_point,
+ 0);
}
return;
- case Code::FUNCTION:
- DCHECK(code_object->has_reloc_info_for_serialization());
- SerializeGeneric(code_object, how_to_code, where_to_point);
- return;
default:
return SerializeCodeObject(code_object, how_to_code, where_to_point);
}
@@ -118,6 +109,16 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
Script::cast(obj)->set_wrapper(isolate()->heap()->undefined_value());
}
+ if (obj->IsSharedFunctionInfo()) {
+ SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
+ // Mark SFI to indicate whether the code is cached.
+ bool was_deserialized = sfi->deserialized();
+ sfi->set_deserialized(sfi->is_compiled());
+ SerializeGeneric(obj, how_to_code, where_to_point);
+ sfi->set_deserialized(was_deserialized);
+ return;
+ }
+
// Past this point we should not see any (context-specific) maps anymore.
CHECK(!obj->IsMap());
// There should be no references to the global object embedded.
@@ -139,22 +140,6 @@ void CodeSerializer::SerializeGeneric(HeapObject* heap_object,
serializer.Serialize();
}
-void CodeSerializer::SerializeBuiltin(int builtin_index, HowToCode how_to_code,
- WhereToPoint where_to_point) {
- DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
- (how_to_code == kFromCode && where_to_point == kInnerPointer));
- DCHECK_LT(builtin_index, Builtins::builtin_count);
- DCHECK_LE(0, builtin_index);
-
- if (FLAG_trace_serializer) {
- PrintF(" Encoding builtin: %s\n",
- isolate()->builtins()->name(builtin_index));
- }
-
- sink_.Put(kBuiltin + how_to_code + where_to_point, "Builtin");
- sink_.PutInt(builtin_index, "builtin_index");
-}
-
void CodeSerializer::SerializeCodeStub(Code* code_stub, HowToCode how_to_code,
WhereToPoint where_to_point) {
// We only arrive here if we have not encountered this code stub before.
@@ -210,7 +195,6 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
int length = cached_data->length();
PrintF("[Deserializing from %d bytes took %0.3f ms]\n", length, ms);
}
- result->set_deserialized(true);
if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
String* name = isolate->heap()->empty_string();
@@ -275,15 +259,27 @@ void WasmCompiledModuleSerializer::SerializeCodeObject(
Code::Kind kind = code_object->kind();
switch (kind) {
case Code::WASM_FUNCTION:
- case Code::JS_TO_WASM_FUNCTION:
+ case Code::JS_TO_WASM_FUNCTION: {
+ // Because the trap handler index is not meaningful across copies and
+ // serializations, we need to serialize it as kInvalidIndex. We do this by
+ // saving the old value, setting the index to kInvalidIndex and then
+ // restoring the old value.
+ const int old_trap_handler_index =
+ code_object->trap_handler_index()->value();
+ code_object->set_trap_handler_index(
+ Smi::FromInt(trap_handler::kInvalidIndex));
+
// Just serialize the code_object.
SerializeGeneric(code_object, how_to_code, where_to_point);
+ code_object->set_trap_handler_index(Smi::FromInt(old_trap_handler_index));
break;
+ }
case Code::WASM_INTERPRETER_ENTRY:
case Code::WASM_TO_JS_FUNCTION:
// Serialize the illegal builtin instead. On instantiation of a
// deserialized module, these will be replaced again.
- SerializeBuiltin(Builtins::kIllegal, how_to_code, where_to_point);
+ SerializeBuiltinReference(*BUILTIN_CODE(isolate(), Illegal), how_to_code,
+ where_to_point, 0);
break;
default:
UNREACHABLE();
@@ -337,9 +333,7 @@ SerializedCodeData::SerializedCodeData(const std::vector<byte>* payload,
const CodeSerializer* cs) {
DisallowHeapAllocation no_gc;
const std::vector<uint32_t>* stub_keys = cs->stub_keys();
-
- std::vector<Reservation> reservations;
- cs->EncodeReservations(&reservations);
+ std::vector<Reservation> reservations = cs->EncodeReservations();
// Calculate sizes.
uint32_t reservation_size =
diff --git a/deps/v8/src/snapshot/code-serializer.h b/deps/v8/src/snapshot/code-serializer.h
index 9c2ada725e..7f8ff5cc8b 100644
--- a/deps/v8/src/snapshot/code-serializer.h
+++ b/deps/v8/src/snapshot/code-serializer.h
@@ -11,7 +11,7 @@
namespace v8 {
namespace internal {
-class CodeSerializer : public Serializer {
+class CodeSerializer : public Serializer<> {
public:
static ScriptData* Serialize(Isolate* isolate,
Handle<SharedFunctionInfo> info,
@@ -39,8 +39,6 @@ class CodeSerializer : public Serializer {
virtual bool ElideObject(Object* obj) { return false; }
void SerializeGeneric(HeapObject* heap_object, HowToCode how_to_code,
WhereToPoint where_to_point);
- void SerializeBuiltin(int builtin_index, HowToCode how_to_code,
- WhereToPoint where_to_point);
private:
void SerializeObject(HeapObject* o, HowToCode how_to_code,
@@ -104,6 +102,7 @@ class SerializedCodeData : public SerializedData {
// ... reservations
// ... code stub keys
// ... serialized payload
+ static const uint32_t kVersionHashOffset = kMagicNumberOffset + kUInt32Size;
static const uint32_t kSourceHashOffset = kVersionHashOffset + kUInt32Size;
static const uint32_t kCpuFeaturesOffset = kSourceHashOffset + kUInt32Size;
static const uint32_t kFlagHashOffset = kCpuFeaturesOffset + kUInt32Size;
diff --git a/deps/v8/src/snapshot/default-serializer-allocator.cc b/deps/v8/src/snapshot/default-serializer-allocator.cc
new file mode 100644
index 0000000000..1dfa21ad2b
--- /dev/null
+++ b/deps/v8/src/snapshot/default-serializer-allocator.cc
@@ -0,0 +1,153 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/default-serializer-allocator.h"
+
+#include "src/heap/heap-inl.h"
+#include "src/snapshot/serializer.h"
+#include "src/snapshot/snapshot-source-sink.h"
+
+namespace v8 {
+namespace internal {
+
+DefaultSerializerAllocator::DefaultSerializerAllocator(
+ Serializer<DefaultSerializerAllocator>* serializer)
+ : serializer_(serializer) {
+ for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
+ pending_chunk_[i] = 0;
+ }
+}
+
+SerializerReference DefaultSerializerAllocator::Allocate(AllocationSpace space,
+ uint32_t size) {
+ DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
+ DCHECK(size > 0 && size <= MaxChunkSizeInSpace(space));
+
+ // Maps are allocated through AllocateMap.
+ DCHECK_NE(MAP_SPACE, space);
+
+ uint32_t new_chunk_size = pending_chunk_[space] + size;
+ if (new_chunk_size > MaxChunkSizeInSpace(space)) {
+ // The new chunk size would not fit onto a single page. Complete the
+ // current chunk and start a new one.
+ serializer_->PutNextChunk(space);
+ completed_chunks_[space].push_back(pending_chunk_[space]);
+ pending_chunk_[space] = 0;
+ new_chunk_size = size;
+ }
+ uint32_t offset = pending_chunk_[space];
+ pending_chunk_[space] = new_chunk_size;
+ return SerializerReference::BackReference(
+ space, static_cast<uint32_t>(completed_chunks_[space].size()), offset);
+}
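Allocate above bump-allocates within a pending chunk per space and seals the chunk once the next object would overflow a page, so back-references are (space, chunk index, chunk offset) triples. A miniature of the bookkeeping for a single space (simplified; the real allocator tracks kNumberOfPreallocatedSpaces arrays in parallel):

#include <cstdint>
#include <vector>

struct BackRef {
  uint32_t chunk_index;   // index of the chunk (sealed chunks, then the open one)
  uint32_t chunk_offset;  // byte offset within that chunk
};

class ChunkedAllocatorModel {
 public:
  explicit ChunkedAllocatorModel(uint32_t max_chunk_size)
      : max_chunk_size_(max_chunk_size) {}

  BackRef Allocate(uint32_t size) {
    if (pending_ + size > max_chunk_size_) {
      completed_.push_back(pending_);  // seal the current chunk
      pending_ = 0;
    }
    BackRef ref{static_cast<uint32_t>(completed_.size()), pending_};
    pending_ += size;
    return ref;
  }

 private:
  const uint32_t max_chunk_size_;      // cf. MaxChunkSizeInSpace(space)
  uint32_t pending_ = 0;               // bytes used in the open chunk
  std::vector<uint32_t> completed_;    // sizes of sealed chunks
};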
+
+SerializerReference DefaultSerializerAllocator::AllocateMap() {
+ // Maps are allocated one-by-one when deserializing.
+ return SerializerReference::MapReference(num_maps_++);
+}
+
+SerializerReference DefaultSerializerAllocator::AllocateLargeObject(
+ uint32_t size) {
+ // Large objects are allocated one-by-one when deserializing. We do not
+ // have to keep track of multiple chunks.
+ large_objects_total_size_ += size;
+ return SerializerReference::LargeObjectReference(seen_large_objects_index_++);
+}
+
+SerializerReference DefaultSerializerAllocator::AllocateOffHeapBackingStore() {
+ DCHECK_NE(0, seen_backing_stores_index_);
+ return SerializerReference::OffHeapBackingStoreReference(
+ seen_backing_stores_index_++);
+}
+
+#ifdef DEBUG
+bool DefaultSerializerAllocator::BackReferenceIsAlreadyAllocated(
+ SerializerReference reference) const {
+ DCHECK(reference.is_back_reference());
+ AllocationSpace space = reference.space();
+ if (space == LO_SPACE) {
+ return reference.large_object_index() < seen_large_objects_index_;
+ } else if (space == MAP_SPACE) {
+ return reference.map_index() < num_maps_;
+ } else {
+ size_t chunk_index = reference.chunk_index();
+ if (chunk_index == completed_chunks_[space].size()) {
+ return reference.chunk_offset() < pending_chunk_[space];
+ } else {
+ return chunk_index < completed_chunks_[space].size() &&
+ reference.chunk_offset() < completed_chunks_[space][chunk_index];
+ }
+ }
+}
+
+bool DefaultSerializerAllocator::HasNotExceededFirstPageOfEachSpace() const {
+ for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
+ if (!completed_chunks_[i].empty()) return false;
+ }
+ return true;
+}
+#endif
+
+std::vector<SerializedData::Reservation>
+DefaultSerializerAllocator::EncodeReservations() const {
+ std::vector<SerializedData::Reservation> out;
+
+ STATIC_ASSERT(NEW_SPACE == 0);
+ for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
+ for (size_t j = 0; j < completed_chunks_[i].size(); j++) {
+ out.emplace_back(completed_chunks_[i][j]);
+ }
+
+ if (pending_chunk_[i] > 0 || completed_chunks_[i].size() == 0) {
+ out.emplace_back(pending_chunk_[i]);
+ }
+ out.back().mark_as_last();
+ }
+
+ STATIC_ASSERT(MAP_SPACE == kNumberOfPreallocatedSpaces);
+ out.emplace_back(num_maps_ * Map::kSize);
+ out.back().mark_as_last();
+
+ STATIC_ASSERT(LO_SPACE == MAP_SPACE + 1);
+ out.emplace_back(large_objects_total_size_);
+ out.back().mark_as_last();
+
+ return out;
+}
+
+void DefaultSerializerAllocator::OutputStatistics() {
+ DCHECK(FLAG_serialization_statistics);
+
+ PrintF(" Spaces (bytes):\n");
+
+ STATIC_ASSERT(NEW_SPACE == 0);
+ for (int space = 0; space < kNumberOfSpaces; space++) {
+ PrintF("%16s", AllocationSpaceName(static_cast<AllocationSpace>(space)));
+ }
+ PrintF("\n");
+
+ STATIC_ASSERT(NEW_SPACE == 0);
+ for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
+ size_t s = pending_chunk_[space];
+ for (uint32_t chunk_size : completed_chunks_[space]) s += chunk_size;
+ PrintF("%16" PRIuS, s);
+ }
+
+ STATIC_ASSERT(MAP_SPACE == kNumberOfPreallocatedSpaces);
+ PrintF("%16d", num_maps_ * Map::kSize);
+
+ STATIC_ASSERT(LO_SPACE == MAP_SPACE + 1);
+ PrintF("%16d\n", large_objects_total_size_);
+}
+
+// static
+uint32_t DefaultSerializerAllocator::MaxChunkSizeInSpace(int space) {
+ DCHECK(0 <= space && space < kNumberOfPreallocatedSpaces);
+
+ return static_cast<uint32_t>(
+ MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(space)));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/default-serializer-allocator.h b/deps/v8/src/snapshot/default-serializer-allocator.h
new file mode 100644
index 0000000000..7bd247aaf1
--- /dev/null
+++ b/deps/v8/src/snapshot/default-serializer-allocator.h
@@ -0,0 +1,74 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_DEFAULT_SERIALIZER_ALLOCATOR_H_
+#define V8_SNAPSHOT_DEFAULT_SERIALIZER_ALLOCATOR_H_
+
+#include "src/snapshot/serializer-common.h"
+
+namespace v8 {
+namespace internal {
+
+template <class AllocatorT>
+class Serializer;
+
+class DefaultSerializerAllocator final {
+ public:
+ DefaultSerializerAllocator(
+ Serializer<DefaultSerializerAllocator>* serializer);
+
+ SerializerReference Allocate(AllocationSpace space, uint32_t size);
+ SerializerReference AllocateMap();
+ SerializerReference AllocateLargeObject(uint32_t size);
+ SerializerReference AllocateOffHeapBackingStore();
+
+#ifdef DEBUG
+ bool BackReferenceIsAlreadyAllocated(
+ SerializerReference back_reference) const;
+ bool HasNotExceededFirstPageOfEachSpace() const;
+#endif
+
+ std::vector<SerializedData::Reservation> EncodeReservations() const;
+
+ void OutputStatistics();
+
+ private:
+ static constexpr int kNumberOfPreallocatedSpaces =
+ SerializerDeserializer::kNumberOfPreallocatedSpaces;
+ static constexpr int kNumberOfSpaces =
+ SerializerDeserializer::kNumberOfSpaces;
+
+ static uint32_t MaxChunkSizeInSpace(int space);
+
+ // Objects from the same space are put into chunks for bulk-allocation
+ // when deserializing. We have to make sure that each chunk fits into a
+ // page. So we track the chunk size in pending_chunk_ of a space, but
+ // when it exceeds a page, we complete the current chunk and start a new one.
+ uint32_t pending_chunk_[kNumberOfPreallocatedSpaces];
+ std::vector<uint32_t> completed_chunks_[kNumberOfPreallocatedSpaces];
+
+ // Number of maps that we need to allocate.
+ uint32_t num_maps_ = 0;
+
+ // We map serialized large objects to indexes for back-referencing.
+ uint32_t large_objects_total_size_ = 0;
+ uint32_t seen_large_objects_index_ = 0;
+
+ // Used to keep track of the off-heap backing stores used by TypedArrays/
+ // ArrayBuffers. Note that the index begins at 1 and not 0, because when a
+ // TypedArray has an on-heap backing store, the backing_store pointer in the
+ // corresponding ArrayBuffer will be null, which makes it indistinguishable
+ // from index 0.
+ uint32_t seen_backing_stores_index_ = 1;
+
+ // The current serializer.
+ Serializer<DefaultSerializerAllocator>* const serializer_;
+
+ DISALLOW_COPY_AND_ASSIGN(DefaultSerializerAllocator);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_DEFAULT_SERIALIZER_ALLOCATOR_H_
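
The allocator above packs objects of one space into page-sized chunks so the deserializer can bulk-allocate them. A self-contained sketch of the core of Allocate() for a single space (the 512 KB limit is a stand-in for MemoryAllocator::PageAreaSize):

    #include <cstdint>
    #include <utility>
    #include <vector>

    struct ChunkAllocator {
      static constexpr uint32_t kMaxChunkSize = 512 * 1024;
      uint32_t pending_chunk_ = 0;
      std::vector<uint32_t> completed_chunks_;

      // Returns (chunk_index, offset) identifying the object's slot.
      std::pair<uint32_t, uint32_t> Allocate(uint32_t size) {
        uint32_t new_chunk_size = pending_chunk_ + size;
        if (new_chunk_size > kMaxChunkSize) {
          // The object would spill over a page: seal the current chunk
          // and start a new one, exactly as in the diff above.
          completed_chunks_.push_back(pending_chunk_);
          pending_chunk_ = 0;
          new_chunk_size = size;
        }
        uint32_t offset = pending_chunk_;
        pending_chunk_ = new_chunk_size;
        return {static_cast<uint32_t>(completed_chunks_.size()), offset};
      }
    };
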
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 113d24bb70..1eb15d6c38 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -13,7 +13,9 @@
#include "src/isolate.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
+#include "src/snapshot/builtin-deserializer.h"
#include "src/snapshot/natives.h"
+#include "src/snapshot/startup-deserializer.h"
#include "src/v8.h"
#include "src/v8threads.h"
@@ -41,7 +43,7 @@ void Deserializer::RegisterDeserializedObjectsForBlackAllocation() {
bool Deserializer::ReserveSpace() {
#ifdef DEBUG
for (int i = NEW_SPACE; i < kNumberOfSpaces; ++i) {
- CHECK(reservations_[i].size() > 0);
+ DCHECK(reservations_[i].size() > 0);
}
#endif // DEBUG
DCHECK(allocated_maps_.empty());
@@ -53,6 +55,82 @@ bool Deserializer::ReserveSpace() {
return true;
}
+// static
+bool Deserializer::ReserveSpace(StartupDeserializer* startup_deserializer,
+ BuiltinDeserializer* builtin_deserializer) {
+ const int first_space = NEW_SPACE;
+ const int last_space = SerializerDeserializer::kNumberOfSpaces;
+ Isolate* isolate = startup_deserializer->isolate();
+
+ // Create a set of merged reservations to reserve space in one go.
+ // The BuiltinDeserializer's reservations are ignored, since our actual
+ // requirements vary based on whether lazy deserialization is enabled.
+ // Instead, we manually determine the required code-space.
+
+ DCHECK(builtin_deserializer->ReservesOnlyCodeSpace());
+ Heap::Reservation merged_reservations[kNumberOfSpaces];
+ for (int i = first_space; i < last_space; i++) {
+ merged_reservations[i] = startup_deserializer->reservations_[i];
+ }
+
+ Heap::Reservation builtin_reservations =
+ builtin_deserializer->CreateReservationsForEagerBuiltins();
+ DCHECK(!builtin_reservations.empty());
+
+ for (const auto& c : builtin_reservations) {
+ merged_reservations[CODE_SPACE].push_back(c);
+ }
+
+ if (!isolate->heap()->ReserveSpace(merged_reservations,
+ &startup_deserializer->allocated_maps_)) {
+ return false;
+ }
+
+ DisallowHeapAllocation no_allocation;
+
+ // Distribute the successful allocations between both deserializers.
+ // There's nothing to be done here except for code space.
+
+ {
+ const int num_builtin_reservations =
+ static_cast<int>(builtin_reservations.size());
+ for (int i = num_builtin_reservations - 1; i >= 0; i--) {
+ const auto& c = merged_reservations[CODE_SPACE].back();
+ DCHECK_EQ(c.size, builtin_reservations[i].size);
+ DCHECK_EQ(c.size, c.end - c.start);
+ builtin_reservations[i].start = c.start;
+ builtin_reservations[i].end = c.end;
+ merged_reservations[CODE_SPACE].pop_back();
+ }
+
+ builtin_deserializer->InitializeBuiltinsTable(builtin_reservations);
+ }
+
+ // Write back startup reservations.
+
+ for (int i = first_space; i < last_space; i++) {
+ startup_deserializer->reservations_[i].swap(merged_reservations[i]);
+ }
+
+ for (int i = first_space; i < kNumberOfPreallocatedSpaces; i++) {
+ startup_deserializer->high_water_[i] =
+ startup_deserializer->reservations_[i][0].start;
+ builtin_deserializer->high_water_[i] = nullptr;
+ }
+
+ return true;
+}
+
+bool Deserializer::ReservesOnlyCodeSpace() const {
+ for (int space = NEW_SPACE; space < kNumberOfSpaces; space++) {
+ if (space == CODE_SPACE) continue;
+ const auto& r = reservations_[space];
+ for (const Heap::Chunk& c : r)
+ if (c.size != 0) return false;
+ }
+ return true;
+}
+
void Deserializer::Initialize(Isolate* isolate) {
DCHECK_NULL(isolate_);
DCHECK_NOT_NULL(isolate);
@@ -81,18 +159,22 @@ void Deserializer::SortMapDescriptors() {
}
}
+bool Deserializer::IsLazyDeserializationEnabled() const {
+ return FLAG_lazy_deserialization && !isolate()->serializer_enabled();
+}
+
Deserializer::~Deserializer() {
#ifdef DEBUG
// Do not perform checks if we aborted deserialization.
if (source_.position() == 0) return;
// Check that we only have padding bytes remaining.
- while (source_.HasMore()) CHECK_EQ(kNop, source_.Get());
+ while (source_.HasMore()) DCHECK_EQ(kNop, source_.Get());
for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
int chunk_index = current_chunk_[space];
- CHECK_EQ(reservations_[space].size(), chunk_index + 1);
- CHECK_EQ(reservations_[space][chunk_index].end, high_water_[space]);
+ DCHECK_EQ(reservations_[space].size(), chunk_index + 1);
+ DCHECK_EQ(reservations_[space][chunk_index].end, high_water_[space]);
}
- CHECK_EQ(allocated_maps_.size(), next_map_index_);
+ DCHECK_EQ(allocated_maps_.size(), next_map_index_);
#endif // DEBUG
}
@@ -107,6 +189,7 @@ void Deserializer::VisitRootPointers(Root root, Object** start, Object** end) {
void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
static const byte expected = kSynchronize;
CHECK_EQ(expected, source_.Get());
+ deserializing_builtins_ = (tag == VisitorSynchronization::kHandleScope);
}
void Deserializer::DeserializeDeferredObjects() {
@@ -251,6 +334,14 @@ HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
return obj;
}
+int Deserializer::MaybeReplaceWithDeserializeLazy(int builtin_id) {
+ DCHECK(Builtins::IsBuiltinId(builtin_id));
+ return (IsLazyDeserializationEnabled() && Builtins::IsLazy(builtin_id) &&
+ !deserializing_builtins_)
+ ? Builtins::kDeserializeLazy
+ : builtin_id;
+}
+
HeapObject* Deserializer::GetBackReferencedObject(int space) {
HeapObject* obj;
SerializerReference back_reference =
@@ -364,13 +455,25 @@ Address Deserializer::Allocate(int space_index, int size) {
// Assert that the current reserved chunk is still big enough.
const Heap::Reservation& reservation = reservations_[space_index];
int chunk_index = current_chunk_[space_index];
- CHECK_LE(high_water_[space_index], reservation[chunk_index].end);
+ DCHECK_LE(high_water_[space_index], reservation[chunk_index].end);
#endif
if (space_index == CODE_SPACE) SkipList::Update(address, size);
return address;
}
}
+Object* Deserializer::ReadDataSingle() {
+ Object* o;
+ Object** start = &o;
+ Object** end = start + 1;
+ int source_space = NEW_SPACE;
+ Address current_object = nullptr;
+
+ CHECK(ReadData(start, end, source_space, current_object));
+
+ return o;
+}
+
bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
Address current_object_address) {
Isolate* const isolate = isolate_;
@@ -462,6 +565,8 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
// Find an object in the partial snapshots cache and write a pointer to it
// to the current object.
SINGLE_CASE(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
+ SINGLE_CASE(kPartialSnapshotCache, kFromCode, kStartOfObject, 0)
+ SINGLE_CASE(kPartialSnapshotCache, kFromCode, kInnerPointer, 0)
// Find an external reference and write a pointer to it to the current
// object.
SINGLE_CASE(kExternalReference, kPlain, kStartOfObject, 0)
@@ -475,6 +580,7 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
SINGLE_CASE(kAttachedReference, kFromCode, kInnerPointer, 0)
// Find a builtin and write a pointer to it to the current object.
SINGLE_CASE(kBuiltin, kPlain, kStartOfObject, 0)
+ SINGLE_CASE(kBuiltin, kFromCode, kStartOfObject, 0)
SINGLE_CASE(kBuiltin, kFromCode, kInnerPointer, 0)
#undef CASE_STATEMENT
@@ -547,6 +653,17 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
int size_in_bytes = source_.GetInt();
byte* raw_data_out = reinterpret_cast<byte*>(current);
source_.CopyRaw(raw_data_out, size_in_bytes);
+ current = reinterpret_cast<Object**>(
+ reinterpret_cast<intptr_t>(current) + size_in_bytes);
+ break;
+ }
+
+ // Deserialize raw code directly into the body of the code object.
+ // Do not move current.
+ case kVariableRawCode: {
+ int size_in_bytes = source_.GetInt();
+ source_.CopyRaw(current_object_address + Code::kDataStart,
+ size_in_bytes);
break;
}
@@ -713,20 +830,20 @@ Object** Deserializer::ReadDataCase(Isolate* isolate, Object** current,
emit_write_barrier = isolate->heap()->InNewSpace(new_object);
} else {
DCHECK(where == kBuiltin);
- DCHECK(deserializing_user_code());
- int builtin_id = source_.GetInt();
- DCHECK_LE(0, builtin_id);
- DCHECK_LT(builtin_id, Builtins::builtin_count);
- Builtins::Name name = static_cast<Builtins::Name>(builtin_id);
- new_object = isolate->builtins()->builtin(name);
+ int builtin_id = MaybeReplaceWithDeserializeLazy(source_.GetInt());
+ new_object = isolate->builtins()->builtin(builtin_id);
emit_write_barrier = false;
}
if (within == kInnerPointer) {
DCHECK(how == kFromCode);
- if (new_object->IsCode()) {
- Code* new_code_object = Code::cast(new_object);
- new_object =
- reinterpret_cast<Object*>(new_code_object->instruction_start());
+ if (where == kBuiltin) {
+ // At this point, new_object may still be uninitialized, thus the
+ // unchecked Code cast.
+ new_object = reinterpret_cast<Object*>(
+ reinterpret_cast<Code*>(new_object)->instruction_start());
+ } else if (new_object->IsCode()) {
+ new_object = reinterpret_cast<Object*>(
+ Code::cast(new_object)->instruction_start());
} else {
Cell* cell = Cell::cast(new_object);
new_object = reinterpret_cast<Object*>(cell->ValueAddress());
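
The static ReserveSpace above merges the builtin code-space chunks into the startup reservation, reserves everything atomically, then hands the tail chunks back to the builtin deserializer. A hedged sketch of that merge-and-distribute shape (Chunk and the reserve callback stand in for Heap::Reservation and Heap::ReserveSpace):

    #include <vector>

    struct Chunk { int start, end, size; };

    bool ReserveTogether(std::vector<Chunk>& startup_code,
                         std::vector<Chunk>& builtin_code,
                         bool (*reserve)(std::vector<Chunk>&)) {
      std::vector<Chunk> merged = startup_code;
      merged.insert(merged.end(), builtin_code.begin(), builtin_code.end());
      // One atomic reservation; no GC can run between the two users.
      if (!reserve(merged)) return false;
      // Builtin chunks sit at the tail; peel them off in reverse so each
      // gets its allocated [start, end) range written back.
      for (int i = static_cast<int>(builtin_code.size()) - 1; i >= 0; i--) {
        builtin_code[i].start = merged.back().start;
        builtin_code[i].end = merged.back().end;
        merged.pop_back();
      }
      startup_code.swap(merged);
      return true;
    }
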
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index 9c6cf9c901..5aa2f8d656 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -25,7 +25,9 @@ namespace internal {
#define V8_CODE_EMBEDS_OBJECT_POINTER 0
#endif
+class BuiltinDeserializer;
class Heap;
+class StartupDeserializer;
// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
class Deserializer : public SerializerDeserializer {
@@ -59,11 +61,23 @@ class Deserializer : public SerializerDeserializer {
off_heap_backing_stores_.push_back(nullptr);
}
- void Initialize(Isolate* isolate);
bool ReserveSpace();
+
+ // Atomically reserves space for the two given deserializers. Guarantees
+ // reservation for both without garbage collection in-between.
+ static bool ReserveSpace(StartupDeserializer* startup_deserializer,
+ BuiltinDeserializer* builtin_deserializer);
+ bool ReservesOnlyCodeSpace() const;
+
+ void Initialize(Isolate* isolate);
void DeserializeDeferredObjects();
void RegisterDeserializedObjectsForBlackAllocation();
+ virtual Address Allocate(int space_index, int size);
+
+ // Deserializes into a single pointer and returns the resulting object.
+ Object* ReadDataSingle();
+
// This returns the address of an object that has been described in the
// snapshot by chunk index and offset.
HeapObject* GetBackReferencedObject(int space);
@@ -91,6 +105,8 @@ class Deserializer : public SerializerDeserializer {
bool deserializing_user_code() const { return deserializing_user_code_; }
bool can_rehash() const { return can_rehash_; }
+ bool IsLazyDeserializationEnabled() const;
+
private:
void VisitRootPointers(Root root, Object** start, Object** end) override;
@@ -126,11 +142,14 @@ class Deserializer : public SerializerDeserializer {
bool write_barrier_needed);
void ReadObject(int space_number, Object** write_back);
- Address Allocate(int space_index, int size);
// Special handling for serialized code like hooking up internalized strings.
HeapObject* PostProcessNewObject(HeapObject* obj, int space);
+ // May replace the given builtin_id with the DeserializeLazy builtin for lazy
+ // deserialization.
+ int MaybeReplaceWithDeserializeLazy(int builtin_id);
+
// Cached current isolate.
Isolate* isolate_;
@@ -162,6 +181,10 @@ class Deserializer : public SerializerDeserializer {
const bool deserializing_user_code_;
+ // TODO(jgruber): This workaround will no longer be necessary once builtin
+ // reference patching has been removed (through advance allocation).
+ bool deserializing_builtins_ = false;
+
AllocationAlignment next_alignment_;
// TODO(6593): generalize rehashing, and remove this flag.
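
MaybeReplaceWithDeserializeLazy, declared above, is a pure decision function: unless the builtins table itself is being deserialized, lazy builtins get swapped for the DeserializeLazy trampoline. A standalone sketch with the inputs made explicit (the id constant is a placeholder, not V8's real value):

    constexpr int kDeserializeLazy = 0;  // placeholder id

    int MaybeReplaceWithDeserializeLazy(int builtin_id, bool lazy_enabled,
                                        bool is_lazy_builtin,
                                        bool deserializing_builtins) {
      return (lazy_enabled && is_lazy_builtin && !deserializing_builtins)
                 ? kDeserializeLazy
                 : builtin_id;
    }
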
diff --git a/deps/v8/src/snapshot/mksnapshot.cc b/deps/v8/src/snapshot/mksnapshot.cc
index a24fa6a777..a6d9862c10 100644
--- a/deps/v8/src/snapshot/mksnapshot.cc
+++ b/deps/v8/src/snapshot/mksnapshot.cc
@@ -15,8 +15,6 @@
#include "src/snapshot/partial-serializer.h"
#include "src/snapshot/startup-serializer.h"
-using namespace v8;
-
class SnapshotWriter {
public:
SnapshotWriter() : snapshot_cpp_path_(NULL), snapshot_blob_path_(NULL) {}
@@ -103,7 +101,7 @@ class SnapshotWriter {
}
static FILE* GetFileDescriptorOrDie(const char* filename) {
- FILE* fp = base::OS::FOpen(filename, "wb");
+ FILE* fp = v8::base::OS::FOpen(filename, "wb");
if (fp == NULL) {
i::PrintF("Unable to open file \"%s\" for writing.\n", filename);
exit(1);
@@ -118,7 +116,7 @@ class SnapshotWriter {
char* GetExtraCode(char* filename, const char* description) {
if (filename == NULL || strlen(filename) == 0) return NULL;
::printf("Loading script for %s: %s\n", description, filename);
- FILE* file = base::OS::FOpen(filename, "rb");
+ FILE* file = v8::base::OS::FOpen(filename, "rb");
if (file == NULL) {
fprintf(stderr, "Failed to open '%s': errno %d\n", filename, errno);
exit(1);
@@ -156,7 +154,7 @@ int main(int argc, char** argv) {
}
i::CpuFeatures::Probe(true);
- V8::InitializeICUDefaultLocation(argv[0]);
+ v8::V8::InitializeICUDefaultLocation(argv[0]);
v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform);
v8::V8::Initialize();
@@ -167,12 +165,12 @@ int main(int argc, char** argv) {
if (i::FLAG_startup_blob) writer.SetStartupBlobFile(i::FLAG_startup_blob);
char* embed_script = GetExtraCode(argc >= 2 ? argv[1] : NULL, "embedding");
- StartupData blob = v8::V8::CreateSnapshotDataBlob(embed_script);
+ v8::StartupData blob = v8::V8::CreateSnapshotDataBlob(embed_script);
delete[] embed_script;
char* warmup_script = GetExtraCode(argc >= 3 ? argv[2] : NULL, "warm up");
if (warmup_script) {
- StartupData cold = blob;
+ v8::StartupData cold = blob;
blob = v8::V8::WarmUpSnapshotDataBlob(cold, warmup_script);
delete[] cold.data;
delete[] warmup_script;
@@ -183,8 +181,8 @@ int main(int argc, char** argv) {
delete[] blob.data;
}
- V8::Dispose();
- V8::ShutdownPlatform();
+ v8::V8::Dispose();
+ v8::V8::ShutdownPlatform();
delete platform;
return 0;
}
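
For context, the API that mksnapshot drives here is public: v8::V8 exposed CreateSnapshotDataBlob and WarmUpSnapshotDataBlob in this V8 version. A hedged usage sketch (the embedded and warm-up scripts are hypothetical, and V8/platform initialization is assumed to have already run, as in main() above):

    #include <v8.h>

    v8::StartupData BuildWarmedUpBlob() {
      v8::StartupData cold =
          v8::V8::CreateSnapshotDataBlob("function embedded() { return 42; }");
      v8::StartupData warm =
          v8::V8::WarmUpSnapshotDataBlob(cold, "embedded();");
      delete[] cold.data;  // the caller owns each blob's data
      return warm;
    }
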
diff --git a/deps/v8/src/snapshot/partial-deserializer.cc b/deps/v8/src/snapshot/partial-deserializer.cc
index 241204d3ba..f4786006f8 100644
--- a/deps/v8/src/snapshot/partial-deserializer.cc
+++ b/deps/v8/src/snapshot/partial-deserializer.cc
@@ -4,6 +4,7 @@
#include "src/snapshot/partial-deserializer.h"
+#include "src/api.h"
#include "src/heap/heap-inl.h"
#include "src/snapshot/snapshot.h"
diff --git a/deps/v8/src/snapshot/partial-serializer.cc b/deps/v8/src/snapshot/partial-serializer.cc
index 4587fa7c4e..cae28234c1 100644
--- a/deps/v8/src/snapshot/partial-serializer.cc
+++ b/deps/v8/src/snapshot/partial-serializer.cc
@@ -5,6 +5,7 @@
#include "src/snapshot/partial-serializer.h"
#include "src/snapshot/startup-serializer.h"
+#include "src/api.h"
#include "src/objects-inl.h"
namespace v8 {
@@ -36,14 +37,13 @@ void PartialSerializer::Serialize(Object** o, bool include_global_proxy) {
// it before serializing, it will get re-added to the context list
// explicitly when it's loaded.
context->set(Context::NEXT_CONTEXT_LINK,
- isolate_->heap()->undefined_value());
+ isolate()->heap()->undefined_value());
DCHECK(!context->global_object()->IsUndefined(context->GetIsolate()));
// Reset math random cache to get fresh random numbers.
context->set_math_random_index(Smi::kZero);
- context->set_math_random_cache(isolate_->heap()->undefined_value());
+ context->set_math_random_cache(isolate()->heap()->undefined_value());
DCHECK_NULL(rehashable_global_dictionary_);
- rehashable_global_dictionary_ =
- context->global_object()->global_dictionary();
+ rehashable_global_dictionary_ = context->global_object()->global_dictionary();
VisitRootPointer(Root::kPartialSnapshotCache, o);
SerializeDeferredObjects();
@@ -53,15 +53,15 @@ void PartialSerializer::Serialize(Object** o, bool include_global_proxy) {
void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
- if (obj->IsMap()) {
- // The code-caches link to context-specific code objects, which
- // the startup and context serializers cannot currently handle.
- DCHECK(Map::cast(obj)->code_cache() == obj->GetHeap()->empty_fixed_array());
+ BuiltinReferenceSerializationMode mode =
+ startup_serializer_->clear_function_code() ? kCanonicalizeCompileLazy
+ : kDefault;
+ if (SerializeBuiltinReference(obj, how_to_code, where_to_point, skip, mode)) {
+ return;
}
-
if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
- int root_index = root_index_map_.Lookup(obj);
+ int root_index = root_index_map()->Lookup(obj);
if (root_index != RootIndexMap::kInvalidRootIndex) {
PutRoot(root_index, obj, how_to_code, where_to_point, skip);
return;
@@ -82,7 +82,7 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
// Pointers from the partial snapshot to the objects in the startup snapshot
// should go through the root array or through the partial snapshot cache.
// If this is not the case you may have to add something to the root array.
- DCHECK(!startup_serializer_->reference_map()->Lookup(obj).is_valid());
+ DCHECK(!startup_serializer_->ReferenceMapContains(obj));
// All the internalized strings that the partial snapshot needs should be
// either in the root table or in the partial snapshot cache.
DCHECK(!obj->IsInternalizedString());
@@ -91,11 +91,8 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
FlushSkip(skip);
- // Clear literal boilerplates.
- if (obj->IsJSFunction()) {
- JSFunction* function = JSFunction::cast(obj);
- function->ClearTypeFeedbackInfo();
- }
+ // Clear literal boilerplates and feedback.
+ if (obj->IsFeedbackVector()) FeedbackVector::cast(obj)->ClearSlots(isolate());
if (obj->IsJSObject()) {
JSObject* jsobj = JSObject::cast(obj);
@@ -136,7 +133,7 @@ void PartialSerializer::SerializeEmbedderFields() {
HandleScope scope(isolate());
Handle<JSObject> obj(embedder_field_holders_.back(), isolate());
embedder_field_holders_.pop_back();
- SerializerReference reference = reference_map_.Lookup(*obj);
+ SerializerReference reference = reference_map()->Lookup(*obj);
DCHECK(reference.is_back_reference());
int embedder_fields_count = obj->GetEmbedderFieldCount();
for (int i = 0; i < embedder_fields_count; i++) {
diff --git a/deps/v8/src/snapshot/partial-serializer.h b/deps/v8/src/snapshot/partial-serializer.h
index 5ccf8f340d..6eb8b91436 100644
--- a/deps/v8/src/snapshot/partial-serializer.h
+++ b/deps/v8/src/snapshot/partial-serializer.h
@@ -13,7 +13,7 @@ namespace internal {
class StartupSerializer;
-class PartialSerializer : public Serializer {
+class PartialSerializer : public Serializer<> {
public:
PartialSerializer(Isolate* isolate, StartupSerializer* startup_serializer,
v8::SerializeEmbedderFieldsCallback callback);
diff --git a/deps/v8/src/snapshot/serializer-common.cc b/deps/v8/src/snapshot/serializer-common.cc
index 4840da9cf5..ec7b7b25c7 100644
--- a/deps/v8/src/snapshot/serializer-common.cc
+++ b/deps/v8/src/snapshot/serializer-common.cc
@@ -5,8 +5,6 @@
#include "src/snapshot/serializer-common.h"
#include "src/external-reference-table.h"
-#include "src/ic/stub-cache.h"
-#include "src/list-inl.h"
#include "src/objects-inl.h"
namespace v8 {
diff --git a/deps/v8/src/snapshot/serializer-common.h b/deps/v8/src/snapshot/serializer-common.h
index bfb881810b..f753402d15 100644
--- a/deps/v8/src/snapshot/serializer-common.h
+++ b/deps/v8/src/snapshot/serializer-common.h
@@ -186,21 +186,24 @@ class SerializerDeserializer : public RootVisitor {
// Repeats of variable length.
static const int kVariableRepeat = 0x19;
// Raw data of variable length.
- static const int kVariableRawData = 0x1a;
- // Internal reference encoded as offsets of pc and target from code entry.
- static const int kInternalReference = 0x1b;
- static const int kInternalReferenceEncoded = 0x1c;
+ static const int kVariableRawCode = 0x1a;
+ static const int kVariableRawData = 0x1b;
+
+ // Used for embedder-allocated backing stores for TypedArrays.
+ static const int kOffHeapBackingStore = 0x1c;
+
// Used to encode deoptimizer entry code.
static const int kDeoptimizerEntryPlain = 0x1d;
static const int kDeoptimizerEntryFromCode = 0x1e;
// Used for embedder-provided serialization data for embedder fields.
static const int kEmbedderFieldsData = 0x1f;
- // Used for embedder-allocated backing stores for TypedArrays.
- static const int kOffHeapBackingStore = 0x35;
+ // Internal reference encoded as offsets of pc and target from code entry.
+ static const int kInternalReference = 0x35;
+ static const int kInternalReferenceEncoded = 0x36;
// Used to encode external references provided through the API.
- static const int kApiReference = 0x36;
+ static const int kApiReference = 0x37;
// 8 hot (recently seen or back-referenced) objects with optional skip.
static const int kNumberOfHotObjects = 8;
@@ -211,7 +214,7 @@ class SerializerDeserializer : public RootVisitor {
static const int kHotObjectWithSkip = 0x58;
static const int kHotObjectMask = 0x07;
- // 0x37, 0x55..0x57, 0x75..0x7f unused.
+ // 0x55..0x57, 0x75..0x7f unused.
// ---------- byte code range 0x80..0xff ----------
// First 32 root array items.
@@ -272,7 +275,7 @@ class SerializedData {
other.owns_data_ = false;
}
- ~SerializedData() {
+ virtual ~SerializedData() {
if (owns_data_) DeleteArray<byte>(data_);
}
@@ -287,7 +290,6 @@ class SerializedData {
}
static const uint32_t kMagicNumberOffset = 0;
- static const uint32_t kVersionHashOffset = kMagicNumberOffset + kUInt32Size;
protected:
void SetHeaderValue(uint32_t offset, uint32_t value) {
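
Since this hunk renumbers several bytecodes, a few compile-time checks make the new layout's invariants explicit. A sketch using only values visible in the hunk (kHotObject = 0x38 is an assumption inferred from kHotObjectWithSkip = 0x58 and the 8-object mask):

    constexpr int kVariableRawCode = 0x1a;
    constexpr int kVariableRawData = 0x1b;
    constexpr int kOffHeapBackingStore = 0x1c;
    constexpr int kInternalReference = 0x35;
    constexpr int kInternalReferenceEncoded = 0x36;
    constexpr int kApiReference = 0x37;
    constexpr int kHotObject = 0x38;  // assumed start of hot-object range

    static_assert(kVariableRawCode + 1 == kVariableRawData,
                  "raw-code and raw-data opcodes stay adjacent");
    static_assert(kApiReference < kHotObject,
                  "relocated opcodes stay below the hot-object range");
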
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index 3d0176a139..9db7d798a5 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -5,32 +5,18 @@
#include "src/snapshot/serializer.h"
#include "src/assembler-inl.h"
-#include "src/deoptimizer.h"
-#include "src/heap/heap-inl.h"
-#include "src/macro-assembler.h"
+#include "src/objects/map.h"
#include "src/snapshot/natives.h"
namespace v8 {
namespace internal {
-Serializer::Serializer(Isolate* isolate)
+template <class AllocatorT>
+Serializer<AllocatorT>::Serializer(Isolate* isolate)
: isolate_(isolate),
external_reference_encoder_(isolate),
root_index_map_(isolate),
- recursion_depth_(0),
- code_address_map_(NULL),
- num_maps_(0),
- large_objects_total_size_(0),
- seen_large_objects_index_(0),
- seen_backing_stores_index_(1) {
- // The serializer is meant to be used only to generate initial heap images
- // from a context in which there is only one isolate.
- for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
- pending_chunk_[i] = 0;
- max_chunk_size_[i] = static_cast<uint32_t>(
- MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(i)));
- }
-
+ allocator_(this) {
#ifdef OBJECT_PRINT
if (FLAG_serialization_statistics) {
instance_type_count_ = NewArray<int>(kInstanceTypes);
@@ -46,7 +32,8 @@ Serializer::Serializer(Isolate* isolate)
#endif // OBJECT_PRINT
}
-Serializer::~Serializer() {
+template <class AllocatorT>
+Serializer<AllocatorT>::~Serializer() {
if (code_address_map_ != NULL) delete code_address_map_;
#ifdef OBJECT_PRINT
if (instance_type_count_ != NULL) {
@@ -57,28 +44,21 @@ Serializer::~Serializer() {
}
#ifdef OBJECT_PRINT
-void Serializer::CountInstanceType(Map* map, int size) {
+template <class AllocatorT>
+void Serializer<AllocatorT>::CountInstanceType(Map* map, int size) {
int instance_type = map->instance_type();
instance_type_count_[instance_type]++;
instance_type_size_[instance_type] += size;
}
#endif // OBJECT_PRINT
-void Serializer::OutputStatistics(const char* name) {
+template <class AllocatorT>
+void Serializer<AllocatorT>::OutputStatistics(const char* name) {
if (!FLAG_serialization_statistics) return;
+
PrintF("%s:\n", name);
- PrintF(" Spaces (bytes):\n");
- for (int space = 0; space < kNumberOfSpaces; space++) {
- PrintF("%16s", AllocationSpaceName(static_cast<AllocationSpace>(space)));
- }
- PrintF("\n");
- for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
- size_t s = pending_chunk_[space];
- for (uint32_t chunk_size : completed_chunks_[space]) s += chunk_size;
- PrintF("%16" PRIuS, s);
- }
- PrintF("%16d", num_maps_ * Map::kSize);
- PrintF("%16d\n", large_objects_total_size_);
+ allocator()->OutputStatistics();
+
#ifdef OBJECT_PRINT
PrintF(" Instance types (count and bytes):\n");
#define PRINT_INSTANCE_TYPE(Name) \
@@ -92,7 +72,8 @@ void Serializer::OutputStatistics(const char* name) {
#endif // OBJECT_PRINT
}
-void Serializer::SerializeDeferredObjects() {
+template <class AllocatorT>
+void Serializer<AllocatorT>::SerializeDeferredObjects() {
while (!deferred_objects_.empty()) {
HeapObject* obj = deferred_objects_.back();
deferred_objects_.pop_back();
@@ -102,9 +83,14 @@ void Serializer::SerializeDeferredObjects() {
sink_.Put(kSynchronize, "Finished with deferred objects");
}
-bool Serializer::MustBeDeferred(HeapObject* object) { return false; }
+template <class AllocatorT>
+bool Serializer<AllocatorT>::MustBeDeferred(HeapObject* object) {
+ return false;
+}
-void Serializer::VisitRootPointers(Root root, Object** start, Object** end) {
+template <class AllocatorT>
+void Serializer<AllocatorT>::VisitRootPointers(Root root, Object** start,
+ Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsSmi()) {
PutSmi(Smi::cast(*current));
@@ -114,45 +100,9 @@ void Serializer::VisitRootPointers(Root root, Object** start, Object** end) {
}
}
-void Serializer::EncodeReservations(
- std::vector<SerializedData::Reservation>* out) const {
- for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
- for (size_t j = 0; j < completed_chunks_[i].size(); j++) {
- out->push_back(SerializedData::Reservation(completed_chunks_[i][j]));
- }
-
- if (pending_chunk_[i] > 0 || completed_chunks_[i].size() == 0) {
- out->push_back(SerializedData::Reservation(pending_chunk_[i]));
- }
- out->back().mark_as_last();
- }
- out->push_back(SerializedData::Reservation(num_maps_ * Map::kSize));
- out->back().mark_as_last();
- out->push_back(SerializedData::Reservation(large_objects_total_size_));
- out->back().mark_as_last();
-}
-
#ifdef DEBUG
-bool Serializer::BackReferenceIsAlreadyAllocated(
- SerializerReference reference) {
- DCHECK(reference.is_back_reference());
- AllocationSpace space = reference.space();
- if (space == LO_SPACE) {
- return reference.large_object_index() < seen_large_objects_index_;
- } else if (space == MAP_SPACE) {
- return reference.map_index() < num_maps_;
- } else {
- size_t chunk_index = reference.chunk_index();
- if (chunk_index == completed_chunks_[space].size()) {
- return reference.chunk_offset() < pending_chunk_[space];
- } else {
- return chunk_index < completed_chunks_[space].size() &&
- reference.chunk_offset() < completed_chunks_[space][chunk_index];
- }
- }
-}
-
-void Serializer::PrintStack() {
+template <class AllocatorT>
+void Serializer<AllocatorT>::PrintStack() {
for (const auto o : stack_) {
o->Print();
PrintF("\n");
@@ -160,8 +110,11 @@ void Serializer::PrintStack() {
}
#endif // DEBUG
-bool Serializer::SerializeHotObject(HeapObject* obj, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) {
+template <class AllocatorT>
+bool Serializer<AllocatorT>::SerializeHotObject(HeapObject* obj,
+ HowToCode how_to_code,
+ WhereToPoint where_to_point,
+ int skip) {
if (how_to_code != kPlain || where_to_point != kStartOfObject) return false;
// Encode a reference to a hot object by its index in the working set.
int index = hot_objects_.Find(obj);
@@ -180,8 +133,12 @@ bool Serializer::SerializeHotObject(HeapObject* obj, HowToCode how_to_code,
}
return true;
}
-bool Serializer::SerializeBackReference(HeapObject* obj, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) {
+
+template <class AllocatorT>
+bool Serializer<AllocatorT>::SerializeBackReference(HeapObject* obj,
+ HowToCode how_to_code,
+ WhereToPoint where_to_point,
+ int skip) {
SerializerReference reference = reference_map_.Lookup(obj);
if (!reference.is_valid()) return false;
// Encode the location of an already deserialized object in order to write
@@ -217,10 +174,43 @@ bool Serializer::SerializeBackReference(HeapObject* obj, HowToCode how_to_code,
return true;
}
-void Serializer::PutRoot(int root_index, HeapObject* object,
- SerializerDeserializer::HowToCode how_to_code,
- SerializerDeserializer::WhereToPoint where_to_point,
- int skip) {
+template <class AllocatorT>
+bool Serializer<AllocatorT>::SerializeBuiltinReference(
+ HeapObject* obj, HowToCode how_to_code, WhereToPoint where_to_point,
+ int skip, BuiltinReferenceSerializationMode mode) {
+ if (!obj->IsCode()) return false;
+
+ Code* code = Code::cast(obj);
+ int builtin_index = code->builtin_index();
+ if (builtin_index < 0) return false;
+
+ DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
+ (how_to_code == kFromCode));
+ DCHECK_LT(builtin_index, Builtins::builtin_count);
+ DCHECK_LE(0, builtin_index);
+
+ if (mode == kCanonicalizeCompileLazy &&
+ code->is_interpreter_trampoline_builtin()) {
+ builtin_index = static_cast<int>(Builtins::kCompileLazy);
+ }
+
+ if (FLAG_trace_serializer) {
+ PrintF(" Encoding builtin reference: %s\n",
+ isolate()->builtins()->name(builtin_index));
+ }
+
+ FlushSkip(skip);
+ sink_.Put(kBuiltin + how_to_code + where_to_point, "Builtin");
+ sink_.PutInt(builtin_index, "builtin_index");
+
+ return true;
+}
+
+template <class AllocatorT>
+void Serializer<AllocatorT>::PutRoot(
+ int root_index, HeapObject* object,
+ SerializerDeserializer::HowToCode how_to_code,
+ SerializerDeserializer::WhereToPoint where_to_point, int skip) {
if (FLAG_trace_serializer) {
PrintF(" Encoding root %d:", root_index);
object->ShortPrint();
@@ -249,22 +239,25 @@ void Serializer::PutRoot(int root_index, HeapObject* object,
}
}
-void Serializer::PutSmi(Smi* smi) {
+template <class AllocatorT>
+void Serializer<AllocatorT>::PutSmi(Smi* smi) {
sink_.Put(kOnePointerRawData, "Smi");
byte* bytes = reinterpret_cast<byte*>(&smi);
for (int i = 0; i < kPointerSize; i++) sink_.Put(bytes[i], "Byte");
}
-void Serializer::PutBackReference(HeapObject* object,
- SerializerReference reference) {
- DCHECK(BackReferenceIsAlreadyAllocated(reference));
+template <class AllocatorT>
+void Serializer<AllocatorT>::PutBackReference(HeapObject* object,
+ SerializerReference reference) {
+ DCHECK(allocator()->BackReferenceIsAlreadyAllocated(reference));
sink_.PutInt(reference.back_reference(), "BackRefValue");
hot_objects_.Add(object);
}
-void Serializer::PutAttachedReference(SerializerReference reference,
- HowToCode how_to_code,
- WhereToPoint where_to_point) {
+template <class AllocatorT>
+void Serializer<AllocatorT>::PutAttachedReference(SerializerReference reference,
+ HowToCode how_to_code,
+ WhereToPoint where_to_point) {
DCHECK(reference.is_attached_reference());
DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
(how_to_code == kFromCode && where_to_point == kStartOfObject) ||
@@ -273,7 +266,8 @@ void Serializer::PutAttachedReference(SerializerReference reference,
sink_.PutInt(reference.attached_reference_index(), "AttachedRefIndex");
}
-int Serializer::PutAlignmentPrefix(HeapObject* object) {
+template <class AllocatorT>
+int Serializer<AllocatorT>::PutAlignmentPrefix(HeapObject* object) {
AllocationAlignment alignment = object->RequiredAlignment();
if (alignment != kWordAligned) {
DCHECK(1 <= alignment && alignment <= 3);
@@ -284,44 +278,14 @@ int Serializer::PutAlignmentPrefix(HeapObject* object) {
return 0;
}
-SerializerReference Serializer::AllocateOffHeapBackingStore() {
- DCHECK_NE(0, seen_backing_stores_index_);
- return SerializerReference::OffHeapBackingStoreReference(
- seen_backing_stores_index_++);
-}
-
-SerializerReference Serializer::AllocateLargeObject(int size) {
- // Large objects are allocated one-by-one when deserializing. We do not
- // have to keep track of multiple chunks.
- large_objects_total_size_ += size;
- return SerializerReference::LargeObjectReference(seen_large_objects_index_++);
+template <class AllocatorT>
+void Serializer<AllocatorT>::PutNextChunk(int space) {
+ sink_.Put(kNextChunk, "NextChunk");
+ sink_.Put(space, "NextChunkSpace");
}
-SerializerReference Serializer::AllocateMap() {
- // Maps are allocated one-by-one when deserializing.
- return SerializerReference::MapReference(num_maps_++);
-}
-
-SerializerReference Serializer::Allocate(AllocationSpace space, int size) {
- DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
- DCHECK(size > 0 && size <= static_cast<int>(max_chunk_size(space)));
- uint32_t new_chunk_size = pending_chunk_[space] + size;
- if (new_chunk_size > max_chunk_size(space)) {
- // The new chunk size would not fit onto a single page. Complete the
- // current chunk and start a new one.
- sink_.Put(kNextChunk, "NextChunk");
- sink_.Put(space, "NextChunkSpace");
- completed_chunks_[space].push_back(pending_chunk_[space]);
- pending_chunk_[space] = 0;
- new_chunk_size = size;
- }
- uint32_t offset = pending_chunk_[space];
- pending_chunk_[space] = new_chunk_size;
- return SerializerReference::BackReference(
- space, static_cast<uint32_t>(completed_chunks_[space].size()), offset);
-}
-
-void Serializer::Pad() {
+template <class AllocatorT>
+void Serializer<AllocatorT>::Pad() {
// The non-branching GetInt will read up to 3 bytes too far, so we need
// to pad the snapshot to make sure we don't read over the end.
for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
@@ -333,12 +297,14 @@ void Serializer::Pad() {
}
}
-void Serializer::InitializeCodeAddressMap() {
+template <class AllocatorT>
+void Serializer<AllocatorT>::InitializeCodeAddressMap() {
isolate_->InitializeLoggingAndCounters();
code_address_map_ = new CodeAddressMap(isolate_);
}
-Code* Serializer::CopyCode(Code* code) {
+template <class AllocatorT>
+Code* Serializer<AllocatorT>::CopyCode(Code* code) {
code_buffer_.clear(); // Clear buffer without deleting backing store.
int size = code->CodeSize();
code_buffer_.insert(code_buffer_.end(), code->address(),
@@ -346,15 +312,9 @@ Code* Serializer::CopyCode(Code* code) {
return Code::cast(HeapObject::FromAddress(&code_buffer_.front()));
}
-bool Serializer::HasNotExceededFirstPageOfEachSpace() {
- for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
- if (!completed_chunks_[i].empty()) return false;
- }
- return true;
-}
-
-void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
- int size, Map* map) {
+template <class AllocatorT>
+void Serializer<AllocatorT>::ObjectSerializer::SerializePrologue(
+ AllocationSpace space, int size, Map* map) {
if (serializer_->code_address_map_) {
const char* code_name =
serializer_->code_address_map_->Lookup(object_->address());
@@ -372,16 +332,16 @@ void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
} else {
sink_->Put(NOT_EXECUTABLE, "not executable large object");
}
- back_reference = serializer_->AllocateLargeObject(size);
+ back_reference = serializer_->allocator()->AllocateLargeObject(size);
} else if (space == MAP_SPACE) {
DCHECK_EQ(Map::kSize, size);
- back_reference = serializer_->AllocateMap();
+ back_reference = serializer_->allocator()->AllocateMap();
sink_->Put(kNewObject + reference_representation_ + space, "NewMap");
// This is redundant, but we include it anyways.
sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
} else {
int fill = serializer_->PutAlignmentPrefix(object_);
- back_reference = serializer_->Allocate(space, size + fill);
+ back_reference = serializer_->allocator()->Allocate(space, size + fill);
sink_->Put(kNewObject + reference_representation_ + space, "NewObject");
sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
}
@@ -399,7 +359,8 @@ void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
serializer_->SerializeObject(map, kPlain, kStartOfObject, 0);
}
-int32_t Serializer::ObjectSerializer::SerializeBackingStore(
+template <class AllocatorT>
+int32_t Serializer<AllocatorT>::ObjectSerializer::SerializeBackingStore(
void* backing_store, int32_t byte_length) {
SerializerReference reference =
serializer_->reference_map()->Lookup(backing_store);
@@ -410,7 +371,7 @@ int32_t Serializer::ObjectSerializer::SerializeBackingStore(
sink_->PutInt(byte_length, "length");
sink_->PutRaw(static_cast<byte*>(backing_store), byte_length,
"BackingStore");
- reference = serializer_->AllocateOffHeapBackingStore();
+ reference = serializer_->allocator()->AllocateOffHeapBackingStore();
// Mark this backing store as already serialized.
serializer_->reference_map()->Add(backing_store, reference);
}
@@ -422,7 +383,8 @@ int32_t Serializer::ObjectSerializer::SerializeBackingStore(
// same backing store does not know anything about it. This fixup step finds
// neutered TypedArrays and clears the values in the FixedTypedArray so that
// we don't try to serialize the now invalid backing store.
-void Serializer::ObjectSerializer::FixupIfNeutered() {
+template <class AllocatorT>
+void Serializer<AllocatorT>::ObjectSerializer::FixupIfNeutered() {
JSTypedArray* array = JSTypedArray::cast(object_);
if (!array->WasNeutered()) return;
@@ -432,7 +394,8 @@ void Serializer::ObjectSerializer::FixupIfNeutered() {
fta->set_length(0);
}
-void Serializer::ObjectSerializer::SerializeJSArrayBuffer() {
+template <class AllocatorT>
+void Serializer<AllocatorT>::ObjectSerializer::SerializeJSArrayBuffer() {
JSArrayBuffer* buffer = JSArrayBuffer::cast(object_);
void* backing_store = buffer->backing_store();
// We cannot store byte_length larger than Smi range in the snapshot.
@@ -445,10 +408,11 @@ void Serializer::ObjectSerializer::SerializeJSArrayBuffer() {
int32_t ref = SerializeBackingStore(backing_store, byte_length);
buffer->set_backing_store(Smi::FromInt(ref));
}
- SerializeContent();
+ SerializeObject();
}
-void Serializer::ObjectSerializer::SerializeFixedTypedArray() {
+template <class AllocatorT>
+void Serializer<AllocatorT>::ObjectSerializer::SerializeFixedTypedArray() {
FixedTypedArrayBase* fta = FixedTypedArrayBase::cast(object_);
void* backing_store = fta->DataPtr();
// We cannot store byte_length larger than Smi range in the snapshot.
@@ -464,10 +428,11 @@ void Serializer::ObjectSerializer::SerializeFixedTypedArray() {
int32_t ref = SerializeBackingStore(backing_store, byte_length);
fta->set_external_pointer(Smi::FromInt(ref));
}
- SerializeContent();
+ SerializeObject();
}
-void Serializer::ObjectSerializer::SerializeExternalString() {
+template <class AllocatorT>
+void Serializer<AllocatorT>::ObjectSerializer::SerializeExternalString() {
Heap* heap = serializer_->isolate()->heap();
if (object_->map() != heap->native_source_string_map()) {
// Usually we cannot recreate resources for external strings. To work
@@ -484,13 +449,15 @@ void Serializer::ObjectSerializer::SerializeExternalString() {
string->resource());
// Replace the resource field with the type and index of the native source.
string->set_resource(resource->EncodeForSerialization());
- SerializeContent();
+ SerializeObject();
// Restore the resource field.
string->set_resource(resource);
}
}
-void Serializer::ObjectSerializer::SerializeExternalStringAsSequentialString() {
+template <class AllocatorT>
+void Serializer<
+ AllocatorT>::ObjectSerializer::SerializeExternalStringAsSequentialString() {
// Instead of serializing this as an external string, we serialize
// an imaginary sequential string with the same content.
Isolate* isolate = serializer_->isolate();
@@ -545,9 +512,6 @@ void Serializer::ObjectSerializer::SerializeExternalStringAsSequentialString() {
int padding_size = allocation_size - SeqString::kHeaderSize - content_size;
DCHECK(0 <= padding_size && padding_size < kObjectAlignment);
for (int i = 0; i < padding_size; i++) sink_->PutSection(0, "StringPadding");
-
- sink_->Put(kSkip, "SkipAfterString");
- sink_->PutInt(bytes_to_output, "SkipDistance");
}
// Clear and later restore the next link in the weak cell or allocation site.
@@ -576,7 +540,8 @@ class UnlinkWeakNextScope {
DisallowHeapAllocation no_gc_;
};
-void Serializer::ObjectSerializer::Serialize() {
+template <class AllocatorT>
+void Serializer<AllocatorT>::ObjectSerializer::Serialize() {
if (FLAG_trace_serializer) {
PrintF(" Encoding heap object: ");
object_->ShortPrint();
@@ -614,10 +579,11 @@ void Serializer::ObjectSerializer::Serialize() {
Script::cast(object_)->set_line_ends(undefined);
}
- SerializeContent();
+ SerializeObject();
}
-void Serializer::ObjectSerializer::SerializeContent() {
+template <class AllocatorT>
+void Serializer<AllocatorT>::ObjectSerializer::SerializeObject() {
int size = object_->Size();
Map* map = object_->map();
AllocationSpace space =
@@ -638,13 +604,11 @@ void Serializer::ObjectSerializer::SerializeContent() {
return;
}
- UnlinkWeakNextScope unlink_weak_next(object_);
-
- object_->IterateBody(map->instance_type(), size, this);
- OutputRawData(object_->address() + size);
+ SerializeContent(map, size);
}
-void Serializer::ObjectSerializer::SerializeDeferred() {
+template <class AllocatorT>
+void Serializer<AllocatorT>::ObjectSerializer::SerializeDeferred() {
if (FLAG_trace_serializer) {
PrintF(" Encoding deferred heap object: ");
object_->ShortPrint();
@@ -666,14 +630,32 @@ void Serializer::ObjectSerializer::SerializeDeferred() {
serializer_->PutBackReference(object_, back_reference);
sink_->PutInt(size >> kPointerSizeLog2, "deferred object size");
- UnlinkWeakNextScope unlink_weak_next(object_);
+ SerializeContent(map, size);
+}
- object_->IterateBody(map->instance_type(), size, this);
- OutputRawData(object_->address() + size);
+template <class AllocatorT>
+void Serializer<AllocatorT>::ObjectSerializer::SerializeContent(Map* map,
+ int size) {
+ UnlinkWeakNextScope unlink_weak_next(object_);
+ if (object_->IsCode()) {
+ // For code objects, output raw bytes first.
+ OutputCode(size);
+ // Then iterate references via reloc info.
+ object_->IterateBody(map->instance_type(), size, this);
+ // Finally skip to the end.
+ serializer_->FlushSkip(SkipTo(object_->address() + size));
+ } else {
+ // For other objects, iterate references first.
+ object_->IterateBody(map->instance_type(), size, this);
+ // Then output data payload, if any.
+ OutputRawData(object_->address() + size);
+ }
}
-void Serializer::ObjectSerializer::VisitPointers(HeapObject* host,
- Object** start, Object** end) {
+template <class AllocatorT>
+void Serializer<AllocatorT>::ObjectSerializer::VisitPointers(HeapObject* host,
+ Object** start,
+ Object** end) {
Object** current = start;
while (current < end) {
while (current < end && (*current)->IsSmi()) current++;
@@ -711,10 +693,10 @@ void Serializer::ObjectSerializer::VisitPointers(HeapObject* host,
}
}
-void Serializer::ObjectSerializer::VisitEmbeddedPointer(Code* host,
- RelocInfo* rinfo) {
- int skip = OutputRawData(rinfo->target_address_address(),
- kCanReturnSkipInsteadOfSkipping);
+template <class AllocatorT>
+void Serializer<AllocatorT>::ObjectSerializer::VisitEmbeddedPointer(
+ Code* host, RelocInfo* rinfo) {
+ int skip = SkipTo(rinfo->target_address_address());
HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
Object* object = rinfo->target_object();
serializer_->SerializeObject(HeapObject::cast(object), how_to_code,
@@ -722,10 +704,10 @@ void Serializer::ObjectSerializer::VisitEmbeddedPointer(Code* host,
bytes_processed_so_far_ += rinfo->target_address_size();
}
-void Serializer::ObjectSerializer::VisitExternalReference(Foreign* host,
- Address* p) {
- int skip = OutputRawData(reinterpret_cast<Address>(p),
- kCanReturnSkipInsteadOfSkipping);
+template <class AllocatorT>
+void Serializer<AllocatorT>::ObjectSerializer::VisitExternalReference(
+ Foreign* host, Address* p) {
+ int skip = SkipTo(reinterpret_cast<Address>(p));
Address target = *p;
auto encoded_reference = serializer_->EncodeExternalReference(target);
if (encoded_reference.is_from_api()) {
@@ -738,10 +720,10 @@ void Serializer::ObjectSerializer::VisitExternalReference(Foreign* host,
bytes_processed_so_far_ += kPointerSize;
}
-void Serializer::ObjectSerializer::VisitExternalReference(Code* host,
- RelocInfo* rinfo) {
- int skip = OutputRawData(rinfo->target_address_address(),
- kCanReturnSkipInsteadOfSkipping);
+template <class AllocatorT>
+void Serializer<AllocatorT>::ObjectSerializer::VisitExternalReference(
+ Code* host, RelocInfo* rinfo) {
+ int skip = SkipTo(rinfo->target_address_address());
Address target = rinfo->target_external_reference();
auto encoded_reference = serializer_->EncodeExternalReference(target);
if (encoded_reference.is_from_api()) {
@@ -758,10 +740,9 @@ void Serializer::ObjectSerializer::VisitExternalReference(Code* host,
bytes_processed_so_far_ += rinfo->target_address_size();
}
-void Serializer::ObjectSerializer::VisitInternalReference(Code* host,
- RelocInfo* rinfo) {
- // We can only reference to internal references of code that has been output.
- DCHECK(object_->IsCode() && code_has_been_output_);
+template <class AllocatorT>
+void Serializer<AllocatorT>::ObjectSerializer::VisitInternalReference(
+ Code* host, RelocInfo* rinfo) {
// We do not use skip from last patched pc to find the pc to patch, since
// target_address_address may not return addresses in ascending order when
// used for internal references. External references may be stored at the
@@ -783,10 +764,10 @@ void Serializer::ObjectSerializer::VisitInternalReference(Code* host,
sink_->PutInt(static_cast<uintptr_t>(target_offset), "internal ref value");
}
-void Serializer::ObjectSerializer::VisitRuntimeEntry(Code* host,
- RelocInfo* rinfo) {
- int skip = OutputRawData(rinfo->target_address_address(),
- kCanReturnSkipInsteadOfSkipping);
+template <class AllocatorT>
+void Serializer<AllocatorT>::ObjectSerializer::VisitRuntimeEntry(
+ Code* host, RelocInfo* rinfo) {
+ int skip = SkipTo(rinfo->target_address_address());
HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
Address target = rinfo->target_address();
auto encoded_reference = serializer_->EncodeExternalReference(target);
@@ -797,16 +778,57 @@ void Serializer::ObjectSerializer::VisitRuntimeEntry(Code* host,
bytes_processed_so_far_ += rinfo->target_address_size();
}
-void Serializer::ObjectSerializer::VisitCodeTarget(Code* host,
- RelocInfo* rinfo) {
- int skip = OutputRawData(rinfo->target_address_address(),
- kCanReturnSkipInsteadOfSkipping);
+template <class AllocatorT>
+void Serializer<AllocatorT>::ObjectSerializer::VisitCodeTarget(
+ Code* host, RelocInfo* rinfo) {
+ int skip = SkipTo(rinfo->target_address_address());
Code* object = Code::GetCodeFromTargetAddress(rinfo->target_address());
serializer_->SerializeObject(object, kFromCode, kInnerPointer, skip);
bytes_processed_so_far_ += rinfo->target_address_size();
}
-Address Serializer::ObjectSerializer::PrepareCode() {
+template <class AllocatorT>
+void Serializer<AllocatorT>::ObjectSerializer::OutputRawData(Address up_to) {
+ Address object_start = object_->address();
+ int base = bytes_processed_so_far_;
+ int up_to_offset = static_cast<int>(up_to - object_start);
+ int to_skip = up_to_offset - bytes_processed_so_far_;
+ int bytes_to_output = to_skip;
+ bytes_processed_so_far_ += to_skip;
+ DCHECK(to_skip >= 0);
+ if (bytes_to_output != 0) {
+ DCHECK(to_skip == bytes_to_output);
+ if (IsAligned(bytes_to_output, kPointerAlignment) &&
+ bytes_to_output <= kNumberOfFixedRawData * kPointerSize) {
+ int size_in_words = bytes_to_output >> kPointerSizeLog2;
+ sink_->PutSection(kFixedRawDataStart + size_in_words, "FixedRawData");
+ } else {
+ sink_->Put(kVariableRawData, "VariableRawData");
+ sink_->PutInt(bytes_to_output, "length");
+ }
+#ifdef MEMORY_SANITIZER
+ // Check that we do not serialize uninitialized memory.
+ __msan_check_mem_is_initialized(object_start + base, bytes_to_output);
+#endif // MEMORY_SANITIZER
+ sink_->PutRaw(object_start + base, bytes_to_output, "Bytes");
+ }
+}
+
+template <class AllocatorT>
+int Serializer<AllocatorT>::ObjectSerializer::SkipTo(Address to) {
+ Address object_start = object_->address();
+ int up_to_offset = static_cast<int>(to - object_start);
+ int to_skip = up_to_offset - bytes_processed_so_far_;
+ bytes_processed_so_far_ += to_skip;
+ // This assert will fail if the reloc info gives us the target_address_address
+ // locations in a non-ascending order. Luckily that doesn't happen.
+ DCHECK(to_skip >= 0);
+ return to_skip;
+}
+
+template <class AllocatorT>
+void Serializer<AllocatorT>::ObjectSerializer::OutputCode(int size) {
+ DCHECK_EQ(kPointerSize, bytes_processed_so_far_);
Code* code = Code::cast(object_);
if (FLAG_predictable) {
// To make snapshots reproducible, we make a copy of the code object
@@ -826,57 +848,22 @@ Address Serializer::ObjectSerializer::PrepareCode() {
// relocations, because some of these fields are needed for the latter.
code->WipeOutHeader();
}
- return code->address();
-}
-int Serializer::ObjectSerializer::OutputRawData(
- Address up_to, Serializer::ObjectSerializer::ReturnSkip return_skip) {
- Address object_start = object_->address();
- int base = bytes_processed_so_far_;
- int up_to_offset = static_cast<int>(up_to - object_start);
- int to_skip = up_to_offset - bytes_processed_so_far_;
- int bytes_to_output = to_skip;
- bytes_processed_so_far_ += to_skip;
- // This assert will fail if the reloc info gives us the target_address_address
- // locations in a non-ascending order. Luckily that doesn't happen.
- DCHECK(to_skip >= 0);
- bool outputting_code = false;
- bool is_code_object = object_->IsCode();
- if (to_skip != 0 && is_code_object && !code_has_been_output_) {
- // Output the code all at once and fix later.
- bytes_to_output = object_->Size() + to_skip - bytes_processed_so_far_;
- outputting_code = true;
- code_has_been_output_ = true;
- }
- if (bytes_to_output != 0 && (!is_code_object || outputting_code)) {
- if (!outputting_code && bytes_to_output == to_skip &&
- IsAligned(bytes_to_output, kPointerAlignment) &&
- bytes_to_output <= kNumberOfFixedRawData * kPointerSize) {
- int size_in_words = bytes_to_output >> kPointerSizeLog2;
- sink_->PutSection(kFixedRawDataStart + size_in_words, "FixedRawData");
- to_skip = 0; // This instruction includes skip.
- } else {
- // We always end up here if we are outputting the code of a code object.
- sink_->Put(kVariableRawData, "VariableRawData");
- sink_->PutInt(bytes_to_output, "length");
- }
+ Address start = code->address() + Code::kDataStart;
+ int bytes_to_output = size - Code::kDataStart;
- if (is_code_object) object_start = PrepareCode();
+ sink_->Put(kVariableRawCode, "VariableRawCode");
+ sink_->PutInt(bytes_to_output, "length");
- const char* description = is_code_object ? "Code" : "Byte";
#ifdef MEMORY_SANITIZER
- // Check that we do not serialize uninitialized memory.
- __msan_check_mem_is_initialized(object_start + base, bytes_to_output);
+ // Check that we do not serialize uninitialized memory.
+ __msan_check_mem_is_initialized(start, bytes_to_output);
#endif // MEMORY_SANITIZER
- sink_->PutRaw(object_start + base, bytes_to_output, description);
- }
- if (to_skip != 0 && return_skip == kIgnoringReturn) {
- sink_->Put(kSkip, "Skip");
- sink_->PutInt(to_skip, "SkipDistance");
- to_skip = 0;
- }
- return to_skip;
+ sink_->PutRaw(start, bytes_to_output, "Code");
}
+// Explicit instantiation.
+template class Serializer<DefaultSerializerAllocator>;
+
} // namespace internal
} // namespace v8
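
The OutputRawData/SkipTo/OutputCode split above keeps the old opcode choice for plain raw runs: short, pointer-aligned runs fold their length into a single FixedRawData opcode, while anything else emits VariableRawData followed by an explicit length. A minimal sketch of that size-based decision, reassembled from the removed hunk (the constants are the real serializer ones; the helper itself is illustrative only):

void PutRawDataHeader(SnapshotByteSink* sink, int bytes_to_output) {
  if (IsAligned(bytes_to_output, kPointerAlignment) &&
      bytes_to_output <= kNumberOfFixedRawData * kPointerSize) {
    // The length rides along in the opcode itself.
    int size_in_words = bytes_to_output >> kPointerSizeLog2;
    sink->PutSection(kFixedRawDataStart + size_in_words, "FixedRawData");
  } else {
    // Longer or unaligned runs pay for an explicit length field.
    sink->Put(kVariableRawData, "VariableRawData");
    sink->PutInt(bytes_to_output, "length");
  }
}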
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index a1924b4f7a..1fe607b530 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -10,6 +10,7 @@
#include "src/isolate.h"
#include "src/log.h"
#include "src/objects.h"
+#include "src/snapshot/default-serializer-allocator.h"
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot-source-sink.h"
@@ -119,24 +120,23 @@ class CodeAddressMap : public CodeEventLogger {
Isolate* isolate_;
};
-// There can be only one serializer per V8 process.
+template <class AllocatorT = DefaultSerializerAllocator>
class Serializer : public SerializerDeserializer {
public:
explicit Serializer(Isolate* isolate);
~Serializer() override;
- void EncodeReservations(std::vector<SerializedData::Reservation>* out) const;
-
- void SerializeDeferredObjects();
+ std::vector<SerializedData::Reservation> EncodeReservations() const {
+ return allocator_.EncodeReservations();
+ }
- Isolate* isolate() const { return isolate_; }
+ const std::vector<byte>* Payload() const { return sink_.data(); }
- SerializerReferenceMap* reference_map() { return &reference_map_; }
- RootIndexMap* root_index_map() { return &root_index_map_; }
+ bool ReferenceMapContains(HeapObject* o) {
+ return reference_map()->Lookup(o).is_valid();
+ }
-#ifdef OBJECT_PRINT
- void CountInstanceType(Map* map, int size);
-#endif // OBJECT_PRINT
+ Isolate* isolate() const { return isolate_; }
protected:
class ObjectSerializer;
@@ -155,6 +155,7 @@ class Serializer : public SerializerDeserializer {
Serializer* serializer_;
};
+ void SerializeDeferredObjects();
virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) = 0;
@@ -164,16 +165,13 @@ class Serializer : public SerializerDeserializer {
void PutRoot(int index, HeapObject* object, HowToCode how, WhereToPoint where,
int skip);
-
void PutSmi(Smi* smi);
-
void PutBackReference(HeapObject* object, SerializerReference reference);
-
void PutAttachedReference(SerializerReference reference,
HowToCode how_to_code, WhereToPoint where_to_point);
-
// Emit alignment prefix if necessary, return required padding space in bytes.
int PutAlignmentPrefix(HeapObject* object);
+ void PutNextChunk(int space);
// Returns true if the object was successfully serialized as hot object.
bool SerializeHotObject(HeapObject* obj, HowToCode how_to_code,
@@ -183,6 +181,18 @@ class Serializer : public SerializerDeserializer {
bool SerializeBackReference(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip);
+ // Determines whether the interpreter trampoline is replaced by CompileLazy.
+ enum BuiltinReferenceSerializationMode {
+ kDefault,
+ kCanonicalizeCompileLazy,
+ };
+
+ // Returns true if the object was successfully serialized as a builtin
+ // reference.
+ bool SerializeBuiltinReference(
+ HeapObject* obj, HowToCode how_to_code, WhereToPoint where_to_point,
+ int skip, BuiltinReferenceSerializationMode mode = kDefault);
+
inline void FlushSkip(int skip) {
if (skip != 0) {
sink_.Put(kSkip, "SkipFromSerializeObject");
@@ -190,17 +200,10 @@ class Serializer : public SerializerDeserializer {
}
}
- // This will return the space for an object.
- SerializerReference AllocateOffHeapBackingStore();
- SerializerReference AllocateLargeObject(int size);
- SerializerReference AllocateMap();
- SerializerReference Allocate(AllocationSpace space, int size);
ExternalReferenceEncoder::Value EncodeExternalReference(Address addr) {
return external_reference_encoder_.Encode(addr);
}
- bool HasNotExceededFirstPageOfEachSpace();
-
// GetInt reads 4 bytes at once, requiring padding at the end.
void Pad();
@@ -210,14 +213,6 @@ class Serializer : public SerializerDeserializer {
Code* CopyCode(Code* code);
- inline uint32_t max_chunk_size(int space) const {
- DCHECK_LE(0, space);
- DCHECK_LT(space, kNumberOfSpaces);
- return max_chunk_size_[space];
- }
-
- const SnapshotByteSink* sink() const { return &sink_; }
-
void QueueDeferredObject(HeapObject* obj) {
DCHECK(reference_map_.Lookup(obj).is_back_reference());
deferred_objects_.push_back(obj);
@@ -225,56 +220,32 @@ class Serializer : public SerializerDeserializer {
void OutputStatistics(const char* name);
+#ifdef OBJECT_PRINT
+ void CountInstanceType(Map* map, int size);
+#endif // OBJECT_PRINT
+
#ifdef DEBUG
void PushStack(HeapObject* o) { stack_.push_back(o); }
void PopStack() { stack_.pop_back(); }
void PrintStack();
-
- bool BackReferenceIsAlreadyAllocated(SerializerReference back_reference);
#endif // DEBUG
- Isolate* isolate_;
+ SerializerReferenceMap* reference_map() { return &reference_map_; }
+ RootIndexMap* root_index_map() { return &root_index_map_; }
+ AllocatorT* allocator() { return &allocator_; }
- SnapshotByteSink sink_;
- ExternalReferenceEncoder external_reference_encoder_;
+ SnapshotByteSink sink_; // Used directly by subclasses.
+ private:
+ Isolate* isolate_;
SerializerReferenceMap reference_map_;
+ ExternalReferenceEncoder external_reference_encoder_;
RootIndexMap root_index_map_;
-
- int recursion_depth_;
-
- friend class Deserializer;
- friend class ObjectSerializer;
- friend class RecursionScope;
- friend class SnapshotData;
-
- private:
- CodeAddressMap* code_address_map_;
- // Objects from the same space are put into chunks for bulk-allocation
- // when deserializing. We have to make sure that each chunk fits into a
- // page. So we track the chunk size in pending_chunk_ of a space, but
- // when it exceeds a page, we complete the current chunk and start a new one.
- uint32_t pending_chunk_[kNumberOfPreallocatedSpaces];
- std::vector<uint32_t> completed_chunks_[kNumberOfPreallocatedSpaces];
- uint32_t max_chunk_size_[kNumberOfPreallocatedSpaces];
- // Number of maps that we need to allocate.
- uint32_t num_maps_;
-
- // We map serialized large objects to indexes for back-referencing.
- uint32_t large_objects_total_size_;
- uint32_t seen_large_objects_index_;
-
- // Used to keep track of the off-heap backing stores used by TypedArrays/
- // ArrayBuffers. Note that the index begins at 1 and not 0, because when a
- // TypedArray has an on-heap backing store, the backing_store pointer in the
- // corresponding ArrayBuffer will be null, which makes it indistinguishable
- // from index 0.
- uint32_t seen_backing_stores_index_;
-
+ CodeAddressMap* code_address_map_ = nullptr;
std::vector<byte> code_buffer_;
-
- // To handle stack overflow.
- std::vector<HeapObject*> deferred_objects_;
+ std::vector<HeapObject*> deferred_objects_; // To handle stack overflow.
+ int recursion_depth_ = 0;
+ AllocatorT allocator_;
#ifdef OBJECT_PRINT
static const int kInstanceTypes = 256;
@@ -286,10 +257,13 @@ class Serializer : public SerializerDeserializer {
std::vector<HeapObject*> stack_;
#endif // DEBUG
+ friend class DefaultSerializerAllocator;
+
DISALLOW_COPY_AND_ASSIGN(Serializer);
};
-class Serializer::ObjectSerializer : public ObjectVisitor {
+template <class AllocatorT>
+class Serializer<AllocatorT>::ObjectSerializer : public ObjectVisitor {
public:
ObjectSerializer(Serializer* serializer, HeapObject* obj,
SnapshotByteSink* sink, HowToCode how_to_code,
@@ -298,8 +272,7 @@ class Serializer::ObjectSerializer : public ObjectVisitor {
object_(obj),
sink_(sink),
reference_representation_(how_to_code + where_to_point),
- bytes_processed_so_far_(0),
- code_has_been_output_(false) {
+ bytes_processed_so_far_(0) {
#ifdef DEBUG
serializer_->PushStack(obj);
#endif // DEBUG
@@ -310,7 +283,7 @@ class Serializer::ObjectSerializer : public ObjectVisitor {
#endif // DEBUG
}
void Serialize();
- void SerializeContent();
+ void SerializeObject();
void SerializeDeferred();
void VisitPointers(HeapObject* host, Object** start, Object** end) override;
void VisitEmbeddedPointer(Code* host, RelocInfo* target) override;
@@ -323,13 +296,12 @@ class Serializer::ObjectSerializer : public ObjectVisitor {
private:
void SerializePrologue(AllocationSpace space, int size, Map* map);
-
- enum ReturnSkip { kCanReturnSkipInsteadOfSkipping, kIgnoringReturn };
// This function outputs or skips the raw data between the last pointer and
- // up to the current position. It optionally can just return the number of
- // bytes to skip instead of performing a skip instruction, in case the skip
- // can be merged into the next instruction.
- int OutputRawData(Address up_to, ReturnSkip return_skip = kIgnoringReturn);
+ // the current position.
+ void SerializeContent(Map* map, int size);
+ void OutputRawData(Address up_to);
+ void OutputCode(int size);
+ int SkipTo(Address to);
int32_t SerializeBackingStore(void* backing_store, int32_t byte_length);
void FixupIfNeutered();
void SerializeJSArrayBuffer();
@@ -337,15 +309,12 @@ class Serializer::ObjectSerializer : public ObjectVisitor {
void SerializeExternalString();
void SerializeExternalStringAsSequentialString();
- Address PrepareCode();
-
Serializer* serializer_;
HeapObject* object_;
SnapshotByteSink* sink_;
std::map<void*, Smi*> backing_stores;
int reference_representation_;
int bytes_processed_so_far_;
- bool code_has_been_output_;
};
} // namespace internal
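
With chunk and space bookkeeping moved behind AllocatorT, a concrete serializer now only chooses an allocator (the default template argument keeps existing subclasses source-compatible, cf. StartupSerializer : public Serializer<> further down) and decides how each object is encoded. A hypothetical subclass, sketched against the interface above:

class MySerializer : public Serializer<DefaultSerializerAllocator> {
 public:
  explicit MySerializer(Isolate* isolate) : Serializer(isolate) {}

 private:
  void SerializeObject(HeapObject* obj, HowToCode how_to_code,
                       WhereToPoint where_to_point, int skip) override {
    // Allocation is delegated to allocator(); this hook only picks an
    // encoding for the object.
    if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
    if (SerializeBackReference(obj, how_to_code, where_to_point, skip)) return;
    FlushSkip(skip);
    ObjectSerializer object_serializer(this, obj, &sink_, how_to_code,
                                       where_to_point);
    object_serializer.Serialize();
  }
};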
diff --git a/deps/v8/src/snapshot/snapshot-common.cc b/deps/v8/src/snapshot/snapshot-common.cc
index 94b9f4c7a4..da528a50ba 100644
--- a/deps/v8/src/snapshot/snapshot-common.cc
+++ b/deps/v8/src/snapshot/snapshot-common.cc
@@ -9,6 +9,8 @@
#include "src/api.h"
#include "src/base/platform/platform.h"
#include "src/objects-inl.h"
+#include "src/snapshot/builtin-deserializer.h"
+#include "src/snapshot/builtin-serializer.h"
#include "src/snapshot/partial-deserializer.h"
#include "src/snapshot/snapshot-source-sink.h"
#include "src/snapshot/startup-deserializer.h"
@@ -19,7 +21,7 @@ namespace v8 {
namespace internal {
#ifdef DEBUG
-bool Snapshot::SnapshotIsValid(v8::StartupData* snapshot_blob) {
+bool Snapshot::SnapshotIsValid(const v8::StartupData* snapshot_blob) {
return Snapshot::ExtractNumContexts(snapshot_blob) > 0;
}
#endif // DEBUG
@@ -39,9 +41,13 @@ bool Snapshot::Initialize(Isolate* isolate) {
if (FLAG_profile_deserialization) timer.Start();
const v8::StartupData* blob = isolate->snapshot_blob();
+ CheckVersion(blob);
Vector<const byte> startup_data = ExtractStartupData(blob);
- SnapshotData snapshot_data(startup_data);
- StartupDeserializer deserializer(&snapshot_data);
+ SnapshotData startup_snapshot_data(startup_data);
+ Vector<const byte> builtin_data = ExtractBuiltinData(blob);
+ BuiltinSnapshotData builtin_snapshot_data(builtin_data);
+ StartupDeserializer deserializer(&startup_snapshot_data,
+ &builtin_snapshot_data);
deserializer.SetRehashability(ExtractRehashability(blob));
bool success = isolate->Init(&deserializer);
if (FLAG_profile_deserialization) {
@@ -81,8 +87,35 @@ MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
return result;
}
+// static
+Code* Snapshot::DeserializeBuiltin(Isolate* isolate, int builtin_id) {
+ base::ElapsedTimer timer;
+ if (FLAG_profile_deserialization) timer.Start();
+
+ const v8::StartupData* blob = isolate->snapshot_blob();
+ Vector<const byte> builtin_data = Snapshot::ExtractBuiltinData(blob);
+ BuiltinSnapshotData builtin_snapshot_data(builtin_data);
+
+ BuiltinDeserializer builtin_deserializer(isolate, &builtin_snapshot_data);
+ builtin_deserializer.ReserveAndInitializeBuiltinsTableForBuiltin(builtin_id);
+
+ DisallowHeapAllocation no_gc;
+
+ Code* code = builtin_deserializer.DeserializeBuiltin(builtin_id);
+ DCHECK_EQ(code, isolate->builtins()->builtin(builtin_id));
+
+ if (FLAG_profile_deserialization) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ int bytes = code->Size();
+ PrintF("[Deserializing builtin %s (%d bytes) took %0.3f ms]\n",
+ Builtins::name(builtin_id), bytes, ms);
+ }
+
+ return code;
+}
+
void ProfileDeserialization(
- const SnapshotData* startup_snapshot,
+ const SnapshotData* startup_snapshot, const SnapshotData* builtin_snapshot,
const std::vector<SnapshotData*>& context_snapshots) {
if (FLAG_profile_deserialization) {
int startup_total = 0;
@@ -90,6 +123,9 @@ void ProfileDeserialization(
for (const auto& reservation : startup_snapshot->Reservations()) {
startup_total += reservation.chunk_size();
}
+ for (const auto& reservation : builtin_snapshot->Reservations()) {
+ startup_total += reservation.chunk_size();
+ }
PrintF("%10d bytes per isolate\n", startup_total);
for (size_t i = 0; i < context_snapshots.size(); i++) {
int context_total = 0;
@@ -103,21 +139,30 @@ void ProfileDeserialization(
v8::StartupData Snapshot::CreateSnapshotBlob(
const SnapshotData* startup_snapshot,
+ const BuiltinSnapshotData* builtin_snapshot,
const std::vector<SnapshotData*>& context_snapshots, bool can_be_rehashed) {
uint32_t num_contexts = static_cast<uint32_t>(context_snapshots.size());
uint32_t startup_snapshot_offset = StartupSnapshotOffset(num_contexts);
uint32_t total_length = startup_snapshot_offset;
total_length += static_cast<uint32_t>(startup_snapshot->RawData().length());
+ total_length += static_cast<uint32_t>(builtin_snapshot->RawData().length());
for (const auto context_snapshot : context_snapshots) {
total_length += static_cast<uint32_t>(context_snapshot->RawData().length());
}
- ProfileDeserialization(startup_snapshot, context_snapshots);
+ ProfileDeserialization(startup_snapshot, builtin_snapshot, context_snapshots);
char* data = new char[total_length];
SetHeaderValue(data, kNumberOfContextsOffset, num_contexts);
SetHeaderValue(data, kRehashabilityOffset, can_be_rehashed ? 1 : 0);
- uint32_t payload_offset = StartupSnapshotOffset(num_contexts);
+
+ // Write version string into snapshot data.
+ memset(data + kVersionStringOffset, 0, kVersionStringLength);
+ Version::GetString(
+ Vector<char>(data + kVersionStringOffset, kVersionStringLength));
+
+ // Startup snapshot (isolate-specific data).
+ uint32_t payload_offset = startup_snapshot_offset;
uint32_t payload_length =
static_cast<uint32_t>(startup_snapshot->RawData().length());
CopyBytes(data + payload_offset,
@@ -128,6 +173,19 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
payload_length);
}
payload_offset += payload_length;
+
+ // Builtins.
+ SetHeaderValue(data, kBuiltinOffsetOffset, payload_offset);
+ payload_length = builtin_snapshot->RawData().length();
+ CopyBytes(data + payload_offset,
+ reinterpret_cast<const char*>(builtin_snapshot->RawData().start()),
+ payload_length);
+ if (FLAG_profile_deserialization) {
+ PrintF("%10d bytes for builtins\n", payload_length);
+ }
+ payload_offset += payload_length;
+
+ // Partial snapshots (context-specific data).
for (uint32_t i = 0; i < num_contexts; i++) {
SetHeaderValue(data, ContextSnapshotOffsetOffset(i), payload_offset);
SnapshotData* context_snapshot = context_snapshots[i];
@@ -143,6 +201,7 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
}
v8::StartupData result = {data, static_cast<int>(total_length)};
+ DCHECK_EQ(total_length, payload_offset);
return result;
}
@@ -171,14 +230,31 @@ Vector<const byte> Snapshot::ExtractStartupData(const v8::StartupData* data) {
uint32_t num_contexts = ExtractNumContexts(data);
uint32_t startup_offset = StartupSnapshotOffset(num_contexts);
CHECK_LT(startup_offset, data->raw_size);
- uint32_t first_context_offset = ExtractContextOffset(data, 0);
- CHECK_LT(first_context_offset, data->raw_size);
- uint32_t startup_length = first_context_offset - startup_offset;
+ uint32_t builtin_offset = GetHeaderValue(data, kBuiltinOffsetOffset);
+ CHECK_LT(builtin_offset, data->raw_size);
+ CHECK_GT(builtin_offset, startup_offset);
+ uint32_t startup_length = builtin_offset - startup_offset;
const byte* startup_data =
reinterpret_cast<const byte*>(data->data + startup_offset);
return Vector<const byte>(startup_data, startup_length);
}
+Vector<const byte> Snapshot::ExtractBuiltinData(const v8::StartupData* data) {
+ DCHECK(SnapshotIsValid(data));
+
+ uint32_t from_offset = GetHeaderValue(data, kBuiltinOffsetOffset);
+ CHECK_LT(from_offset, data->raw_size);
+
+ uint32_t to_offset = GetHeaderValue(data, ContextSnapshotOffsetOffset(0));
+ CHECK_LT(to_offset, data->raw_size);
+
+ CHECK_GT(to_offset, from_offset);
+ uint32_t length = to_offset - from_offset;
+ const byte* builtin_data =
+ reinterpret_cast<const byte*>(data->data + from_offset);
+ return Vector<const byte>(builtin_data, length);
+}
+
Vector<const byte> Snapshot::ExtractContextData(const v8::StartupData* data,
uint32_t index) {
uint32_t num_contexts = ExtractNumContexts(data);
@@ -199,11 +275,30 @@ Vector<const byte> Snapshot::ExtractContextData(const v8::StartupData* data,
return Vector<const byte>(context_data, context_length);
}
-SnapshotData::SnapshotData(const Serializer* serializer) {
+void Snapshot::CheckVersion(const v8::StartupData* data) {
+ char version[kVersionStringLength];
+ memset(version, 0, kVersionStringLength);
+ CHECK_LT(kVersionStringOffset + kVersionStringLength,
+ static_cast<uint32_t>(data->raw_size));
+ Version::GetString(Vector<char>(version, kVersionStringLength));
+ if (memcmp(version, data->data + kVersionStringOffset,
+ kVersionStringLength) != 0) {
+ V8_Fatal(__FILE__, __LINE__,
+ "Version mismatch between V8 binary and snapshot.\n"
+ "# V8 binary version: %.*s\n"
+ "# Snapshot version: %.*s\n"
+ "# The snapshot consists of %d bytes and contains %d context(s).",
+ kVersionStringLength, version, kVersionStringLength,
+ data->data + kVersionStringOffset, data->raw_size,
+ ExtractNumContexts(data));
+ }
+}
+
+template <class AllocatorT>
+SnapshotData::SnapshotData(const Serializer<AllocatorT>* serializer) {
DisallowHeapAllocation no_gc;
- std::vector<Reservation> reservations;
- serializer->EncodeReservations(&reservations);
- const std::vector<byte>* payload = serializer->sink()->data();
+ std::vector<Reservation> reservations = serializer->EncodeReservations();
+ const std::vector<byte>* payload = serializer->Payload();
// Calculate sizes.
uint32_t reservation_size =
@@ -216,7 +311,6 @@ SnapshotData::SnapshotData(const Serializer* serializer) {
// Set header values.
SetMagicNumber(serializer->isolate());
- SetHeaderValue(kVersionHashOffset, Version::Hash());
SetHeaderValue(kNumReservationsOffset, static_cast<int>(reservations.size()));
SetHeaderValue(kPayloadLengthOffset, static_cast<int>(payload->size()));
@@ -229,9 +323,9 @@ SnapshotData::SnapshotData(const Serializer* serializer) {
static_cast<size_t>(payload->size()));
}
-bool SnapshotData::IsSane() {
- return GetHeaderValue(kVersionHashOffset) == Version::Hash();
-}
+// Explicit instantiation.
+template SnapshotData::SnapshotData(
+ const Serializer<DefaultSerializerAllocator>* serializer);
Vector<const SerializedData::Reservation> SnapshotData::Reservations() const {
return Vector<const Reservation>(
@@ -248,5 +342,32 @@ Vector<const byte> SnapshotData::Payload() const {
return Vector<const byte>(payload, length);
}
+BuiltinSnapshotData::BuiltinSnapshotData(const BuiltinSerializer* serializer)
+ : SnapshotData(serializer) {}
+
+Vector<const byte> BuiltinSnapshotData::Payload() const {
+ uint32_t reservations_size =
+ GetHeaderValue(kNumReservationsOffset) * kUInt32Size;
+ const byte* payload = data_ + kHeaderSize + reservations_size;
+ int builtin_offsets_size = Builtins::builtin_count * kUInt32Size;
+ uint32_t payload_length = GetHeaderValue(kPayloadLengthOffset);
+ DCHECK_EQ(data_ + size_, payload + payload_length);
+ DCHECK_GT(payload_length, builtin_offsets_size);
+ return Vector<const byte>(payload, payload_length - builtin_offsets_size);
+}
+
+Vector<const uint32_t> BuiltinSnapshotData::BuiltinOffsets() const {
+ uint32_t reservations_size =
+ GetHeaderValue(kNumReservationsOffset) * kUInt32Size;
+ const byte* payload = data_ + kHeaderSize + reservations_size;
+ int builtin_offsets_size = Builtins::builtin_count * kUInt32Size;
+ uint32_t payload_length = GetHeaderValue(kPayloadLengthOffset);
+ DCHECK_EQ(data_ + size_, payload + payload_length);
+ DCHECK_GT(payload_length, builtin_offsets_size);
+ const uint32_t* data = reinterpret_cast<const uint32_t*>(
+ payload + payload_length - builtin_offsets_size);
+ return Vector<const uint32_t>(data, Builtins::builtin_count);
+}
+
} // namespace internal
} // namespace v8
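
All three extraction helpers reduce to interval arithmetic over the recorded header offsets: each region ends where the next region's offset begins. Reader-side summary, as a fragment using this file's helpers:

uint32_t startup_begin = StartupSnapshotOffset(ExtractNumContexts(data));
uint32_t builtin_begin = GetHeaderValue(data, kBuiltinOffsetOffset);
uint32_t context0_begin = GetHeaderValue(data, ContextSnapshotOffsetOffset(0));
// startup snapshot: [startup_begin, builtin_begin)
// builtin snapshot: [builtin_begin, context0_begin)
// context i:        [offset(i), offset(i + 1)), the last ending at raw_size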
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.h b/deps/v8/src/snapshot/snapshot-source-sink.h
index 399340ef40..584f86a760 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.h
+++ b/deps/v8/src/snapshot/snapshot-source-sink.h
@@ -64,6 +64,7 @@ class SnapshotByteSource final {
int GetBlob(const byte** data);
int position() { return position_; }
+ void set_position(int position) { position_ = position; }
private:
const byte* data_;
@@ -95,7 +96,7 @@ class SnapshotByteSink {
void PutInt(uintptr_t integer, const char* description);
void PutRaw(const byte* data, int number_of_bytes, const char* description);
- int Position() { return static_cast<int>(data_.size()); }
+ int Position() const { return static_cast<int>(data_.size()); }
const std::vector<byte>* data() const { return &data_; }
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index b91c26eccf..0c639d4c53 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -15,6 +15,7 @@ namespace internal {
// Forward declarations.
class Isolate;
+class BuiltinSerializer;
class PartialSerializer;
class StartupSerializer;
@@ -22,64 +23,99 @@ class StartupSerializer;
class SnapshotData : public SerializedData {
public:
// Used when producing.
- explicit SnapshotData(const Serializer* serializer);
+ template <class AllocatorT>
+ explicit SnapshotData(const Serializer<AllocatorT>* serializer);
// Used when consuming.
explicit SnapshotData(const Vector<const byte> snapshot)
: SerializedData(const_cast<byte*>(snapshot.begin()), snapshot.length()) {
- CHECK(IsSane());
}
Vector<const Reservation> Reservations() const;
- Vector<const byte> Payload() const;
+ virtual Vector<const byte> Payload() const;
Vector<const byte> RawData() const {
return Vector<const byte>(data_, size_);
}
- private:
- bool IsSane();
-
+ protected:
// The data header consists of uint32_t-sized entries:
// [0] magic number and (internal) external reference count
- // [1] API-provided external reference count
- // [2] version hash
- // [3] number of reservation size entries
- // [4] payload length
+ // [1] number of reservation size entries
+ // [2] payload length
// ... reservations
// ... serialized payload
static const uint32_t kNumReservationsOffset =
- kVersionHashOffset + kUInt32Size;
+ kMagicNumberOffset + kUInt32Size;
static const uint32_t kPayloadLengthOffset =
kNumReservationsOffset + kUInt32Size;
static const uint32_t kHeaderSize = kPayloadLengthOffset + kUInt32Size;
};
+class BuiltinSnapshotData final : public SnapshotData {
+ public:
+ // Used when producing.
+ // This simply forwards to the SnapshotData constructor.
+ // The BuiltinSerializer appends the builtin offset table to the payload.
+ explicit BuiltinSnapshotData(const BuiltinSerializer* serializer);
+
+ // Used when consuming.
+ explicit BuiltinSnapshotData(const Vector<const byte> snapshot)
+ : SnapshotData(snapshot) {
+ }
+
+ // Returns the serialized payload without the builtin offsets table.
+ Vector<const byte> Payload() const override;
+
+ // Returns only the builtin offsets table.
+ Vector<const uint32_t> BuiltinOffsets() const;
+
+ private:
+ // In addition to the format specified in SnapshotData, BuiltinSnapshotData
+ // includes a list of builtin offsets at the end of the serialized payload:
+ //
+ // ...
+ // ... serialized payload
+ // ... list of builtin offsets
+};
+
class Snapshot : public AllStatic {
public:
+ // ---------------- Deserialization ----------------
+
// Initialize the Isolate from the internal snapshot. Returns false if no
// snapshot could be found.
static bool Initialize(Isolate* isolate);
+
// Create a new context using the internal partial snapshot.
static MaybeHandle<Context> NewContextFromSnapshot(
Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
size_t context_index,
v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer);
- static bool HasContextSnapshot(Isolate* isolate, size_t index);
+ // Deserializes a single builtin code object. Intended to be called at
+ // runtime after the isolate (and the builtins table) has been fully
+ // initialized.
+ static Code* DeserializeBuiltin(Isolate* isolate, int builtin_id);
+ // ---------------- Helper methods ----------------
+
+ static bool HasContextSnapshot(Isolate* isolate, size_t index);
static bool EmbedsScript(Isolate* isolate);
// To be implemented by the snapshot source.
static const v8::StartupData* DefaultSnapshotBlob();
+ // ---------------- Serialization ----------------
+
static v8::StartupData CreateSnapshotBlob(
const SnapshotData* startup_snapshot,
+ const BuiltinSnapshotData* builtin_snapshot,
const std::vector<SnapshotData*>& context_snapshots,
bool can_be_rehashed);
#ifdef DEBUG
- static bool SnapshotIsValid(v8::StartupData* snapshot_blob);
+ static bool SnapshotIsValid(const v8::StartupData* snapshot_blob);
#endif // DEBUG
private:
@@ -88,6 +124,7 @@ class Snapshot : public AllStatic {
uint32_t index);
static bool ExtractRehashability(const v8::StartupData* data);
static Vector<const byte> ExtractStartupData(const v8::StartupData* data);
+ static Vector<const byte> ExtractBuiltinData(const v8::StartupData* data);
static Vector<const byte> ExtractContextData(const v8::StartupData* data,
uint32_t index);
@@ -98,14 +135,19 @@ class Snapshot : public AllStatic {
WriteLittleEndianValue(data + offset, value);
}
+ static void CheckVersion(const v8::StartupData* data);
+
// Snapshot blob layout:
// [0] number of contexts N
// [1] rehashability
- // [2] offset to context 0
- // [3] offset to context 1
+ // [2] (64 bytes) version string
+ // [3] offset to builtins
+ // [4] offset to context 0
+ // [5] offset to context 1
// ...
// ... offset to context N - 1
// ... startup snapshot data
+ // ... builtin snapshot data
// ... context 0 snapshot data
// ... context 1 snapshot data
@@ -113,8 +155,13 @@ class Snapshot : public AllStatic {
// TODO(yangguo): generalize rehashing, and remove this flag.
static const uint32_t kRehashabilityOffset =
kNumberOfContextsOffset + kUInt32Size;
- static const uint32_t kFirstContextOffsetOffset =
+ static const uint32_t kVersionStringOffset =
kRehashabilityOffset + kUInt32Size;
+ static const uint32_t kVersionStringLength = 64;
+ static const uint32_t kBuiltinOffsetOffset =
+ kVersionStringOffset + kVersionStringLength;
+ static const uint32_t kFirstContextOffsetOffset =
+ kBuiltinOffsetOffset + kUInt32Size;
static uint32_t StartupSnapshotOffset(int num_contexts) {
return kFirstContextOffsetOffset + num_contexts * kInt32Size;
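
Assuming kNumberOfContextsOffset is 0 (the pre-existing layout) and 4-byte header words, the constants above work out to:

// kRehashabilityOffset      = 0  + 4  = 4
// kVersionStringOffset      = 4  + 4  = 8
// kBuiltinOffsetOffset      = 8  + 64 = 72
// kFirstContextOffsetOffset = 72 + 4  = 76
// StartupSnapshotOffset(N)  = 76 + 4 * N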
diff --git a/deps/v8/src/snapshot/startup-deserializer.cc b/deps/v8/src/snapshot/startup-deserializer.cc
index 6746a02656..a6e9d6a203 100644
--- a/deps/v8/src/snapshot/startup-deserializer.cc
+++ b/deps/v8/src/snapshot/startup-deserializer.cc
@@ -4,8 +4,10 @@
#include "src/snapshot/startup-deserializer.h"
+#include "src/api.h"
#include "src/assembler-inl.h"
#include "src/heap/heap-inl.h"
+#include "src/snapshot/builtin-deserializer.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
@@ -13,12 +15,17 @@ namespace internal {
void StartupDeserializer::DeserializeInto(Isolate* isolate) {
Initialize(isolate);
- if (!ReserveSpace()) V8::FatalProcessOutOfMemory("StartupDeserializer");
+
+ BuiltinDeserializer builtin_deserializer(isolate, builtin_data_);
+
+ if (!Deserializer::ReserveSpace(this, &builtin_deserializer)) {
+ V8::FatalProcessOutOfMemory("StartupDeserializer");
+ }
// No active threads.
DCHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse());
// No active handles.
- DCHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
+ DCHECK(isolate->handle_scope_implementer()->blocks()->empty());
// Partial snapshot cache is not yet populated.
DCHECK(isolate->partial_snapshot_cache()->empty());
// Builtins are not yet created.
@@ -26,14 +33,22 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
{
DisallowHeapAllocation no_gc;
+
isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG_ROOT_LIST);
isolate->heap()->IterateSmiRoots(this);
isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
isolate->heap()->RepairFreeListsAfterDeserialization();
isolate->heap()->IterateWeakRoots(this, VISIT_ALL);
DeserializeDeferredObjects();
- FlushICacheForNewIsolate();
RestoreExternalReferenceRedirectors(accessor_infos());
+
+ // Deserialize eager builtins from the builtin snapshot. Note that deferred
+ // objects must have been deserialized prior to this.
+ builtin_deserializer.DeserializeEagerBuiltins();
+
+ // Flush the instruction cache for the entire code-space. Must happen after
+ // builtins deserialization.
+ FlushICacheForNewIsolate();
}
isolate->heap()->set_native_contexts_list(isolate->heap()->undefined_value());
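
The reordering in DeserializeInto encodes a strict dependency chain; restated compactly (derived from the comments above, not new behavior):

// 1. Roots: strong-only root list, smi roots, all strong roots, weak roots.
// 2. Deferred objects.
// 3. Eager builtins -- requires 2, per the comment above.
// 4. FlushICacheForNewIsolate() -- must run last, after builtin
//    deserialization has finished writing to the code space.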
diff --git a/deps/v8/src/snapshot/startup-deserializer.h b/deps/v8/src/snapshot/startup-deserializer.h
index 8ad2cccacd..269ac8b555 100644
--- a/deps/v8/src/snapshot/startup-deserializer.h
+++ b/deps/v8/src/snapshot/startup-deserializer.h
@@ -14,8 +14,9 @@ namespace internal {
// Initializes an isolate with context-independent data from a given snapshot.
class StartupDeserializer final : public Deserializer {
public:
- explicit StartupDeserializer(const SnapshotData* data)
- : Deserializer(data, false) {}
+ StartupDeserializer(const SnapshotData* startup_data,
+ const BuiltinSnapshotData* builtin_data)
+ : Deserializer(startup_data, false), builtin_data_(builtin_data) {}
// Deserialize the snapshot into an empty heap.
void DeserializeInto(Isolate* isolate);
@@ -26,6 +27,8 @@ class StartupDeserializer final : public Deserializer {
// Rehash after deserializing an isolate.
void Rehash();
+
+ const BuiltinSnapshotData* builtin_data_;
};
} // namespace internal
diff --git a/deps/v8/src/snapshot/startup-serializer.cc b/deps/v8/src/snapshot/startup-serializer.cc
index e2bd4ceadd..8fec389ee9 100644
--- a/deps/v8/src/snapshot/startup-serializer.cc
+++ b/deps/v8/src/snapshot/startup-serializer.cc
@@ -4,6 +4,7 @@
#include "src/snapshot/startup-serializer.h"
+#include "src/api.h"
#include "src/objects-inl.h"
#include "src/v8threads.h"
@@ -30,29 +31,24 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
DCHECK(!obj->IsJSFunction());
- if (clear_function_code_) {
- if (obj->IsCode()) {
- Code* code = Code::cast(obj);
- // If the function code is compiled (either as native code or bytecode),
- // replace it with lazy-compile builtin. Only exception is when we are
- // serializing the canonical interpreter-entry-trampoline builtin.
- if (code->kind() == Code::FUNCTION ||
- (!serializing_builtins_ &&
- code->is_interpreter_trampoline_builtin())) {
- obj = isolate()->builtins()->builtin(Builtins::kCompileLazy);
- }
- } else if (obj->IsBytecodeArray()) {
- obj = isolate()->heap()->undefined_value();
- }
+ if (clear_function_code() && obj->IsBytecodeArray()) {
+ obj = isolate()->heap()->undefined_value();
}
+ BuiltinReferenceSerializationMode mode =
+ (clear_function_code() && !serializing_builtins_)
+ ? kCanonicalizeCompileLazy
+ : kDefault;
+ if (SerializeBuiltinReference(obj, how_to_code, where_to_point, skip, mode)) {
+ return;
+ }
if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
- int root_index = root_index_map_.Lookup(obj);
+ int root_index = root_index_map()->Lookup(obj);
// We can only encode roots as such if it has already been serialized.
// That applies to root indices below the wave front.
if (root_index != RootIndexMap::kInvalidRootIndex) {
- if (root_has_been_serialized_.test(root_index)) {
+ if (root_has_been_serialized(root_index)) {
PutRoot(root_index, obj, how_to_code, where_to_point, skip);
return;
}
@@ -62,7 +58,7 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
FlushSkip(skip);
- if (isolate_->external_reference_redirector() && obj->IsAccessorInfo()) {
+ if (isolate()->external_reference_redirector() && obj->IsAccessorInfo()) {
// Wipe external reference redirects in the accessor info.
AccessorInfo* info = AccessorInfo::cast(obj);
Address original_address = Foreign::cast(info->getter())->foreign_address();
@@ -70,7 +66,13 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
accessor_infos_.push_back(info);
} else if (obj->IsScript() && Script::cast(obj)->IsUserJavaScript()) {
Script::cast(obj)->set_context_data(
- isolate_->heap()->uninitialized_symbol());
+ isolate()->heap()->uninitialized_symbol());
+ } else if (obj->IsSharedFunctionInfo()) {
+ // Clear inferred name for native functions.
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
+ if (!shared->IsSubjectToDebugging() && shared->HasInferredName()) {
+ shared->set_inferred_name(isolate()->heap()->empty_string());
+ }
}
if (obj->IsHashTable()) CheckRehashability(obj);
@@ -116,14 +118,14 @@ void StartupSerializer::SerializeStrongReferences() {
// No active threads.
CHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse());
// No active or weak handles.
- CHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
+ CHECK(isolate->handle_scope_implementer()->blocks()->empty());
CHECK_EQ(0, isolate->global_handles()->global_handles_count());
CHECK_EQ(0, isolate->eternal_handles()->NumberOfHandles());
// First visit immortal immovables to make sure they end up in the first page.
serializing_immortal_immovables_roots_ = true;
isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG_ROOT_LIST);
// Check that immortal immovable roots are allocated on the first page.
- CHECK(HasNotExceededFirstPageOfEachSpace());
+ DCHECK(allocator()->HasNotExceededFirstPageOfEachSpace());
serializing_immortal_immovables_roots_ = false;
// Visit the rest of the strong roots.
// Clear the stack limits to make the snapshot reproducible.
@@ -183,11 +185,11 @@ void StartupSerializer::CheckRehashability(HeapObject* table) {
// We can only correctly rehash if the four hash tables below are the only
// ones that we deserialize.
if (table->IsUnseededNumberDictionary()) return;
- if (table == isolate_->heap()->empty_ordered_hash_table()) return;
- if (table == isolate_->heap()->empty_slow_element_dictionary()) return;
- if (table == isolate_->heap()->empty_property_dictionary()) return;
- if (table == isolate_->heap()->weak_object_to_code_table()) return;
- if (table == isolate_->heap()->string_table()) return;
+ if (table == isolate()->heap()->empty_ordered_hash_table()) return;
+ if (table == isolate()->heap()->empty_slow_element_dictionary()) return;
+ if (table == isolate()->heap()->empty_property_dictionary()) return;
+ if (table == isolate()->heap()->weak_object_to_code_table()) return;
+ if (table == isolate()->heap()->string_table()) return;
can_be_rehashed_ = false;
}
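
The mode computed in SerializeObject above collapses to a small truth table:

// clear_function_code() | serializing_builtins_ | mode
// ----------------------+-----------------------+--------------------------
// true                  | false                 | kCanonicalizeCompileLazy
// true                  | true                  | kDefault
// false                 | (either)              | kDefault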
diff --git a/deps/v8/src/snapshot/startup-serializer.h b/deps/v8/src/snapshot/startup-serializer.h
index c17fba2fb4..9c575adbe1 100644
--- a/deps/v8/src/snapshot/startup-serializer.h
+++ b/deps/v8/src/snapshot/startup-serializer.h
@@ -12,7 +12,7 @@
namespace v8 {
namespace internal {
-class StartupSerializer : public Serializer {
+class StartupSerializer : public Serializer<> {
public:
StartupSerializer(
Isolate* isolate,
@@ -30,6 +30,10 @@ class StartupSerializer : public Serializer {
int PartialSnapshotCacheIndex(HeapObject* o);
bool can_be_rehashed() const { return can_be_rehashed_; }
+ bool clear_function_code() const { return clear_function_code_; }
+ bool root_has_been_serialized(int root_index) const {
+ return root_has_been_serialized_.test(root_index);
+ }
private:
class PartialCacheIndexMap {
@@ -73,7 +77,7 @@ class StartupSerializer : public Serializer {
void CheckRehashability(HeapObject* hashtable);
- bool clear_function_code_;
+ const bool clear_function_code_;
bool serializing_builtins_;
bool serializing_immortal_immovables_roots_;
std::bitset<Heap::kStrongRootListLength> root_has_been_serialized_;
diff --git a/deps/v8/src/splay-tree-inl.h b/deps/v8/src/splay-tree-inl.h
index 1adfdac6db..c18e2b0e5d 100644
--- a/deps/v8/src/splay-tree-inl.h
+++ b/deps/v8/src/splay-tree-inl.h
@@ -5,6 +5,8 @@
#ifndef V8_SPLAY_TREE_INL_H_
#define V8_SPLAY_TREE_INL_H_
+#include <vector>
+
#include "src/splay-tree.h"
namespace v8 {
@@ -278,13 +280,13 @@ template <typename Config, class Allocator> template <class Callback>
void SplayTree<Config, Allocator>::ForEachNode(Callback* callback) {
if (root_ == NULL) return;
// Pre-allocate some space for tiny trees.
- List<Node*, Allocator> nodes_to_visit(10, allocator_);
- nodes_to_visit.Add(root_, allocator_);
- int pos = 0;
- while (pos < nodes_to_visit.length()) {
+ std::vector<Node*> nodes_to_visit;
+ nodes_to_visit.push_back(root_);
+ size_t pos = 0;
+ while (pos < nodes_to_visit.size()) {
Node* node = nodes_to_visit[pos++];
- if (node->left() != NULL) nodes_to_visit.Add(node->left(), allocator_);
- if (node->right() != NULL) nodes_to_visit.Add(node->right(), allocator_);
+ if (node->left() != NULL) nodes_to_visit.push_back(node->left());
+ if (node->right() != NULL) nodes_to_visit.push_back(node->right());
callback->Call(node);
}
}
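
A standalone rendering of the rewritten traversal, which visits nodes in FIFO (level) order. Note that std::vector drops the old List's pre-allocation of ten entries; the reserve() call shown here restores it (Node stands in for the tree's node type):

#include <vector>

template <class Node, class Callback>
void ForEachNodeFifo(Node* root, Callback* callback) {
  if (root == nullptr) return;
  std::vector<Node*> nodes_to_visit;
  nodes_to_visit.reserve(10);  // keep the old "tiny tree" pre-allocation
  nodes_to_visit.push_back(root);
  size_t pos = 0;
  while (pos < nodes_to_visit.size()) {
    Node* node = nodes_to_visit[pos++];
    if (node->left() != nullptr) nodes_to_visit.push_back(node->left());
    if (node->right() != nullptr) nodes_to_visit.push_back(node->right());
    callback->Call(node);  // visit after enqueueing both children
  }
}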
diff --git a/deps/v8/src/transitions-inl.h b/deps/v8/src/transitions-inl.h
index b9690e8b86..05178ac5f6 100644
--- a/deps/v8/src/transitions-inl.h
+++ b/deps/v8/src/transitions-inl.h
@@ -18,10 +18,8 @@ WeakCell* TransitionsAccessor::GetTargetCell() {
if (target_cell_ != nullptr) return target_cell_;
if (enc == kWeakCell) {
target_cell_ = WeakCell::cast(raw_transitions_);
- } else if (enc == kTuple3Handler) {
- target_cell_ = StoreHandler::GetTuple3TransitionCell(raw_transitions_);
- } else if (enc == kFixedArrayHandler) {
- target_cell_ = StoreHandler::GetArrayTransitionCell(raw_transitions_);
+ } else if (enc == kHandler) {
+ target_cell_ = StoreHandler::GetTransitionCell(raw_transitions_);
} else {
UNREACHABLE();
}
@@ -84,11 +82,8 @@ Name* TransitionsAccessor::GetKey(int transition_number) {
case kWeakCell:
cell = GetTargetCell<kWeakCell>();
break;
- case kTuple3Handler:
- cell = GetTargetCell<kTuple3Handler>();
- break;
- case kFixedArrayHandler:
- cell = GetTargetCell<kFixedArrayHandler>();
+ case kHandler:
+ cell = GetTargetCell<kHandler>();
break;
case kFullTransitionArray:
return transitions()->GetKey(transition_number);
@@ -119,13 +114,8 @@ PropertyDetails TransitionsAccessor::GetTargetDetails(Name* name, Map* target) {
// static
Map* TransitionsAccessor::GetTargetFromRaw(Object* raw) {
- if (raw->IsMap()) return Map::cast(raw);
- if (raw->IsTuple3()) {
- return Map::cast(StoreHandler::GetTuple3TransitionCell(raw)->value());
- } else {
- DCHECK(raw->IsFixedArray());
- return Map::cast(StoreHandler::GetArrayTransitionCell(raw)->value());
- }
+ if (raw->IsWeakCell()) return Map::cast(WeakCell::cast(raw)->value());
+ return Map::cast(StoreHandler::GetTransitionCell(raw)->value());
}
Object* TransitionArray::GetRawTarget(int transition_number) {
@@ -148,11 +138,8 @@ Map* TransitionsAccessor::GetTarget(int transition_number) {
case kWeakCell:
cell = GetTargetCell<kWeakCell>();
break;
- case kTuple3Handler:
- cell = GetTargetCell<kTuple3Handler>();
- break;
- case kFixedArrayHandler:
- cell = GetTargetCell<kFixedArrayHandler>();
+ case kHandler:
+ cell = GetTargetCell<kHandler>();
break;
case kFullTransitionArray:
return transitions()->GetTarget(transition_number);
@@ -162,6 +149,7 @@ Map* TransitionsAccessor::GetTarget(int transition_number) {
}
void TransitionArray::SetTarget(int transition_number, Object* value) {
+ DCHECK(!value->IsMap());
DCHECK(transition_number < number_of_transitions());
set(ToTargetIndex(transition_number), value);
}
diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/transitions.cc
index 90f2cc01ca..28b14e1d05 100644
--- a/deps/v8/src/transitions.cc
+++ b/deps/v8/src/transitions.cc
@@ -17,10 +17,8 @@ void TransitionsAccessor::Initialize() {
encoding_ = kUninitialized;
} else if (HeapObject::cast(raw_transitions_)->IsWeakCell()) {
encoding_ = kWeakCell;
- } else if (HeapObject::cast(raw_transitions_)->IsTuple3()) {
- encoding_ = kTuple3Handler;
- } else if (HeapObject::cast(raw_transitions_)->IsFixedArray()) {
- encoding_ = kFixedArrayHandler;
+ } else if (StoreHandler::IsHandler(raw_transitions_)) {
+ encoding_ = kHandler;
} else if (HeapObject::cast(raw_transitions_)->IsTransitionArray()) {
encoding_ = kFullTransitionArray;
} else {
@@ -37,10 +35,8 @@ Map* TransitionsAccessor::GetSimpleTransition() {
switch (encoding()) {
case kWeakCell:
return Map::cast(GetTargetCell<kWeakCell>()->value());
- case kTuple3Handler:
- return Map::cast(GetTargetCell<kTuple3Handler>()->value());
- case kFixedArrayHandler:
- return Map::cast(GetTargetCell<kFixedArrayHandler>()->value());
+ case kHandler:
+ return Map::cast(GetTargetCell<kHandler>()->value());
default:
return nullptr;
}
@@ -51,10 +47,8 @@ bool TransitionsAccessor::HasSimpleTransitionTo(WeakCell* cell) {
switch (encoding()) {
case kWeakCell:
return raw_transitions_ == cell;
- case kTuple3Handler:
- return StoreHandler::GetTuple3TransitionCell(raw_transitions_) == cell;
- case kFixedArrayHandler:
- return StoreHandler::GetArrayTransitionCell(raw_transitions_) == cell;
+ case kHandler:
+ return StoreHandler::GetTransitionCell(raw_transitions_) == cell;
case kPrototypeInfo:
case kUninitialized:
case kFullTransitionArray:
@@ -68,12 +62,14 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
SimpleTransitionFlag flag) {
DCHECK(!map_handle_.is_null());
Isolate* isolate = map_->GetIsolate();
+ Handle<WeakCell> weak_cell_with_target = Map::WeakCellForMap(target);
+ Reload();
target->SetBackPointer(map_);
// If the map doesn't have any transitions at all yet, install the new one.
if (encoding() == kUninitialized) {
if (flag == SIMPLE_PROPERTY_TRANSITION) {
- ReplaceTransitions(*Map::WeakCellForMap(target));
+ ReplaceTransitions(*weak_cell_with_target);
return;
}
// If the flag requires a full TransitionArray, allocate one.
@@ -94,7 +90,7 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
if (flag == SIMPLE_PROPERTY_TRANSITION && key->Equals(*name) &&
old_details.kind() == new_details.kind() &&
old_details.attributes() == new_details.attributes()) {
- ReplaceTransitions(*Map::WeakCellForMap(target));
+ ReplaceTransitions(*weak_cell_with_target);
return;
}
// Otherwise allocate a full TransitionArray with slack for a new entry.
@@ -103,10 +99,8 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
Reload();
simple_transition = GetSimpleTransition();
if (simple_transition != nullptr) {
- Object* value = raw_transitions_->IsWeakCell()
- ? WeakCell::cast(raw_transitions_)->value()
- : raw_transitions_;
- result->Set(0, GetSimpleTransitionKey(simple_transition), value);
+ result->Set(0, GetSimpleTransitionKey(simple_transition),
+ raw_transitions_);
} else {
result->SetNumberOfTransitions(0);
}
@@ -138,7 +132,7 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
&insertion_index);
// If an existing entry was found, overwrite it and return.
if (index != kNotFound) {
- array->SetTarget(index, *target);
+ array->SetTarget(index, *weak_cell_with_target);
return;
}
@@ -154,7 +148,7 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
array->SetTarget(index, array->GetRawTarget(index - 1));
}
array->SetKey(index, *name);
- array->SetTarget(index, *target);
+ array->SetTarget(index, *weak_cell_with_target);
SLOW_DCHECK(array->IsSortedNoDuplicates());
return;
}
@@ -202,7 +196,7 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
for (int i = 0; i < insertion_index; ++i) {
result->Set(i, array->GetKey(i), array->GetRawTarget(i));
}
- result->Set(insertion_index, *name, *target);
+ result->Set(insertion_index, *name, *weak_cell_with_target);
for (int i = insertion_index; i < number_of_transitions; ++i) {
result->Set(i + 1, array->GetKey(i), array->GetRawTarget(i));
}
@@ -219,8 +213,7 @@ void TransitionsAccessor::UpdateHandler(Name* name, Object* handler) {
UNREACHABLE();
return;
case kWeakCell:
- case kTuple3Handler:
- case kFixedArrayHandler:
+ case kHandler:
DCHECK_EQ(GetSimpleTransition(), GetTargetFromRaw(handler));
ReplaceTransitions(handler);
return;
@@ -243,23 +236,23 @@ Object* TransitionsAccessor::SearchHandler(Name* name,
case kUninitialized:
case kWeakCell:
return nullptr;
- case kTuple3Handler:
- return StoreHandler::ValidTuple3HandlerOrNull(raw_transitions_, name,
- out_transition);
- case kFixedArrayHandler:
- return StoreHandler::ValidFixedArrayHandlerOrNull(raw_transitions_, name,
- out_transition);
+ case kHandler: {
+ Object* raw_handler = StoreHandler::ValidHandlerOrNull(
+ raw_transitions_, name, out_transition);
+ if (raw_handler == nullptr) return raw_handler;
+ // Check transition key.
+ WeakCell* target_cell = StoreHandler::GetTransitionCell(raw_handler);
+ if (!IsMatchingMap(target_cell, name, kData, NONE)) return nullptr;
+ return raw_handler;
+ }
+
case kFullTransitionArray: {
int transition = transitions()->Search(kData, name, NONE);
if (transition == kNotFound) return nullptr;
Object* raw_handler = transitions()->GetRawTarget(transition);
- if (raw_handler->IsTuple3()) {
- return StoreHandler::ValidTuple3HandlerOrNull(raw_handler, nullptr,
- out_transition);
- }
- if (raw_handler->IsFixedArray()) {
- return StoreHandler::ValidFixedArrayHandlerOrNull(raw_handler, nullptr,
- out_transition);
+ if (StoreHandler::IsHandler(raw_handler)) {
+ return StoreHandler::ValidHandlerOrNull(raw_handler, name,
+ out_transition);
}
return nullptr;
}
@@ -279,11 +272,8 @@ Map* TransitionsAccessor::SearchTransition(Name* name, PropertyKind kind,
case kWeakCell:
cell = GetTargetCell<kWeakCell>();
break;
- case kTuple3Handler:
- cell = GetTargetCell<kTuple3Handler>();
- break;
- case kFixedArrayHandler:
- cell = GetTargetCell<kFixedArrayHandler>();
+ case kHandler:
+ cell = GetTargetCell<kHandler>();
break;
case kFullTransitionArray: {
int transition = transitions()->Search(kind, name, attributes);
@@ -336,11 +326,8 @@ Handle<String> TransitionsAccessor::ExpectedTransitionKey() {
case kWeakCell:
cell = GetTargetCell<kWeakCell>();
break;
- case kTuple3Handler:
- cell = GetTargetCell<kTuple3Handler>();
- break;
- case kFixedArrayHandler:
- cell = GetTargetCell<kFixedArrayHandler>();
+ case kHandler:
+ cell = GetTargetCell<kHandler>();
break;
}
DCHECK(!cell->cleared());
@@ -456,6 +443,7 @@ void TransitionsAccessor::PutPrototypeTransition(Handle<Object> prototype,
int entry = header + last;
Handle<WeakCell> target_cell = Map::WeakCellForMap(target_map);
+ Reload(); // Reload after possible GC.
cache->set(entry, *target_cell);
TransitionArray::SetNumberOfPrototypeTransitions(*cache, last + 1);
}
@@ -498,8 +486,7 @@ int TransitionsAccessor::NumberOfTransitions() {
case kUninitialized:
return 0;
case kWeakCell:
- case kTuple3Handler:
- case kFixedArrayHandler:
+ case kHandler:
return 1;
case kFullTransitionArray:
return transitions()->number_of_transitions();
@@ -553,18 +540,19 @@ void TransitionsAccessor::EnsureHasFullTransitionArray() {
int nof = encoding() == kUninitialized ? 0 : 1;
Handle<TransitionArray> result =
TransitionArray::Allocate(map_->GetIsolate(), nof);
- DisallowHeapAllocation no_gc;
Reload(); // Reload after possible GC.
if (nof == 1) {
- Map* target = GetSimpleTransition();
- if (target == nullptr) {
+ if (encoding() == kUninitialized) {
// If allocation caused GC and cleared the target, trim the new array.
result->Shrink(TransitionArray::ToKeyIndex(0));
result->SetNumberOfTransitions(0);
} else {
// Otherwise populate the new array.
- Name* key = GetSimpleTransitionKey(target);
- result->Set(0, key, target);
+ Handle<Map> target(GetSimpleTransition());
+ Handle<WeakCell> weak_cell_with_target = Map::WeakCellForMap(target);
+ Reload(); // Reload after possible GC.
+ Name* key = GetSimpleTransitionKey(*target);
+ result->Set(0, key, *weak_cell_with_target);
}
}
ReplaceTransitions(*result);
@@ -581,11 +569,8 @@ void TransitionsAccessor::TraverseTransitionTreeInternal(
case kWeakCell:
simple_target = Map::cast(GetTargetCell<kWeakCell>()->value());
break;
- case kTuple3Handler:
- simple_target = Map::cast(GetTargetCell<kTuple3Handler>()->value());
- break;
- case kFixedArrayHandler:
- simple_target = Map::cast(GetTargetCell<kFixedArrayHandler>()->value());
+ case kHandler:
+ simple_target = Map::cast(GetTargetCell<kHandler>()->value());
break;
case kFullTransitionArray: {
if (transitions()->HasPrototypeTransitions()) {
diff --git a/deps/v8/src/transitions.h b/deps/v8/src/transitions.h
index dbc999845b..4fa9800571 100644
--- a/deps/v8/src/transitions.h
+++ b/deps/v8/src/transitions.h
@@ -124,8 +124,7 @@ class TransitionsAccessor {
kPrototypeInfo,
kUninitialized,
kWeakCell,
- kTuple3Handler,
- kFixedArrayHandler,
+ kHandler,
kFullTransitionArray,
};
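
The transitions.cc hunks above share one discipline now that targets are stored as weak cells: allocate the WeakCell first (which can trigger GC), then Reload() before touching raw_transitions_ or the array. Schematically:

Handle<WeakCell> cell = Map::WeakCellForMap(target);  // may allocate -> GC
Reload();                        // re-read raw_transitions_ after possible GC
array->SetTarget(index, *cell);  // SetTarget now DCHECKs against raw Maps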
diff --git a/deps/v8/src/trap-handler/handler-inside.cc b/deps/v8/src/trap-handler/handler-inside.cc
index a51bd9427f..9336636b21 100644
--- a/deps/v8/src/trap-handler/handler-inside.cc
+++ b/deps/v8/src/trap-handler/handler-inside.cc
@@ -98,38 +98,11 @@ bool TryHandleSignal(int signum, siginfo_t* info, ucontext_t* context) {
SigUnmaskStack unmask(sigs);
uintptr_t fault_addr = context->uc_mcontext.gregs[REG_RIP];
-
- // TODO(eholk): broad code range check
-
- // Taking locks in a signal handler is risky because a fault in the signal
- // handler could lead to a deadlock when attempting to acquire the lock
- // again. We guard against this case with g_thread_in_wasm_code. The lock
- // may only be taken when not executing Wasm code (an assert in
- // MetadataLock's constructor ensures this). This signal handler will bail
- // out before trying to take the lock if g_thread_in_wasm_code is not set.
- MetadataLock lock_holder;
-
- for (size_t i = 0; i < gNumCodeObjects; ++i) {
- const CodeProtectionInfo* data = gCodeObjects[i].code_info;
- if (data == nullptr) {
- continue;
- }
- const uintptr_t base = reinterpret_cast<uintptr_t>(data->base);
-
- if (fault_addr >= base && fault_addr < base + data->size) {
- // Hurray, we found the code object. Check for protected addresses.
- const ptrdiff_t offset = fault_addr - base;
-
- for (unsigned i = 0; i < data->num_protected_instructions; ++i) {
- if (data->instructions[i].instr_offset == offset) {
- // Hurray again, we found the actual instruction. Tell the caller to
- // return to the landing pad.
- context->uc_mcontext.gregs[REG_RIP] =
- data->instructions[i].landing_offset + base;
- return true;
- }
- }
- }
+ uintptr_t landing_pad = 0;
+ if (TryFindLandingPad(fault_addr, &landing_pad)) {
+ // Tell the caller to return to the landing pad.
+ context->uc_mcontext.gregs[REG_RIP] = landing_pad;
+ return true;
}
} // end signal mask scope
@@ -138,6 +111,47 @@ bool TryHandleSignal(int signum, siginfo_t* info, ucontext_t* context) {
g_thread_in_wasm_code = true;
return false;
}
+
+// This function contains the platform independent portions of fault
+// classification.
+bool TryFindLandingPad(uintptr_t fault_addr, uintptr_t* landing_pad) {
+ // TODO(eholk): broad code range check
+
+ // Taking locks in a signal handler is risky because a fault in the signal
+ // handler could lead to a deadlock when attempting to acquire the lock
+ // again. We guard against this case with g_thread_in_wasm_code. The lock
+ // may only be taken when not executing Wasm code (an assert in
+ // MetadataLock's constructor ensures this). This signal handler will bail
+ // out before trying to take the lock if g_thread_in_wasm_code is not set.
+ MetadataLock lock_holder;
+
+ for (size_t i = 0; i < gNumCodeObjects; ++i) {
+ const CodeProtectionInfo* data = gCodeObjects[i].code_info;
+ if (data == nullptr) {
+ continue;
+ }
+ const uintptr_t base = reinterpret_cast<uintptr_t>(data->base);
+
+ if (fault_addr >= base && fault_addr < base + data->size) {
+ // Hurray, we found the code object. Check for protected addresses.
+ const ptrdiff_t offset = fault_addr - base;
+
+ for (unsigned i = 0; i < data->num_protected_instructions; ++i) {
+ if (data->instructions[i].instr_offset == offset) {
+ // Hurray again, we found the actual instruction.
+ *landing_pad = data->instructions[i].landing_offset + base;
+
+ gRecoveredTrapCount.store(
+ gRecoveredTrapCount.load(std::memory_order_relaxed) + 1,
+ std::memory_order_relaxed);
+
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+}
#endif // V8_TRAP_HANDLER_SUPPORTED && V8_OS_LINUX
#if V8_TRAP_HANDLER_SUPPORTED
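
A sketch of how a handler on another platform would consume the new helper; only TryFindLandingPad is real here, and SetThreadPC stands in for whatever register manipulation that platform needs:

uintptr_t landing_pad = 0;
if (TryFindLandingPad(fault_pc, &landing_pad)) {
  SetThreadPC(context, landing_pad);  // hypothetical platform shim
  return true;                        // resume in the recovery stub
}
return false;  // not a recoverable wasm trap; fall through to crash handling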
diff --git a/deps/v8/src/trap-handler/handler-outside.cc b/deps/v8/src/trap-handler/handler-outside.cc
index ea1500e67e..5cb9661e7b 100644
--- a/deps/v8/src/trap-handler/handler-outside.cc
+++ b/deps/v8/src/trap-handler/handler-outside.cc
@@ -33,6 +33,12 @@
namespace {
size_t gNextCodeObject = 0;
+
+#if defined(DEBUG)
+const bool kEnableDebug = true;
+#else
+const bool kEnableDebug = false;
+#endif
}
namespace v8 {
@@ -47,6 +53,66 @@ constexpr size_t HandlerDataSize(size_t num_protected_instructions) {
num_protected_instructions * sizeof(ProtectedInstructionData);
}
+namespace {
+template <typename = std::enable_if<kEnableDebug>>
+bool IsDisjoint(const CodeProtectionInfo* a, const CodeProtectionInfo* b) {
+ if (a == nullptr || b == nullptr) {
+ return true;
+ }
+
+ const auto a_base = reinterpret_cast<uintptr_t>(a->base);
+ const auto b_base = reinterpret_cast<uintptr_t>(b->base);
+
+ return a_base >= b_base + b->size || b_base >= a_base + a->size;
+}
+
+// Verify that the code range does not overlap any that have already been
+// registered.
+void VerifyCodeRangeIsDisjoint(const CodeProtectionInfo* code_info) {
+ for (size_t i = 0; i < gNumCodeObjects; ++i) {
+ DCHECK(IsDisjoint(code_info, gCodeObjects[i].code_info));
+ }
+}
+
+void ValidateCodeObjects() {
+ // Sanity-check the code objects.
+ for (unsigned i = 0; i < gNumCodeObjects; ++i) {
+ const auto* data = gCodeObjects[i].code_info;
+
+ if (data == nullptr) continue;
+
+ // Do some sanity checks on the protected instruction data.
+ for (unsigned i = 0; i < data->num_protected_instructions; ++i) {
+ DCHECK_GE(data->instructions[i].instr_offset, 0);
+ DCHECK_LT(data->instructions[i].instr_offset, data->size);
+ DCHECK_GE(data->instructions[i].landing_offset, 0);
+ DCHECK_LT(data->instructions[i].landing_offset, data->size);
+ DCHECK_GT(data->instructions[i].landing_offset,
+ data->instructions[i].instr_offset);
+ }
+ }
+
+ // Check the validity of the free list.
+ size_t free_count = 0;
+ for (size_t i = gNextCodeObject; i != gNumCodeObjects;
+ i = gCodeObjects[i].next_free) {
+ DCHECK_LT(i, gNumCodeObjects);
+ ++free_count;
+ // This check will fail if we encounter a cycle.
+ DCHECK_LE(free_count, gNumCodeObjects);
+ }
+
+ // Check that all free entries are reachable via the free list.
+ size_t free_count2 = 0;
+ for (size_t i = 0; i < gNumCodeObjects; ++i) {
+ if (gCodeObjects[i].code_info == nullptr) {
+ ++free_count2;
+ }
+ }
+ DCHECK_EQ(free_count, free_count2);
+}
+} // namespace
+
CodeProtectionInfo* CreateHandlerData(
void* base, size_t size, size_t num_protected_instructions,
ProtectedInstructionData* protected_instructions) {
@@ -91,6 +157,10 @@ int RegisterHandlerData(void* base, size_t size,
MetadataLock lock;
+ if (kEnableDebug) {
+ VerifyCodeRangeIsDisjoint(data);
+ }
+
size_t i = gNextCodeObject;
// Explicitly convert std::numeric_limits<int>::max() to unsigned to avoid
@@ -111,7 +181,7 @@ int RegisterHandlerData(void* base, size_t size,
new_size = int_max;
}
if (new_size == gNumCodeObjects) {
- return -1;
+ return kInvalidIndex;
}
// Now that we know our new size is valid, we can go ahead and realloc the
@@ -125,31 +195,36 @@ int RegisterHandlerData(void* base, size_t size,
memset(gCodeObjects + gNumCodeObjects, 0,
sizeof(*gCodeObjects) * (new_size - gNumCodeObjects));
+ for (size_t j = gNumCodeObjects; j < new_size; ++j) {
+ gCodeObjects[j].next_free = j + 1;
+ }
gNumCodeObjects = new_size;
}
DCHECK(gCodeObjects[i].code_info == nullptr);
// Find out where the next entry should go.
- if (gCodeObjects[i].next_free == 0) {
- // if this is a fresh entry, use the next one.
- gNextCodeObject = i + 1;
- DCHECK(gNextCodeObject == gNumCodeObjects ||
- (gCodeObjects[gNextCodeObject].code_info == nullptr &&
- gCodeObjects[gNextCodeObject].next_free == 0));
- } else {
- gNextCodeObject = gCodeObjects[i].next_free - 1;
- }
+ gNextCodeObject = gCodeObjects[i].next_free;
if (i <= int_max) {
gCodeObjects[i].code_info = data;
+
+ if (kEnableDebug) {
+ ValidateCodeObjects();
+ }
+
return static_cast<int>(i);
} else {
- return -1;
+ return kInvalidIndex;
}
}
void ReleaseHandlerData(int index) {
+ if (index == kInvalidIndex) {
+ return;
+ }
+ DCHECK_GE(index, 0);
+
// Remove the data from the global list if it's there.
CodeProtectionInfo* data = nullptr;
{
@@ -158,12 +233,16 @@ void ReleaseHandlerData(int index) {
data = gCodeObjects[index].code_info;
gCodeObjects[index].code_info = nullptr;
- // +1 because we reserve {next_entry == 0} to indicate a fresh list entry.
- gCodeObjects[index].next_free = gNextCodeObject + 1;
+ gCodeObjects[index].next_free = gNextCodeObject;
gNextCodeObject = index;
+
+ if (kEnableDebug) {
+ ValidateCodeObjects();
+ }
}
// TODO(eholk): on debug builds, ensure there are no more copies in
// the list.
+ DCHECK_NOT_NULL(data); // make sure we're releasing legitimate handler data.
free(data);
}
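Taken together, these hunks replace the old "+1-offset" encoding with a plain index-threaded free list. A minimal standalone sketch of that discipline, with illustrative names (Entry, kCount, Acquire, Release) rather than the V8 types:

  // Sketch only: slots form a singly linked free list threaded through
  // next_free; a slot is free iff payload == nullptr.
  struct Entry { void* payload; size_t next_free; };
  constexpr size_t kCount = 16;
  Entry entries[kCount];          // assume next_free pre-initialized to i + 1
  size_t next_free_index = 0;     // plays the role of gNextCodeObject

  size_t Acquire(void* payload) {
    size_t i = next_free_index;              // pop the head of the free list
    next_free_index = entries[i].next_free;  // (== kCount when exhausted)
    entries[i].payload = payload;
    return i;
  }

  void Release(size_t i) {
    entries[i].payload = nullptr;            // mark the slot free again
    entries[i].next_free = next_free_index;  // push onto the free list
    next_free_index = i;
  }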
@@ -186,6 +265,10 @@ bool RegisterDefaultSignalHandler() {
#endif
}
+size_t GetRecoveredTrapCount() {
+ return gRecoveredTrapCount.load(std::memory_order_relaxed);
+}
+
} // namespace trap_handler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/trap-handler/handler-shared.cc b/deps/v8/src/trap-handler/handler-shared.cc
index d1b549a170..19f8b5bf68 100644
--- a/deps/v8/src/trap-handler/handler-shared.cc
+++ b/deps/v8/src/trap-handler/handler-shared.cc
@@ -34,6 +34,7 @@ static_assert(sizeof(g_thread_in_wasm_code) > 1,
size_t gNumCodeObjects = 0;
CodeProtectionInfoListEntry* gCodeObjects = nullptr;
+std::atomic_size_t gRecoveredTrapCount = {0};
std::atomic_flag MetadataLock::spinlock_ = ATOMIC_FLAG_INIT;
diff --git a/deps/v8/src/trap-handler/trap-handler-internal.h b/deps/v8/src/trap-handler/trap-handler-internal.h
index b4efd7ff9e..7897bd0ecc 100644
--- a/deps/v8/src/trap-handler/trap-handler-internal.h
+++ b/deps/v8/src/trap-handler/trap-handler-internal.h
@@ -60,6 +60,14 @@ struct CodeProtectionInfoListEntry {
extern size_t gNumCodeObjects;
extern CodeProtectionInfoListEntry* gCodeObjects;
+extern std::atomic_size_t gRecoveredTrapCount;
+
+// Searches the fault location table for an entry matching fault_addr. If found,
+// returns true and sets landing_pad to the address of a fragment of code that
+// can recover from this fault. Otherwise, returns false and leaves
+// landing_pad unchanged.
+bool TryFindLandingPad(uintptr_t fault_addr, uintptr_t* landing_pad);
+
} // namespace trap_handler
} // namespace internal
} // namespace v8
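For orientation, a hedged sketch of how a fault handler might consume the TryFindLandingPad declaration above; HandleFault and the PC plumbing are hypothetical stand-ins, not the actual signal-handler code:

  // Hypothetical caller: on a memory fault inside wasm code, look up a
  // recovery address and report whether execution can be redirected there.
  bool HandleFault(uintptr_t fault_pc, uintptr_t* resume_pc) {
    uintptr_t landing_pad = 0;
    if (!TryFindLandingPad(fault_pc, &landing_pad)) return false;
    gRecoveredTrapCount.fetch_add(1, std::memory_order_relaxed);
    *resume_pc = landing_pad;  // caller would write this into the context PC
    return true;
  }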
diff --git a/deps/v8/src/trap-handler/trap-handler.h b/deps/v8/src/trap-handler/trap-handler.h
index ed9459918b..7189c27e29 100644
--- a/deps/v8/src/trap-handler/trap-handler.h
+++ b/deps/v8/src/trap-handler/trap-handler.h
@@ -30,14 +30,17 @@ namespace trap_handler {
struct ProtectedInstructionData {
// The offset of this instruction from the start of its code object.
- intptr_t instr_offset;
+ // Wasm code never grows larger than 2GB, so uint32_t is sufficient.
+ uint32_t instr_offset;
// The offset of the landing pad from the start of its code object.
//
// TODO(eholk): Use a single landing pad and store parameters here.
- intptr_t landing_offset;
+ uint32_t landing_offset;
};
+const int kInvalidIndex = -1;
+
/// Adjusts the base code pointer.
void UpdateHandlerDataCodePointer(int index, void* base);
@@ -89,6 +92,8 @@ bool RegisterDefaultSignalHandler();
bool TryHandleSignal(int signum, siginfo_t* info, ucontext_t* context);
#endif // V8_OS_LINUX
+size_t GetRecoveredTrapCount();
+
} // namespace trap_handler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/type-hints.cc b/deps/v8/src/type-hints.cc
index 69c95712c2..8e64f3f583 100644
--- a/deps/v8/src/type-hints.cc
+++ b/deps/v8/src/type-hints.cc
@@ -53,6 +53,20 @@ std::ostream& operator<<(std::ostream& os, CompareOperationHint hint) {
UNREACHABLE();
}
+std::ostream& operator<<(std::ostream& os, ForInHint hint) {
+ switch (hint) {
+ case ForInHint::kNone:
+ return os << "None";
+ case ForInHint::kEnumCacheKeys:
+ return os << "EnumCacheKeys";
+ case ForInHint::kEnumCacheKeysAndIndices:
+ return os << "EnumCacheKeysAndIndices";
+ case ForInHint::kAny:
+ return os << "Any";
+ }
+ UNREACHABLE();
+}
+
std::ostream& operator<<(std::ostream& os, ToBooleanHint hint) {
switch (hint) {
case ToBooleanHint::kNone:
diff --git a/deps/v8/src/type-hints.h b/deps/v8/src/type-hints.h
index 4baec53ccc..6e50649646 100644
--- a/deps/v8/src/type-hints.h
+++ b/deps/v8/src/type-hints.h
@@ -48,6 +48,16 @@ inline size_t hash_value(CompareOperationHint hint) {
std::ostream& operator<<(std::ostream&, CompareOperationHint);
+// Type hints for for..in statements.
+enum class ForInHint : uint8_t {
+ kNone,
+ kEnumCacheKeysAndIndices,
+ kEnumCacheKeys,
+ kAny
+};
+
+std::ostream& operator<<(std::ostream&, ForInHint);
+
// Type hints for the ToBoolean type conversion.
enum class ToBooleanHint : uint16_t {
kNone = 0u,
diff --git a/deps/v8/src/unicode-cache-inl.h b/deps/v8/src/unicode-cache-inl.h
index c5a8a69dab..7f73589666 100644
--- a/deps/v8/src/unicode-cache-inl.h
+++ b/deps/v8/src/unicode-cache-inl.h
@@ -20,15 +20,9 @@ bool UnicodeCache::IsIdentifierPart(unibrow::uchar c) {
return kIsIdentifierPart.get(c);
}
-
-bool UnicodeCache::IsLineTerminator(unibrow::uchar c) {
- return kIsLineTerminator.get(c);
-}
-
-
bool UnicodeCache::IsLineTerminatorSequence(unibrow::uchar c,
unibrow::uchar next) {
- if (!IsLineTerminator(c)) return false;
+ if (!unibrow::IsLineTerminator(c)) return false;
if (c == 0x000d && next == 0x000a) return false; // CR with following LF.
return true;
}
diff --git a/deps/v8/src/unicode-cache.h b/deps/v8/src/unicode-cache.h
index 849025e4cb..8f4badae8f 100644
--- a/deps/v8/src/unicode-cache.h
+++ b/deps/v8/src/unicode-cache.h
@@ -32,7 +32,6 @@ class UnicodeCache {
private:
unibrow::Predicate<IdentifierStart, 128> kIsIdentifierStart;
unibrow::Predicate<IdentifierPart, 128> kIsIdentifierPart;
- unibrow::Predicate<unibrow::LineTerminator, 128> kIsLineTerminator;
unibrow::Predicate<WhiteSpace, 128> kIsWhiteSpace;
unibrow::Predicate<WhiteSpaceOrLineTerminator, 128>
kIsWhiteSpaceOrLineTerminator;
diff --git a/deps/v8/src/unicode.cc b/deps/v8/src/unicode.cc
index 838ce96c75..22e5ca606e 100644
--- a/deps/v8/src/unicode.cc
+++ b/deps/v8/src/unicode.cc
@@ -197,27 +197,27 @@ static inline uint8_t NonASCIISequenceLength(byte first) {
// clang-format off
static const uint8_t lengths[256] = {
// The first 128 entries correspond to ASCII characters.
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 00 - 0f */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 10 - 1f */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 20 - 2f */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 30 - 3f */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 40 - 4f */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 50 - 5f */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 60 - 6f */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 70 - 7f */
// The following 64 entries correspond to continuation bytes.
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 80 - 8f */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 90 - 9f */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* a0 - af */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* b0 - bf */
// The next are two invalid overlong encodings and 30 two-byte sequences.
- 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, /* c0-c1 + c2-cf */
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, /* d0-df */
// 16 three-byte sequences.
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, /* e0-ef */
// 5 four-byte sequences, followed by sequences that could only encode
// code points outside of the unicode range.
- 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; /* f0-f4 + f5-ff */
// clang-format on
return lengths[first];
}
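A few concrete lookups, read straight off the table above:

  // NonASCIISequenceLength(0xC3) == 2  (lead byte of a two-byte sequence)
  // NonASCIISequenceLength(0xE2) == 3  (lead byte of a three-byte sequence)
  // NonASCIISequenceLength(0xF0) == 4  (lead byte of a four-byte sequence)
  // NonASCIISequenceLength(0x80) == 0  (bare continuation byte)
  // NonASCIISequenceLength(0xC0) == 0  (overlong lead byte, rejected up front)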
@@ -227,9 +227,11 @@ static inline bool IsContinuationCharacter(byte chr) {
return chr >= 0x80 && chr <= 0xBF;
}
-
-// This method decodes an UTF-8 value according to RFC 3629.
+// This method decodes an UTF-8 value according to RFC 3629 and
+// https://encoding.spec.whatwg.org/#utf-8-decoder .
uchar Utf8::CalculateValue(const byte* str, size_t max_length, size_t* cursor) {
+ DCHECK_GT(str[0], kMaxOneByteChar);
+
size_t length = NonASCIISequenceLength(str[0]);
// Check continuation characters.
@@ -238,34 +240,46 @@ uchar Utf8::CalculateValue(const byte* str, size_t max_length, size_t* cursor) {
while (count < max_count && IsContinuationCharacter(str[count])) {
count++;
}
- *cursor += count;
- // There must be enough continuation characters.
- if (count != length) return kBadChar;
+ if (length >= 3 && count < 2) {
+ // Not enough continuation bytes to check overlong sequences.
+ *cursor += 1;
+ return kBadChar;
+ }
// Check overly long sequences & other conditions.
if (length == 3) {
if (str[0] == 0xE0 && (str[1] < 0xA0 || str[1] > 0xBF)) {
- // Overlong three-byte sequence?
+ // Overlong three-byte sequence? The first byte generates a kBadChar.
+ *cursor += 1;
return kBadChar;
} else if (str[0] == 0xED && (str[1] < 0x80 || str[1] > 0x9F)) {
- // High and low surrogate halves?
+ // High and low surrogate halves? The first byte generates a kBadChar.
+ *cursor += 1;
return kBadChar;
}
} else if (length == 4) {
if (str[0] == 0xF0 && (str[1] < 0x90 || str[1] > 0xBF)) {
- // Overlong four-byte sequence.
+ // Overlong four-byte sequence. The first byte generates a kBadChar.
+ *cursor += 1;
return kBadChar;
} else if (str[0] == 0xF4 && (str[1] < 0x80 || str[1] > 0x8F)) {
- // Code points outside of the unicode range.
+ // Code points outside of the unicode range. The first byte generates a
+ // kBadChar.
+ *cursor += 1;
return kBadChar;
}
}
+ *cursor += count;
+
+ if (count != length) {
+ // Not enough continuation characters.
+ return kBadChar;
+ }
+
// All errors have been handled, so we only have to assemble the result.
switch (length) {
- case 1:
- return str[0];
case 2:
return ((str[0] << 6) + str[1]) - 0x00003080;
case 3:
@@ -278,6 +292,25 @@ uchar Utf8::CalculateValue(const byte* str, size_t max_length, size_t* cursor) {
UNREACHABLE();
}
+/*
+Overlong sequence detection: Since Blink's TextCodecUTF8 rejects multi-byte
+characters which could be expressed with fewer bytes, we must too.
+
+Each continuation byte (10xxxxxx) carries 6 bits of payload. The lead bytes of
+1, 2, 3 and 4-byte characters are 0xxxxxxx, 110xxxxx, 1110xxxx and 11110xxx, and
+carry 7, 5, 4, and 3 bits of payload, respectively.
+
+Thus, a two-byte character can contain 11 bits of payload, a three-byte
+character 16, and a four-byte character 21.
+
+If we encounter a two-byte character which contains 7 bits or fewer, a
+three-byte character which contains 11 bits or fewer, or a four-byte character
+which contains 16 bits or fewer, we reject the character and generate a kBadChar for
+each of the bytes. This is because Blink handles overlong sequences by rejecting
+the first byte of the character (returning kBadChar); thus the rest are lonely
+continuation bytes and generate a kBadChar each.
+*/
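One worked instance of that rule (illustrative, not additional patch content): the sequence E0 80 80 tries to encode U+0000 in three bytes.

  // bytes:    0xE0       0x80       0x80
  // bits:     1110 0000  10 000000  10 000000
  // payload:  0000    ++ 000000  ++ 000000   -> value 0x0000
  // A three-byte character may carry up to 16 payload bits, but this value
  // needs at most 7 (indeed 0), so the sequence is overlong: Blink-compatible
  // handling rejects the lead byte, and each byte decodes to kBadChar.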
+
uchar Utf8::ValueOfIncremental(byte next, Utf8IncrementalBuffer* buffer) {
DCHECK_NOT_NULL(buffer);
@@ -289,7 +322,8 @@ uchar Utf8::ValueOfIncremental(byte next, Utf8IncrementalBuffer* buffer) {
if (*buffer == 0) {
// We're at the start of a new character.
uint32_t kind = NonASCIISequenceLength(next);
- if (kind >= 2 && kind <= 4) {
+ CHECK_LE(kind, 4);
+ if (kind >= 2) {
// Start of 2..4 byte character, and no buffer.
// The mask for the lower bits depends on the kind, and is
@@ -300,11 +334,14 @@ uchar Utf8::ValueOfIncremental(byte next, Utf8IncrementalBuffer* buffer) {
// Store the kind in the top nibble, and kind - 1 (i.e., remaining bytes)
// in 2nd nibble, and the value in the bottom three bytes. The 2nd nibble is
// intended as a counter about how many bytes are still needed.
- *buffer = kind << 28 | (kind - 1) << 24 | (next & mask);
+ uint32_t character_info = kind << 28 | (kind - 1) << 24;
+ DCHECK_EQ(character_info & mask, 0);
+ *buffer = character_info | (next & mask);
return kIncomplete;
} else {
// No buffer, and not the start of a 1-byte char (handled at the
- // beginning), and not the start of a 2..4 byte char? Bad char.
+ // beginning), and not the start of a 2..4 byte char (or the start of an
+ // overlong / invalid sequence)? Bad char.
*buffer = 0;
return kBadChar;
}
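To make the packing concrete (a worked example, not part of the patch): for lead byte 0xE2, kind == 3 and the payload mask is 0x0F, so the buffer is assembled as

  // *buffer = 3 << 28 | (3 - 1) << 24 | (0xE2 & 0x0F)
  //         = 0x30000000 | 0x02000000 | 0x00000002
  //         = 0x32000002
  // top nibble: total sequence length (3); 2nd nibble: bytes still needed (2);
  // low 24 bits: payload accumulated so far (0b0010).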
@@ -331,6 +368,47 @@ uchar Utf8::ValueOfIncremental(byte next, Utf8IncrementalBuffer* buffer) {
// How many bytes (excluding this one) do we still expect?
uint8_t bytes_expected = *buffer >> 28;
uint8_t bytes_left = (*buffer >> 24) & 0x0f;
+
+ // Two-byte overlong sequence detection is handled by
+ // NonASCIISequenceLength, so we don't need to check anything here.
+ if (bytes_expected == 3 && bytes_left == 2) {
+ // Check that there are at least 12 bits of payload.
+ uint8_t lead_payload = *buffer & (0x7f >> bytes_expected);
+ DCHECK_LE(lead_payload, 0xf);
+ if (lead_payload == 0 && next < 0xa0) {
+ // 0xa0 = 0b10100000 (payload: 100000). Overlong sequence: 0 bits from
+ // the first byte, at most 5 from the second byte, and at most 6 from
+ // the third -> in total at most 11.
+
+ *buffer = next;
+ return kBadChar;
+ } else if (lead_payload == 0xd && next > 0x9f) {
+ // The resulting code point would be on a range which is reserved for
+ // UTF-16 surrogate halves.
+ *buffer = next;
+ return kBadChar;
+ }
+ } else if (bytes_expected == 4 && bytes_left == 3) {
+ // Check that there are at least 17 bits of payload.
+ uint8_t lead_payload = *buffer & (0x7f >> bytes_expected);
+
+ // If the lead byte was bigger than 0xf4 (payload: 4), it's not a start of
+ // any valid character, and this is detected by NonASCIISequenceLength.
+ DCHECK_LE(lead_payload, 0x4);
+ if (lead_payload == 0 && next < 0x90) {
+ // 0x90 = 0b10010000 (payload: 10000). Overlong sequence: 0 bits from the
+ // first byte, at most 4 from the second byte, at most 12 from the third
+ // and fourth bytes -> in total at most 16.
+ *buffer = next;
+ return kBadChar;
+ } else if (lead_payload == 4 && next > 0x8f) {
+ // Invalid code point; value greater than 0b100001111111111111111
+ // (0x10ffff).
+ *buffer = next;
+ return kBadChar;
+ }
+ }
+
bytes_left--;
// Update the value.
uint32_t value = ((*buffer & 0xffffff) << 6) | (next & 0x3F);
@@ -338,10 +416,15 @@ uchar Utf8::ValueOfIncremental(byte next, Utf8IncrementalBuffer* buffer) {
*buffer = (bytes_expected << 28 | bytes_left << 24 | value);
return kIncomplete;
} else {
- *buffer = 0;
+#ifdef DEBUG
+ // Check that overlong sequences were already detected.
bool sequence_was_too_long = (bytes_expected == 2 && value < 0x80) ||
- (bytes_expected == 3 && value < 0x800);
- return sequence_was_too_long ? kBadChar : value;
+ (bytes_expected == 3 && value < 0x800) ||
+ (bytes_expected == 4 && value < 0x10000);
+ DCHECK(!sequence_was_too_long);
+#endif
+ *buffer = 0;
+ return value;
}
} else {
// Within a character, but not a continuation character? Then the
@@ -1163,14 +1246,6 @@ bool WhiteSpace::Is(uchar c) {
}
#endif // !V8_INTL_SUPPORT
-// LineTerminator: 'JS_Line_Terminator' in point.properties
-// ES#sec-line-terminators lists exactly 4 code points:
-// LF (U+000A), CR (U+000D), LS(U+2028), PS(U+2029)
-
-bool LineTerminator::Is(uchar c) {
- return c == 0xA || c == 0xD || c == 0x2028 || c == 0x2029;
-}
-
#ifndef V8_INTL_SUPPORT
static const MultiCharacterSpecialCase<2> kToLowercaseMultiStrings0[2] = { // NOLINT
{{105, 775}}, {{kSentinel}} }; // NOLINT
diff --git a/deps/v8/src/unicode.h b/deps/v8/src/unicode.h
index f360b14634..04d58f3650 100644
--- a/deps/v8/src/unicode.h
+++ b/deps/v8/src/unicode.h
@@ -127,8 +127,7 @@ class Utf16 {
}
};
-
-class Utf8 {
+class V8_EXPORT_PRIVATE Utf8 {
public:
static inline uchar Length(uchar chr, int previous);
static inline unsigned EncodeOneByte(char* out, uint8_t c);
@@ -194,9 +193,14 @@ struct V8_EXPORT_PRIVATE WhiteSpace {
static bool Is(uchar c);
};
#endif // !V8_INTL_SUPPORT
-struct V8_EXPORT_PRIVATE LineTerminator {
- static bool Is(uchar c);
-};
+
+// LineTerminator: 'JS_Line_Terminator' in point.properties
+// ES#sec-line-terminators lists exactly 4 code points:
+// LF (U+000A), CR (U+000D), LS(U+2028), PS(U+2029)
+V8_INLINE bool IsLineTerminator(uchar c) {
+ return c == 0x000A || c == 0x000D || c == 0x2028 || c == 0x2029;
+}
+
#ifndef V8_INTL_SUPPORT
struct ToLowercase {
static const int kMaxWidth = 3;
diff --git a/deps/v8/src/uri.cc b/deps/v8/src/uri.cc
index 14e22146e7..a6ad3ddb9d 100644
--- a/deps/v8/src/uri.cc
+++ b/deps/v8/src/uri.cc
@@ -4,11 +4,13 @@
#include "src/uri.h"
+#include <vector>
+
#include "src/char-predicates-inl.h"
#include "src/handles.h"
#include "src/isolate-inl.h"
-#include "src/list.h"
#include "src/string-search.h"
+#include "src/unicode-inl.h"
namespace v8 {
namespace internal {
@@ -43,7 +45,8 @@ bool IsReplacementCharacter(const uint8_t* octets, int length) {
return true;
}
-bool DecodeOctets(const uint8_t* octets, int length, List<uc16>* buffer) {
+bool DecodeOctets(const uint8_t* octets, int length,
+ std::vector<uc16>* buffer) {
size_t cursor = 0;
uc32 value = unibrow::Utf8::ValueOf(octets, length, &cursor);
if (value == unibrow::Utf8::kBadChar &&
@@ -52,10 +55,10 @@ bool DecodeOctets(const uint8_t* octets, int length, List<uc16>* buffer) {
}
if (value <= static_cast<uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
- buffer->Add(value);
+ buffer->push_back(value);
} else {
- buffer->Add(unibrow::Utf16::LeadSurrogate(value));
- buffer->Add(unibrow::Utf16::TrailSurrogate(value));
+ buffer->push_back(unibrow::Utf16::LeadSurrogate(value));
+ buffer->push_back(unibrow::Utf16::TrailSurrogate(value));
}
return true;
}
@@ -72,23 +75,23 @@ int TwoDigitHex(uc16 character1, uc16 character2) {
template <typename T>
void AddToBuffer(uc16 decoded, String::FlatContent* uri_content, int index,
- bool is_uri, List<T>* buffer) {
+ bool is_uri, std::vector<T>* buffer) {
if (is_uri && IsReservedPredicate(decoded)) {
- buffer->Add('%');
+ buffer->push_back('%');
uc16 first = uri_content->Get(index + 1);
uc16 second = uri_content->Get(index + 2);
DCHECK_GT(std::numeric_limits<T>::max(), first);
DCHECK_GT(std::numeric_limits<T>::max(), second);
- buffer->Add(first);
- buffer->Add(second);
+ buffer->push_back(first);
+ buffer->push_back(second);
} else {
- buffer->Add(decoded);
+ buffer->push_back(decoded);
}
}
bool IntoTwoByte(int index, bool is_uri, int uri_length,
- String::FlatContent* uri_content, List<uc16>* buffer) {
+ String::FlatContent* uri_content, std::vector<uc16>* buffer) {
for (int k = index; k < uri_length; k++) {
uc16 code = uri_content->Get(k);
if (code == '%') {
@@ -126,15 +129,15 @@ bool IntoTwoByte(int index, bool is_uri, int uri_length,
AddToBuffer(decoded, uri_content, k - 2, is_uri, buffer);
}
} else {
- buffer->Add(code);
+ buffer->push_back(code);
}
}
return true;
}
bool IntoOneAndTwoByte(Handle<String> uri, bool is_uri,
- List<uint8_t>* one_byte_buffer,
- List<uc16>* two_byte_buffer) {
+ std::vector<uint8_t>* one_byte_buffer,
+ std::vector<uc16>* two_byte_buffer) {
DisallowHeapAllocation no_gc;
String::FlatContent uri_content = uri->GetFlatContent();
@@ -162,7 +165,7 @@ bool IntoOneAndTwoByte(Handle<String> uri, bool is_uri,
return IntoTwoByte(k, is_uri, uri_length, &uri_content,
two_byte_buffer);
}
- one_byte_buffer->Add(code);
+ one_byte_buffer->push_back(code);
}
}
return true;
@@ -173,28 +176,28 @@ bool IntoOneAndTwoByte(Handle<String> uri, bool is_uri,
MaybeHandle<String> Uri::Decode(Isolate* isolate, Handle<String> uri,
bool is_uri) {
uri = String::Flatten(uri);
- List<uint8_t> one_byte_buffer;
- List<uc16> two_byte_buffer;
+ std::vector<uint8_t> one_byte_buffer;
+ std::vector<uc16> two_byte_buffer;
if (!IntoOneAndTwoByte(uri, is_uri, &one_byte_buffer, &two_byte_buffer)) {
THROW_NEW_ERROR(isolate, NewURIError(), String);
}
- if (two_byte_buffer.is_empty()) {
- return isolate->factory()->NewStringFromOneByte(
- one_byte_buffer.ToConstVector());
+ if (two_byte_buffer.empty()) {
+ return isolate->factory()->NewStringFromOneByte(Vector<const uint8_t>(
+ one_byte_buffer.data(), static_cast<int>(one_byte_buffer.size())));
}
Handle<SeqTwoByteString> result;
+ int result_length =
+ static_cast<int>(one_byte_buffer.size() + two_byte_buffer.size());
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result, isolate->factory()->NewRawTwoByteString(
- one_byte_buffer.length() + two_byte_buffer.length()),
+ isolate, result, isolate->factory()->NewRawTwoByteString(result_length),
String);
- CopyChars(result->GetChars(), one_byte_buffer.ToConstVector().start(),
- one_byte_buffer.length());
- CopyChars(result->GetChars() + one_byte_buffer.length(),
- two_byte_buffer.ToConstVector().start(), two_byte_buffer.length());
+ CopyChars(result->GetChars(), one_byte_buffer.data(), one_byte_buffer.size());
+ CopyChars(result->GetChars() + one_byte_buffer.size(), two_byte_buffer.data(),
+ two_byte_buffer.size());
return result;
}
@@ -240,13 +243,13 @@ bool IsUriSeparator(uc16 c) {
}
}
-void AddEncodedOctetToBuffer(uint8_t octet, List<uint8_t>* buffer) {
- buffer->Add('%');
- buffer->Add(HexCharOfValue(octet >> 4));
- buffer->Add(HexCharOfValue(octet & 0x0F));
+void AddEncodedOctetToBuffer(uint8_t octet, std::vector<uint8_t>* buffer) {
+ buffer->push_back('%');
+ buffer->push_back(HexCharOfValue(octet >> 4));
+ buffer->push_back(HexCharOfValue(octet & 0x0F));
}
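A worked call, for illustration: encoding the reserved character '/' (0x2F).

  // AddEncodedOctetToBuffer(0x2F, &buf) appends:
  //   '%'
  //   HexCharOfValue(0x2F >> 4)   == HexCharOfValue(0x2) == '2'
  //   HexCharOfValue(0x2F & 0x0F) == HexCharOfValue(0xF) == 'F'
  // i.e. the buffer gains "%2F", the percent-encoding of '/'.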
-void EncodeSingle(uc16 c, List<uint8_t>* buffer) {
+void EncodeSingle(uc16 c, std::vector<uint8_t>* buffer) {
char s[4] = {};
int number_of_bytes;
number_of_bytes =
@@ -256,7 +259,7 @@ void EncodeSingle(uc16 c, List<uint8_t>* buffer) {
}
}
-void EncodePair(uc16 cc1, uc16 cc2, List<uint8_t>* buffer) {
+void EncodePair(uc16 cc1, uc16 cc2, std::vector<uint8_t>* buffer) {
char s[4] = {};
int number_of_bytes =
unibrow::Utf8::Encode(s, unibrow::Utf16::CombineSurrogatePair(cc1, cc2),
@@ -272,7 +275,8 @@ MaybeHandle<String> Uri::Encode(Isolate* isolate, Handle<String> uri,
bool is_uri) {
uri = String::Flatten(uri);
int uri_length = uri->length();
- List<uint8_t> buffer(uri_length);
+ std::vector<uint8_t> buffer;
+ buffer.reserve(uri_length);
{
DisallowHeapAllocation no_gc;
@@ -292,7 +296,7 @@ MaybeHandle<String> Uri::Encode(Isolate* isolate, Handle<String> uri,
} else if (!unibrow::Utf16::IsTrailSurrogate(cc1)) {
if (IsUnescapePredicateInUriComponent(cc1) ||
(is_uri && IsUriSeparator(cc1))) {
- buffer.Add(cc1);
+ buffer.push_back(cc1);
} else {
EncodeSingle(cc1, &buffer);
}
@@ -304,7 +308,8 @@ MaybeHandle<String> Uri::Encode(Isolate* isolate, Handle<String> uri,
}
}
- return isolate->factory()->NewStringFromOneByte(buffer.ToConstVector());
+ return isolate->factory()->NewStringFromOneByte(
+ Vector<const uint8_t>(buffer.data(), static_cast<int>(buffer.size())));
}
namespace { // Anonymous namespace for Escape and Unescape
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 292ff535cb..db20fe0b99 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -196,6 +196,13 @@ typename std::make_unsigned<T>::type Abs(T a) {
return (x ^ y) - y;
}
+// Returns the negative absolute value of its argument.
+template <typename T,
+ typename = typename std::enable_if<std::is_signed<T>::value>::type>
+T Nabs(T a) {
+ return a < 0 ? a : -a;
+}
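Returning the negative absolute value sidesteps the classic Abs overflow at the type's minimum; a quick illustration (assuming 32-bit int):

  // Nabs(5)         == -5
  // Nabs(-7)        == -7
  // Nabs(INT32_MIN) == INT32_MIN  // well-defined, whereas -INT32_MIN overflows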
+
// Floor(-0.0) == 0.0
inline double Floor(double x) {
#if V8_CC_MSVC
@@ -233,6 +240,47 @@ inline double Pow(double x, double y) {
return std::pow(x, y);
}
+template <typename T>
+T SaturateAdd(T a, T b) {
+ if (std::is_signed<T>::value) {
+ if (a > 0 && b > 0) {
+ if (a > std::numeric_limits<T>::max() - b) {
+ return std::numeric_limits<T>::max();
+ }
+ } else if (a < 0 && b < 0) {
+ if (a < std::numeric_limits<T>::min() - b) {
+ return std::numeric_limits<T>::min();
+ }
+ }
+ } else {
+ CHECK(std::is_unsigned<T>::value);
+ if (a > std::numeric_limits<T>::max() - b) {
+ return std::numeric_limits<T>::max();
+ }
+ }
+ return a + b;
+}
+
+template <typename T>
+T SaturateSub(T a, T b) {
+ if (std::is_signed<T>::value) {
+ if (a > 0 && b < 0) {
+ if (a > std::numeric_limits<T>::max() + b) {
+ return std::numeric_limits<T>::max();
+ }
+ } else if (a < 0 && b > 0) {
+ if (a < std::numeric_limits<T>::min() + b) {
+ return std::numeric_limits<T>::min();
+ }
+ }
+ } else {
+ CHECK(std::is_unsigned<T>::value);
+ if (a < b) {
+ return static_cast<T>(0);
+ }
+ }
+ return a - b;
+}
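Behavior at the limits, following directly from the definitions above (illustrative values):

  // SaturateAdd<int8_t>(120, 10)    == 127  (clamped to INT8_MAX)
  // SaturateAdd<uint8_t>(200, 100)  == 255  (clamped to UINT8_MAX)
  // SaturateSub<uint8_t>(3, 7)      == 0    (unsigned difference clamps to 0)
  // SaturateSub<int8_t>(-120, 10)   == -128 (clamped to INT8_MIN)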
// ----------------------------------------------------------------------------
// BitField is a help template for encoding and decode bitfield with
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index e8ced8d082..4e1c96b187 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -66,7 +66,7 @@ void V8::InitializeOncePerProcessImpl() {
FLAG_max_semi_space_size = 1;
}
- base::OS::Initialize(FLAG_random_seed, FLAG_hard_abort, FLAG_gc_fake_mmap);
+ base::OS::Initialize(FLAG_hard_abort, FLAG_gc_fake_mmap);
Isolate::InitializeOncePerProcess();
diff --git a/deps/v8/src/v8.gyp b/deps/v8/src/v8.gyp
index bf7635ee33..89eb271f61 100644
--- a/deps/v8/src/v8.gyp
+++ b/deps/v8/src/v8.gyp
@@ -99,7 +99,7 @@
# The dependency on v8_base should come from a transitive
# dependency however the Android toolchain requires libv8_base.a
# to appear before libv8_snapshot.a so it's listed explicitly.
- 'dependencies': ['v8_base', 'v8_builtins_setup', 'v8_nosnapshot'],
+ 'dependencies': ['v8_base', 'v8_init', 'v8_nosnapshot'],
}],
['v8_use_snapshot=="true" and v8_use_external_startup_data==0', {
# The dependency on v8_base should come from a transitive
@@ -133,10 +133,10 @@
]
},
{
- 'target_name': 'v8_builtins_setup',
+ 'target_name': 'v8_init',
'type': 'static_library',
'dependencies': [
- 'v8_builtins_generators',
+ 'v8_initializers',
],
'variables': {
'optimize': 'max',
@@ -157,7 +157,7 @@
],
},
{
- 'target_name': 'v8_builtins_generators',
+ 'target_name': 'v8_initializers',
'type': 'static_library',
'dependencies': [
'v8_base',
@@ -189,8 +189,6 @@
'builtins/builtins-conversion-gen.cc',
'builtins/builtins-date-gen.cc',
'builtins/builtins-debug-gen.cc',
- 'builtins/builtins-forin-gen.cc',
- 'builtins/builtins-forin-gen.h',
'builtins/builtins-function-gen.cc',
'builtins/builtins-generator-gen.cc',
'builtins/builtins-global-gen.cc',
@@ -208,8 +206,6 @@
'builtins/builtins-promise-gen.h',
'builtins/builtins-proxy-gen.cc',
'builtins/builtins-proxy-gen.h',
- 'builtins/builtins-proxy-helpers-gen.cc',
- 'builtins/builtins-proxy-helpers-gen.h',
'builtins/builtins-regexp-gen.cc',
'builtins/builtins-regexp-gen.h',
'builtins/builtins-sharedarraybuffer-gen.cc',
@@ -220,6 +216,7 @@
'builtins/builtins-utils-gen.h',
'builtins/builtins-wasm-gen.cc',
'builtins/setup-builtins-internal.cc',
+ 'heap/setup-heap-internal.cc',
'ic/accessor-assembler.cc',
'ic/accessor-assembler.h',
'ic/binary-op-assembler.cc',
@@ -625,6 +622,7 @@
'builtins/builtins-api.cc',
'builtins/builtins-arraybuffer.cc',
'builtins/builtins-array.cc',
+ 'builtins/builtins-bigint.cc',
'builtins/builtins-boolean.cc',
'builtins/builtins-call.cc',
'builtins/builtins-callsite.cc',
@@ -805,10 +803,6 @@
'compiler/memory-optimizer.h',
'compiler/move-optimizer.cc',
'compiler/move-optimizer.h',
- 'compiler/new-escape-analysis.cc',
- 'compiler/new-escape-analysis.h',
- 'compiler/new-escape-analysis-reducer.cc',
- 'compiler/new-escape-analysis-reducer.h',
'compiler/node-aux-data.h',
'compiler/node-cache.cc',
'compiler/node-cache.h',
@@ -924,6 +918,8 @@
'debug/debug-scopes.h',
'debug/debug-stack-trace-iterator.cc',
'debug/debug-stack-trace-iterator.h',
+ 'debug/debug-type-profile.cc',
+ 'debug/debug-type-profile.h',
'debug/debug.cc',
'debug/debug.h',
'debug/interface-types.h',
@@ -933,6 +929,7 @@
'deoptimize-reason.h',
'deoptimizer.cc',
'deoptimizer.h',
+ 'detachable-vector.h',
'disasm.h',
'disassembler.cc',
'disassembler.h',
@@ -963,6 +960,7 @@
'extensions/trigger-failure-extension.h',
'external-reference-table.cc',
'external-reference-table.h',
+ 'factory-inl.h',
'factory.cc',
'factory.h',
'fast-dtoa.cc',
@@ -970,13 +968,10 @@
'feedback-vector-inl.h',
'feedback-vector.cc',
'feedback-vector.h',
- 'ffi/ffi-compiler.cc',
- 'ffi/ffi-compiler.h',
'field-index.h',
'field-index-inl.h',
'field-type.cc',
'field-type.h',
- 'find-and-replace-pattern.h',
'fixed-dtoa.cc',
'fixed-dtoa.h',
'flag-definitions.h',
@@ -1000,6 +995,7 @@
'heap/array-buffer-tracker-inl.h',
'heap/array-buffer-tracker.cc',
'heap/array-buffer-tracker.h',
+ 'heap/barrier.h',
'heap/code-stats.cc',
'heap/code-stats.h',
'heap/concurrent-marking.cc',
@@ -1041,8 +1037,6 @@
'heap/scavenger-inl.h',
'heap/scavenger.cc',
'heap/scavenger.h',
- 'heap/sequential-marking-deque.cc',
- 'heap/sequential-marking-deque.h',
'heap/slot-set.h',
'heap/spaces-inl.h',
'heap/spaces.cc',
@@ -1131,8 +1125,6 @@
'layout-descriptor-inl.h',
'layout-descriptor.cc',
'layout-descriptor.h',
- 'list-inl.h',
- 'list.h',
'locked-queue-inl.h',
'locked-queue.h',
'log-inl.h',
@@ -1164,8 +1156,9 @@
'objects.h',
'objects/arguments-inl.h',
'objects/arguments.h',
- 'objects/code-cache.h',
- 'objects/code-cache-inl.h',
+ 'objects/bigint-inl.h',
+ 'objects/bigint.cc',
+ 'objects/bigint.h',
'objects/compilation-cache.h',
'objects/compilation-cache-inl.h',
'objects/debug-objects-inl.h',
@@ -1190,6 +1183,8 @@
'objects/module.h',
'objects/object-macros.h',
'objects/object-macros-undef.h',
+ 'objects/property-descriptor-object.h',
+ 'objects/property-descriptor-object-inl.h',
'objects/regexp-match-info.h',
'objects/scope-info.cc',
'objects/scope-info.h',
@@ -1200,6 +1195,8 @@
'objects/string-inl.h',
'objects/string.h',
'objects/string-table.h',
+ 'objects/template-objects.cc',
+ 'objects/template-objects.h',
'ostreams.cc',
'ostreams.h',
'parsing/duplicate-finder.h',
@@ -1296,6 +1293,7 @@
'runtime-profiler.h',
'runtime/runtime-array.cc',
'runtime/runtime-atomics.cc',
+ 'runtime/runtime-bigint.cc',
'runtime/runtime-classes.cc',
'runtime/runtime-collections.cc',
'runtime/runtime-compiler.cc',
@@ -1333,9 +1331,14 @@
'setup-isolate.h',
'signature.h',
'simulator.h',
- 'small-pointer-list.h',
+ 'snapshot/builtin-deserializer.cc',
+ 'snapshot/builtin-deserializer.h',
+ 'snapshot/builtin-serializer.cc',
+ 'snapshot/builtin-serializer.h',
'snapshot/code-serializer.cc',
'snapshot/code-serializer.h',
+ 'snapshot/default-serializer-allocator.cc',
+ 'snapshot/default-serializer-allocator.h',
'snapshot/deserializer.cc',
'snapshot/deserializer.h',
'snapshot/natives-common.cc',
@@ -1429,6 +1432,8 @@
'wasm/leb-helper.h',
'wasm/local-decl-encoder.cc',
'wasm/local-decl-encoder.h',
+ 'wasm/memory-tracing.cc',
+ 'wasm/memory-tracing.h',
'wasm/module-compiler.cc',
'wasm/module-compiler.h',
'wasm/module-decoder.cc',
@@ -1444,15 +1449,20 @@
'wasm/wasm-debug.cc',
'wasm/wasm-external-refs.cc',
'wasm/wasm-external-refs.h',
+ 'wasm/wasm-heap.cc',
+ 'wasm/wasm-heap.h',
'wasm/wasm-js.cc',
'wasm/wasm-js.h',
'wasm/wasm-limits.h',
+ 'wasm/wasm-memory.cc',
+ 'wasm/wasm-memory.h',
'wasm/wasm-module.cc',
'wasm/wasm-module.h',
'wasm/wasm-module-builder.cc',
'wasm/wasm-module-builder.h',
'wasm/wasm-interpreter.cc',
'wasm/wasm-interpreter.h',
+ 'wasm/wasm-objects-inl.h',
'wasm/wasm-objects.cc',
'wasm/wasm-objects.h',
'wasm/wasm-opcodes.cc',
@@ -1474,6 +1484,7 @@
'zone/zone-allocator.h',
'zone/zone-containers.h',
'zone/zone-handle-set.h',
+ 'zone/zone-list-inl.h',
],
'conditions': [
['want_separate_host_toolset==1', {
@@ -1536,6 +1547,7 @@
'arm64/disasm-arm64.h',
'arm64/frame-constants-arm64.cc',
'arm64/frame-constants-arm64.h',
+ 'arm64/instructions-arm64-constants.cc',
'arm64/instructions-arm64.cc',
'arm64/instructions-arm64.h',
'arm64/instrument-arm64.cc',
@@ -1891,6 +1903,7 @@
'base/sys-info.h',
'base/template-utils.h',
'base/timezone-cache.h',
+ 'base/tsan.h',
'base/utils/random-number-generator.cc',
'base/utils/random-number-generator.h',
],
@@ -2308,14 +2321,12 @@
'js/macros.py',
'messages.h',
'js/prologue.js',
- 'js/max-min.js',
'js/v8natives.js',
'js/array.js',
'js/string.js',
'js/typedarray.js',
'js/weak-collection.js',
'js/messages.js',
- 'js/templates.js',
'js/spread.js',
'js/proxy.js',
'debug/mirrors.js',
@@ -2473,7 +2484,7 @@
'type': 'executable',
'dependencies': [
'v8_base',
- 'v8_builtins_setup',
+ 'v8_init',
'v8_libbase',
'v8_libplatform',
'v8_nosnapshot',
diff --git a/deps/v8/src/value-serializer.cc b/deps/v8/src/value-serializer.cc
index 364e2ea3e8..48efbc7c31 100644
--- a/deps/v8/src/value-serializer.cc
+++ b/deps/v8/src/value-serializer.cc
@@ -7,6 +7,7 @@
#include <type_traits>
#include "include/v8-value-serializer-version.h"
+#include "src/api.h"
#include "src/base/logging.h"
#include "src/conversions.h"
#include "src/factory.h"
@@ -17,8 +18,8 @@
#include "src/objects.h"
#include "src/snapshot/code-serializer.h"
#include "src/transitions.h"
-#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/module-compiler.h"
+#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
namespace v8 {
@@ -1754,7 +1755,7 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
bool transitioning = true;
Handle<Map> map(object->map(), isolate_);
DCHECK(!map->is_dictionary_map());
- DCHECK(map->instance_descriptors()->IsEmpty());
+ DCHECK_EQ(0, map->instance_descriptors()->number_of_descriptors());
std::vector<Handle<Object>> properties;
properties.reserve(8);
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index b050fc9ab5..12caf9835e 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -28,19 +28,12 @@ const char* Version::version_string_ = V8_VERSION_STRING;
// Calculate the V8 version string.
void Version::GetString(Vector<char> str) {
const char* candidate = IsCandidate() ? " (candidate)" : "";
-#ifdef USE_SIMULATOR
- const char* is_simulator = " SIMULATOR";
-#else
- const char* is_simulator = "";
-#endif // USE_SIMULATOR
if (GetPatch() > 0) {
- SNPrintF(str, "%d.%d.%d.%d%s%s%s",
- GetMajor(), GetMinor(), GetBuild(), GetPatch(), GetEmbedder(),
- candidate, is_simulator);
+ SNPrintF(str, "%d.%d.%d.%d%s%s", GetMajor(), GetMinor(), GetBuild(),
+ GetPatch(), GetEmbedder(), candidate);
} else {
- SNPrintF(str, "%d.%d.%d%s%s%s",
- GetMajor(), GetMinor(), GetBuild(), GetEmbedder(), candidate,
- is_simulator);
+ SNPrintF(str, "%d.%d.%d%s%s", GetMajor(), GetMinor(), GetBuild(),
+ GetEmbedder(), candidate);
}
}
diff --git a/deps/v8/src/wasm/compilation-manager.cc b/deps/v8/src/wasm/compilation-manager.cc
index 01e0755e14..a19a228f1f 100644
--- a/deps/v8/src/wasm/compilation-manager.cc
+++ b/deps/v8/src/wasm/compilation-manager.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/wasm/compilation-manager.h"
+#include "src/base/template-utils.h"
#include "src/objects-inl.h"
@@ -10,19 +11,37 @@ namespace v8 {
namespace internal {
namespace wasm {
-void CompilationManager::StartAsyncCompileJob(
+AsyncCompileJob* CompilationManager::CreateAsyncCompileJob(
Isolate* isolate, std::unique_ptr<byte[]> bytes_copy, size_t length,
Handle<Context> context, Handle<JSPromise> promise) {
std::shared_ptr<AsyncCompileJob> job(new AsyncCompileJob(
isolate, std::move(bytes_copy), length, context, promise));
jobs_.insert({job.get(), job});
+ return job.get();
+}
+
+void CompilationManager::StartAsyncCompileJob(
+ Isolate* isolate, std::unique_ptr<byte[]> bytes_copy, size_t length,
+ Handle<Context> context, Handle<JSPromise> promise) {
+ AsyncCompileJob* job = CreateAsyncCompileJob(isolate, std::move(bytes_copy),
+ length, context, promise);
job->Start();
}
-void CompilationManager::RemoveJob(AsyncCompileJob* job) {
- size_t num_removed = jobs_.erase(job);
- USE(num_removed);
- DCHECK_EQ(1, num_removed);
+std::shared_ptr<StreamingDecoder> CompilationManager::StartStreamingCompilation(
+ Isolate* isolate, Handle<Context> context, Handle<JSPromise> promise) {
+ AsyncCompileJob* job = CreateAsyncCompileJob(
+ isolate, std::unique_ptr<byte[]>(nullptr), 0, context, promise);
+ return job->CreateStreamingDecoder();
+}
+
+std::shared_ptr<AsyncCompileJob> CompilationManager::RemoveJob(
+ AsyncCompileJob* job) {
+ auto item = jobs_.find(job);
+ DCHECK(item != jobs_.end());
+ std::shared_ptr<AsyncCompileJob> result = std::move(item->second);
+ jobs_.erase(item);
+ return result;
}
void CompilationManager::TearDown() { jobs_.clear(); }
diff --git a/deps/v8/src/wasm/compilation-manager.h b/deps/v8/src/wasm/compilation-manager.h
index 85b6fd5ce2..e359b11c26 100644
--- a/deps/v8/src/wasm/compilation-manager.h
+++ b/deps/v8/src/wasm/compilation-manager.h
@@ -26,12 +26,20 @@ class CompilationManager {
std::unique_ptr<byte[]> bytes_copy, size_t length,
Handle<Context> context, Handle<JSPromise> promise);
- // Removes {job} from the list of active compile jobs. This will delete {job}.
- void RemoveJob(AsyncCompileJob* job);
+ std::shared_ptr<StreamingDecoder> StartStreamingCompilation(
+ Isolate* isolate, Handle<Context> context, Handle<JSPromise> promise);
+
+ // Removes {job} from the list of active compile jobs.
+ std::shared_ptr<AsyncCompileJob> RemoveJob(AsyncCompileJob* job);
void TearDown();
private:
+ AsyncCompileJob* CreateAsyncCompileJob(Isolate* isolate,
+ std::unique_ptr<byte[]> bytes_copy,
+ size_t length, Handle<Context> context,
+ Handle<JSPromise> promise);
+
// We use an AsyncCompileJob as the key for itself so that we can delete the
// job from the map when it is finished.
std::unordered_map<AsyncCompileJob*, std::shared_ptr<AsyncCompileJob>> jobs_;
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index 9be5b1aedc..87373100f5 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -33,6 +33,12 @@ namespace wasm {
#define TRACE_IF(...)
#endif
+// A {DecodeResult} only stores the failure / success status, but no data. Thus
+// we use {nullptr_t} as data value, such that the only valid data stored in
+// this type is a nullptr.
+// Storing {void} would require template specialization.
+using DecodeResult = Result<std::nullptr_t>;
+
// A helper utility to decode bytes, integers, fields, varints, etc, from
// a buffer of bytes.
class Decoder {
@@ -355,6 +361,27 @@ class Decoder {
}
};
+// Reference to a string in the wire bytes.
+class WireBytesRef {
+ public:
+ WireBytesRef() : WireBytesRef(0, 0) {}
+ WireBytesRef(uint32_t offset, uint32_t length)
+ : offset_(offset), length_(length) {
+ DCHECK_IMPLIES(offset_ == 0, length_ == 0);
+ DCHECK_LE(offset_, offset_ + length_); // no uint32_t overflow.
+ }
+
+ uint32_t offset() const { return offset_; }
+ uint32_t length() const { return length_; }
+ uint32_t end_offset() const { return offset_ + length_; }
+ bool is_empty() const { return length_ == 0; }
+ bool is_set() const { return offset_ != 0; }
+
+ private:
+ uint32_t offset_;
+ uint32_t length_;
+};
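A hedged sketch of how such a reference would typically be resolved against the module's wire bytes; ResolveRef is an illustrative helper, not part of this patch:

  // Turn an (offset, length) reference back into a byte range. An unset ref
  // (offset 0) means "no bytes recorded", per is_set() above.
  Vector<const byte> ResolveRef(const byte* wire_bytes_start,
                                WireBytesRef ref) {
    if (!ref.is_set()) return Vector<const byte>();
    return Vector<const byte>(wire_bytes_start + ref.offset(), ref.length());
  }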
+
#undef TRACE
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 1a7278c78e..de17401752 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -31,36 +31,16 @@ struct WasmException;
#define TRACE(...)
#endif
-// Return the evaluation of `condition` if validate==true, DCHECK
-// and always return true otherwise.
+// Return the evaluation of `condition` if validate==true, DCHECK that it's
+// true and always return true otherwise.
#define VALIDATE(condition) \
(validate ? (condition) : [&] { \
DCHECK(condition); \
return true; \
}())
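Spelled out, for validate == false the macro turns a runtime check into a DCHECK plus a constant, so the surrounding error branch folds away:

  // if (!VALIDATE(index == 0)) { ... error ... }
  // expands (validate == false) to:
  // if (![&] { DCHECK(index == 0); return true; }()) { ... error ... }
  // The lambda always yields true, so the error branch is dead code.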
-// Return the evaluation of `condition` if validate==true, DCHECK that it's
-// false and always return false otherwise.
-#define CHECK_ERROR(condition) \
- (validate ? (condition) : [&] { \
- DCHECK(!(condition)); \
- return false; \
- }())
-
-// Use this macro to check a condition if checked == true, and DCHECK the
-// condition otherwise.
-// TODO(clemensh): Rename all "checked" to "validate" and replace
-// "CHECKED_COND" with "CHECK_ERROR".
-#define CHECKED_COND(cond) \
- (checked ? (cond) : ([&] { \
- DCHECK(cond); \
- return true; \
- })())
-
#define CHECK_PROTOTYPE_OPCODE(flag) \
- if (this->module_ != nullptr && this->module_->is_asm_js()) { \
- this->error("Opcode not supported for asmjs modules"); \
- } \
+ DCHECK(!this->module_ || !this->module_->is_asm_js()); \
if (!FLAG_experimental_wasm_##flag) { \
this->error("Invalid opcode (enable with --experimental-wasm-" #flag ")"); \
break; \
@@ -70,75 +50,106 @@ struct WasmException;
(this->errorf(this->pc_, "%s: %s", WasmOpcodes::OpcodeName(opcode), \
(message)))
+#define ATOMIC_OP_LIST(V) \
+ V(I32AtomicLoad, Uint32) \
+ V(I32AtomicAdd, Uint32) \
+ V(I32AtomicSub, Uint32) \
+ V(I32AtomicAnd, Uint32) \
+ V(I32AtomicOr, Uint32) \
+ V(I32AtomicXor, Uint32) \
+ V(I32AtomicExchange, Uint32) \
+ V(I32AtomicLoad8U, Uint8) \
+ V(I32AtomicAdd8U, Uint8) \
+ V(I32AtomicSub8U, Uint8) \
+ V(I32AtomicAnd8U, Uint8) \
+ V(I32AtomicOr8U, Uint8) \
+ V(I32AtomicXor8U, Uint8) \
+ V(I32AtomicExchange8U, Uint8) \
+ V(I32AtomicLoad16U, Uint16) \
+ V(I32AtomicAdd16U, Uint16) \
+ V(I32AtomicSub16U, Uint16) \
+ V(I32AtomicAnd16U, Uint16) \
+ V(I32AtomicOr16U, Uint16) \
+ V(I32AtomicXor16U, Uint16) \
+ V(I32AtomicExchange16U, Uint16) \
+ V(I32AtomicCompareExchange, Uint32) \
+ V(I32AtomicCompareExchange8U, Uint8) \
+ V(I32AtomicCompareExchange16U, Uint16)
+
+#define ATOMIC_STORE_OP_LIST(V) \
+ V(I32AtomicStore, Uint32) \
+ V(I32AtomicStore8U, Uint8) \
+ V(I32AtomicStore16U, Uint16)
+
template <typename T>
Vector<T> vec2vec(std::vector<T>& vec) {
return Vector<T>(vec.data(), vec.size());
}
// Helpers for decoding different kinds of operands which follow bytecodes.
-template <bool checked>
+template <bool validate>
struct LocalIndexOperand {
uint32_t index;
ValueType type = kWasmStmt;
unsigned length;
inline LocalIndexOperand(Decoder* decoder, const byte* pc) {
- index = decoder->read_u32v<checked>(pc + 1, &length, "local index");
+ index = decoder->read_u32v<validate>(pc + 1, &length, "local index");
}
};
-template <bool checked>
+template <bool validate>
struct ExceptionIndexOperand {
uint32_t index;
const WasmException* exception = nullptr;
unsigned length;
inline ExceptionIndexOperand(Decoder* decoder, const byte* pc) {
- index = decoder->read_u32v<checked>(pc + 1, &length, "exception index");
+ index = decoder->read_u32v<validate>(pc + 1, &length, "exception index");
}
};
-template <bool checked>
+template <bool validate>
struct ImmI32Operand {
int32_t value;
unsigned length;
inline ImmI32Operand(Decoder* decoder, const byte* pc) {
- value = decoder->read_i32v<checked>(pc + 1, &length, "immi32");
+ value = decoder->read_i32v<validate>(pc + 1, &length, "immi32");
}
};
-template <bool checked>
+template <bool validate>
struct ImmI64Operand {
int64_t value;
unsigned length;
inline ImmI64Operand(Decoder* decoder, const byte* pc) {
- value = decoder->read_i64v<checked>(pc + 1, &length, "immi64");
+ value = decoder->read_i64v<validate>(pc + 1, &length, "immi64");
}
};
-template <bool checked>
+template <bool validate>
struct ImmF32Operand {
float value;
unsigned length = 4;
inline ImmF32Operand(Decoder* decoder, const byte* pc) {
// Avoid bit_cast because it might not preserve the signalling bit of a NaN.
- uint32_t tmp = decoder->read_u32<checked>(pc + 1, "immf32");
+ uint32_t tmp = decoder->read_u32<validate>(pc + 1, "immf32");
memcpy(&value, &tmp, sizeof(value));
}
};
-template <bool checked>
+template <bool validate>
struct ImmF64Operand {
double value;
unsigned length = 8;
inline ImmF64Operand(Decoder* decoder, const byte* pc) {
// Avoid bit_cast because it might not preserve the signalling bit of a NaN.
- uint64_t tmp = decoder->read_u64<checked>(pc + 1, "immf64");
+ uint64_t tmp = decoder->read_u64<validate>(pc + 1, "immf64");
memcpy(&value, &tmp, sizeof(value));
}
};
-template <bool checked>
+template <bool validate>
struct GlobalIndexOperand {
uint32_t index;
ValueType type = kWasmStmt;
@@ -146,35 +157,36 @@ struct GlobalIndexOperand {
unsigned length;
inline GlobalIndexOperand(Decoder* decoder, const byte* pc) {
- index = decoder->read_u32v<checked>(pc + 1, &length, "global index");
+ index = decoder->read_u32v<validate>(pc + 1, &length, "global index");
}
};
-template <bool checked>
+template <bool validate>
struct BlockTypeOperand {
uint32_t arity = 0;
const byte* types = nullptr; // pointer to encoded types for the block.
unsigned length = 1;
inline BlockTypeOperand(Decoder* decoder, const byte* pc) {
- uint8_t val = decoder->read_u8<checked>(pc + 1, "block type");
+ uint8_t val = decoder->read_u8<validate>(pc + 1, "block type");
ValueType type = kWasmStmt;
if (decode_local_type(val, &type)) {
arity = type == kWasmStmt ? 0 : 1;
types = pc + 1;
} else {
// Handle multi-value blocks.
- if (!CHECKED_COND(FLAG_experimental_wasm_mv)) {
+ if (!VALIDATE(FLAG_experimental_wasm_mv)) {
decoder->error(pc + 1, "invalid block arity > 1");
return;
}
- if (!CHECKED_COND(val == kMultivalBlock)) {
+ if (!VALIDATE(val == kMultivalBlock)) {
decoder->error(pc + 1, "invalid block type");
return;
}
// Decode and check the types vector of the block.
unsigned len = 0;
- uint32_t count = decoder->read_u32v<checked>(pc + 2, &len, "block arity");
+ uint32_t count =
+ decoder->read_u32v<validate>(pc + 2, &len, "block arity");
// {count} is encoded as {arity-2}, so that a {0} count here corresponds
// to a block with 2 values. This makes invalid/redundant encodings
// impossible.
@@ -184,9 +196,9 @@ struct BlockTypeOperand {
for (uint32_t i = 0; i < arity; i++) {
uint32_t offset = 1 + 1 + len + i;
- val = decoder->read_u8<checked>(pc + offset, "block type");
+ val = decoder->read_u8<validate>(pc + offset, "block type");
decode_local_type(val, &type);
- if (!CHECKED_COND(type != kWasmStmt)) {
+ if (!VALIDATE(type != kWasmStmt)) {
decoder->error(pc + offset, "invalid block type");
return;
}
@@ -232,16 +244,16 @@ struct BlockTypeOperand {
}
};
-template <bool checked>
+template <bool validate>
struct BreakDepthOperand {
uint32_t depth;
unsigned length;
inline BreakDepthOperand(Decoder* decoder, const byte* pc) {
- depth = decoder->read_u32v<checked>(pc + 1, &length, "break depth");
+ depth = decoder->read_u32v<validate>(pc + 1, &length, "break depth");
}
};
-template <bool checked>
+template <bool validate>
struct CallIndirectOperand {
uint32_t table_index;
uint32_t index;
@@ -249,9 +261,9 @@ struct CallIndirectOperand {
unsigned length;
inline CallIndirectOperand(Decoder* decoder, const byte* pc) {
unsigned len = 0;
- index = decoder->read_u32v<checked>(pc + 1, &len, "signature index");
- table_index = decoder->read_u8<checked>(pc + 1 + len, "table index");
- if (!CHECKED_COND(table_index == 0)) {
+ index = decoder->read_u32v<validate>(pc + 1, &len, "signature index");
+ table_index = decoder->read_u8<validate>(pc + 1 + len, "table index");
+ if (!VALIDATE(table_index == 0)) {
decoder->errorf(pc + 1 + len, "expected table index 0, found %u",
table_index);
}
@@ -259,44 +271,44 @@ struct CallIndirectOperand {
}
};
-template <bool checked>
+template <bool validate>
struct CallFunctionOperand {
uint32_t index;
FunctionSig* sig = nullptr;
unsigned length;
inline CallFunctionOperand(Decoder* decoder, const byte* pc) {
- index = decoder->read_u32v<checked>(pc + 1, &length, "function index");
+ index = decoder->read_u32v<validate>(pc + 1, &length, "function index");
}
};
-template <bool checked>
+template <bool validate>
struct MemoryIndexOperand {
uint32_t index;
unsigned length = 1;
inline MemoryIndexOperand(Decoder* decoder, const byte* pc) {
- index = decoder->read_u8<checked>(pc + 1, "memory index");
- if (!CHECKED_COND(index == 0)) {
+ index = decoder->read_u8<validate>(pc + 1, "memory index");
+ if (!VALIDATE(index == 0)) {
decoder->errorf(pc + 1, "expected memory index 0, found %u", index);
}
}
};
-template <bool checked>
+template <bool validate>
struct BranchTableOperand {
uint32_t table_count;
const byte* start;
const byte* table;
inline BranchTableOperand(Decoder* decoder, const byte* pc) {
- DCHECK_EQ(kExprBrTable, decoder->read_u8<checked>(pc, "opcode"));
+ DCHECK_EQ(kExprBrTable, decoder->read_u8<validate>(pc, "opcode"));
start = pc + 1;
unsigned len = 0;
- table_count = decoder->read_u32v<checked>(pc + 1, &len, "table count");
+ table_count = decoder->read_u32v<validate>(pc + 1, &len, "table count");
table = pc + 1 + len;
}
};
// A helper to iterate over a branch table.
-template <bool checked>
+template <bool validate>
class BranchTableIterator {
public:
unsigned cur_index() { return index_; }
@@ -306,7 +318,7 @@ class BranchTableIterator {
index_++;
unsigned length;
uint32_t result =
- decoder_->read_u32v<checked>(pc_, &length, "branch table entry");
+ decoder_->read_u32v<validate>(pc_, &length, "branch table entry");
pc_ += length;
return result;
}
@@ -319,7 +331,7 @@ class BranchTableIterator {
const byte* pc() { return pc_; }
BranchTableIterator(Decoder* decoder,
- const BranchTableOperand<checked>& operand)
+ const BranchTableOperand<validate>& operand)
: decoder_(decoder),
start_(operand.start),
pc_(operand.table),
@@ -334,7 +346,7 @@ class BranchTableIterator {
uint32_t table_count_; // the count of entries, not including default.
};
-template <bool checked>
+template <bool validate>
struct MemoryAccessOperand {
uint32_t alignment;
uint32_t offset;
@@ -343,80 +355,74 @@ struct MemoryAccessOperand {
uint32_t max_alignment) {
unsigned alignment_length;
alignment =
- decoder->read_u32v<checked>(pc + 1, &alignment_length, "alignment");
- if (!CHECKED_COND(alignment <= max_alignment)) {
+ decoder->read_u32v<validate>(pc + 1, &alignment_length, "alignment");
+ if (!VALIDATE(alignment <= max_alignment)) {
decoder->errorf(pc + 1,
"invalid alignment; expected maximum alignment is %u, "
"actual alignment is %u",
max_alignment, alignment);
}
unsigned offset_length;
- offset = decoder->read_u32v<checked>(pc + 1 + alignment_length,
- &offset_length, "offset");
+ offset = decoder->read_u32v<validate>(pc + 1 + alignment_length,
+ &offset_length, "offset");
length = alignment_length + offset_length;
}
};
// Operand for SIMD lane operations.
-template <bool checked>
+template <bool validate>
struct SimdLaneOperand {
uint8_t lane;
unsigned length = 1;
inline SimdLaneOperand(Decoder* decoder, const byte* pc) {
- lane = decoder->read_u8<checked>(pc + 2, "lane");
+ lane = decoder->read_u8<validate>(pc + 2, "lane");
}
};
// Operand for SIMD shift operations.
-template <bool checked>
+template <bool validate>
struct SimdShiftOperand {
uint8_t shift;
unsigned length = 1;
inline SimdShiftOperand(Decoder* decoder, const byte* pc) {
- shift = decoder->read_u8<checked>(pc + 2, "shift");
+ shift = decoder->read_u8<validate>(pc + 2, "shift");
}
};
// Operand for SIMD S8x16 shuffle operations.
-template <bool checked>
+template <bool validate>
struct Simd8x16ShuffleOperand {
uint8_t shuffle[kSimd128Size];
inline Simd8x16ShuffleOperand(Decoder* decoder, const byte* pc) {
for (uint32_t i = 0; i < kSimd128Size; ++i) {
- shuffle[i] = decoder->read_u8<checked>(pc + 2 + i, "shuffle");
+ shuffle[i] = decoder->read_u8<validate>(pc + 2 + i, "shuffle");
}
}
};
// An entry on the value stack.
-template <typename Interface>
-struct AbstractValue {
+struct ValueBase {
const byte* pc;
ValueType type;
- typename Interface::IValue interface_data;
// Named constructors.
- static AbstractValue Unreachable(const byte* pc) {
- return {pc, kWasmVar, Interface::IValue::Unreachable()};
- }
+ static ValueBase Unreachable(const byte* pc) { return {pc, kWasmVar}; }
- static AbstractValue New(const byte* pc, ValueType type) {
- return {pc, type, Interface::IValue::New()};
- }
+ static ValueBase New(const byte* pc, ValueType type) { return {pc, type}; }
};
-template <typename Interface>
-struct AbstractMerge {
+template <typename Value>
+struct Merge {
uint32_t arity;
union {
- AbstractValue<Interface>* array;
- AbstractValue<Interface> first;
+ Value* array;
+ Value first;
} vals; // Either multiple values or a single value.
- AbstractValue<Interface>& operator[](size_t i) {
+ Value& operator[](uint32_t i) {
DCHECK_GT(arity, i);
return arity == 1 ? vals.first : vals.array[i];
}
@@ -432,16 +438,15 @@ enum ControlKind {
};
// An entry on the control stack (i.e. if, block, loop, or try).
-template <typename Interface>
-struct AbstractControl {
+template <typename Value>
+struct ControlBase {
const byte* pc;
ControlKind kind;
- size_t stack_depth; // stack height at the beginning of the construct.
- typename Interface::IControl interface_data;
- bool unreachable; // The current block has been ended.
+ uint32_t stack_depth; // stack height at the beginning of the construct.
+ bool unreachable; // The current block has been ended.
// Values merged into the end of this control construct.
- AbstractMerge<Interface> merge;
+ Merge<Value> merge;
inline bool is_if() const { return is_onearmed_if() || is_if_else(); }
inline bool is_onearmed_if() const { return kind == kControlIf; }
@@ -453,26 +458,59 @@ struct AbstractControl {
inline bool is_try_catch() const { return kind == kControlTryCatch; }
// Named constructors.
- static AbstractControl Block(const byte* pc, size_t stack_depth) {
- return {pc, kControlBlock, stack_depth, Interface::IControl::Block(), false,
- {}};
+ static ControlBase Block(const byte* pc, size_t stack_depth) {
+ return {pc, kControlBlock, static_cast<uint32_t>(stack_depth), false, {}};
}
- static AbstractControl If(const byte* pc, size_t stack_depth) {
- return {pc, kControlIf, stack_depth, Interface::IControl::If(), false, {}};
+ static ControlBase If(const byte* pc, size_t stack_depth) {
+ return {pc, kControlIf, static_cast<uint32_t>(stack_depth), false, {}};
}
- static AbstractControl Loop(const byte* pc, size_t stack_depth) {
- return {pc, kControlLoop, stack_depth, Interface::IControl::Loop(), false,
- {}};
+ static ControlBase Loop(const byte* pc, size_t stack_depth) {
+ return {pc, kControlLoop, static_cast<uint32_t>(stack_depth), false, {}};
}
- static AbstractControl Try(const byte* pc, size_t stack_depth) {
- return {pc, kControlTry, stack_depth, Interface::IControl::Try(),
- false, {}};
+ static ControlBase Try(const byte* pc, size_t stack_depth) {
+ return {pc, kControlTry, static_cast<uint32_t>(stack_depth), false, {}};
}
};
+#define CONCRETE_NAMED_CONSTRUCTOR(concrete_type, abstract_type, name) \
+ template <typename... Args> \
+ static concrete_type name(Args&&... args) { \
+ concrete_type val; \
+ static_cast<abstract_type&>(val) = \
+ abstract_type::name(std::forward<Args>(args)...); \
+ return val; \
+ }
+
+// Provide the default named constructors, which default-initialize the
+// ConcreteType and then initialize the fields of ValueBase correctly.
+// Use like this:
+// struct Value : public ValueWithNamedConstructors<Value> { int new_field; };
+template <typename ConcreteType>
+struct ValueWithNamedConstructors : public ValueBase {
+ // Named constructors.
+ CONCRETE_NAMED_CONSTRUCTOR(ConcreteType, ValueBase, Unreachable)
+ CONCRETE_NAMED_CONSTRUCTOR(ConcreteType, ValueBase, New)
+};
+
+// Provide the default named constructors, which default-initialize the
+// ConcreteType and then initialize the fields of ControlBase correctly.
+// Use like this:
+// struct Control : public ControlWithNamedConstructors<Control, Value> {
+// int my_uninitialized_field;
+// char* other_field = nullptr;
+// };
+template <typename ConcreteType, typename Value>
+struct ControlWithNamedConstructors : public ControlBase<Value> {
+ // Named constructors.
+ CONCRETE_NAMED_CONSTRUCTOR(ConcreteType, ControlBase<Value>, Block)
+ CONCRETE_NAMED_CONSTRUCTOR(ConcreteType, ControlBase<Value>, If)
+ CONCRETE_NAMED_CONSTRUCTOR(ConcreteType, ControlBase<Value>, Loop)
+ CONCRETE_NAMED_CONSTRUCTOR(ConcreteType, ControlBase<Value>, Try)
+};
+
// This is the list of callback functions that an interface for the
// WasmFullDecoder should implement.
// F(Name, args...)
@@ -508,8 +546,8 @@ struct AbstractControl {
F(Unreachable) \
F(Select, const Value& cond, const Value& fval, const Value& tval, \
Value* result) \
- F(BreakTo, Control* block) \
- F(BrIf, const Value& cond, Control* block) \
+ F(BreakTo, uint32_t depth) \
+ F(BrIf, const Value& cond, uint32_t depth) \
F(BrTable, const BranchTableOperand<validate>& operand, const Value& key) \
F(Else, Control* if_block) \
F(LoadMem, ValueType type, MachineType mem_type, \
@@ -532,9 +570,12 @@ struct AbstractControl {
const Value& input, Value* result) \
F(Simd8x16ShuffleOp, const Simd8x16ShuffleOperand<validate>& operand, \
const Value& input0, const Value& input1, Value* result) \
- F(Throw, const ExceptionIndexOperand<validate>&) \
- F(Catch, const ExceptionIndexOperand<validate>& operand, Control* block) \
- F(AtomicOp, WasmOpcode opcode, Vector<Value> args, Value* result)
+ F(Throw, const ExceptionIndexOperand<validate>&, Control* block, \
+ const Vector<Value>& args) \
+ F(CatchException, const ExceptionIndexOperand<validate>& operand, \
+ Control* block, Vector<Value> caught_values) \
+ F(AtomicOp, WasmOpcode opcode, Vector<Value> args, \
+ const MemoryAccessOperand<validate>& operand, Value* result)
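Each F(...) entry in the list above is stamped out once per callback by whatever macro the interface passes for F. A sketch of the expansion, assuming the list macro is the one this header later passes DEFINE_EMPTY_CALLBACK to:

    #define DEFINE_EMPTY_CALLBACK(name, ...) \
      void name(Decoder* decoder, ##__VA_ARGS__) {}
    INTERFACE_FUNCTIONS(DEFINE_EMPTY_CALLBACK)  // assumed name of the F-list
    #undef DEFINE_EMPTY_CALLBACK

so that, e.g., F(BrIf, const Value& cond, uint32_t depth) becomes
void BrIf(Decoder* decoder, const Value& cond, uint32_t depth) {}.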
// Generic Wasm bytecode decoder with utilities for decoding operands,
// lengths, etc.
@@ -552,8 +593,10 @@ class WasmDecoder : public Decoder {
ZoneVector<ValueType>* local_types_;
- size_t total_locals() const {
- return local_types_ == nullptr ? 0 : local_types_->size();
+ uint32_t total_locals() const {
+ return local_types_ == nullptr
+ ? 0
+ : static_cast<uint32_t>(local_types_->size());
}
static bool DecodeLocals(Decoder* decoder, const FunctionSig* sig,
@@ -595,8 +638,11 @@ class WasmDecoder : public Decoder {
type = kWasmF64;
break;
case kLocalS128:
- type = kWasmS128;
- break;
+ if (FLAG_experimental_wasm_simd) {
+ type = kWasmS128;
+ break;
+ }
+ // else fall through to default.
default:
decoder->error(decoder->pc() - 1, "invalid local type");
return false;
@@ -608,10 +654,12 @@ class WasmDecoder : public Decoder {
}
static BitVector* AnalyzeLoopAssignment(Decoder* decoder, const byte* pc,
- int locals_count, Zone* zone) {
+ uint32_t locals_count, Zone* zone) {
if (pc >= decoder->end()) return nullptr;
if (*pc != kExprLoop) return nullptr;
+    // locals_count is augmented by 2 so that 'locals_count - 2' can be used
+    // to track mem_size, and 'locals_count - 1' to track mem_start.
BitVector* assigned = new (zone) BitVector(locals_count, zone);
int depth = 0;
// Iteratively process all AST nodes nested inside the loop.
@@ -637,6 +685,14 @@ class WasmDecoder : public Decoder {
length = 1 + operand.length;
break;
}
+ case kExprGrowMemory:
+ case kExprCallFunction:
+ case kExprCallIndirect:
+ // Add mem_size and mem_start to the assigned set.
+ assigned->Add(locals_count - 2); // mem_size
+ assigned->Add(locals_count - 1); // mem_start
+ length = OpcodeLength(decoder, pc);
+ break;
case kExprEnd:
depth--;
break;
@@ -651,46 +707,43 @@ class WasmDecoder : public Decoder {
}
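A sketch of the augmented bit-vector layout this relies on (indices follow the '+ 2' convention documented above; names are illustrative):

    // Callers pass locals_count == total_locals() + 2, so:
    //   bits [0 .. locals_count - 3] -> ordinary wasm locals
    //   bit  locals_count - 2        -> mem_size
    //   bit  locals_count - 1        -> mem_start
    uint32_t num_locals = decoder->total_locals();
    BitVector* assigned = WasmDecoder<true>::AnalyzeLoopAssignment(
        decoder, pc, num_locals + 2, zone);
    bool memory_clobbered = assigned->Contains(num_locals) ||    // mem_size
                            assigned->Contains(num_locals + 1);  // mem_start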
inline bool Validate(const byte* pc, LocalIndexOperand<validate>& operand) {
- if (VALIDATE(operand.index < total_locals())) {
- if (local_types_) {
- operand.type = local_types_->at(operand.index);
- } else {
- operand.type = kWasmStmt;
- }
- return true;
+ if (!VALIDATE(operand.index < total_locals())) {
+ errorf(pc + 1, "invalid local index: %u", operand.index);
+ return false;
}
- errorf(pc + 1, "invalid local index: %u", operand.index);
- return false;
+ operand.type = local_types_ ? local_types_->at(operand.index) : kWasmStmt;
+ return true;
}
inline bool Validate(const byte* pc,
ExceptionIndexOperand<validate>& operand) {
- if (module_ != nullptr && operand.index < module_->exceptions.size()) {
- operand.exception = &module_->exceptions[operand.index];
- return true;
+ if (!VALIDATE(module_ != nullptr &&
+ operand.index < module_->exceptions.size())) {
+ errorf(pc + 1, "Invalid exception index: %u", operand.index);
+ return false;
}
- errorf(pc + 1, "Invalid exception index: %u", operand.index);
- return false;
+ operand.exception = &module_->exceptions[operand.index];
+ return true;
}
inline bool Validate(const byte* pc, GlobalIndexOperand<validate>& operand) {
- if (VALIDATE(module_ != nullptr &&
- operand.index < module_->globals.size())) {
- operand.global = &module_->globals[operand.index];
- operand.type = operand.global->type;
- return true;
+ if (!VALIDATE(module_ != nullptr &&
+ operand.index < module_->globals.size())) {
+ errorf(pc + 1, "invalid global index: %u", operand.index);
+ return false;
}
- errorf(pc + 1, "invalid global index: %u", operand.index);
- return false;
+ operand.global = &module_->globals[operand.index];
+ operand.type = operand.global->type;
+ return true;
}
inline bool Complete(const byte* pc, CallFunctionOperand<validate>& operand) {
- if (VALIDATE(module_ != nullptr &&
- operand.index < module_->functions.size())) {
- operand.sig = module_->functions[operand.index].sig;
- return true;
+ if (!VALIDATE(module_ != nullptr &&
+ operand.index < module_->functions.size())) {
+ return false;
}
- return false;
+ operand.sig = module_->functions[operand.index].sig;
+ return true;
}
inline bool Validate(const byte* pc, CallFunctionOperand<validate>& operand) {
@@ -702,38 +755,38 @@ class WasmDecoder : public Decoder {
}
inline bool Complete(const byte* pc, CallIndirectOperand<validate>& operand) {
- if (VALIDATE(module_ != nullptr &&
- operand.index < module_->signatures.size())) {
- operand.sig = module_->signatures[operand.index];
- return true;
+ if (!VALIDATE(module_ != nullptr &&
+ operand.index < module_->signatures.size())) {
+ return false;
}
- return false;
+ operand.sig = module_->signatures[operand.index];
+ return true;
}
inline bool Validate(const byte* pc, CallIndirectOperand<validate>& operand) {
- if (CHECK_ERROR(module_ == nullptr || module_->function_tables.empty())) {
+ if (!VALIDATE(module_ != nullptr && !module_->function_tables.empty())) {
error("function table has to exist to execute call_indirect");
return false;
}
- if (Complete(pc, operand)) {
- return true;
+ if (!Complete(pc, operand)) {
+ errorf(pc + 1, "invalid signature index: #%u", operand.index);
+ return false;
}
- errorf(pc + 1, "invalid signature index: #%u", operand.index);
- return false;
+ return true;
}
inline bool Validate(const byte* pc, BreakDepthOperand<validate>& operand,
size_t control_depth) {
- if (VALIDATE(operand.depth < control_depth)) {
- return true;
+ if (!VALIDATE(operand.depth < control_depth)) {
+ errorf(pc + 1, "invalid break depth: %u", operand.depth);
+ return false;
}
- errorf(pc + 1, "invalid break depth: %u", operand.depth);
- return false;
+ return true;
}
bool Validate(const byte* pc, BranchTableOperand<validate>& operand,
size_t block_depth) {
- if (CHECK_ERROR(operand.table_count >= kV8MaxWasmFunctionSize)) {
+ if (!VALIDATE(operand.table_count < kV8MaxWasmFunctionSize)) {
errorf(pc + 1, "invalid table count (> max function size): %u",
operand.table_count);
return false;
@@ -763,7 +816,7 @@ class WasmDecoder : public Decoder {
UNREACHABLE();
break;
}
- if (CHECK_ERROR(operand.lane < 0 || operand.lane >= num_lanes)) {
+ if (!VALIDATE(operand.lane >= 0 && operand.lane < num_lanes)) {
error(pc_ + 2, "invalid lane index");
return false;
} else {
@@ -794,7 +847,7 @@ class WasmDecoder : public Decoder {
UNREACHABLE();
break;
}
- if (CHECK_ERROR(operand.shift < 0 || operand.shift >= max_shift)) {
+ if (!VALIDATE(operand.shift >= 0 && operand.shift < max_shift)) {
error(pc_ + 2, "invalid shift amount");
return false;
} else {
@@ -808,12 +861,11 @@ class WasmDecoder : public Decoder {
for (uint32_t i = 0; i < kSimd128Size; ++i)
max_lane = std::max(max_lane, operand.shuffle[i]);
// Shuffle indices must be in [0..31] for a 16 lane shuffle.
- if (CHECK_ERROR(max_lane > 2 * kSimd128Size)) {
+ if (!VALIDATE(max_lane <= 2 * kSimd128Size)) {
error(pc_ + 2, "invalid shuffle mask");
return false;
- } else {
- return true;
}
+ return true;
}
static unsigned OpcodeLength(Decoder* decoder, const byte* pc) {
@@ -917,6 +969,23 @@ class WasmDecoder : public Decoder {
return 2;
}
}
+ case kAtomicPrefix: {
+ byte atomic_index = decoder->read_u8<validate>(pc + 1, "atomic_index");
+ WasmOpcode opcode =
+ static_cast<WasmOpcode>(kAtomicPrefix << 8 | atomic_index);
+ switch (opcode) {
+#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
+ FOREACH_ATOMIC_OPCODE(DECLARE_OPCODE_CASE)
+#undef DECLARE_OPCODE_CASE
+ {
+ MemoryAccessOperand<validate> operand(decoder, pc + 1, UINT32_MAX);
+ return 2 + operand.length;
+ }
+ default:
+ decoder->error(pc, "invalid Atomics opcode");
+ return 2;
+ }
+ }
default:
return 1;
}
@@ -993,9 +1062,9 @@ class WasmDecoder : public Decoder {
template <bool validate, typename Interface>
class WasmFullDecoder : public WasmDecoder<validate> {
- using Value = AbstractValue<Interface>;
- using Control = AbstractControl<Interface>;
- using MergeValues = AbstractMerge<Interface>;
+ using Value = typename Interface::Value;
+ using Control = typename Interface::Control;
+ using MergeValues = Merge<Value>;
// All Value and Control types should be trivially copyable for
// performance. We push and pop them, and store them in local variables.
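A class-scope guard along these lines (hypothetical, not in this patch; requires <type_traits>) would turn that requirement into a compile-time check:

    static_assert(std::is_trivially_copyable<Value>::value,
                  "Value should be trivially copyable");
    static_assert(std::is_trivially_copyable<Control>::value,
                  "Control should be trivially copyable");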
@@ -1118,9 +1187,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return &stack_[stack_.size() - depth - 1];
}
- inline const Value& GetMergeValueFromStack(Control* c, size_t i) {
+ inline Value& GetMergeValueFromStack(Control* c, uint32_t i) {
DCHECK_GT(c->merge.arity, i);
- DCHECK_GE(stack_.size(), c->merge.arity);
+ DCHECK_GE(stack_.size(), c->stack_depth + c->merge.arity);
return stack_[stack_.size() - c->merge.arity + i];
}
@@ -1137,9 +1206,19 @@ class WasmFullDecoder : public WasmDecoder<validate> {
bool last_end_found_;
bool CheckHasMemory() {
- if (VALIDATE(this->module_->has_memory)) return true;
- this->error(this->pc_ - 1, "memory instruction with no memory");
- return false;
+ if (!VALIDATE(this->module_->has_memory)) {
+ this->error(this->pc_ - 1, "memory instruction with no memory");
+ return false;
+ }
+ return true;
+ }
+
+ bool CheckHasSharedMemory() {
+ if (!VALIDATE(this->module_->has_shared_memory)) {
+ this->error(this->pc_ - 1, "Atomic opcodes used without shared memory");
+ return false;
+ }
+ return true;
}
// Decodes the body of a function.
@@ -1203,16 +1282,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
ExceptionIndexOperand<true> operand(this, this->pc_);
len = 1 + operand.length;
if (!this->Validate(this->pc_, operand)) break;
- if (operand.exception->sig->parameter_count() > 0) {
- // TODO(kschimpf): Fix to pull values off stack and build throw.
- OPCODE_ERROR(opcode, "can't handle exceptions with values yet");
- break;
- }
- interface_.Throw(this, operand);
- // TODO(titzer): Throw should end control, but currently we build a
- // (reachable) runtime call instead of connecting it directly to
- // end.
- // EndControl();
+ std::vector<Value> args;
+ PopArgs(operand.exception->ToFunctionSig(), &args);
+ interface_.Throw(this, operand, &control_.back(), vec2vec(args));
break;
}
case kExprTry: {
@@ -1232,26 +1304,31 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!this->Validate(this->pc_, operand)) break;
- if (CHECK_ERROR(control_.empty())) {
+ if (!VALIDATE(!control_.empty())) {
this->error("catch does not match any try");
break;
}
Control* c = &control_.back();
- if (CHECK_ERROR(!c->is_try())) {
+ if (!VALIDATE(c->is_try())) {
this->error("catch does not match any try");
break;
}
- if (CHECK_ERROR(c->is_try_catch())) {
+ if (!VALIDATE(c->is_incomplete_try())) {
OPCODE_ERROR(opcode, "multiple catch blocks not implemented");
break;
}
c->kind = kControlTryCatch;
FallThruTo(c);
stack_.resize(c->stack_depth);
-
- interface_.Catch(this, operand, c);
+ const WasmExceptionSig* sig = operand.exception->sig;
+ for (size_t i = 0, e = sig->parameter_count(); i < e; ++i) {
+ Push(sig->GetParam(i));
+ }
+ Vector<Value> values(stack_.data() + c->stack_depth,
+ sig->parameter_count());
+ interface_.CatchException(this, operand, c, values);
break;
}
case kExprCatchAll: {
@@ -1280,12 +1357,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
case kExprElse: {
- if (CHECK_ERROR(control_.empty())) {
+ if (!VALIDATE(!control_.empty())) {
this->error("else does not match any if");
break;
}
Control* c = &control_.back();
- if (CHECK_ERROR(!c->is_if())) {
+ if (!VALIDATE(c->is_if())) {
this->error(this->pc_, "else does not match an if");
break;
}
@@ -1300,7 +1377,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
case kExprEnd: {
- if (CHECK_ERROR(control_.empty())) {
+ if (!VALIDATE(!control_.empty())) {
this->error("end does not match any if, try, or block");
return;
}
@@ -1308,21 +1385,20 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (c->is_loop()) {
// A loop just leaves the values on the stack.
TypeCheckFallThru(c);
- if (c->unreachable) PushEndValues(c);
PopControl(c);
break;
}
if (c->is_onearmed_if()) {
// End the true branch of a one-armed if.
- if (CHECK_ERROR(!c->unreachable &&
- stack_.size() != c->stack_depth)) {
+ if (!VALIDATE(c->unreachable ||
+ stack_.size() == c->stack_depth)) {
this->error("end of if expected empty stack");
stack_.resize(c->stack_depth);
}
- if (CHECK_ERROR(c->merge.arity > 0)) {
+ if (!VALIDATE(c->merge.arity == 0)) {
this->error("non-void one-armed if");
}
- } else if (CHECK_ERROR(c->is_incomplete_try())) {
+ } else if (!VALIDATE(!c->is_incomplete_try())) {
this->error(this->pc_, "missing catch in try");
break;
}
@@ -1331,21 +1407,18 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (control_.size() == 1) {
// If at the last (implicit) control, check we are at end.
- if (CHECK_ERROR(this->pc_ + 1 != this->end_)) {
+ if (!VALIDATE(this->pc_ + 1 == this->end_)) {
this->error(this->pc_ + 1, "trailing code after function end");
break;
}
last_end_found_ = true;
- if (c->unreachable) {
- TypeCheckFallThru(c);
- } else {
- // The result of the block is the return value.
- TRACE(" @%-8d #xx:%-20s|", startrel(this->pc_),
- "(implicit) return");
- DoReturn();
- TRACE("\n");
- }
+ // The result of the block is the return value.
+ TRACE(" @%-8d #xx:%-20s|", startrel(this->pc_),
+ "(implicit) return");
+ DoReturn();
+ TRACE("\n");
}
+
PopControl(c);
break;
}
@@ -1359,9 +1432,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kExprBr: {
BreakDepthOperand<validate> operand(this, this->pc_);
- if (VALIDATE(this->Validate(this->pc_, operand, control_.size()) &&
- TypeCheckBreak(operand.depth))) {
- interface_.BreakTo(this, control_at(operand.depth));
+ if (this->Validate(this->pc_, operand, control_.size()) &&
+ TypeCheckBreak(operand.depth)) {
+ interface_.BreakTo(this, operand.depth);
}
len = 1 + operand.length;
EndControl();
@@ -1370,10 +1443,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
case kExprBrIf: {
BreakDepthOperand<validate> operand(this, this->pc_);
auto cond = Pop(0, kWasmI32);
- if (VALIDATE(this->ok() &&
- this->Validate(this->pc_, operand, control_.size()) &&
- TypeCheckBreak(operand.depth))) {
- interface_.BrIf(this, cond, control_at(operand.depth));
+ if (this->Validate(this->pc_, operand, control_.size()) &&
+ TypeCheckBreak(operand.depth)) {
+ interface_.BrIf(this, cond, operand.depth);
}
len = 1 + operand.length;
break;
@@ -1383,42 +1455,29 @@ class WasmFullDecoder : public WasmDecoder<validate> {
BranchTableIterator<validate> iterator(this, operand);
if (!this->Validate(this->pc_, operand, control_.size())) break;
auto key = Pop(0, kWasmI32);
- MergeValues* merge = nullptr;
+ uint32_t br_arity = 0;
while (iterator.has_next()) {
const uint32_t i = iterator.cur_index();
const byte* pos = iterator.pc();
uint32_t target = iterator.next();
- if (CHECK_ERROR(target >= control_.size())) {
+ if (!VALIDATE(target < control_.size())) {
this->error(pos, "improper branch in br_table");
break;
}
// Check that label types match up.
- static MergeValues loop_dummy = {0, {nullptr}};
Control* c = control_at(target);
- MergeValues* current = c->is_loop() ? &loop_dummy : &c->merge;
+ uint32_t arity = c->is_loop() ? 0 : c->merge.arity;
if (i == 0) {
- merge = current;
- } else if (CHECK_ERROR(merge->arity != current->arity)) {
+ br_arity = arity;
+ } else if (!VALIDATE(br_arity == arity)) {
this->errorf(pos,
"inconsistent arity in br_table target %d"
" (previous was %u, this one %u)",
- i, merge->arity, current->arity);
- } else if (control_at(0)->unreachable) {
- for (uint32_t j = 0; VALIDATE(this->ok()) && j < merge->arity;
- ++j) {
- if (CHECK_ERROR((*merge)[j].type != (*current)[j].type)) {
- this->errorf(pos,
- "type error in br_table target %d operand %d"
- " (previous expected %s, this one %s)",
- i, j, WasmOpcodes::TypeName((*merge)[j].type),
- WasmOpcodes::TypeName((*current)[j].type));
- }
- }
+ i, br_arity, arity);
}
- bool valid = TypeCheckBreak(target);
- if (CHECK_ERROR(!valid)) break;
+ if (!VALIDATE(TypeCheckBreak(target))) break;
}
- if (CHECK_ERROR(this->failed())) break;
+ if (!VALIDATE(this->ok())) break;
if (operand.table_count > 0) {
interface_.BrTable(this, operand, key);
@@ -1427,11 +1486,11 @@ class WasmFullDecoder : public WasmDecoder<validate> {
BranchTableIterator<validate> iterator(this, operand);
const byte* pos = iterator.pc();
uint32_t target = iterator.next();
- if (CHECK_ERROR(target >= control_.size())) {
+ if (!VALIDATE(target < control_.size())) {
this->error(pos, "improper branch in br_table");
break;
}
- interface_.BreakTo(this, control_at(target));
+ interface_.BreakTo(this, target);
}
len = 1 + iterator.length();
EndControl();
@@ -1515,7 +1574,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
GlobalIndexOperand<validate> operand(this, this->pc_);
len = 1 + operand.length;
if (!this->Validate(this->pc_, operand)) break;
- if (CHECK_ERROR(!operand.global->mutability)) {
+ if (!VALIDATE(operand.global->mutability)) {
this->errorf(this->pc_, "immutable global #%u cannot be assigned",
operand.index);
break;
@@ -1598,7 +1657,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
MemoryIndexOperand<validate> operand(this, this->pc_);
len = 1 + operand.length;
DCHECK_NOT_NULL(this->module_);
- if (CHECK_ERROR(!this->module_->is_wasm())) {
+ if (!VALIDATE(this->module_->is_wasm())) {
this->error("grow_memory is not supported for asmjs modules");
break;
}
@@ -1651,6 +1710,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kAtomicPrefix: {
CHECK_PROTOTYPE_OPCODE(threads);
+ if (!CheckHasSharedMemory()) break;
len++;
byte atomic_index =
this->template read_u8<validate>(this->pc_ + 1, "atomic index");
@@ -1715,14 +1775,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
PrintF("[%d]", operand.value);
break;
}
- case kExprGetLocal: {
+ case kExprGetLocal:
+ case kExprSetLocal:
+ case kExprTeeLocal: {
LocalIndexOperand<validate> operand(this, val.pc);
PrintF("[%u]", operand.index);
break;
}
- case kExprSetLocal: // fallthru
- case kExprTeeLocal: {
- LocalIndexOperand<validate> operand(this, val.pc);
+ case kExprGetGlobal:
+ case kExprSetGlobal: {
+ GlobalIndexOperand<validate> operand(this, val.pc);
PrintF("[%u]", operand.index);
break;
}
@@ -1932,7 +1994,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
default: {
FunctionSig* sig = WasmOpcodes::Signature(opcode);
- if (CHECK_ERROR(sig == nullptr)) {
+ if (!VALIDATE(sig != nullptr)) {
this->error("invalid simd opcode");
break;
}
@@ -1948,15 +2010,43 @@ class WasmFullDecoder : public WasmDecoder<validate> {
unsigned DecodeAtomicOpcode(WasmOpcode opcode) {
unsigned len = 0;
+ ValueType ret_type;
FunctionSig* sig = WasmOpcodes::AtomicSignature(opcode);
if (sig != nullptr) {
+ MachineType memtype;
+ switch (opcode) {
+#define CASE_ATOMIC_STORE_OP(Name, Type) \
+ case kExpr##Name: { \
+ memtype = MachineType::Type(); \
+ ret_type = MachineRepresentation::kNone; \
+ break; \
+ }
+ ATOMIC_STORE_OP_LIST(CASE_ATOMIC_STORE_OP)
+#undef CASE_ATOMIC_STORE_OP
+#define CASE_ATOMIC_OP(Name, Type) \
+ case kExpr##Name: { \
+ memtype = MachineType::Type(); \
+ ret_type = GetReturnType(sig); \
+ break; \
+ }
+ ATOMIC_OP_LIST(CASE_ATOMIC_OP)
+#undef CASE_ATOMIC_OP
+ default:
+ this->error("invalid atomic opcode");
+ break;
+ }
// TODO(clemensh): Better memory management here.
std::vector<Value> args(sig->parameter_count());
+ MemoryAccessOperand<validate> operand(
+ this, this->pc_ + 1, ElementSizeLog2Of(memtype.representation()));
+ len += operand.length;
for (int i = static_cast<int>(sig->parameter_count() - 1); i >= 0; --i) {
args[i] = Pop(i, sig->GetParam(i));
}
- auto* result = Push(GetReturnType(sig));
- interface_.AtomicOp(this, opcode, vec2vec(args), result);
+ auto result = ret_type == MachineRepresentation::kNone
+ ? nullptr
+ : Push(GetReturnType(sig));
+ interface_.AtomicOp(this, opcode, vec2vec(args), operand, result);
} else {
this->error("invalid atomic opcode");
}
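The third constructor argument is the maximum alignment (log2) allowed for the access, derived from the access width; as an illustration (opcode and types assumed):

    // For a 32-bit atomic access, memtype == MachineType::Uint32(), so
    // ElementSizeLog2Of(memtype.representation()) == 2 and the operand may
    // declare an alignment of at most 2^2 = 4 bytes.
    MemoryAccessOperand<validate> operand(this, this->pc_ + 1, 2);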
@@ -1979,7 +2069,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
inline Value* Push(ValueType type) {
- DCHECK(type != kWasmStmt);
+ DCHECK_NE(kWasmStmt, type);
stack_.push_back(Value::New(this->pc_, type));
return &stack_.back();
}
@@ -2009,8 +2099,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value Pop(int index, ValueType expected) {
auto val = Pop();
- if (CHECK_ERROR(val.type != expected && val.type != kWasmVar &&
- expected != kWasmVar)) {
+ if (!VALIDATE(val.type == expected || val.type == kWasmVar ||
+ expected == kWasmVar)) {
this->errorf(val.pc, "%s[%d] expected type %s, found %s of type %s",
SafeOpcodeNameAt(this->pc_), index,
WasmOpcodes::TypeName(expected), SafeOpcodeNameAt(val.pc),
@@ -2021,10 +2111,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value Pop() {
DCHECK(!control_.empty());
- size_t limit = control_.back().stack_depth;
+ uint32_t limit = control_.back().stack_depth;
if (stack_.size() <= limit) {
// Popping past the current control start in reachable code.
- if (CHECK_ERROR(!control_.back().unreachable)) {
+ if (!VALIDATE(control_.back().unreachable)) {
this->errorf(this->pc_, "%s found empty stack",
SafeOpcodeNameAt(this->pc_));
}
@@ -2037,26 +2127,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
int startrel(const byte* ptr) { return static_cast<int>(ptr - this->start_); }
- bool TypeCheckBreak(unsigned depth) {
- DCHECK(validate); // Only call this for validation.
- Control* c = control_at(depth);
- if (c->is_loop()) {
- // This is the inner loop block, which does not have a value.
- return true;
- }
- size_t expected = control_.back().stack_depth + c->merge.arity;
- if (stack_.size() < expected && !control_.back().unreachable) {
- this->errorf(
- this->pc_,
- "expected at least %u values on the stack for br to @%d, found %d",
- c->merge.arity, startrel(c->pc),
- static_cast<int>(stack_.size() - c->stack_depth));
- return false;
- }
-
- return TypeCheckMergeValues(c);
- }
-
void FallThruTo(Control* c) {
DCHECK_EQ(c, &control_.back());
if (!TypeCheckFallThru(c)) return;
@@ -2066,17 +2136,22 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
bool TypeCheckMergeValues(Control* c) {
- // Typecheck the values left on the stack.
- size_t avail = stack_.size() - c->stack_depth;
- size_t start = avail >= c->merge.arity ? 0 : c->merge.arity - avail;
- for (size_t i = start; i < c->merge.arity; ++i) {
+ DCHECK_GE(stack_.size(), c->stack_depth + c->merge.arity);
+ // Typecheck the topmost {c->merge.arity} values on the stack.
+ for (uint32_t i = 0; i < c->merge.arity; ++i) {
auto& val = GetMergeValueFromStack(c, i);
auto& old = c->merge[i];
- if (val.type != old.type && val.type != kWasmVar) {
- this->errorf(
- this->pc_, "type error in merge[%zu] (expected %s, got %s)", i,
- WasmOpcodes::TypeName(old.type), WasmOpcodes::TypeName(val.type));
- return false;
+ if (val.type != old.type) {
+ // If {val.type} is polymorphic, which results from unreachable, make
+ // it more specific by using the merge value's expected type.
+ // If it is not polymorphic, this is a type error.
+ if (!VALIDATE(val.type == kWasmVar)) {
+ this->errorf(
+ this->pc_, "type error in merge[%u] (expected %s, got %s)", i,
+ WasmOpcodes::TypeName(old.type), WasmOpcodes::TypeName(val.type));
+ return false;
+ }
+ val.type = old.type;
}
}
@@ -2086,19 +2161,59 @@ class WasmFullDecoder : public WasmDecoder<validate> {
bool TypeCheckFallThru(Control* c) {
DCHECK_EQ(c, &control_.back());
if (!validate) return true;
- // Fallthru must match arity exactly.
- size_t expected = c->stack_depth + c->merge.arity;
- if (stack_.size() != expected &&
- (stack_.size() > expected || !c->unreachable)) {
- this->errorf(this->pc_,
- "expected %u elements on the stack for fallthru to @%d",
- c->merge.arity, startrel(c->pc));
+ uint32_t expected = c->merge.arity;
+ DCHECK_GE(stack_.size(), c->stack_depth);
+ uint32_t actual = static_cast<uint32_t>(stack_.size()) - c->stack_depth;
+ // Fallthrus must match the arity of the control exactly.
+ if (!InsertUnreachablesIfNecessary(expected, actual) || actual > expected) {
+ this->errorf(
+ this->pc_,
+ "expected %u elements on the stack for fallthru to @%d, found %u",
+ expected, startrel(c->pc), actual);
return false;
}
return TypeCheckMergeValues(c);
}
+ bool TypeCheckBreak(unsigned depth) {
+ Control* c = control_at(depth);
+ if (c->is_loop()) {
+ // This is the inner loop block, which does not have a value.
+ return true;
+ }
+ // Breaks must have at least the number of values expected; can have more.
+ uint32_t expected = c->merge.arity;
+ DCHECK_GE(stack_.size(), control_.back().stack_depth);
+ uint32_t actual =
+ static_cast<uint32_t>(stack_.size()) - control_.back().stack_depth;
+ if (!InsertUnreachablesIfNecessary(expected, actual)) {
+ this->errorf(this->pc_,
+ "expected %u elements on the stack for br to @%d, found %u",
+ expected, startrel(c->pc), actual);
+ return false;
+ }
+ return TypeCheckMergeValues(c);
+ }
+
+ inline bool InsertUnreachablesIfNecessary(uint32_t expected,
+ uint32_t actual) {
+ if (V8_LIKELY(actual >= expected)) {
+ return true; // enough actual values are there.
+ }
+ if (!VALIDATE(control_.back().unreachable)) {
+ // There aren't enough values on the stack.
+ return false;
+ }
+    // Slow path: when the actual number of values on the stack is less
+ // than the expected number of values and the current control is
+ // unreachable, insert unreachable values below the actual values.
+ // This simplifies {TypeCheckMergeValues}.
+ auto pos = stack_.begin() + (stack_.size() - actual);
+ stack_.insert(pos, (expected - actual), Value::Unreachable(this->pc_));
+ return true;
+ }
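A worked example of the slow path, under the conditions above (expected == 3, actual == 1, current control unreachable):

    // stack before: [..., v]          one real value on top
    // insert point: stack_.begin() + (stack_.size() - 1)
    // stack after:  [..., U, U, v]    U == Value::Unreachable(pc)
    // TypeCheckMergeValues can now read merge.arity values uniformly.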
+
virtual void onFirstError() {
this->end_ = this->pc_; // Terminate decoding loop.
TRACE(" !%s\n", this->error_msg_.c_str());
@@ -2127,28 +2242,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
};
-template <bool decoder_validate, typename Interface>
-class InterfaceTemplate {
+class EmptyInterface {
public:
- constexpr static bool validate = decoder_validate;
- using Decoder = WasmFullDecoder<validate, Interface>;
- using Control = AbstractControl<Interface>;
- using Value = AbstractValue<Interface>;
- using MergeValues = AbstractMerge<Interface>;
-};
-
-class EmptyInterface : public InterfaceTemplate<true, EmptyInterface> {
- public:
- struct IValue {
- static IValue Unreachable() { return {}; }
- static IValue New() { return {}; }
- };
- struct IControl {
- static IControl Block() { return {}; }
- static IControl If() { return {}; }
- static IControl Loop() { return {}; }
- static IControl Try() { return {}; }
- };
+ constexpr static bool validate = true;
+ using Value = ValueBase;
+ using Control = ControlBase<Value>;
+ using Decoder = WasmFullDecoder<validate, EmptyInterface>;
#define DEFINE_EMPTY_CALLBACK(name, ...) \
void name(Decoder* decoder, ##__VA_ARGS__) {}
@@ -2156,12 +2255,10 @@ class EmptyInterface : public InterfaceTemplate<true, EmptyInterface> {
#undef DEFINE_EMPTY_CALLBACK
};
-#undef CHECKED_COND
-#undef VALIDATE
-#undef CHECK_ERROR
#undef TRACE
+#undef VALIDATE
#undef CHECK_PROTOTYPE_OPCODE
-#undef PROTOTYPE_NOT_FUNCTIONAL
+#undef OPCODE_ERROR
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index bb3fc544bb..bcd57fe616 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -27,6 +27,11 @@ namespace wasm {
namespace {
+template <typename T>
+Vector<T> vec2vec(ZoneVector<T>& vec) {
+ return Vector<T>(vec.data(), vec.size());
+}
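A usage sketch of the adapter (names illustrative):

    ZoneVector<TFNode*> args(count, decoder->zone());
    Vector<TFNode*> view = vec2vec(args);  // non-owning view of the same storage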
+
// An SsaEnv environment carries the current local variable renaming
// as well as the current effect and control dependency in the TF graph.
// It maintains a control state that tracks whether the environment
@@ -37,6 +42,8 @@ struct SsaEnv {
State state;
TFNode* control;
TFNode* effect;
+ TFNode* mem_size;
+ TFNode* mem_start;
TFNode** locals;
bool go() { return state >= kReached; }
@@ -45,6 +52,8 @@ struct SsaEnv {
locals = nullptr;
control = nullptr;
effect = nullptr;
+ mem_size = nullptr;
+ mem_start = nullptr;
}
void SetNotMerged() {
if (state == kMerged) state = kReached;
@@ -60,14 +69,12 @@ struct SsaEnv {
constexpr uint32_t kNullCatch = static_cast<uint32_t>(-1);
-class WasmGraphBuildingInterface
- : public InterfaceTemplate<true, WasmGraphBuildingInterface> {
+class WasmGraphBuildingInterface {
public:
- struct IValue {
- TFNode* node;
+ using Decoder = WasmFullDecoder<true, WasmGraphBuildingInterface>;
- static IValue Unreachable() { return {nullptr}; }
- static IValue New() { return {nullptr}; }
+ struct Value : public ValueWithNamedConstructors<Value> {
+ TFNode* node;
};
struct TryInfo : public ZoneObject {
@@ -77,16 +84,11 @@ class WasmGraphBuildingInterface
explicit TryInfo(SsaEnv* c) : catch_env(c), exception(nullptr) {}
};
- struct IControl {
+ struct Control : public ControlWithNamedConstructors<Control, Value> {
SsaEnv* end_env; // end environment for the construct.
SsaEnv* false_env; // false environment (only for if).
TryInfo* try_info; // information used for compiling try statements.
int32_t previous_catch; // previous Control (on the stack) with a catch.
-
- static IControl Block() { return {}; }
- static IControl If() { return {}; }
- static IControl Loop() { return {}; }
- static IControl Try() { return {}; }
};
explicit WasmGraphBuildingInterface(TFBuilder* builder) : builder_(builder) {}
@@ -94,37 +96,61 @@ class WasmGraphBuildingInterface
void StartFunction(Decoder* decoder) {
SsaEnv* ssa_env =
reinterpret_cast<SsaEnv*>(decoder->zone()->New(sizeof(SsaEnv)));
- uint32_t env_count = decoder->NumLocals();
+ uint32_t num_locals = decoder->NumLocals();
+    // The '+ 2' here is to accommodate the mem_size and mem_start nodes.
+ uint32_t env_count = num_locals + 2;
size_t size = sizeof(TFNode*) * env_count;
ssa_env->state = SsaEnv::kReached;
ssa_env->locals =
size > 0 ? reinterpret_cast<TFNode**>(decoder->zone()->New(size))
: nullptr;
- TFNode* start =
- builder_->Start(static_cast<int>(decoder->sig_->parameter_count() + 1));
- // Initialize local variables.
+    // The first '+ 1' is needed by the TF Start node; the second '+ 1' is
+    // for the wasm_context parameter.
+ TFNode* start = builder_->Start(
+ static_cast<int>(decoder->sig_->parameter_count() + 1 + 1));
+    // Initialize the wasm_context (the parameter at index 0).
+ builder_->set_wasm_context(
+ builder_->Param(compiler::kWasmContextParameterIndex));
+    // Initialize local variables. Parameters are shifted by 1 because of
+    // the wasm_context.
uint32_t index = 0;
for (; index < decoder->sig_->parameter_count(); ++index) {
- ssa_env->locals[index] = builder_->Param(index);
+ ssa_env->locals[index] = builder_->Param(index + 1);
}
- while (index < env_count) {
+ while (index < num_locals) {
ValueType type = decoder->GetLocalType(index);
TFNode* node = DefaultValue(type);
- while (index < env_count && decoder->GetLocalType(index) == type) {
+ while (index < num_locals && decoder->GetLocalType(index) == type) {
// Do a whole run of like-typed locals at a time.
ssa_env->locals[index++] = node;
}
}
- ssa_env->control = start;
ssa_env->effect = start;
+ ssa_env->control = start;
+ // Initialize effect and control before loading the context.
+ builder_->set_effect_ptr(&ssa_env->effect);
+ builder_->set_control_ptr(&ssa_env->control);
+ // Always load mem_size and mem_start from the WasmContext into the ssa.
+ LoadContextIntoSsa(ssa_env);
SetEnv(ssa_env);
}
+ // Reload the wasm context variables from the WasmContext structure attached
+  // to the memory object into the SSA environment. This does not automatically
+ // set the mem_size_ and mem_start_ pointers in WasmGraphBuilder.
+ void LoadContextIntoSsa(SsaEnv* ssa_env) {
+ if (!ssa_env || !ssa_env->go()) return;
+ DCHECK_NOT_NULL(builder_->Effect());
+ DCHECK_NOT_NULL(builder_->Control());
+ ssa_env->mem_size = builder_->LoadMemSize();
+ ssa_env->mem_start = builder_->LoadMemStart();
+ }
+
void StartFunctionBody(Decoder* decoder, Control* block) {
SsaEnv* break_env = ssa_env_;
SetEnv(Steal(decoder->zone(), break_env));
- block->interface_data.end_env = break_env;
+ block->end_env = break_env;
}
void FinishFunction(Decoder* decoder) {
@@ -133,13 +159,13 @@ class WasmGraphBuildingInterface
void Block(Decoder* decoder, Control* block) {
// The break environment is the outer environment.
- block->interface_data.end_env = ssa_env_;
+ block->end_env = ssa_env_;
SetEnv(Steal(decoder->zone(), ssa_env_));
}
void Loop(Decoder* decoder, Control* block) {
SsaEnv* finish_try_env = Steal(decoder->zone(), ssa_env_);
- block->interface_data.end_env = finish_try_env;
+ block->end_env = finish_try_env;
// The continue environment is the inner environment.
SetEnv(PrepareForLoop(decoder, finish_try_env));
ssa_env_->SetNotMerged();
@@ -147,39 +173,41 @@ class WasmGraphBuildingInterface
void Try(Decoder* decoder, Control* block) {
SsaEnv* outer_env = ssa_env_;
+ SsaEnv* catch_env = Split(decoder, outer_env);
+    // Mark the catch environment as unreachable, since it is only accessible
+    // through catch unwinding (i.e. landing pads).
+ catch_env->state = SsaEnv::kUnreachable;
SsaEnv* try_env = Steal(decoder->zone(), outer_env);
- SsaEnv* catch_env = UnreachableEnv(decoder->zone());
SetEnv(try_env);
TryInfo* try_info = new (decoder->zone()) TryInfo(catch_env);
- block->interface_data.end_env = outer_env;
- block->interface_data.try_info = try_info;
- block->interface_data.previous_catch = current_catch_;
+ block->end_env = outer_env;
+ block->try_info = try_info;
+ block->previous_catch = current_catch_;
current_catch_ = static_cast<int32_t>(decoder->control_depth() - 1);
}
void If(Decoder* decoder, const Value& cond, Control* if_block) {
TFNode* if_true = nullptr;
TFNode* if_false = nullptr;
- BUILD(BranchNoHint, cond.interface_data.node, &if_true, &if_false);
+ BUILD(BranchNoHint, cond.node, &if_true, &if_false);
SsaEnv* end_env = ssa_env_;
SsaEnv* false_env = Split(decoder, ssa_env_);
false_env->control = if_false;
SsaEnv* true_env = Steal(decoder->zone(), ssa_env_);
true_env->control = if_true;
- if_block->interface_data.end_env = end_env;
- if_block->interface_data.false_env = false_env;
+ if_block->end_env = end_env;
+ if_block->false_env = false_env;
SetEnv(true_env);
}
void FallThruTo(Decoder* decoder, Control* c) {
MergeValuesInto(decoder, c);
- SetEnv(c->interface_data.end_env);
+ SetEnv(c->end_env);
}
void PopControl(Decoder* decoder, Control& block) {
if (block.is_onearmed_if()) {
- Goto(decoder, block.interface_data.false_env,
- block.interface_data.end_env);
+ Goto(decoder, block.false_env, block.end_env);
}
}
@@ -187,38 +215,36 @@ class WasmGraphBuildingInterface
void UnOp(Decoder* decoder, WasmOpcode opcode, FunctionSig* sig,
const Value& value, Value* result) {
- result->interface_data.node =
- BUILD(Unop, opcode, value.interface_data.node, decoder->position());
+ result->node = BUILD(Unop, opcode, value.node, decoder->position());
}
void BinOp(Decoder* decoder, WasmOpcode opcode, FunctionSig* sig,
const Value& lhs, const Value& rhs, Value* result) {
- result->interface_data.node =
- BUILD(Binop, opcode, lhs.interface_data.node, rhs.interface_data.node,
- decoder->position());
+ result->node =
+ BUILD(Binop, opcode, lhs.node, rhs.node, decoder->position());
}
void I32Const(Decoder* decoder, Value* result, int32_t value) {
- result->interface_data.node = builder_->Int32Constant(value);
+ result->node = builder_->Int32Constant(value);
}
void I64Const(Decoder* decoder, Value* result, int64_t value) {
- result->interface_data.node = builder_->Int64Constant(value);
+ result->node = builder_->Int64Constant(value);
}
void F32Const(Decoder* decoder, Value* result, float value) {
- result->interface_data.node = builder_->Float32Constant(value);
+ result->node = builder_->Float32Constant(value);
}
void F64Const(Decoder* decoder, Value* result, double value) {
- result->interface_data.node = builder_->Float64Constant(value);
+ result->node = builder_->Float64Constant(value);
}
void DoReturn(Decoder* decoder, Vector<Value> values) {
size_t num_values = values.size();
TFNode** buffer = GetNodes(values);
for (size_t i = 0; i < num_values; ++i) {
- buffer[i] = values[i].interface_data.node;
+ buffer[i] = values[i].node;
}
BUILD(Return, static_cast<unsigned>(values.size()), buffer);
}
@@ -226,30 +252,30 @@ class WasmGraphBuildingInterface
void GetLocal(Decoder* decoder, Value* result,
const LocalIndexOperand<true>& operand) {
if (!ssa_env_->locals) return; // unreachable
- result->interface_data.node = ssa_env_->locals[operand.index];
+ result->node = ssa_env_->locals[operand.index];
}
void SetLocal(Decoder* decoder, const Value& value,
const LocalIndexOperand<true>& operand) {
if (!ssa_env_->locals) return; // unreachable
- ssa_env_->locals[operand.index] = value.interface_data.node;
+ ssa_env_->locals[operand.index] = value.node;
}
void TeeLocal(Decoder* decoder, const Value& value, Value* result,
const LocalIndexOperand<true>& operand) {
- result->interface_data.node = value.interface_data.node;
+ result->node = value.node;
if (!ssa_env_->locals) return; // unreachable
- ssa_env_->locals[operand.index] = value.interface_data.node;
+ ssa_env_->locals[operand.index] = value.node;
}
void GetGlobal(Decoder* decoder, Value* result,
const GlobalIndexOperand<true>& operand) {
- result->interface_data.node = BUILD(GetGlobal, operand.index);
+ result->node = BUILD(GetGlobal, operand.index);
}
void SetGlobal(Decoder* decoder, const Value& value,
const GlobalIndexOperand<true>& operand) {
- BUILD(SetGlobal, operand.index, value.interface_data.node);
+ BUILD(SetGlobal, operand.index, value.node);
}
void Unreachable(Decoder* decoder) {
@@ -259,30 +285,30 @@ class WasmGraphBuildingInterface
void Select(Decoder* decoder, const Value& cond, const Value& fval,
const Value& tval, Value* result) {
TFNode* controls[2];
- BUILD(BranchNoHint, cond.interface_data.node, &controls[0], &controls[1]);
+ BUILD(BranchNoHint, cond.node, &controls[0], &controls[1]);
TFNode* merge = BUILD(Merge, 2, controls);
- TFNode* vals[2] = {tval.interface_data.node, fval.interface_data.node};
+ TFNode* vals[2] = {tval.node, fval.node};
TFNode* phi = BUILD(Phi, tval.type, 2, vals, merge);
- result->interface_data.node = phi;
+ result->node = phi;
ssa_env_->control = merge;
}
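The node shape Select builds, as a sketch:

    // Branch(cond) -> {c_true, c_false}
    // merge  = Merge(c_true, c_false)
    // result = Phi(tval.type, tval, fval, merge)  // value selected by control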
- void BreakTo(Decoder* decoder, Control* block) {
- if (block->is_loop()) {
- Goto(decoder, ssa_env_, block->interface_data.end_env);
+ void BreakTo(Decoder* decoder, uint32_t depth) {
+ Control* target = decoder->control_at(depth);
+ if (target->is_loop()) {
+ Goto(decoder, ssa_env_, target->end_env);
} else {
- MergeValuesInto(decoder, block);
+ MergeValuesInto(decoder, target);
}
}
- void BrIf(Decoder* decoder, const Value& cond, Control* block) {
+ void BrIf(Decoder* decoder, const Value& cond, uint32_t depth) {
SsaEnv* fenv = ssa_env_;
SsaEnv* tenv = Split(decoder, fenv);
fenv->SetNotMerged();
- BUILD(BranchNoHint, cond.interface_data.node, &tenv->control,
- &fenv->control);
+ BUILD(BranchNoHint, cond.node, &tenv->control, &fenv->control);
ssa_env_ = tenv;
- BreakTo(decoder, block);
+ BreakTo(decoder, depth);
ssa_env_ = fenv;
}
@@ -290,8 +316,7 @@ class WasmGraphBuildingInterface
const Value& key) {
SsaEnv* break_env = ssa_env_;
// Build branches to the various blocks based on the table.
- TFNode* sw =
- BUILD(Switch, operand.table_count + 1, key.interface_data.node);
+ TFNode* sw = BUILD(Switch, operand.table_count + 1, key.node);
SsaEnv* copy = Steal(decoder->zone(), break_env);
ssa_env_ = copy;
@@ -302,38 +327,38 @@ class WasmGraphBuildingInterface
ssa_env_ = Split(decoder, copy);
ssa_env_->control = (i == operand.table_count) ? BUILD(IfDefault, sw)
: BUILD(IfValue, i, sw);
- BreakTo(decoder, decoder->control_at(target));
+ BreakTo(decoder, target);
}
DCHECK(decoder->ok());
ssa_env_ = break_env;
}
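For a two-entry table this emits, as a sketch:

    // sw = Switch(key) with operand.table_count + 1 == 3 projections
    // IfValue(0, sw) -> BreakTo(target depth 0)
    // IfValue(1, sw) -> BreakTo(target depth 1)
    // IfDefault(sw)  -> BreakTo(default target depth)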
void Else(Decoder* decoder, Control* if_block) {
- SetEnv(if_block->interface_data.false_env);
+ SetEnv(if_block->false_env);
}
void LoadMem(Decoder* decoder, ValueType type, MachineType mem_type,
const MemoryAccessOperand<true>& operand, const Value& index,
Value* result) {
- result->interface_data.node =
- BUILD(LoadMem, type, mem_type, index.interface_data.node,
- operand.offset, operand.alignment, decoder->position());
+ result->node = BUILD(LoadMem, type, mem_type, index.node, operand.offset,
+ operand.alignment, decoder->position());
}
void StoreMem(Decoder* decoder, ValueType type, MachineType mem_type,
const MemoryAccessOperand<true>& operand, const Value& index,
const Value& value) {
- BUILD(StoreMem, mem_type, index.interface_data.node, operand.offset,
- operand.alignment, value.interface_data.node, decoder->position(),
- type);
+ BUILD(StoreMem, mem_type, index.node, operand.offset, operand.alignment,
+ value.node, decoder->position(), type);
}
void CurrentMemoryPages(Decoder* decoder, Value* result) {
- result->interface_data.node = BUILD(CurrentMemoryPages);
+ result->node = BUILD(CurrentMemoryPages);
}
void GrowMemory(Decoder* decoder, const Value& value, Value* result) {
- result->interface_data.node = BUILD(GrowMemory, value.interface_data.node);
+ result->node = BUILD(GrowMemory, value.node);
+ // Reload mem_size and mem_start after growing memory.
+ LoadContextIntoSsa(ssa_env_);
}
void CallDirect(Decoder* decoder, const CallFunctionOperand<true>& operand,
@@ -344,40 +369,36 @@ class WasmGraphBuildingInterface
void CallIndirect(Decoder* decoder, const Value& index,
const CallIndirectOperand<true>& operand,
const Value args[], Value returns[]) {
- DoCall(decoder, index.interface_data.node, operand, args, returns, true);
+ DoCall(decoder, index.node, operand, args, returns, true);
}
void SimdOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
Value* result) {
TFNode** inputs = GetNodes(args);
TFNode* node = BUILD(SimdOp, opcode, inputs);
- if (result) result->interface_data.node = node;
+ if (result) result->node = node;
}
void SimdLaneOp(Decoder* decoder, WasmOpcode opcode,
const SimdLaneOperand<true> operand, Vector<Value> inputs,
Value* result) {
TFNode** nodes = GetNodes(inputs);
- result->interface_data.node =
- BUILD(SimdLaneOp, opcode, operand.lane, nodes);
+ result->node = BUILD(SimdLaneOp, opcode, operand.lane, nodes);
}
void SimdShiftOp(Decoder* decoder, WasmOpcode opcode,
const SimdShiftOperand<true> operand, const Value& input,
Value* result) {
- TFNode* inputs[] = {input.interface_data.node};
- result->interface_data.node =
- BUILD(SimdShiftOp, opcode, operand.shift, inputs);
+ TFNode* inputs[] = {input.node};
+ result->node = BUILD(SimdShiftOp, opcode, operand.shift, inputs);
}
void Simd8x16ShuffleOp(Decoder* decoder,
const Simd8x16ShuffleOperand<true>& operand,
const Value& input0, const Value& input1,
Value* result) {
- TFNode* input_nodes[] = {input0.interface_data.node,
- input1.interface_data.node};
- result->interface_data.node =
- BUILD(Simd8x16ShuffleOp, operand.shuffle, input_nodes);
+ TFNode* input_nodes[] = {input0.node, input1.node};
+ result->node = BUILD(Simd8x16ShuffleOp, operand.shuffle, input_nodes);
}
TFNode* GetExceptionTag(Decoder* decoder,
@@ -387,47 +408,84 @@ class WasmGraphBuildingInterface
return BUILD(Int32Constant, operand.index);
}
- void Throw(Decoder* decoder, const ExceptionIndexOperand<true>& operand) {
- BUILD(Throw, GetExceptionTag(decoder, operand));
+ void Throw(Decoder* decoder, const ExceptionIndexOperand<true>& operand,
+ Control* block, const Vector<Value>& value_args) {
+ int count = value_args.length();
+ ZoneVector<TFNode*> args(count, decoder->zone());
+ for (int i = 0; i < count; ++i) {
+ args[i] = value_args[i].node;
+ }
+ BUILD(Throw, operand.index, operand.exception, vec2vec(args));
+ Unreachable(decoder);
+ EndControl(decoder, block);
}
- void Catch(Decoder* decoder, const ExceptionIndexOperand<true>& operand,
- Control* block) {
+ void CatchException(Decoder* decoder,
+ const ExceptionIndexOperand<true>& operand,
+ Control* block, Vector<Value> values) {
DCHECK(block->is_try_catch());
- current_catch_ = block->interface_data.previous_catch;
- SsaEnv* catch_env = block->interface_data.try_info->catch_env;
+ current_catch_ = block->previous_catch;
+ SsaEnv* catch_env = block->try_info->catch_env;
SetEnv(catch_env);
- // Get the exception and see if wanted exception.
- TFNode* exception_as_i32 = BUILD(
- Catch, block->interface_data.try_info->exception, decoder->position());
- TFNode* exception_tag = GetExceptionTag(decoder, operand);
- TFNode* compare_i32 = BUILD(Binop, kExprI32Eq, exception_as_i32,
- exception_tag, decoder->position());
- TFNode* if_true = nullptr;
- TFNode* if_false = nullptr;
- BUILD(BranchNoHint, compare_i32, &if_true, &if_false);
- SsaEnv* false_env = Split(decoder, catch_env);
- false_env->control = if_false;
- SsaEnv* true_env = Steal(decoder->zone(), catch_env);
- true_env->control = if_true;
- block->interface_data.try_info->catch_env = false_env;
+ TFNode* compare_i32 = nullptr;
+ if (block->try_info->exception == nullptr) {
+      // Catch is not applicable: there are no possible throws in the try
+      // block. Create dummy code so that the body of the catch still
+      // compiles. Note: this only happens because the current
+      // implementation only builds a landing pad if some node in the
+      // try block can (possibly) throw.
+ //
+ // TODO(kschimpf): Always generate a landing pad for a try block.
+ compare_i32 = BUILD(Int32Constant, 0);
+ } else {
+ // Get the exception and see if wanted exception.
+ TFNode* caught_tag = BUILD(GetExceptionRuntimeId);
+ TFNode* exception_tag =
+ BUILD(ConvertExceptionTagToRuntimeId, operand.index);
+ compare_i32 = BUILD(Binop, kExprI32Eq, caught_tag, exception_tag);
+ }
- // Generate code to re-throw the exception.
- DCHECK_NOT_NULL(block->interface_data.try_info->catch_env);
- SetEnv(false_env);
+ TFNode* if_catch = nullptr;
+ TFNode* if_no_catch = nullptr;
+ BUILD(BranchNoHint, compare_i32, &if_catch, &if_no_catch);
+
+ SsaEnv* if_no_catch_env = Split(decoder, ssa_env_);
+ if_no_catch_env->control = if_no_catch;
+ SsaEnv* if_catch_env = Steal(decoder->zone(), ssa_env_);
+ if_catch_env->control = if_catch;
+
+ // TODO(kschimpf): Generalize to allow more catches. Will force
+ // moving no_catch code to END opcode.
+ SetEnv(if_no_catch_env);
BUILD(Rethrow);
- FallThruTo(decoder, block);
+ Unreachable(decoder);
+ EndControl(decoder, block);
- SetEnv(true_env);
- // TODO(kschimpf): Add code to pop caught exception from isolate.
+ SetEnv(if_catch_env);
+
+ if (block->try_info->exception == nullptr) {
+      // No caught value; make up filler nodes so that the catch block still
+      // compiles.
+ for (Value& value : values) {
+ value.node = DefaultValue(value.type);
+ }
+ } else {
+ // TODO(kschimpf): Can't use BUILD() here, GetExceptionValues() returns
+ // TFNode** rather than TFNode*. Fix to add landing pads.
+ TFNode** caught_values = builder_->GetExceptionValues(operand.exception);
+ for (size_t i = 0, e = values.size(); i < e; ++i) {
+ values[i].node = caught_values[i];
+ }
+ }
}
void AtomicOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
- Value* result) {
+ const MemoryAccessOperand<true>& operand, Value* result) {
TFNode** inputs = GetNodes(args);
- TFNode* node = BUILD(AtomicOp, opcode, inputs, decoder->position());
- if (result) result->interface_data.node = node;
+ TFNode* node = BUILD(AtomicOp, opcode, inputs, operand.alignment,
+ operand.offset, decoder->position());
+ if (result) result->node = node;
}
private:
@@ -439,13 +497,13 @@ class WasmGraphBuildingInterface
TryInfo* current_try_info(Decoder* decoder) {
return decoder->control_at(decoder->control_depth() - 1 - current_catch_)
- ->interface_data.try_info;
+ ->try_info;
}
TFNode** GetNodes(Value* values, size_t count) {
TFNode** nodes = builder_->Buffer(count);
for (size_t i = 0; i < count; ++i) {
- nodes[i] = values[i].interface_data.node;
+ nodes[i] = values[i].node;
}
return nodes;
}
@@ -483,8 +541,13 @@ class WasmGraphBuildingInterface
}
#endif
ssa_env_ = env;
+ // TODO(wasm): Create a WasmEnv class with control, effect, mem_size and
+ // mem_start. SsaEnv can inherit from it. This way WasmEnv can be passed
+ // directly to WasmGraphBuilder instead of always copying four pointers.
builder_->set_control_ptr(&env->control);
builder_->set_effect_ptr(&env->effect);
+ builder_->set_mem_size(&env->mem_size);
+ builder_->set_mem_start(&env->mem_start);
}
TFNode* CheckForException(Decoder* decoder, TFNode* node) {
@@ -542,24 +605,21 @@ class WasmGraphBuildingInterface
void MergeValuesInto(Decoder* decoder, Control* c) {
if (!ssa_env_->go()) return;
- SsaEnv* target = c->interface_data.end_env;
+ SsaEnv* target = c->end_env;
const bool first = target->state == SsaEnv::kUnreachable;
Goto(decoder, ssa_env_, target);
- size_t avail = decoder->stack_size() - decoder->control_at(0)->stack_depth;
- size_t start = avail >= c->merge.arity ? 0 : c->merge.arity - avail;
- for (size_t i = start; i < c->merge.arity; ++i) {
+ uint32_t avail =
+ decoder->stack_size() - decoder->control_at(0)->stack_depth;
+ uint32_t start = avail >= c->merge.arity ? 0 : c->merge.arity - avail;
+ for (uint32_t i = start; i < c->merge.arity; ++i) {
auto& val = decoder->GetMergeValueFromStack(c, i);
auto& old = c->merge[i];
- DCHECK_NOT_NULL(val.interface_data.node);
- // TODO(clemensh): Remove first.
- DCHECK_EQ(first, old.interface_data.node == nullptr);
+ DCHECK_NOT_NULL(val.node);
DCHECK(val.type == old.type || val.type == kWasmVar);
- old.interface_data.node =
- first ? val.interface_data.node
- : CreateOrMergeIntoPhi(old.type, target->control,
- old.interface_data.node,
- val.interface_data.node);
+ old.node = first ? val.node
+ : CreateOrMergeIntoPhi(old.type, target->control,
+ old.node, val.node);
}
}
@@ -572,6 +632,8 @@ class WasmGraphBuildingInterface
to->locals = from->locals;
to->control = from->control;
to->effect = from->effect;
+ to->mem_size = from->mem_size;
+ to->mem_start = from->mem_start;
break;
}
case SsaEnv::kReached: { // Create a new merge.
@@ -595,6 +657,17 @@ class WasmGraphBuildingInterface
builder_->Phi(decoder->GetLocalType(i), 2, vals, merge);
}
}
+ // Merge mem_size and mem_start.
+ if (to->mem_size != from->mem_size) {
+ TFNode* vals[] = {to->mem_size, from->mem_size};
+ to->mem_size =
+ builder_->Phi(MachineRepresentation::kWord32, 2, vals, merge);
+ }
+ if (to->mem_start != from->mem_start) {
+ TFNode* vals[] = {to->mem_start, from->mem_start};
+ to->mem_start = builder_->Phi(MachineType::PointerRepresentation(), 2,
+ vals, merge);
+ }
break;
}
case SsaEnv::kMerged: {
@@ -615,21 +688,16 @@ class WasmGraphBuildingInterface
}
// Merge locals.
for (int i = decoder->NumLocals() - 1; i >= 0; i--) {
- TFNode* tnode = to->locals[i];
- TFNode* fnode = from->locals[i];
- if (builder_->IsPhiWithMerge(tnode, merge)) {
- builder_->AppendToPhi(tnode, fnode);
- } else if (tnode != fnode) {
- uint32_t count = builder_->InputCount(merge);
- TFNode** vals = builder_->Buffer(count);
- for (uint32_t j = 0; j < count - 1; j++) {
- vals[j] = tnode;
- }
- vals[count - 1] = fnode;
- to->locals[i] =
- builder_->Phi(decoder->GetLocalType(i), count, vals, merge);
- }
+ to->locals[i] = CreateOrMergeIntoPhi(decoder->GetLocalType(i), merge,
+ to->locals[i], from->locals[i]);
}
+ // Merge mem_size and mem_start.
+ to->mem_size =
+ CreateOrMergeIntoPhi(MachineRepresentation::kWord32, merge,
+ to->mem_size, from->mem_size);
+ to->mem_start =
+ CreateOrMergeIntoPhi(MachineType::PointerRepresentation(), merge,
+ to->mem_start, from->mem_start);
break;
}
default:
@@ -659,17 +727,29 @@ class WasmGraphBuildingInterface
env->control = builder_->Loop(env->control);
env->effect = builder_->EffectPhi(1, &env->effect, env->control);
builder_->Terminate(env->effect, env->control);
+    // The '+ 2' here makes room to mark mem_size and mem_start as assigned.
BitVector* assigned = WasmDecoder<true>::AnalyzeLoopAssignment(
- decoder, decoder->pc(), static_cast<int>(decoder->total_locals()),
- decoder->zone());
+ decoder, decoder->pc(), decoder->total_locals() + 2, decoder->zone());
if (decoder->failed()) return env;
if (assigned != nullptr) {
// Only introduce phis for variables assigned in this loop.
+ int mem_size_index = decoder->total_locals();
+ int mem_start_index = decoder->total_locals() + 1;
for (int i = decoder->NumLocals() - 1; i >= 0; i--) {
if (!assigned->Contains(i)) continue;
env->locals[i] = builder_->Phi(decoder->GetLocalType(i), 1,
&env->locals[i], env->control);
}
+ // Introduce phis for mem_size and mem_start if necessary.
+ if (assigned->Contains(mem_size_index)) {
+ env->mem_size = builder_->Phi(MachineRepresentation::kWord32, 1,
+ &env->mem_size, env->control);
+ }
+ if (assigned->Contains(mem_start_index)) {
+ env->mem_start = builder_->Phi(MachineType::PointerRepresentation(), 1,
+ &env->mem_start, env->control);
+ }
+
SsaEnv* loop_body_env = Split(decoder, env);
builder_->StackCheck(decoder->position(), &(loop_body_env->effect),
&(loop_body_env->control));
@@ -682,6 +762,12 @@ class WasmGraphBuildingInterface
&env->locals[i], env->control);
}
+ // Conservatively introduce phis for mem_size and mem_start.
+ env->mem_size = builder_->Phi(MachineRepresentation::kWord32, 1,
+ &env->mem_size, env->control);
+ env->mem_start = builder_->Phi(MachineType::PointerRepresentation(), 1,
+ &env->mem_start, env->control);
+
SsaEnv* loop_body_env = Split(decoder, env);
builder_->StackCheck(decoder->position(), &loop_body_env->effect,
&loop_body_env->control);
@@ -693,7 +779,8 @@ class WasmGraphBuildingInterface
DCHECK_NOT_NULL(from);
SsaEnv* result =
reinterpret_cast<SsaEnv*>(decoder->zone()->New(sizeof(SsaEnv)));
- size_t size = sizeof(TFNode*) * decoder->NumLocals();
+    // The '+ 2' here is to accommodate the mem_size and mem_start nodes.
+ size_t size = sizeof(TFNode*) * (decoder->NumLocals() + 2);
result->control = from->control;
result->effect = from->effect;
@@ -703,9 +790,13 @@ class WasmGraphBuildingInterface
size > 0 ? reinterpret_cast<TFNode**>(decoder->zone()->New(size))
: nullptr;
memcpy(result->locals, from->locals, size);
+ result->mem_size = from->mem_size;
+ result->mem_start = from->mem_start;
} else {
result->state = SsaEnv::kUnreachable;
result->locals = nullptr;
+ result->mem_size = nullptr;
+ result->mem_start = nullptr;
}
return result;
@@ -721,6 +812,8 @@ class WasmGraphBuildingInterface
result->locals = from->locals;
result->control = from->control;
result->effect = from->effect;
+ result->mem_size = from->mem_size;
+ result->mem_start = from->mem_start;
from->Kill(SsaEnv::kUnreachable);
return result;
}
@@ -732,6 +825,8 @@ class WasmGraphBuildingInterface
result->control = nullptr;
result->effect = nullptr;
result->locals = nullptr;
+ result->mem_size = nullptr;
+ result->mem_start = nullptr;
return result;
}
@@ -745,7 +840,7 @@ class WasmGraphBuildingInterface
TFNode** return_nodes = nullptr;
arg_nodes[0] = index_node;
for (int i = 0; i < param_count; ++i) {
- arg_nodes[i + 1] = args[i].interface_data.node;
+ arg_nodes[i + 1] = args[i].node;
}
if (is_indirect) {
builder_->CallIndirect(operand.index, arg_nodes, &return_nodes,
@@ -756,8 +851,11 @@ class WasmGraphBuildingInterface
}
int return_count = static_cast<int>(operand.sig->return_count());
for (int i = 0; i < return_count; ++i) {
- returns[i].interface_data.node = return_nodes[i];
+ returns[i].node = return_nodes[i];
}
+ // The invoked function could have used grow_memory, so we need to
+    // reload mem_size and mem_start.
+ LoadContextIntoSsa(ssa_env_);
}
};
@@ -798,11 +896,7 @@ DecodeResult VerifyWasmCodeWithStats(AccountingAllocator* allocator,
const wasm::WasmModule* module,
FunctionBody& body, bool is_wasm,
Counters* counters) {
- auto size_histogram = is_wasm ? counters->wasm_wasm_function_size_bytes()
- : counters->wasm_asm_function_size_bytes();
- // TODO(bradnelson): Improve histogram handling of ptrdiff_t.
- CHECK((body.end - body.start) >= 0);
- size_histogram->AddSample(static_cast<int>(body.end - body.start));
+ CHECK_LE(0, body.end - body.start);
auto time_counter = is_wasm ? counters->wasm_decode_wasm_function_time()
: counters->wasm_decode_asm_function_time();
TimedHistogramScope wasm_decode_function_time_scope(time_counter);
@@ -909,7 +1003,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
os << RawOpcodeName(opcode) << ",";
- for (size_t j = 1; j < length; ++j) {
+ for (unsigned j = 1; j < length; ++j) {
os << " 0x" << AsHex(i.pc()[j], 2) << ",";
}
@@ -979,10 +1073,10 @@ BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, size_t num_locals,
const byte* start, const byte* end) {
Decoder decoder(start, end);
return WasmDecoder<true>::AnalyzeLoopAssignment(
- &decoder, start, static_cast<int>(num_locals), zone);
+ &decoder, start, static_cast<uint32_t>(num_locals), zone);
}
-#undef TRACE
+#undef BUILD
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/function-body-decoder.h b/deps/v8/src/wasm/function-body-decoder.h
index 0c44b87ec4..a244e294c8 100644
--- a/deps/v8/src/wasm/function-body-decoder.h
+++ b/deps/v8/src/wasm/function-body-decoder.h
@@ -41,12 +41,6 @@ static inline FunctionBody FunctionBodyForTesting(const byte* start,
return {nullptr, 0, start, end};
}
-// A {DecodeResult} only stores the failure / success status, but no data. Thus
-// we use {nullptr_t} as data value, such that the only valid data stored in
-// this type is a nullptr.
-// Storing {void} would require template specialization.
-using DecodeResult = Result<std::nullptr_t>;
-
V8_EXPORT_PRIVATE DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
const wasm::WasmModule* module,
FunctionBody& body);
@@ -195,6 +189,12 @@ class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
}
bool has_next() { return pc_ < end_; }
+
+ WasmOpcode prefixed_opcode() {
+ byte prefix = read_u8<false>(pc_, "expected prefix");
+ byte index = read_u8<false>(pc_ + 1, "expected index");
+ return static_cast<WasmOpcode>(prefix << 8 | index);
+ }
};
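// Illustrative sketch (not part of this patch): prefixed_opcode() above folds
// a two-byte opcode into a single value, prefix byte high, index byte low.
// A self-contained equivalent:
//
//   #include <cstdint>
//
//   uint16_t MakePrefixedOpcode(uint8_t prefix, uint8_t index) {
//     return static_cast<uint16_t>(prefix << 8 | index);
//   }
//
//   // e.g. prefix 0xfc with index 0x02 yields opcode 0xfc02.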
} // namespace wasm
diff --git a/deps/v8/src/wasm/local-decl-encoder.cc b/deps/v8/src/wasm/local-decl-encoder.cc
index 0f3da2f383..ab179f3a9d 100644
--- a/deps/v8/src/wasm/local-decl-encoder.cc
+++ b/deps/v8/src/wasm/local-decl-encoder.cc
@@ -6,19 +6,9 @@
#include "src/wasm/leb-helper.h"
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wheader-hygiene"
-#endif
-
-using namespace v8::internal;
-using namespace v8::internal::wasm;
-
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic pop
-#endif
+namespace v8 {
+namespace internal {
+namespace wasm {
void LocalDeclEncoder::Prepend(Zone* zone, const byte** start,
const byte** end) const {
@@ -60,3 +50,7 @@ size_t LocalDeclEncoder::Size() const {
for (auto p : local_decls) size += 1 + LEBHelper::sizeof_u32v(p.first);
return size;
}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/memory-tracing.cc b/deps/v8/src/wasm/memory-tracing.cc
new file mode 100644
index 0000000000..d6e7891fc0
--- /dev/null
+++ b/deps/v8/src/wasm/memory-tracing.cc
@@ -0,0 +1,49 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/memory-tracing.h"
+
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+namespace tracing {
+
+void TraceMemoryOperation(ExecutionEngine engine, bool is_store,
+ MachineRepresentation rep, uint32_t addr,
+ int func_index, int position, uint8_t* mem_start) {
+ EmbeddedVector<char, 64> value;
+ switch (rep) {
+#define TRACE_TYPE(rep, str, format, ctype1, ctype2) \
+ case MachineRepresentation::rep: \
+ SNPrintF(value, str ":" format, \
+ ReadLittleEndianValue<ctype1>(mem_start + addr), \
+ ReadLittleEndianValue<ctype2>(mem_start + addr)); \
+ break;
+ TRACE_TYPE(kWord8, " i8", "%d / %02x", uint8_t, uint8_t)
+ TRACE_TYPE(kWord16, "i16", "%d / %04x", uint16_t, uint16_t)
+ TRACE_TYPE(kWord32, "i32", "%d / %08x", uint32_t, uint32_t)
+ TRACE_TYPE(kWord64, "i64", "%" PRId64 " / %016" PRIx64, uint64_t, uint64_t)
+ TRACE_TYPE(kFloat32, "f32", "%f / %08x", float, uint32_t)
+ TRACE_TYPE(kFloat64, "f64", "%f / %016" PRIx64, double, uint64_t)
+#undef TRACE_TYPE
+ default:
+ SNPrintF(value, "???");
+ }
+ char eng_c = '?';
+ switch (engine) {
+ case kWasmCompiled:
+ eng_c = 'C';
+ break;
+ case kWasmInterpreted:
+ eng_c = 'I';
+ break;
+ }
+ printf("%c %8d+0x%-6x %s @%08x %s\n", eng_c, func_index, position,
+ is_store ? "store" : "read ", addr, value.start());
+}
+
+} // namespace tracing
+} // namespace internal
+} // namespace v8
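// Illustrative example (not part of this patch): with the printf format
// above, a 4-byte store traced from compiled code could print
//
//   C        5+0x2a     store @00000010 i32:42 / 0000002a
//
// i.e. engine tag, function index plus code position, store/read, address,
// and the value in decimal and hex (the concrete values are hypothetical).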
diff --git a/deps/v8/src/wasm/memory-tracing.h b/deps/v8/src/wasm/memory-tracing.h
new file mode 100644
index 0000000000..7d7bc288c0
--- /dev/null
+++ b/deps/v8/src/wasm/memory-tracing.h
@@ -0,0 +1,28 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MEMORY_TRACING_H
+#define V8_MEMORY_TRACING_H
+
+#include <cstdint>
+
+#include "src/machine-type.h"
+
+namespace v8 {
+namespace internal {
+namespace tracing {
+
+enum ExecutionEngine { kWasmCompiled, kWasmInterpreted };
+
+// Callback for tracing a memory operation for debugging.
+// Triggered by --wasm-trace-memory.
+void TraceMemoryOperation(ExecutionEngine, bool is_store, MachineRepresentation,
+ uint32_t addr, int func_index, int position,
+ uint8_t* mem_start);
+
+} // namespace tracing
+} // namespace internal
+} // namespace v8
+
+#endif /* !V8_MEMORY_TRACING_H */
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index e1523e17d9..e42c139ce1 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -2,20 +2,25 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <src/wasm/module-compiler.h>
+#include "src/wasm/module-compiler.h"
#include <atomic>
+#include "src/api.h"
#include "src/asmjs/asm-js.h"
#include "src/assembler-inl.h"
+#include "src/base/template-utils.h"
+#include "src/base/utils/random-number-generator.h"
#include "src/code-stubs.h"
+#include "src/compiler/wasm-compiler.h"
#include "src/counters.h"
#include "src/property-descriptor.h"
#include "src/wasm/compilation-manager.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-code-specialization.h"
#include "src/wasm/wasm-js.h"
-#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-memory.h"
+#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
#define TRACE(...) \
@@ -33,12 +38,866 @@
if (FLAG_trace_wasm_compiler) PrintF(__VA_ARGS__); \
} while (false)
+#define TRACE_STREAMING(...) \
+ do { \
+ if (FLAG_trace_wasm_streaming) PrintF(__VA_ARGS__); \
+ } while (false)
static const int kInvalidSigIndex = -1;
namespace v8 {
namespace internal {
namespace wasm {
+// A class compiling an entire module.
+class ModuleCompiler {
+ public:
+ ModuleCompiler(Isolate* isolate, WasmModule* module,
+ Handle<Code> centry_stub);
+
+ // The actual runnable task that performs compilations in the background.
+ class CompilationTask : public CancelableTask {
+ public:
+ ModuleCompiler* compiler_;
+ explicit CompilationTask(ModuleCompiler* compiler)
+ : CancelableTask(&compiler->background_task_manager_),
+ compiler_(compiler) {}
+
+ void RunInternal() override {
+ while (compiler_->executed_units_.CanAcceptWork() &&
+ compiler_->FetchAndExecuteCompilationUnit()) {
+ }
+
+ compiler_->OnBackgroundTaskStopped();
+ }
+ };
+
+ // The CompilationUnitBuilder builds compilation units and stores them in an
+ // internal buffer. The buffer is moved into the working queue of the
+ // ModuleCompiler when {Commit} is called.
+ class CompilationUnitBuilder {
+ public:
+ explicit CompilationUnitBuilder(ModuleCompiler* compiler)
+ : compiler_(compiler) {}
+
+ ~CompilationUnitBuilder() { DCHECK(units_.empty()); }
+
+ void AddUnit(compiler::ModuleEnv* module_env, const WasmFunction* function,
+ uint32_t buffer_offset, Vector<const uint8_t> bytes,
+ WasmName name) {
+ units_.emplace_back(new compiler::WasmCompilationUnit(
+ compiler_->isolate_, module_env,
+ wasm::FunctionBody{function->sig, buffer_offset, bytes.begin(),
+ bytes.end()},
+ name, function->func_index, compiler_->centry_stub_,
+ compiler_->counters()));
+ }
+
+ void Commit() {
+ {
+ base::LockGuard<base::Mutex> guard(
+ &compiler_->compilation_units_mutex_);
+ compiler_->compilation_units_.insert(
+ compiler_->compilation_units_.end(),
+ std::make_move_iterator(units_.begin()),
+ std::make_move_iterator(units_.end()));
+ }
+ units_.clear();
+ }
+
+ void Clear() { units_.clear(); }
+
+ private:
+ ModuleCompiler* compiler_;
+ std::vector<std::unique_ptr<compiler::WasmCompilationUnit>> units_;
+ };
+
+ class CodeGenerationSchedule {
+ public:
+ explicit CodeGenerationSchedule(
+ base::RandomNumberGenerator* random_number_generator,
+ size_t max_memory = 0);
+
+ void Schedule(std::unique_ptr<compiler::WasmCompilationUnit>&& item);
+
+ bool IsEmpty() const { return schedule_.empty(); }
+
+ std::unique_ptr<compiler::WasmCompilationUnit> GetNext();
+
+ bool CanAcceptWork() const;
+
+ bool ShouldIncreaseWorkload() const;
+
+ void EnableThrottling() { throttle_ = true; }
+
+ private:
+ size_t GetRandomIndexInSchedule();
+
+ base::RandomNumberGenerator* random_number_generator_ = nullptr;
+ std::vector<std::unique_ptr<compiler::WasmCompilationUnit>> schedule_;
+ const size_t max_memory_;
+ bool throttle_ = false;
+ base::AtomicNumber<size_t> allocated_memory_{0};
+ };
+
+ Counters* counters() const { return async_counters_.get(); }
+
+ // Run by each compilation task and by the main thread (i.e. in both
+ // foreground and background threads). The no_finisher_callback is called
+ // within the result_mutex_ lock when no finishing task is running, i.e. when
+ // the finisher_is_running_ flag is not set.
+ bool FetchAndExecuteCompilationUnit(
+ std::function<void()> no_finisher_callback = nullptr);
+
+ void OnBackgroundTaskStopped();
+
+ void EnableThrottling() { executed_units_.EnableThrottling(); }
+
+ bool CanAcceptWork() const { return executed_units_.CanAcceptWork(); }
+
+ bool ShouldIncreaseWorkload() const {
+ return executed_units_.ShouldIncreaseWorkload();
+ }
+
+ size_t InitializeCompilationUnits(const std::vector<WasmFunction>& functions,
+ const ModuleWireBytes& wire_bytes,
+ compiler::ModuleEnv* module_env);
+
+ void RestartCompilationTasks();
+
+ size_t FinishCompilationUnits(std::vector<Handle<Code>>& results,
+ ErrorThrower* thrower);
+
+ bool IsFinisherRunning() const { return finisher_is_running_; }
+
+ void SetFinisherIsRunning(bool value);
+
+ MaybeHandle<Code> FinishCompilationUnit(ErrorThrower* thrower,
+ int* func_index);
+
+ void CompileInParallel(const ModuleWireBytes& wire_bytes,
+ compiler::ModuleEnv* module_env,
+ std::vector<Handle<Code>>& results,
+ ErrorThrower* thrower);
+
+ void CompileSequentially(const ModuleWireBytes& wire_bytes,
+ compiler::ModuleEnv* module_env,
+ std::vector<Handle<Code>>& results,
+ ErrorThrower* thrower);
+
+ void ValidateSequentially(const ModuleWireBytes& wire_bytes,
+ compiler::ModuleEnv* module_env,
+ ErrorThrower* thrower);
+
+ static MaybeHandle<WasmModuleObject> CompileToModuleObject(
+ Isolate* isolate, ErrorThrower* thrower,
+ std::unique_ptr<WasmModule> module, const ModuleWireBytes& wire_bytes,
+ Handle<Script> asm_js_script,
+ Vector<const byte> asm_js_offset_table_bytes);
+
+ private:
+ MaybeHandle<WasmModuleObject> CompileToModuleObjectInternal(
+ ErrorThrower* thrower, std::unique_ptr<WasmModule> module,
+ const ModuleWireBytes& wire_bytes, Handle<Script> asm_js_script,
+ Vector<const byte> asm_js_offset_table_bytes);
+
+ Isolate* isolate_;
+ WasmModule* module_;
+ const std::shared_ptr<Counters> async_counters_;
+ std::vector<std::unique_ptr<compiler::WasmCompilationUnit>>
+ compilation_units_;
+ base::Mutex compilation_units_mutex_;
+ CodeGenerationSchedule executed_units_;
+ base::Mutex result_mutex_;
+ const size_t num_background_tasks_;
+ // This flag should only be set while holding result_mutex_.
+ bool finisher_is_running_ = false;
+ CancelableTaskManager background_task_manager_;
+ size_t stopped_compilation_tasks_ = 0;
+ base::Mutex tasks_mutex_;
+ Handle<Code> centry_stub_;
+};
+
+class JSToWasmWrapperCache {
+ public:
+ void SetContextAddress(Address context_address) {
+    // Prevent having different context addresses in the cache.
+ DCHECK(code_cache_.empty());
+ context_address_ = context_address;
+ }
+
+ Handle<Code> CloneOrCompileJSToWasmWrapper(Isolate* isolate,
+ wasm::WasmModule* module,
+ Handle<Code> wasm_code,
+ uint32_t index) {
+ const wasm::WasmFunction* func = &module->functions[index];
+ int cached_idx = sig_map_.Find(func->sig);
+ if (cached_idx >= 0) {
+ Handle<Code> code = isolate->factory()->CopyCode(code_cache_[cached_idx]);
+ // Now patch the call to wasm code.
+ for (RelocIterator it(*code, RelocInfo::kCodeTargetMask);; it.next()) {
+ DCHECK(!it.done());
+ Code* target =
+ Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+ if (target->kind() == Code::WASM_FUNCTION ||
+ target->kind() == Code::WASM_TO_JS_FUNCTION ||
+ target->builtin_index() == Builtins::kIllegal ||
+ target->builtin_index() == Builtins::kWasmCompileLazy) {
+ it.rinfo()->set_target_address(isolate,
+ wasm_code->instruction_start());
+ break;
+ }
+ }
+ return code;
+ }
+
+ Handle<Code> code = compiler::CompileJSToWasmWrapper(
+ isolate, module, wasm_code, index, context_address_);
+ uint32_t new_cache_idx = sig_map_.FindOrInsert(func->sig);
+ DCHECK_EQ(code_cache_.size(), new_cache_idx);
+ USE(new_cache_idx);
+ code_cache_.push_back(code);
+ return code;
+ }
+
+ private:
+ // sig_map_ maps signatures to an index in code_cache_.
+ wasm::SignatureMap sig_map_;
+ std::vector<Handle<Code>> code_cache_;
+ Address context_address_ = nullptr;
+};
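// Illustrative sketch (not part of this patch): the cache above keeps one
// compiled wrapper per signature and, on a hit, copies it and patches only
// the embedded wasm call target. The bookkeeping, minus the copy-and-patch
// step and the V8 types:
//
//   #include <cstddef>
//   #include <map>
//   #include <vector>
//
//   template <typename Sig, typename Code>
//   class PerSignatureCache {
//    public:
//     template <typename CompileFn>
//     Code GetOrCompile(const Sig& sig, CompileFn compile) {
//       auto it = index_.find(sig);
//       if (it != index_.end()) return cache_[it->second];  // cache hit
//       Code code = compile(sig);
//       index_.emplace(sig, cache_.size());
//       cache_.push_back(code);
//       return code;
//     }
//
//    private:
//     std::map<Sig, size_t> index_;   // plays the role of sig_map_
//     std::vector<Code> cache_;       // plays the role of code_cache_
//   };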
+
+// A helper class to simplify instantiating a module from a compiled module.
+// It closes over the {Isolate}, the {ErrorThrower}, the {WasmCompiledModule},
+// etc.
+class InstanceBuilder {
+ public:
+ InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object,
+ MaybeHandle<JSReceiver> ffi,
+ MaybeHandle<JSArrayBuffer> memory,
+ WeakCallbackInfo<void>::Callback instance_finalizer_callback);
+
+ // Build an instance, in all of its glory.
+ MaybeHandle<WasmInstanceObject> Build();
+
+ private:
+ // Represents the initialized state of a table.
+ struct TableInstance {
+ Handle<WasmTableObject> table_object; // WebAssembly.Table instance
+ Handle<FixedArray> js_wrappers; // JSFunctions exported
+ Handle<FixedArray> function_table; // internal code array
+ Handle<FixedArray> signature_table; // internal sig array
+ };
+
+ // A pre-evaluated value to use in import binding.
+ struct SanitizedImport {
+ Handle<String> module_name;
+ Handle<String> import_name;
+ Handle<Object> value;
+ };
+
+ Isolate* isolate_;
+ WasmModule* const module_;
+ const std::shared_ptr<Counters> async_counters_;
+ ErrorThrower* thrower_;
+ Handle<WasmModuleObject> module_object_;
+ MaybeHandle<JSReceiver> ffi_;
+ MaybeHandle<JSArrayBuffer> memory_;
+ Handle<JSArrayBuffer> globals_;
+ Handle<WasmCompiledModule> compiled_module_;
+ std::vector<TableInstance> table_instances_;
+ std::vector<Handle<JSFunction>> js_wrappers_;
+ JSToWasmWrapperCache js_to_wasm_cache_;
+ WeakCallbackInfo<void>::Callback instance_finalizer_callback_;
+ std::vector<SanitizedImport> sanitized_imports_;
+
+ const std::shared_ptr<Counters>& async_counters() const {
+ return async_counters_;
+ }
+ Counters* counters() const { return async_counters().get(); }
+
+// Helper routines to print out errors with imports.
+#define ERROR_THROWER_WITH_MESSAGE(TYPE) \
+ void Report##TYPE(const char* error, uint32_t index, \
+ Handle<String> module_name, Handle<String> import_name) { \
+ thrower_->TYPE("Import #%d module=\"%s\" function=\"%s\" error: %s", \
+ index, module_name->ToCString().get(), \
+ import_name->ToCString().get(), error); \
+ } \
+ \
+ MaybeHandle<Object> Report##TYPE(const char* error, uint32_t index, \
+ Handle<String> module_name) { \
+ thrower_->TYPE("Import #%d module=\"%s\" error: %s", index, \
+ module_name->ToCString().get(), error); \
+ return MaybeHandle<Object>(); \
+ }
+
+ ERROR_THROWER_WITH_MESSAGE(LinkError)
+ ERROR_THROWER_WITH_MESSAGE(TypeError)
+
+ // Look up an import value in the {ffi_} object.
+ MaybeHandle<Object> LookupImport(uint32_t index, Handle<String> module_name,
+ Handle<String> import_name);
+
+ // Look up an import value in the {ffi_} object specifically for linking an
+ // asm.js module. This only performs non-observable lookups, which allows
+ // falling back to JavaScript proper (and hence re-executing all lookups) if
+ // module instantiation fails.
+ MaybeHandle<Object> LookupImportAsm(uint32_t index,
+ Handle<String> import_name);
+
+ uint32_t EvalUint32InitExpr(const WasmInitExpr& expr);
+
+ // Load data segments into the memory.
+ void LoadDataSegments(Address mem_addr, size_t mem_size);
+
+ void WriteGlobalValue(WasmGlobal& global, Handle<Object> value);
+
+ void SanitizeImports();
+
+ Handle<FixedArray> SetupWasmToJSImportsTable(
+ Handle<WasmInstanceObject> instance);
+
+ // Process the imports, including functions, tables, globals, and memory, in
+ // order, loading them from the {ffi_} object. Returns the number of imported
+ // functions.
+ int ProcessImports(Handle<FixedArray> code_table,
+ Handle<WasmInstanceObject> instance);
+
+ template <typename T>
+ T* GetRawGlobalPtr(WasmGlobal& global);
+
+ // Process initialization of globals.
+ void InitGlobals();
+
+ // Allocate memory for a module instance as a new JSArrayBuffer.
+ Handle<JSArrayBuffer> AllocateMemory(uint32_t num_pages);
+
+ bool NeedsWrappers() const;
+
+ // Process the exports, creating wrappers for functions, tables, memories,
+ // and globals.
+ void ProcessExports(Handle<WasmInstanceObject> instance,
+ Handle<WasmCompiledModule> compiled_module);
+
+ void InitializeTables(Handle<WasmInstanceObject> instance,
+ CodeSpecialization* code_specialization);
+
+ void LoadTableSegments(Handle<FixedArray> code_table,
+ Handle<WasmInstanceObject> instance);
+};
+
+// TODO(titzer): move to wasm-objects.cc
+static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
+ DisallowHeapAllocation no_gc;
+ JSObject** p = reinterpret_cast<JSObject**>(data.GetParameter());
+ WasmInstanceObject* owner = reinterpret_cast<WasmInstanceObject*>(*p);
+ Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
+ // If a link to shared memory instances exists, update the list of memory
+ // instances before the instance is destroyed.
+ WasmCompiledModule* compiled_module = owner->compiled_module();
+ TRACE("Finalizing %d {\n", compiled_module->instance_id());
+ DCHECK(compiled_module->has_weak_wasm_module());
+ WeakCell* weak_wasm_module = compiled_module->ptr_to_weak_wasm_module();
+
+ if (trap_handler::UseTrapHandler()) {
+ Handle<FixedArray> code_table = compiled_module->code_table();
+ for (int i = 0; i < code_table->length(); ++i) {
+ Handle<Code> code = code_table->GetValueChecked<Code>(isolate, i);
+ int index = code->trap_handler_index()->value();
+ if (index >= 0) {
+ trap_handler::ReleaseHandlerData(index);
+ code->set_trap_handler_index(Smi::FromInt(trap_handler::kInvalidIndex));
+ }
+ }
+ }
+
+ // Since the order of finalizers is not guaranteed, it can be the case
+ // that {instance->compiled_module()->module()}, which is a
+  // {Managed<WasmModule>}, has been collected earlier in this GC cycle.
+ // Weak references to this instance won't be cleared until
+ // the next GC cycle, so we need to manually break some links (such as
+ // the weak references from {WasmMemoryObject::instances}.
+ if (owner->has_memory_object()) {
+ Handle<WasmMemoryObject> memory(owner->memory_object(), isolate);
+ Handle<WasmInstanceObject> instance(owner, isolate);
+ WasmMemoryObject::RemoveInstance(isolate, memory, instance);
+ }
+
+ // weak_wasm_module may have been cleared, meaning the module object
+ // was GC-ed. In that case, there won't be any new instances created,
+ // and we don't need to maintain the links between instances.
+ if (!weak_wasm_module->cleared()) {
+ WasmModuleObject* wasm_module =
+ WasmModuleObject::cast(weak_wasm_module->value());
+ WasmCompiledModule* current_template = wasm_module->compiled_module();
+
+ TRACE("chain before {\n");
+ TRACE_CHAIN(current_template);
+ TRACE("}\n");
+
+ DCHECK(!current_template->has_weak_prev_instance());
+ WeakCell* next = compiled_module->maybe_ptr_to_weak_next_instance();
+ WeakCell* prev = compiled_module->maybe_ptr_to_weak_prev_instance();
+
+ if (current_template == compiled_module) {
+ if (next == nullptr) {
+ WasmCompiledModule::Reset(isolate, compiled_module);
+ } else {
+ WasmCompiledModule* next_compiled_module =
+ WasmCompiledModule::cast(next->value());
+ WasmModuleObject::cast(wasm_module)
+ ->set_compiled_module(next_compiled_module);
+ DCHECK_NULL(prev);
+ next_compiled_module->reset_weak_prev_instance();
+ }
+ } else {
+ DCHECK(!(prev == nullptr && next == nullptr));
+      // The only reason prev or next would be cleared is if the
+ // respective objects got collected, but if that happened,
+ // we would have relinked the list.
+ if (prev != nullptr) {
+ DCHECK(!prev->cleared());
+ if (next == nullptr) {
+ WasmCompiledModule::cast(prev->value())->reset_weak_next_instance();
+ } else {
+ WasmCompiledModule::cast(prev->value())
+ ->set_ptr_to_weak_next_instance(next);
+ }
+ }
+ if (next != nullptr) {
+ DCHECK(!next->cleared());
+ if (prev == nullptr) {
+ WasmCompiledModule::cast(next->value())->reset_weak_prev_instance();
+ } else {
+ WasmCompiledModule::cast(next->value())
+ ->set_ptr_to_weak_prev_instance(prev);
+ }
+ }
+ }
+ TRACE("chain after {\n");
+ TRACE_CHAIN(wasm_module->compiled_module());
+ TRACE("}\n");
+ }
+ compiled_module->reset_weak_owning_instance();
+ GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
+ TRACE("}\n");
+}
+
+bool SyncValidate(Isolate* isolate, const ModuleWireBytes& bytes) {
+ if (bytes.start() == nullptr || bytes.length() == 0) return false;
+ ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
+ bytes.end(), true, kWasmOrigin);
+ return result.ok();
+}
+
+MaybeHandle<WasmModuleObject> SyncCompileTranslatedAsmJs(
+ Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
+ Handle<Script> asm_js_script,
+ Vector<const byte> asm_js_offset_table_bytes) {
+ ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
+ bytes.end(), false, kAsmJsOrigin);
+ if (result.failed()) {
+ thrower->CompileFailed("Wasm decoding failed", result);
+ return {};
+ }
+
+ // Transfer ownership of the WasmModule to the {WasmModuleWrapper} generated
+ // in {CompileToModuleObject}.
+ return ModuleCompiler::CompileToModuleObject(
+ isolate, thrower, std::move(result.val), bytes, asm_js_script,
+ asm_js_offset_table_bytes);
+}
+
+MaybeHandle<WasmModuleObject> SyncCompile(Isolate* isolate,
+ ErrorThrower* thrower,
+ const ModuleWireBytes& bytes) {
+  // TODO(titzer): only make a copy of the bytes if backed by a
+  // SharedArrayBuffer.
+ std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
+ memcpy(copy.get(), bytes.start(), bytes.length());
+ ModuleWireBytes bytes_copy(copy.get(), copy.get() + bytes.length());
+
+ ModuleResult result = SyncDecodeWasmModule(
+ isolate, bytes_copy.start(), bytes_copy.end(), false, kWasmOrigin);
+ if (result.failed()) {
+ thrower->CompileFailed("Wasm decoding failed", result);
+ return {};
+ }
+
+ // Transfer ownership of the WasmModule to the {WasmModuleWrapper} generated
+ // in {CompileToModuleObject}.
+ return ModuleCompiler::CompileToModuleObject(
+ isolate, thrower, std::move(result.val), bytes_copy, Handle<Script>(),
+ Vector<const byte>());
+}
+
+MaybeHandle<WasmInstanceObject> SyncInstantiate(
+ Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
+ MaybeHandle<JSArrayBuffer> memory) {
+ InstanceBuilder builder(isolate, thrower, module_object, imports, memory,
+ &InstanceFinalizer);
+ return builder.Build();
+}
+
+MaybeHandle<WasmInstanceObject> SyncCompileAndInstantiate(
+ Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
+ MaybeHandle<JSReceiver> imports, MaybeHandle<JSArrayBuffer> memory) {
+ MaybeHandle<WasmModuleObject> module = SyncCompile(isolate, thrower, bytes);
+ DCHECK_EQ(thrower->error(), module.is_null());
+ if (module.is_null()) return {};
+
+ return SyncInstantiate(isolate, thrower, module.ToHandleChecked(),
+ Handle<JSReceiver>::null(),
+ Handle<JSArrayBuffer>::null());
+}
+
+void RejectPromise(Isolate* isolate, Handle<Context> context,
+ ErrorThrower& thrower, Handle<JSPromise> promise) {
+ Local<Promise::Resolver> resolver =
+ Utils::PromiseToLocal(promise).As<Promise::Resolver>();
+ auto maybe = resolver->Reject(Utils::ToLocal(context),
+ Utils::ToLocal(thrower.Reify()));
+ CHECK_IMPLIES(!maybe.FromMaybe(false), isolate->has_scheduled_exception());
+}
+
+void ResolvePromise(Isolate* isolate, Handle<Context> context,
+ Handle<JSPromise> promise, Handle<Object> result) {
+ Local<Promise::Resolver> resolver =
+ Utils::PromiseToLocal(promise).As<Promise::Resolver>();
+ auto maybe =
+ resolver->Resolve(Utils::ToLocal(context), Utils::ToLocal(result));
+ CHECK_IMPLIES(!maybe.FromMaybe(false), isolate->has_scheduled_exception());
+}
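// Note on the CHECK_IMPLIES above (not part of this patch): Reject/Resolve
// returning anything but true is only acceptable when the isolate already
// has a scheduled exception (e.g. termination); any other failure would be
// a bug, so the check crashes instead of silently dropping the promise.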
+
+void AsyncInstantiate(Isolate* isolate, Handle<JSPromise> promise,
+ Handle<WasmModuleObject> module_object,
+ MaybeHandle<JSReceiver> imports) {
+ ErrorThrower thrower(isolate, nullptr);
+ MaybeHandle<WasmInstanceObject> instance_object = SyncInstantiate(
+ isolate, &thrower, module_object, imports, Handle<JSArrayBuffer>::null());
+ if (thrower.error()) {
+ RejectPromise(isolate, handle(isolate->context()), thrower, promise);
+ return;
+ }
+ ResolvePromise(isolate, handle(isolate->context()), promise,
+ instance_object.ToHandleChecked());
+}
+
+void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
+ const ModuleWireBytes& bytes) {
+ if (!FLAG_wasm_async_compilation) {
+ ErrorThrower thrower(isolate, "WasmCompile");
+ // Compile the module.
+ MaybeHandle<WasmModuleObject> module_object =
+ SyncCompile(isolate, &thrower, bytes);
+ if (thrower.error()) {
+ RejectPromise(isolate, handle(isolate->context()), thrower, promise);
+ return;
+ }
+ Handle<WasmModuleObject> module = module_object.ToHandleChecked();
+ ResolvePromise(isolate, handle(isolate->context()), promise, module);
+ return;
+ }
+
+ if (FLAG_wasm_test_streaming) {
+ std::shared_ptr<StreamingDecoder> streaming_decoder =
+ isolate->wasm_compilation_manager()->StartStreamingCompilation(
+ isolate, handle(isolate->context()), promise);
+ streaming_decoder->OnBytesReceived(bytes.module_bytes());
+ streaming_decoder->Finish();
+ return;
+ }
+ // Make a copy of the wire bytes in case the user program changes them
+ // during asynchronous compilation.
+ std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
+ memcpy(copy.get(), bytes.start(), bytes.length());
+ isolate->wasm_compilation_manager()->StartAsyncCompileJob(
+ isolate, std::move(copy), bytes.length(), handle(isolate->context()),
+ promise);
+}
+
+Handle<Code> CompileLazy(Isolate* isolate) {
+ HistogramTimerScope lazy_time_scope(
+ isolate->counters()->wasm_lazy_compilation_time());
+
+ // Find the wasm frame which triggered the lazy compile, to get the wasm
+ // instance.
+ StackFrameIterator it(isolate);
+ // First frame: C entry stub.
+ DCHECK(!it.done());
+ DCHECK_EQ(StackFrame::EXIT, it.frame()->type());
+ it.Advance();
+ // Second frame: WasmCompileLazy builtin.
+ DCHECK(!it.done());
+ Handle<Code> lazy_compile_code(it.frame()->LookupCode(), isolate);
+ DCHECK_EQ(Builtins::kWasmCompileLazy, lazy_compile_code->builtin_index());
+ Handle<WasmInstanceObject> instance;
+ Handle<FixedArray> exp_deopt_data;
+ int func_index = -1;
+ if (lazy_compile_code->deoptimization_data()->length() > 0) {
+    // Then this is an indirect call or a call via a JS->wasm wrapper.
+ DCHECK_LE(2, lazy_compile_code->deoptimization_data()->length());
+ exp_deopt_data = handle(lazy_compile_code->deoptimization_data(), isolate);
+ auto* weak_cell = WeakCell::cast(exp_deopt_data->get(0));
+ instance = handle(WasmInstanceObject::cast(weak_cell->value()), isolate);
+ func_index = Smi::ToInt(exp_deopt_data->get(1));
+ }
+ it.Advance();
+ // Third frame: The calling wasm code or js-to-wasm wrapper.
+ DCHECK(!it.done());
+ DCHECK(it.frame()->is_js_to_wasm() || it.frame()->is_wasm_compiled());
+ Handle<Code> caller_code = handle(it.frame()->LookupCode(), isolate);
+ if (it.frame()->is_js_to_wasm()) {
+ DCHECK(!instance.is_null());
+ } else if (instance.is_null()) {
+ // Then this is a direct call (otherwise we would have attached the instance
+ // via deopt data to the lazy compile stub). Just use the instance of the
+ // caller.
+ instance =
+ handle(WasmInstanceObject::GetOwningInstance(*caller_code), isolate);
+ }
+ int offset =
+ static_cast<int>(it.frame()->pc() - caller_code->instruction_start());
+  // Only patch the caller code if this is *not* an indirect call.
+ // exp_deopt_data will be null if the called function is not exported at all,
+ // and its length will be <= 2 if all entries in tables were already patched.
+ // Note that this check is conservative: If the first call to an exported
+ // function is direct, we will just patch the export tables, and only on the
+ // second call we will patch the caller.
+ bool patch_caller = caller_code->kind() == Code::JS_TO_WASM_FUNCTION ||
+ exp_deopt_data.is_null() || exp_deopt_data->length() <= 2;
+
+ Handle<Code> compiled_code = WasmCompiledModule::CompileLazy(
+ isolate, instance, caller_code, offset, func_index, patch_caller);
+ if (!exp_deopt_data.is_null() && exp_deopt_data->length() > 2) {
+ // See EnsureExportedLazyDeoptData: exp_deopt_data[2...(len-1)] are pairs of
+ // <export_table, index> followed by undefined values.
+ // Use this information here to patch all export tables.
+ DCHECK_EQ(0, exp_deopt_data->length() % 2);
+ for (int idx = 2, end = exp_deopt_data->length(); idx < end; idx += 2) {
+ if (exp_deopt_data->get(idx)->IsUndefined(isolate)) break;
+ FixedArray* exp_table = FixedArray::cast(exp_deopt_data->get(idx));
+ int exp_index = Smi::ToInt(exp_deopt_data->get(idx + 1));
+ DCHECK(exp_table->get(exp_index) == *lazy_compile_code);
+ exp_table->set(exp_index, *compiled_code);
+ }
+ // After processing, remove the list of exported entries, such that we don't
+ // do the patching redundantly.
+ Handle<FixedArray> new_deopt_data =
+ isolate->factory()->CopyFixedArrayUpTo(exp_deopt_data, 2, TENURED);
+ lazy_compile_code->set_deoptimization_data(*new_deopt_data);
+ }
+
+ return compiled_code;
+}
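// Illustrative summary (not part of this patch) of the stack walked above,
// top of stack first:
//
//   1. EXIT frame        -- the C entry stub that called into the runtime
//   2. WasmCompileLazy   -- builtin; carries <instance, func_index> in its
//                           deopt data for indirect / JS->wasm entries
//   3. caller frame      -- wasm code or JS->wasm wrapper; patched unless
//                           the call came through a table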
+
+compiler::ModuleEnv CreateModuleEnvFromCompiledModule(
+ Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
+ DisallowHeapAllocation no_gc;
+ WasmModule* module = compiled_module->module();
+
+ std::vector<GlobalHandleAddress> function_tables;
+ std::vector<GlobalHandleAddress> signature_tables;
+ std::vector<SignatureMap*> signature_maps;
+
+ int num_function_tables = static_cast<int>(module->function_tables.size());
+ for (int i = 0; i < num_function_tables; ++i) {
+ FixedArray* ft = compiled_module->ptr_to_function_tables();
+ FixedArray* st = compiled_module->ptr_to_signature_tables();
+
+ // TODO(clemensh): defer these handles for concurrent compilation.
+ function_tables.push_back(WasmCompiledModule::GetTableValue(ft, i));
+ signature_tables.push_back(WasmCompiledModule::GetTableValue(st, i));
+ signature_maps.push_back(&module->function_tables[i].map);
+ }
+
+ std::vector<Handle<Code>> empty_code;
+
+ compiler::ModuleEnv result = {
+ module, // --
+ function_tables, // --
+ signature_tables, // --
+ signature_maps, // --
+ empty_code, // --
+ BUILTIN_CODE(isolate, WasmCompileLazy), // --
+ reinterpret_cast<uintptr_t>( // --
+ compiled_module->GetGlobalsStartOrNull()) // --
+ };
+ return result;
+}
+
+void LazyCompilationOrchestrator::CompileFunction(
+ Isolate* isolate, Handle<WasmInstanceObject> instance, int func_index) {
+ Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
+ isolate);
+ if (Code::cast(compiled_module->code_table()->get(func_index))->kind() ==
+ Code::WASM_FUNCTION) {
+ return;
+ }
+
+ compiler::ModuleEnv module_env =
+ CreateModuleEnvFromCompiledModule(isolate, compiled_module);
+
+ const uint8_t* module_start = compiled_module->module_bytes()->GetChars();
+
+ const WasmFunction* func = &module_env.module->functions[func_index];
+ FunctionBody body{func->sig, func->code.offset(),
+ module_start + func->code.offset(),
+ module_start + func->code.end_offset()};
+ // TODO(wasm): Refactor this to only get the name if it is really needed for
+ // tracing / debugging.
+ std::string func_name;
+ {
+ WasmName name = Vector<const char>::cast(
+ compiled_module->GetRawFunctionName(func_index));
+ // Copy to std::string, because the underlying string object might move on
+ // the heap.
+ func_name.assign(name.start(), static_cast<size_t>(name.length()));
+ }
+ ErrorThrower thrower(isolate, "WasmLazyCompile");
+ compiler::WasmCompilationUnit unit(isolate, &module_env, body,
+ CStrVector(func_name.c_str()), func_index,
+ CEntryStub(isolate, 1).GetCode());
+ unit.ExecuteCompilation();
+ MaybeHandle<Code> maybe_code = unit.FinishCompilation(&thrower);
+
+ // If there is a pending error, something really went wrong. The module was
+ // verified before starting execution with lazy compilation.
+ // This might be OOM, but then we cannot continue execution anyway.
+ // TODO(clemensh): According to the spec, we can actually skip validation at
+ // module creation time, and return a function that always traps here.
+ CHECK(!thrower.error());
+ Handle<Code> code = maybe_code.ToHandleChecked();
+
+ Handle<FixedArray> deopt_data = isolate->factory()->NewFixedArray(2, TENURED);
+ Handle<WeakCell> weak_instance = isolate->factory()->NewWeakCell(instance);
+ // TODO(wasm): Introduce constants for the indexes in wasm deopt data.
+ deopt_data->set(0, *weak_instance);
+ deopt_data->set(1, Smi::FromInt(func_index));
+ code->set_deoptimization_data(*deopt_data);
+
+ DCHECK_EQ(Builtins::kWasmCompileLazy,
+ Code::cast(compiled_module->code_table()->get(func_index))
+ ->builtin_index());
+ compiled_module->code_table()->set(func_index, *code);
+
+ // Now specialize the generated code for this instance.
+ Zone specialization_zone(isolate->allocator(), ZONE_NAME);
+ CodeSpecialization code_specialization(isolate, &specialization_zone);
+ code_specialization.RelocateDirectCalls(instance);
+ code_specialization.ApplyToWasmCode(*code, SKIP_ICACHE_FLUSH);
+ Assembler::FlushICache(isolate, code->instruction_start(),
+ code->instruction_size());
+ auto counters = isolate->counters();
+ counters->wasm_lazily_compiled_functions()->Increment();
+ counters->wasm_generated_code_size()->Increment(code->body_size());
+ counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
+}
+
+int AdvanceSourcePositionTableIterator(SourcePositionTableIterator& iterator,
+ int offset) {
+ DCHECK(!iterator.done());
+ int byte_pos;
+ do {
+ byte_pos = iterator.source_position().ScriptOffset();
+ iterator.Advance();
+ } while (!iterator.done() && iterator.code_offset() <= offset);
+ return byte_pos;
+}
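// Worked example (not part of this patch): with source position entries
// {code_offset -> byte_pos} of {4 -> 10, 8 -> 17, 20 -> 25} and offset == 8,
// the loop above records 10, advances (8 <= 8), records 17, advances, then
// stops because 20 > 8 and returns 17: the byte position of the last entry
// at or before the requested code offset.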
+
+Handle<Code> LazyCompilationOrchestrator::CompileLazy(
+ Isolate* isolate, Handle<WasmInstanceObject> instance, Handle<Code> caller,
+ int call_offset, int exported_func_index, bool patch_caller) {
+ struct NonCompiledFunction {
+ int offset;
+ int func_index;
+ };
+ std::vector<NonCompiledFunction> non_compiled_functions;
+ int func_to_return_idx = exported_func_index;
+ Decoder decoder(nullptr, nullptr);
+ bool is_js_to_wasm = caller->kind() == Code::JS_TO_WASM_FUNCTION;
+ Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
+ isolate);
+
+ if (is_js_to_wasm) {
+ non_compiled_functions.push_back({0, exported_func_index});
+ } else if (patch_caller) {
+ DisallowHeapAllocation no_gc;
+ SeqOneByteString* module_bytes = compiled_module->module_bytes();
+ SourcePositionTableIterator source_pos_iterator(
+ caller->SourcePositionTable());
+ DCHECK_EQ(2, caller->deoptimization_data()->length());
+ int caller_func_index = Smi::ToInt(caller->deoptimization_data()->get(1));
+ const byte* func_bytes =
+ module_bytes->GetChars() +
+ compiled_module->module()->functions[caller_func_index].code.offset();
+ for (RelocIterator it(*caller, RelocInfo::kCodeTargetMask); !it.done();
+ it.next()) {
+ Code* callee =
+ Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+ if (callee->builtin_index() != Builtins::kWasmCompileLazy) continue;
+ // TODO(clemensh): Introduce safe_cast<T, bool> which (D)CHECKS
+ // (depending on the bool) against limits of T and then static_casts.
+ size_t offset_l = it.rinfo()->pc() - caller->instruction_start();
+ DCHECK_GE(kMaxInt, offset_l);
+ int offset = static_cast<int>(offset_l);
+ int byte_pos =
+ AdvanceSourcePositionTableIterator(source_pos_iterator, offset);
+ int called_func_index =
+ ExtractDirectCallIndex(decoder, func_bytes + byte_pos);
+ non_compiled_functions.push_back({offset, called_func_index});
+      // The call offset points one instruction after the call. Remember the
+      // last called function before that offset.
+ if (offset < call_offset) func_to_return_idx = called_func_index;
+ }
+ }
+
+ // TODO(clemensh): compile all functions in non_compiled_functions in
+ // background, wait for func_to_return_idx.
+ CompileFunction(isolate, instance, func_to_return_idx);
+
+ if (is_js_to_wasm || patch_caller) {
+ DisallowHeapAllocation no_gc;
+ // Now patch the code object with all functions which are now compiled.
+ int idx = 0;
+ for (RelocIterator it(*caller, RelocInfo::kCodeTargetMask); !it.done();
+ it.next()) {
+ Code* callee =
+ Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+ if (callee->builtin_index() != Builtins::kWasmCompileLazy) continue;
+ DCHECK_GT(non_compiled_functions.size(), idx);
+ int called_func_index = non_compiled_functions[idx].func_index;
+ // Check that the callee agrees with our assumed called_func_index.
+ DCHECK_IMPLIES(callee->deoptimization_data()->length() > 0,
+ Smi::ToInt(callee->deoptimization_data()->get(1)) ==
+ called_func_index);
+ if (is_js_to_wasm) {
+ DCHECK_EQ(func_to_return_idx, called_func_index);
+ } else {
+ DCHECK_EQ(non_compiled_functions[idx].offset,
+ it.rinfo()->pc() - caller->instruction_start());
+ }
+ ++idx;
+ Handle<Code> callee_compiled(
+ Code::cast(compiled_module->code_table()->get(called_func_index)));
+ if (callee_compiled->builtin_index() == Builtins::kWasmCompileLazy) {
+ DCHECK_NE(func_to_return_idx, called_func_index);
+ continue;
+ }
+ DCHECK_EQ(Code::WASM_FUNCTION, callee_compiled->kind());
+ it.rinfo()->set_target_address(isolate,
+ callee_compiled->instruction_start());
+ }
+ DCHECK_EQ(non_compiled_functions.size(), idx);
+ }
+
+ Code* ret =
+ Code::cast(compiled_module->code_table()->get(func_to_return_idx));
+ DCHECK_EQ(Code::WASM_FUNCTION, ret->kind());
+ return handle(ret, isolate);
+}
+
ModuleCompiler::CodeGenerationSchedule::CodeGenerationSchedule(
base::RandomNumberGenerator* random_number_generator, size_t max_memory)
: random_number_generator_(random_number_generator),
@@ -82,11 +941,10 @@ size_t ModuleCompiler::CodeGenerationSchedule::GetRandomIndexInSchedule() {
return index;
}
-ModuleCompiler::ModuleCompiler(Isolate* isolate,
- std::unique_ptr<WasmModule> module,
+ModuleCompiler::ModuleCompiler(Isolate* isolate, WasmModule* module,
Handle<Code> centry_stub)
: isolate_(isolate),
- module_(std::move(module)),
+ module_(module),
async_counters_(isolate->async_counters()),
executed_units_(
isolate->random_number_generator(),
@@ -101,18 +959,6 @@ ModuleCompiler::ModuleCompiler(Isolate* isolate,
centry_stub_(centry_stub) {}
// The actual runnable task that performs compilations in the background.
-ModuleCompiler::CompilationTask::CompilationTask(ModuleCompiler* compiler)
- : CancelableTask(&compiler->background_task_manager_),
- compiler_(compiler) {}
-
-void ModuleCompiler::CompilationTask::RunInternal() {
- while (compiler_->executed_units_.CanAcceptWork() &&
- compiler_->FetchAndExecuteCompilationUnit()) {
- }
-
- compiler_->OnBackgroundTaskStopped();
-}
-
void ModuleCompiler::OnBackgroundTaskStopped() {
base::LockGuard<base::Mutex> guard(&tasks_mutex_);
++stopped_compilation_tasks_;
@@ -323,16 +1169,16 @@ void ModuleCompiler::ValidateSequentially(const ModuleWireBytes& wire_bytes,
}
}
+// static
MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObject(
- ErrorThrower* thrower, const ModuleWireBytes& wire_bytes,
- Handle<Script> asm_js_script,
+ Isolate* isolate, ErrorThrower* thrower, std::unique_ptr<WasmModule> module,
+ const ModuleWireBytes& wire_bytes, Handle<Script> asm_js_script,
Vector<const byte> asm_js_offset_table_bytes) {
-
- TimedHistogramScope wasm_compile_module_time_scope(
- module_->is_wasm() ? counters()->wasm_compile_wasm_module_time()
- : counters()->wasm_compile_asm_module_time());
- return CompileToModuleObjectInternal(
- isolate_, thrower, wire_bytes, asm_js_script, asm_js_offset_table_bytes);
+ Handle<Code> centry_stub = CEntryStub(isolate, 1).GetCode();
+ ModuleCompiler compiler(isolate, module.get(), centry_stub);
+ return compiler.CompileToModuleObjectInternal(thrower, std::move(module),
+ wire_bytes, asm_js_script,
+ asm_js_offset_table_bytes);
}
namespace {
@@ -491,11 +1337,11 @@ bool in_bounds(uint32_t offset, uint32_t size, uint32_t upper) {
using WasmInstanceMap =
IdentityMap<Handle<WasmInstanceObject>, FreeStoreAllocationPolicy>;
-Handle<Code> UnwrapOrCompileImportWrapper(
+Handle<Code> UnwrapExportOrCompileImportWrapper(
Isolate* isolate, int index, FunctionSig* sig, Handle<JSReceiver> target,
- Handle<String> module_name, MaybeHandle<String> import_name,
- ModuleOrigin origin, WasmInstanceMap* imported_instances) {
- WasmFunction* other_func = GetWasmFunctionForImportWrapper(isolate, target);
+ ModuleOrigin origin, WasmInstanceMap* imported_instances,
+ Handle<FixedArray> js_imports_table, Handle<WasmInstanceObject> instance) {
+ WasmFunction* other_func = GetWasmFunctionForExport(isolate, target);
if (other_func) {
if (!sig->Equals(other_func->sig)) return Handle<Code>::null();
// Signature matched. Unwrap the import wrapper and return the raw wasm
@@ -504,12 +1350,34 @@ Handle<Code> UnwrapOrCompileImportWrapper(
Handle<WasmInstanceObject> imported_instance(
Handle<WasmExportedFunction>::cast(target)->instance(), isolate);
imported_instances->Set(imported_instance, imported_instance);
- return UnwrapImportWrapper(target);
+ Handle<Code> wasm_code =
+ UnwrapExportWrapper(Handle<JSFunction>::cast(target));
+ // Create a WasmToWasm wrapper to replace the current wasm context with
+ // the imported_instance one, in order to access the right memory.
+ // If the imported instance does not have memory, avoid the wrapper.
+ // TODO(wasm): Avoid the wrapper also if instance memory and imported
+ // instance share the same memory object.
+ bool needs_wasm_to_wasm_wrapper = imported_instance->has_memory_object();
+ if (!needs_wasm_to_wasm_wrapper) return wasm_code;
+ Address new_wasm_context =
+ reinterpret_cast<Address>(imported_instance->wasm_context());
+ Handle<Code> wrapper_code = compiler::CompileWasmToWasmWrapper(
+ isolate, wasm_code, sig, index, new_wasm_context);
+ // Set the deoptimization data for the WasmToWasm wrapper.
+    // TODO(wasm): Remove the deoptimization data once we use tail calls
+    // for WasmToWasm wrappers.
+ Factory* factory = isolate->factory();
+ Handle<WeakCell> weak_link = factory->NewWeakCell(instance);
+ Handle<FixedArray> deopt_data = factory->NewFixedArray(2, TENURED);
+ deopt_data->set(0, *weak_link);
+ deopt_data->set(1, Smi::FromInt(index));
+ wrapper_code->set_deoptimization_data(*deopt_data);
+ return wrapper_code;
}
// No wasm function or being debugged. Compile a new wrapper for the new
// signature.
- return compiler::CompileWasmToJSWrapper(isolate, target, sig, index,
- module_name, import_name, origin);
+ return compiler::CompileWasmToJSWrapper(isolate, target, sig, index, origin,
+ js_imports_table);
}
double MonotonicallyIncreasingTimeInMs() {
@@ -553,8 +1421,6 @@ std::unique_ptr<compiler::ModuleEnv> CreateDefaultModuleEnv(
signature_maps, // --
empty_code, // --
illegal_builtin, // --
- 0, // --
- 0, // --
0 // --
};
return std::unique_ptr<compiler::ModuleEnv>(new compiler::ModuleEnv(result));
@@ -581,12 +1447,21 @@ void ReopenHandles(Isolate* isolate, const std::vector<Handle<T>>& vec) {
} // namespace
MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObjectInternal(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& wire_bytes,
- Handle<Script> asm_js_script,
+ ErrorThrower* thrower, std::unique_ptr<WasmModule> module,
+ const ModuleWireBytes& wire_bytes, Handle<Script> asm_js_script,
Vector<const byte> asm_js_offset_table_bytes) {
- Factory* factory = isolate->factory();
+ TimedHistogramScope wasm_compile_module_time_scope(
+ module_->is_wasm() ? counters()->wasm_compile_wasm_module_time()
+ : counters()->wasm_compile_asm_module_time());
+  // The {module} parameter is passed in to transfer ownership of the WasmModule
+ // to this function. The WasmModule itself existed already as an instance
+ // variable of the ModuleCompiler. We check here that the parameter and the
+ // instance variable actually point to the same object.
+ DCHECK_EQ(module.get(), module_);
// Check whether lazy compilation is enabled for this module.
- bool lazy_compile = compile_lazy(module_.get());
+ bool lazy_compile = compile_lazy(module_);
+
+ Factory* factory = isolate_->factory();
// If lazy compile: Initialize the code table with the lazy compile builtin.
// Otherwise: Initialize with the illegal builtin. All call sites will be
@@ -595,7 +1470,7 @@ MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObjectInternal(
? BUILTIN_CODE(isolate_, WasmCompileLazy)
: BUILTIN_CODE(isolate_, Illegal);
- auto env = CreateDefaultModuleEnv(isolate, module_.get(), init_builtin);
+ auto env = CreateDefaultModuleEnv(isolate_, module_, init_builtin);
// The {code_table} array contains import wrappers and functions (which
// are both included in {functions.size()}, and export wrappers).
@@ -675,8 +1550,7 @@ MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObjectInternal(
// The {module_wrapper} will take ownership of the {WasmModule} object,
// and it will be destroyed when the GC reclaims the wrapper object.
Handle<WasmModuleWrapper> module_wrapper =
- WasmModuleWrapper::New(isolate_, module_.release());
- WasmModule* module = module_wrapper->get();
+ WasmModuleWrapper::From(isolate_, module.release());
// Create the shared module data.
// TODO(clemensh): For the same module (same bytes / same hash), we should
@@ -705,12 +1579,12 @@ MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObjectInternal(
// Compile JS->wasm wrappers for exported functions.
JSToWasmWrapperCache js_to_wasm_cache;
int wrapper_index = 0;
- for (auto exp : module->export_table) {
+ for (auto exp : module_->export_table) {
if (exp.kind != kExternalFunction) continue;
Handle<Code> wasm_code = EnsureExportedLazyDeoptData(
isolate_, Handle<WasmInstanceObject>::null(), code_table, exp.index);
Handle<Code> wrapper_code = js_to_wasm_cache.CloneOrCompileJSToWasmWrapper(
- isolate_, module, wasm_code, exp.index);
+ isolate_, module_, wasm_code, exp.index);
export_wrappers->set(wrapper_index, *wrapper_code);
RecordStats(*wrapper_code, counters());
++wrapper_index;
@@ -718,38 +1592,6 @@ MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObjectInternal(
return WasmModuleObject::New(isolate_, compiled_module);
}
-Handle<Code> JSToWasmWrapperCache::CloneOrCompileJSToWasmWrapper(
- Isolate* isolate, wasm::WasmModule* module, Handle<Code> wasm_code,
- uint32_t index) {
- const wasm::WasmFunction* func = &module->functions[index];
- int cached_idx = sig_map_.Find(func->sig);
- if (cached_idx >= 0) {
- Handle<Code> code = isolate->factory()->CopyCode(code_cache_[cached_idx]);
- // Now patch the call to wasm code.
- for (RelocIterator it(*code, RelocInfo::kCodeTargetMask);; it.next()) {
- DCHECK(!it.done());
- Code* target =
- Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- if (target->kind() == Code::WASM_FUNCTION ||
- target->kind() == Code::WASM_TO_JS_FUNCTION ||
- target->builtin_index() == Builtins::kIllegal ||
- target->builtin_index() == Builtins::kWasmCompileLazy) {
- it.rinfo()->set_target_address(isolate, wasm_code->instruction_start());
- break;
- }
- }
- return code;
- }
-
- Handle<Code> code =
- compiler::CompileJSToWasmWrapper(isolate, module, wasm_code, index);
- uint32_t new_cache_idx = sig_map_.FindOrInsert(func->sig);
- DCHECK_EQ(code_cache_.size(), new_cache_idx);
- USE(new_cache_idx);
- code_cache_.push_back(code);
- return code;
-}
-
InstanceBuilder::InstanceBuilder(
Isolate* isolate, ErrorThrower* thrower,
Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> ffi,
@@ -957,7 +1799,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// Set externally passed ArrayBuffer non neuterable.
memory->set_is_neuterable(false);
- DCHECK_IMPLIES(EnableGuardRegions(),
+ DCHECK_IMPLIES(trap_handler::UseTrapHandler(),
module_->is_asm_js() || memory->has_guard_region());
} else if (initial_pages > 0) {
memory_ = AllocateMemory(initial_pages);
@@ -997,29 +1839,38 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
// Initialize memory.
//--------------------------------------------------------------------------
- uint32_t mem_size = 0;
Address mem_start = nullptr;
-
- // Stash old values of mem_start, and mem_size before
- // SetSpecializationMemInfoFrom, to patch memory references
- uint32_t old_mem_size = compiled_module_->GetEmbeddedMemSizeOrZero();
- Address old_mem_start = compiled_module_->GetEmbeddedMemStartOrNull();
+ uint32_t mem_size = 0;
if (!memory_.is_null()) {
Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
mem_start = static_cast<Address>(memory->backing_store());
CHECK(memory->byte_length()->ToUint32(&mem_size));
LoadDataSegments(mem_start, mem_size);
-
// Just like with globals, we need to keep both the JSArrayBuffer
// and save the start pointer.
instance->set_memory_buffer(*memory);
- WasmCompiledModule::SetSpecializationMemInfoFrom(factory, compiled_module_,
- memory);
}
- // We might get instantiated again with the same memory. No patching
- // needed in this case.
- code_specialization.RelocateMemoryReferences(old_mem_start, old_mem_size,
- mem_start, mem_size);
+
+ //--------------------------------------------------------------------------
+ // Create a memory object to have a WasmContext.
+ //--------------------------------------------------------------------------
+ if (module_->has_memory) {
+ if (!instance->has_memory_object()) {
+ Handle<WasmMemoryObject> memory_object = WasmMemoryObject::New(
+ isolate_,
+ instance->has_memory_buffer() ? handle(instance->memory_buffer())
+ : Handle<JSArrayBuffer>::null(),
+ module_->maximum_pages != 0 ? module_->maximum_pages : -1);
+ instance->set_memory_object(*memory_object);
+ }
+
+ code_specialization.RelocateWasmContextReferences(
+ reinterpret_cast<Address>(instance->wasm_context()));
+ // Store the wasm_context address in the JSToWasmWrapperCache so that it can
+ // be used to compile JSToWasmWrappers.
+ js_to_wasm_cache_.SetContextAddress(
+ reinterpret_cast<Address>(instance->wasm_context()));
+ }
//--------------------------------------------------------------------------
// Set up the runtime support for the new instance.
@@ -1074,6 +1925,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
code_specialization.ApplyToWholeInstance(*instance, SKIP_ICACHE_FLUSH);
FlushICache(isolate_, code_table);
+ FlushICache(isolate_, wrapper_table);
//--------------------------------------------------------------------------
// Unpack and notify signal handler of protected instructions.
@@ -1167,7 +2019,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
DCHECK(isolate_->has_pending_exception());
// It's unfortunate that the new instance is already linked in the
// chain. However, we need to set up everything before executing the
- // startup function, such that stack trace information can be generated
+      // startup function, such that stack trace information can be generated
// correctly already in the start function.
return {};
}
@@ -1346,6 +2198,27 @@ void InstanceBuilder::SanitizeImports() {
}
}
+Handle<FixedArray> InstanceBuilder::SetupWasmToJSImportsTable(
+ Handle<WasmInstanceObject> instance) {
+  // The js_imports_table is set up so that index 0 has isolate->native_context
+  // and, for every import index, 3*index+1 has the JSReceiver, 3*index+2 has
+  // the function's global proxy, and 3*index+3 has the function's context.
+  // Hence, the fixed array's size is 3*import_table.size+1.
+ int size = static_cast<int>(module_->import_table.size());
+ CHECK_LE(size, (kMaxInt - 1) / 3);
+ Handle<FixedArray> func_table =
+ isolate_->factory()->NewFixedArray(3 * size + 1, TENURED);
+ Handle<FixedArray> js_imports_table =
+ isolate_->global_handles()->Create(*func_table);
+ GlobalHandles::MakeWeak(
+ reinterpret_cast<Object**>(js_imports_table.location()),
+ js_imports_table.location(), &FunctionTableFinalizer,
+ v8::WeakCallbackType::kFinalizer);
+ instance->set_js_imports_table(*func_table);
+ js_imports_table->set(0, *isolate_->native_context());
+ return js_imports_table;
+}
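// Illustrative sketch (not part of this patch): index helpers matching the
// layout documented above (slot 0: native context, then three slots per
// import). These helpers are hypothetical, not part of the V8 API:
//
//   constexpr int kSlotsPerImport = 3;
//   constexpr int ReceiverSlot(int index) { return kSlotsPerImport * index + 1; }
//   constexpr int GlobalProxySlot(int index) { return kSlotsPerImport * index + 2; }
//   constexpr int ContextSlot(int index) { return kSlotsPerImport * index + 3; }
//   constexpr int TableSize(int imports) { return kSlotsPerImport * imports + 1; }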
+
// Process the imports, including functions, tables, globals, and memory, in
// order, loading them from the {ffi_} object. Returns the number of imported
// functions.
@@ -1353,6 +2226,7 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
Handle<WasmInstanceObject> instance) {
int num_imported_functions = 0;
int num_imported_tables = 0;
+ Handle<FixedArray> js_imports_table = SetupWasmToJSImportsTable(instance);
WasmInstanceMap imported_wasm_instances(isolate_->heap());
DCHECK_EQ(module_->import_table.size(), sanitized_imports_.size());
for (int index = 0; index < static_cast<int>(module_->import_table.size());
@@ -1372,17 +2246,17 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
return -1;
}
- Handle<Code> import_wrapper = UnwrapOrCompileImportWrapper(
+ Handle<Code> import_code = UnwrapExportOrCompileImportWrapper(
isolate_, index, module_->functions[import.index].sig,
- Handle<JSReceiver>::cast(value), module_name, import_name,
- module_->origin(), &imported_wasm_instances);
- if (import_wrapper.is_null()) {
+ Handle<JSReceiver>::cast(value), module_->origin(),
+ &imported_wasm_instances, js_imports_table, instance);
+ if (import_code.is_null()) {
ReportLinkError("imported function does not match the expected type",
index, module_name, import_name);
return -1;
}
- code_table->set(num_imported_functions, *import_wrapper);
- RecordStats(*import_wrapper, counters());
+ code_table->set(num_imported_functions, *import_code);
+ RecordStats(*import_code, counters());
num_imported_functions++;
break;
}
@@ -1440,8 +2314,7 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
for (int i = 0; i < table_size; ++i) {
Handle<Object> val(table_instance.js_wrappers->get(i), isolate_);
if (!val->IsJSFunction()) continue;
- WasmFunction* function =
- GetWasmFunctionForImportWrapper(isolate_, val);
+ WasmFunction* function = GetWasmFunctionForExport(isolate_, val);
if (function == nullptr) {
thrower_->LinkError("table import %d[%d] is not a wasm function",
index, i);
@@ -1449,7 +2322,8 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
}
int sig_index = table.map.FindOrInsert(function->sig);
table_instance.signature_table->set(i, Smi::FromInt(sig_index));
- table_instance.function_table->set(i, *UnwrapImportWrapper(val));
+ table_instance.function_table->set(
+ i, *UnwrapExportWrapper(Handle<JSFunction>::cast(val)));
}
num_imported_tables++;
@@ -1492,6 +2366,14 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
return -1;
}
}
+ if (module_->has_shared_memory != buffer->is_shared()) {
+ thrower_->LinkError(
+ "mismatch in shared state of memory, declared = %d, imported = "
+ "%d",
+ module_->has_shared_memory, buffer->is_shared());
+ return -1;
+ }
+
break;
}
case kExternalGlobal: {
@@ -1597,7 +2479,7 @@ Handle<JSArrayBuffer> InstanceBuilder::AllocateMemory(uint32_t num_pages) {
thrower_->RangeError("Out of memory: wasm memory too large");
return Handle<JSArrayBuffer>::null();
}
- const bool enable_guard_regions = EnableGuardRegions();
+ const bool enable_guard_regions = trap_handler::UseTrapHandler();
Handle<JSArrayBuffer> mem_buffer = NewArrayBuffer(
isolate_, num_pages * WasmModule::kPageSize, enable_guard_regions);
@@ -1722,23 +2604,12 @@ void InstanceBuilder::ProcessExports(
break;
}
case kExternalMemory: {
- // Export the memory as a WebAssembly.Memory object.
- Handle<WasmMemoryObject> memory_object;
- if (!instance->has_memory_object()) {
- // If there was no imported WebAssembly.Memory object, create one.
- memory_object = WasmMemoryObject::New(
- isolate_,
- (instance->has_memory_buffer())
- ? handle(instance->memory_buffer())
- : Handle<JSArrayBuffer>::null(),
- (module_->maximum_pages != 0) ? module_->maximum_pages : -1);
- instance->set_memory_object(*memory_object);
- } else {
- memory_object =
- Handle<WasmMemoryObject>(instance->memory_object(), isolate_);
- }
-
- desc.set_value(memory_object);
+ // Export the memory as a WebAssembly.Memory object. A WasmMemoryObject
+ // should already be available if the module has memory, since we always
+        // create or import it when building a WasmInstanceObject.
+ DCHECK(instance->has_memory_object());
+ desc.set_value(
+ Handle<WasmMemoryObject>(instance->memory_object(), isolate_));
break;
}
case kExternalGlobal: {
@@ -2006,19 +2877,80 @@ void AsyncCompileJob::Start() {
DoAsync<DecodeModule>(); // --
}
+void AsyncCompileJob::Abort() {
+ background_task_manager_.CancelAndWait();
+ if (num_pending_foreground_tasks_ == 0) {
+ // No task is pending, we can just remove the AsyncCompileJob.
+ isolate_->wasm_compilation_manager()->RemoveJob(this);
+ } else {
+ // There is still a compilation task in the task queue. We enter the
+ // AbortCompilation state and wait for this compilation task to abort the
+ // AsyncCompileJob.
+ NextStep<AbortCompilation>();
+ }
+}
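
The abort path above distinguishes an idle job from one with a queued foreground task. A minimal standalone sketch of that decision, with toy types rather than V8's:

    // Toy sketch (hypothetical types, not the V8 API).
    #include <cassert>

    struct Job {
      int num_pending_foreground_tasks = 0;
      bool removed = false;
      bool abort_step_scheduled = false;

      void Abort() {
        // Background tasks would be cancelled and joined here.
        if (num_pending_foreground_tasks == 0) {
          removed = true;  // Nothing in flight: remove the job immediately.
        } else {
          // A foreground task is still queued; let it execute the abort step.
          abort_step_scheduled = true;
        }
      }
    };

    int main() {
      Job idle, busy;
      busy.num_pending_foreground_tasks = 1;
      idle.Abort();
      busy.Abort();
      assert(idle.removed && !idle.abort_step_scheduled);
      assert(!busy.removed && busy.abort_step_scheduled);
    }
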
+
+class AsyncStreamingProcessor final : public StreamingProcessor {
+ public:
+ explicit AsyncStreamingProcessor(AsyncCompileJob* job);
+
+ bool ProcessModuleHeader(Vector<const uint8_t> bytes,
+ uint32_t offset) override;
+
+ bool ProcessSection(SectionCode section_code, Vector<const uint8_t> bytes,
+ uint32_t offset) override;
+
+ bool ProcessCodeSectionHeader(size_t functions_count,
+ uint32_t offset) override;
+
+ bool ProcessFunctionBody(Vector<const uint8_t> bytes,
+ uint32_t offset) override;
+
+ void OnFinishedChunk() override;
+
+ void OnFinishedStream(std::unique_ptr<uint8_t[]> bytes,
+ size_t length) override;
+
+ void OnError(DecodeResult result) override;
+
+ void OnAbort() override;
+
+ private:
+  // Finishes the AsyncCompileJob with an error.
+ void FinishAsyncCompileJobWithError(ResultBase result);
+
+ ModuleDecoder decoder_;
+ AsyncCompileJob* job_;
+ std::unique_ptr<ModuleCompiler::CompilationUnitBuilder>
+ compilation_unit_builder_;
+ uint32_t next_function_ = 0;
+};
+
+std::shared_ptr<StreamingDecoder> AsyncCompileJob::CreateStreamingDecoder() {
+ DCHECK_NULL(stream_);
+ stream_.reset(
+ new StreamingDecoder(base::make_unique<AsyncStreamingProcessor>(this)));
+ return stream_;
+}
+
AsyncCompileJob::~AsyncCompileJob() {
background_task_manager_.CancelAndWait();
for (auto d : deferred_handles_) delete d;
}
void AsyncCompileJob::AsyncCompileFailed(ErrorThrower& thrower) {
+ if (stream_) stream_->NotifyError();
+ // {job} keeps the {this} pointer alive.
+ std::shared_ptr<AsyncCompileJob> job =
+ isolate_->wasm_compilation_manager()->RemoveJob(this);
RejectPromise(isolate_, context_, thrower, module_promise_);
- isolate_->wasm_compilation_manager()->RemoveJob(this);
}
void AsyncCompileJob::AsyncCompileSucceeded(Handle<Object> result) {
+ // {job} keeps the {this} pointer alive.
+ std::shared_ptr<AsyncCompileJob> job =
+ isolate_->wasm_compilation_manager()->RemoveJob(this);
ResolvePromise(isolate_, context_, module_promise_, result);
- isolate_->wasm_compilation_manager()->RemoveJob(this);
}
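
RemoveJob returning a std::shared_ptr is what keeps {this} alive through the promise resolution above. A self-contained sketch of the keep-alive idiom, using hypothetical Manager/Job types:

    #include <cassert>
    #include <memory>
    #include <vector>

    class Job;

    class Manager {
     public:
      void Add(std::shared_ptr<Job> job) { jobs_.push_back(std::move(job)); }

      // Returns the removed job so the caller's local shared_ptr keeps the
      // object alive for the rest of the calling method, even though the
      // manager no longer references it.
      std::shared_ptr<Job> Remove(Job* raw) {
        for (auto it = jobs_.begin(); it != jobs_.end(); ++it) {
          if (it->get() == raw) {
            std::shared_ptr<Job> result = *it;
            jobs_.erase(it);
            return result;
          }
        }
        return nullptr;
      }

     private:
      std::vector<std::shared_ptr<Job>> jobs_;
    };

    class Job {
     public:
      explicit Job(Manager* manager) : manager_(manager) {}
      void Succeed() {
        // {self} keeps {this} alive until the end of this method.
        std::shared_ptr<Job> self = manager_->Remove(this);
        assert(self.get() == this);
        // ... the promise would be resolved here; {this} is still valid ...
      }

     private:
      Manager* manager_;
    };

    int main() {
      Manager manager;
      auto job = std::make_shared<Job>(&manager);
      Job* raw = job.get();
      manager.Add(std::move(job));  // manager now holds the only reference
      raw->Succeed();               // safe: Remove() returns a keep-alive ref
    }
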
// A closure to run a compilation step (either as foreground or background
@@ -2032,7 +2964,9 @@ class AsyncCompileJob::CompileStep {
void Run(bool on_foreground) {
if (on_foreground) {
- DCHECK_EQ(1, job_->num_pending_foreground_tasks_--);
+ HandleScope scope(job_->isolate_);
+ --job_->num_pending_foreground_tasks_;
+ DCHECK_EQ(0, job_->num_pending_foreground_tasks_);
SaveContext saved_context(job_->isolate_);
job_->isolate_->set_context(*job_->context_);
RunInForeground();
@@ -2070,16 +3004,19 @@ class AsyncCompileJob::CompileTask : public CancelableTask {
};
void AsyncCompileJob::StartForegroundTask() {
- DCHECK_EQ(0, num_pending_foreground_tasks_++);
-
- V8::GetCurrentPlatform()->CallOnForegroundThread(
- reinterpret_cast<v8::Isolate*>(isolate_), new CompileTask(this, true));
+ ++num_pending_foreground_tasks_;
+ DCHECK_EQ(1, num_pending_foreground_tasks_);
+
+ v8::Platform* platform = V8::GetCurrentPlatform();
+ // TODO(ahaas): This is a CHECK to debug issue 764313.
+ CHECK(platform);
+ platform->CallOnForegroundThread(reinterpret_cast<v8::Isolate*>(isolate_),
+ new CompileTask(this, true));
}
-template <typename State, typename... Args>
+template <typename Step, typename... Args>
void AsyncCompileJob::DoSync(Args&&... args) {
- step_.reset(new State(std::forward<Args>(args)...));
- step_->job_ = this;
+ NextStep<Step>(std::forward<Args>(args)...);
StartForegroundTask();
}
@@ -2088,16 +3025,30 @@ void AsyncCompileJob::StartBackgroundTask() {
new CompileTask(this, false), v8::Platform::kShortRunningTask);
}
-template <typename State, typename... Args>
+void AsyncCompileJob::RestartBackgroundTasks() {
+ size_t num_restarts = stopped_tasks_.Value();
+ stopped_tasks_.Decrement(num_restarts);
+
+ for (size_t i = 0; i < num_restarts; ++i) {
+ StartBackgroundTask();
+ }
+}
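
RestartBackgroundTasks drains the atomic counter of stopped tasks and restarts exactly that many. A standalone sketch of the same pattern with std::atomic, where exchange(0) stands in for the Value()/Decrement() pair:

    #include <atomic>
    #include <cstddef>
    #include <iostream>

    std::atomic<size_t> stopped_tasks{0};

    void StartBackgroundTask() { std::cout << "restarted one task\n"; }

    void RestartBackgroundTasks() {
      // Claim the tasks stopped so far; increments racing past the exchange
      // are simply picked up by the next call.
      size_t num_restarts = stopped_tasks.exchange(0);
      for (size_t i = 0; i < num_restarts; ++i) StartBackgroundTask();
    }

    int main() {
      stopped_tasks += 3;
      RestartBackgroundTasks();  // restarts exactly three tasks
    }
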
+
+template <typename Step, typename... Args>
void AsyncCompileJob::DoAsync(Args&&... args) {
- step_.reset(new State(std::forward<Args>(args)...));
- step_->job_ = this;
+ NextStep<Step>(std::forward<Args>(args)...);
size_t end = step_->NumberOfBackgroundTasks();
for (size_t i = 0; i < end; ++i) {
StartBackgroundTask();
}
}
+template <typename Step, typename... Args>
+void AsyncCompileJob::NextStep(Args&&... args) {
+ step_.reset(new Step(std::forward<Args>(args)...));
+ step_->job_ = this;
+}
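
DoSync, DoAsync and NextStep all funnel into the same perfect-forwarding step constructor; only the scheduling differs. A toy sketch of the scheme with illustrative names:

    #include <iostream>
    #include <memory>
    #include <utility>

    struct CompileStep {
      virtual ~CompileStep() = default;
      virtual void Run() = 0;
    };

    struct Pipeline {
      std::unique_ptr<CompileStep> step_;

      // NextStep only swaps the current step; DoSync/DoAsync additionally
      // schedule a task to execute it.
      template <typename Step, typename... Args>
      void NextStep(Args&&... args) {
        step_ = std::make_unique<Step>(std::forward<Args>(args)...);
      }

      template <typename Step, typename... Args>
      void DoSync(Args&&... args) {
        NextStep<Step>(std::forward<Args>(args)...);
        step_->Run();  // stands in for StartForegroundTask()
      }
    };

    struct Decode : CompileStep {
      explicit Decode(int num_bytes) : num_bytes_(num_bytes) {}
      void Run() override { std::cout << "decoding " << num_bytes_ << " bytes\n"; }
      int num_bytes_;
    };

    int main() {
      Pipeline pipeline;
      pipeline.DoSync<Decode>(42);
    }
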
+
//==========================================================================
// Step 1: (async) Decode the module.
//==========================================================================
@@ -2121,7 +3072,8 @@ class AsyncCompileJob::DecodeModule : public AsyncCompileJob::CompileStep {
job_->DoSync<DecodeFail>(std::move(result));
} else {
// Decode passed.
- job_->DoSync<PrepareAndStartCompile>(std::move(result.val));
+ job_->module_ = std::move(result.val);
+ job_->DoSync<PrepareAndStartCompile>(job_->module_.get(), true);
}
}
};
@@ -2137,7 +3089,6 @@ class AsyncCompileJob::DecodeFail : public CompileStep {
ModuleResult result_;
void RunInForeground() override {
TRACE_COMPILE("(1b) Decoding failed.\n");
- HandleScope scope(job_->isolate_);
ErrorThrower thrower(job_->isolate_, "AsyncCompile");
thrower.CompileFailed("Wasm decoding failed", result_);
// {job_} is deleted in AsyncCompileFailed, therefore the {return}.
@@ -2150,20 +3101,21 @@ class AsyncCompileJob::DecodeFail : public CompileStep {
//==========================================================================
class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
public:
- explicit PrepareAndStartCompile(std::unique_ptr<WasmModule> module)
- : module_(std::move(module)) {}
+ explicit PrepareAndStartCompile(WasmModule* module, bool start_compilation)
+ : module_(module), start_compilation_(start_compilation) {}
private:
- std::unique_ptr<WasmModule> module_;
+ WasmModule* module_;
+ bool start_compilation_;
+
void RunInForeground() override {
TRACE_COMPILE("(2) Prepare and start compile...\n");
Isolate* isolate = job_->isolate_;
- HandleScope scope(isolate);
Factory* factory = isolate->factory();
Handle<Code> illegal_builtin = BUILTIN_CODE(isolate, Illegal);
job_->module_env_ =
- CreateDefaultModuleEnv(isolate, module_.get(), illegal_builtin);
+ CreateDefaultModuleEnv(isolate, module_, illegal_builtin);
// The {code_table} array contains import wrappers and functions (which
    // are both included in {functions.size()}).
@@ -2181,9 +3133,7 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
}
// Transfer ownership of the {WasmModule} to the {ModuleCompiler}, but
// keep a pointer.
- WasmModule* module = module_.get();
Handle<Code> centry_stub = CEntryStub(isolate, 1).GetCode();
-
{
// Now reopen the handles in a deferred scope in order to use
// them in the concurrent steps.
@@ -2202,13 +3152,12 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
job_->deferred_handles_.push_back(deferred.Detach());
}
- job_->compiler_.reset(
- new ModuleCompiler(isolate, std::move(module_), centry_stub));
+ job_->compiler_.reset(new ModuleCompiler(isolate, module_, centry_stub));
job_->compiler_->EnableThrottling();
- DCHECK_LE(module->num_imported_functions, module->functions.size());
+ DCHECK_LE(module_->num_imported_functions, module_->functions.size());
size_t num_functions =
- module->functions.size() - module->num_imported_functions;
+ module_->functions.size() - module_->num_imported_functions;
if (num_functions == 0) {
// Degenerate case of an empty module.
job_->DoSync<FinishCompile>();
@@ -2222,10 +3171,20 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
V8::GetCurrentPlatform()
->NumberOfAvailableBackgroundThreads())));
- job_->outstanding_units_ = job_->compiler_->InitializeCompilationUnits(
- module->functions, job_->wire_bytes_, job_->module_env_.get());
- job_->DoAsync<ExecuteAndFinishCompilationUnits>(num_background_tasks);
+ if (start_compilation_) {
+ // TODO(ahaas): Try to remove the {start_compilation_} check when
+ // streaming decoding is done in the background. If
+ // InitializeCompilationUnits always returns 0 for streaming compilation,
+      // then DoAsync would already do the same as NextStep.
+ job_->outstanding_units_ = job_->compiler_->InitializeCompilationUnits(
+ module_->functions, job_->wire_bytes_, job_->module_env_.get());
+
+ job_->DoAsync<ExecuteAndFinishCompilationUnits>(num_background_tasks);
+ } else {
+ job_->stopped_tasks_ = num_background_tasks;
+ job_->NextStep<ExecuteAndFinishCompilationUnits>(num_background_tasks);
+ }
}
};
@@ -2253,16 +3212,7 @@ class AsyncCompileJob::ExecuteAndFinishCompilationUnits : public CompileStep {
break;
}
}
- stopped_tasks_.Increment(1);
- }
-
- void RestartCompilationTasks() {
- size_t num_restarts = stopped_tasks_.Value();
- stopped_tasks_.Decrement(num_restarts);
-
- for (size_t i = 0; i < num_restarts; ++i) {
- job_->StartBackgroundTask();
- }
+ job_->stopped_tasks_.Increment(1);
}
void RunInForeground() override {
@@ -2272,7 +3222,6 @@ class AsyncCompileJob::ExecuteAndFinishCompilationUnits : public CompileStep {
job_->compiler_->SetFinisherIsRunning(false);
return;
}
- HandleScope scope(job_->isolate_);
ErrorThrower thrower(job_->isolate_, "AsyncCompile");
// We execute for 1 ms and then reschedule the task, same as the GC.
@@ -2280,7 +3229,7 @@ class AsyncCompileJob::ExecuteAndFinishCompilationUnits : public CompileStep {
while (true) {
if (!finished_ && job_->compiler_->ShouldIncreaseWorkload()) {
- RestartCompilationTasks();
+ job_->RestartBackgroundTasks();
}
int func_index = -1;
@@ -2299,7 +3248,7 @@ class AsyncCompileJob::ExecuteAndFinishCompilationUnits : public CompileStep {
// FinishCompilationUnits task again.
break;
} else {
- DCHECK(func_index >= 0);
+ DCHECK_LE(0, func_index);
job_->code_table_->set(func_index, *result.ToHandleChecked());
--job_->outstanding_units_;
}
@@ -2323,14 +3272,13 @@ class AsyncCompileJob::ExecuteAndFinishCompilationUnits : public CompileStep {
if (job_->outstanding_units_ == 0) {
// Make sure all compilation tasks stopped running.
job_->background_task_manager_.CancelAndWait();
- job_->DoSync<FinishCompile>();
+ if (job_->DecrementAndCheckFinisherCount()) job_->DoSync<FinishCompile>();
}
}
private:
std::atomic<bool> failed_{false};
std::atomic<bool> finished_{false};
- base::AtomicNumber<size_t> stopped_tasks_{0};
};
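
The finisher step above works for roughly one millisecond and then reschedules itself, mirroring the incremental GC tasks. A minimal sketch of such a time-sliced loop (plain std::chrono, toy work items):

    #include <chrono>
    #include <deque>
    #include <iostream>

    int main() {
      std::deque<int> units{1, 2, 3, 4, 5};
      bool reschedule = false;
      auto deadline =
          std::chrono::steady_clock::now() + std::chrono::milliseconds(1);
      while (!units.empty()) {
        if (std::chrono::steady_clock::now() > deadline) {
          reschedule = true;  // yield to other foreground work, resume later
          break;
        }
        std::cout << "finished unit " << units.front() << "\n";
        units.pop_front();
      }
      std::cout << (reschedule ? "rescheduled\n" : "all units finished\n");
    }
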
//==========================================================================
@@ -2339,7 +3287,6 @@ class AsyncCompileJob::ExecuteAndFinishCompilationUnits : public CompileStep {
class AsyncCompileJob::FinishCompile : public CompileStep {
void RunInForeground() override {
TRACE_COMPILE("(5b) Finish compile...\n");
- HandleScope scope(job_->isolate_);
// At this point, compilation has completed. Update the code table.
for (int i = FLAG_skip_compiling_wasm_funcs,
e = job_->code_table_->length();
@@ -2367,8 +3314,8 @@ class AsyncCompileJob::FinishCompile : public CompileStep {
// The {module_wrapper} will take ownership of the {WasmModule} object,
// and it will be destroyed when the GC reclaims the wrapper object.
- Handle<WasmModuleWrapper> module_wrapper = WasmModuleWrapper::New(
- job_->isolate_, job_->compiler_->ReleaseModule().release());
+ Handle<WasmModuleWrapper> module_wrapper =
+ WasmModuleWrapper::From(job_->isolate_, job_->module_.release());
// Create the shared module data.
// TODO(clemensh): For the same module (same bytes / same hash), we should
@@ -2403,10 +3350,11 @@ class AsyncCompileJob::FinishCompile : public CompileStep {
// Step 6 (sync): Compile JS->wasm wrappers.
//==========================================================================
class AsyncCompileJob::CompileWrappers : public CompileStep {
+ // TODO(wasm): Compile all wrappers here, including the start function wrapper
+ // and the wrappers for the function table elements.
void RunInForeground() override {
TRACE_COMPILE("(6) Compile wrappers...\n");
// Compile JS->wasm wrappers for exported functions.
- HandleScope scope(job_->isolate_);
JSToWasmWrapperCache js_to_wasm_cache;
int wrapper_index = 0;
WasmModule* module = job_->compiled_module_->module();
@@ -2432,7 +3380,6 @@ class AsyncCompileJob::CompileWrappers : public CompileStep {
class AsyncCompileJob::FinishModule : public CompileStep {
void RunInForeground() override {
TRACE_COMPILE("(7) Finish module...\n");
- HandleScope scope(job_->isolate_);
Handle<WasmModuleObject> result =
WasmModuleObject::New(job_->isolate_, job_->compiled_module_);
// {job_} is deleted in AsyncCompileSucceeded, therefore the {return}.
@@ -2440,8 +3387,166 @@ class AsyncCompileJob::FinishModule : public CompileStep {
}
};
-#undef TRACE
+class AsyncCompileJob::AbortCompilation : public CompileStep {
+ void RunInForeground() override {
+ TRACE_COMPILE("Abort asynchronous compilation ...\n");
+ job_->isolate_->wasm_compilation_manager()->RemoveJob(job_);
+ }
+};
+
+AsyncStreamingProcessor::AsyncStreamingProcessor(AsyncCompileJob* job)
+ : job_(job), compilation_unit_builder_(nullptr) {}
+
+void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(ResultBase error) {
+ // Make sure all background tasks stopped executing before we change the state
+ // of the AsyncCompileJob to DecodeFail.
+ job_->background_task_manager_.CancelAndWait();
+
+  // Create a ModuleResult from the result we got as parameter. Since there was
+  // an error, we don't have to provide a real wasm module to the ModuleResult.
+ ModuleResult result(nullptr);
+ result.MoveErrorFrom(error);
+
+ // Check if there is already a ModuleCompiler, in which case we have to clean
+ // it up as well.
+ if (job_->compiler_) {
+    // If {IsFinisherRunning} is true, a foreground task is already in the
+    // task queue; it will pick up and execute the DecodeFail step, so we do
+    // not have to start a new task ourselves with DoSync.
+ if (job_->compiler_->IsFinisherRunning()) {
+ job_->NextStep<AsyncCompileJob::DecodeFail>(std::move(result));
+ } else {
+ job_->DoSync<AsyncCompileJob::DecodeFail>(std::move(result));
+ }
+
+ compilation_unit_builder_->Clear();
+ } else {
+ job_->DoSync<AsyncCompileJob::DecodeFail>(std::move(result));
+ }
+}
+
+// Process the module header.
+bool AsyncStreamingProcessor::ProcessModuleHeader(Vector<const uint8_t> bytes,
+ uint32_t offset) {
+ TRACE_STREAMING("Process module header...\n");
+ decoder_.StartDecoding(job_->isolate());
+ decoder_.DecodeModuleHeader(bytes, offset);
+ if (!decoder_.ok()) {
+ FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false));
+ return false;
+ }
+ return true;
+}
+
+// Process all sections except for the code section.
+bool AsyncStreamingProcessor::ProcessSection(SectionCode section_code,
+ Vector<const uint8_t> bytes,
+ uint32_t offset) {
+ TRACE_STREAMING("Process section %d ...\n", section_code);
+ if (section_code == SectionCode::kUnknownSectionCode) {
+ // No need to decode unknown sections, even the names section. If decoding
+    // of the unknown section fails, compilation should succeed anyway, and
+ // even decoding the names section is unnecessary because the result comes
+ // too late for streaming compilation.
+ return true;
+ }
+ constexpr bool verify_functions = false;
+ decoder_.DecodeSection(section_code, bytes, offset, verify_functions);
+ if (!decoder_.ok()) {
+ FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false));
+ return false;
+ }
+ return true;
+}
+
+// Start the code section.
+bool AsyncStreamingProcessor::ProcessCodeSectionHeader(size_t functions_count,
+ uint32_t offset) {
+ TRACE_STREAMING("Start the code section with %zu functions...\n",
+ functions_count);
+ if (!decoder_.CheckFunctionsCount(static_cast<uint32_t>(functions_count),
+ offset)) {
+ FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false));
+ return false;
+ }
+ job_->NextStep<AsyncCompileJob::PrepareAndStartCompile>(decoder_.module(),
+ false);
+ // Execute the PrepareAndStartCompile step immediately and not in a separate
+  // task. The step expects to be run in a separate foreground task though, so
+  // we increment {num_pending_foreground_tasks_} to make it look like one.
+ ++job_->num_pending_foreground_tasks_;
+ DCHECK_EQ(1, job_->num_pending_foreground_tasks_);
+ constexpr bool on_foreground = true;
+ job_->step_->Run(on_foreground);
+
+ job_->outstanding_units_ = functions_count;
+ // Set outstanding_finishers_ to 2, because both the AsyncCompileJob and the
+ // AsyncStreamingProcessor have to finish.
+ job_->outstanding_finishers_.SetValue(2);
+ next_function_ = decoder_.module()->num_imported_functions +
+ FLAG_skip_compiling_wasm_funcs;
+ compilation_unit_builder_.reset(
+ new ModuleCompiler::CompilationUnitBuilder(job_->compiler_.get()));
+ return true;
+}
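
Setting {outstanding_finishers_} to 2 sets up a race in which only the last of the two finishers (compile job and streaming processor) runs FinishCompile. A standalone sketch of that handshake with std::atomic:

    #include <atomic>
    #include <cassert>

    std::atomic<int> outstanding_finishers{2};

    // Returns true only for the caller that performs the final decrement.
    bool DecrementAndCheckFinisherCount() {
      return outstanding_finishers.fetch_sub(1) == 1;
    }

    int main() {
      bool stream_was_last = DecrementAndCheckFinisherCount();   // first caller
      bool compile_was_last = DecrementAndCheckFinisherCount();  // last caller
      assert(!stream_was_last);
      assert(compile_was_last);  // only this caller would start FinishCompile
    }
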
+
+// Process a function body.
+bool AsyncStreamingProcessor::ProcessFunctionBody(Vector<const uint8_t> bytes,
+ uint32_t offset) {
+ TRACE_STREAMING("Process function body %d ...\n", next_function_);
+
+ decoder_.DecodeFunctionBody(
+ next_function_, static_cast<uint32_t>(bytes.length()), offset, false);
+ if (next_function_ >= decoder_.module()->num_imported_functions +
+ FLAG_skip_compiling_wasm_funcs) {
+ const WasmFunction* func = &decoder_.module()->functions[next_function_];
+ WasmName name = {nullptr, 0};
+ compilation_unit_builder_->AddUnit(job_->module_env_.get(), func, offset,
+ bytes, name);
+ }
+ ++next_function_;
+ return true;
+}
+
+void AsyncStreamingProcessor::OnFinishedChunk() {
+  TRACE_STREAMING("FinishChunk...\n");
+ if (compilation_unit_builder_) {
+ compilation_unit_builder_->Commit();
+ job_->RestartBackgroundTasks();
+ }
+}
+
+// Finish the processing of the stream.
+void AsyncStreamingProcessor::OnFinishedStream(std::unique_ptr<uint8_t[]> bytes,
+ size_t length) {
+ TRACE_STREAMING("Finish stream...\n");
+ job_->bytes_copy_ = std::move(bytes);
+ job_->wire_bytes_ = ModuleWireBytes(job_->bytes_copy_.get(),
+ job_->bytes_copy_.get() + length);
+ ModuleResult result = decoder_.FinishDecoding(false);
+ DCHECK(result.ok());
+ job_->module_ = std::move(result.val);
+ if (job_->DecrementAndCheckFinisherCount())
+ job_->DoSync<AsyncCompileJob::FinishCompile>();
+}
+
+// Report an error detected in the StreamingDecoder.
+void AsyncStreamingProcessor::OnError(DecodeResult result) {
+ TRACE_STREAMING("Stream error...\n");
+ FinishAsyncCompileJobWithError(std::move(result));
+}
+
+void AsyncStreamingProcessor::OnAbort() {
+ TRACE_STREAMING("Abort stream...\n");
+ job_->Abort();
+}
} // namespace wasm
} // namespace internal
} // namespace v8
+
+#undef TRACE
+#undef TRACE_COMPILE
+#undef TRACE_STREAMING
+#undef TRACE_CHAIN
+#undef ERROR_THROWER_WITH_MESSAGE
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index 084b6833fd..42ea037d03 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -8,311 +8,72 @@
#include <functional>
#include "src/base/atomic-utils.h"
-#include "src/base/utils/random-number-generator.h"
#include "src/cancelable-task.h"
-#include "src/compiler/wasm-compiler.h"
#include "src/isolate.h"
-#include "src/wasm/wasm-code-specialization.h"
+#include "src/wasm/module-decoder.h"
+#include "src/wasm/streaming-decoder.h"
+#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
namespace wasm {
-// A class compiling an entire module.
-class ModuleCompiler {
- public:
- // The ModuleCompiler takes ownership of the {WasmModule}.
- // In {CompileToModuleObject}, it will transfer ownership to the generated
- // {WasmModuleWrapper}. If this method is not called, ownership may be
-  // reclaimed by explicitly releasing the {module_} field.
- ModuleCompiler(Isolate* isolate, std::unique_ptr<WasmModule> module,
- Handle<Code> centry_stub);
-
- // The actual runnable task that performs compilations in the background.
- class CompilationTask : public CancelableTask {
- public:
- ModuleCompiler* compiler_;
- explicit CompilationTask(ModuleCompiler* helper);
-
- void RunInternal() override;
- };
-
- // The CompilationUnitBuilder builds compilation units and stores them in an
- // internal buffer. The buffer is moved into the working queue of the
- // ModuleCompiler when {Commit} is called.
- class CompilationUnitBuilder {
- public:
- explicit CompilationUnitBuilder(ModuleCompiler* compiler)
- : compiler_(compiler) {}
-
- ~CompilationUnitBuilder() { DCHECK(units_.empty()); }
-
- void AddUnit(compiler::ModuleEnv* module_env, const WasmFunction* function,
- uint32_t buffer_offset, Vector<const uint8_t> bytes,
- WasmName name) {
- units_.emplace_back(new compiler::WasmCompilationUnit(
- compiler_->isolate_, module_env,
- wasm::FunctionBody{function->sig, buffer_offset, bytes.begin(),
- bytes.end()},
- name, function->func_index, compiler_->centry_stub_,
- compiler_->async_counters()));
- }
-
- void Commit() {
- {
- base::LockGuard<base::Mutex> guard(
- &compiler_->compilation_units_mutex_);
- compiler_->compilation_units_.insert(
- compiler_->compilation_units_.end(),
- std::make_move_iterator(units_.begin()),
- std::make_move_iterator(units_.end()));
- }
- units_.clear();
- }
-
- private:
- ModuleCompiler* compiler_;
- std::vector<std::unique_ptr<compiler::WasmCompilationUnit>> units_;
- };
-
- class CodeGenerationSchedule {
- public:
- explicit CodeGenerationSchedule(
- base::RandomNumberGenerator* random_number_generator,
- size_t max_memory = 0);
-
- void Schedule(std::unique_ptr<compiler::WasmCompilationUnit>&& item);
-
- bool IsEmpty() const { return schedule_.empty(); }
-
- std::unique_ptr<compiler::WasmCompilationUnit> GetNext();
-
- bool CanAcceptWork() const;
-
- bool ShouldIncreaseWorkload() const;
-
- void EnableThrottling() { throttle_ = true; }
-
- private:
- size_t GetRandomIndexInSchedule();
-
- base::RandomNumberGenerator* random_number_generator_ = nullptr;
- std::vector<std::unique_ptr<compiler::WasmCompilationUnit>> schedule_;
- const size_t max_memory_;
- bool throttle_ = false;
- base::AtomicNumber<size_t> allocated_memory_{0};
- };
-
- const std::shared_ptr<Counters>& async_counters() const {
- return async_counters_;
- }
- Counters* counters() const { return async_counters().get(); }
-
- // Run by each compilation task and by the main thread (i.e. in both
- // foreground and background threads). The no_finisher_callback is called
- // within the result_mutex_ lock when no finishing task is running, i.e. when
- // the finisher_is_running_ flag is not set.
- bool FetchAndExecuteCompilationUnit(
- std::function<void()> no_finisher_callback = nullptr);
-
- void OnBackgroundTaskStopped();
-
- void EnableThrottling() { executed_units_.EnableThrottling(); }
-
- bool CanAcceptWork() const { return executed_units_.CanAcceptWork(); }
-
- bool ShouldIncreaseWorkload() const {
- return executed_units_.ShouldIncreaseWorkload();
- }
-
- size_t InitializeCompilationUnits(const std::vector<WasmFunction>& functions,
- const ModuleWireBytes& wire_bytes,
- compiler::ModuleEnv* module_env);
-
- void RestartCompilationTasks();
+class ModuleCompiler;
+
+V8_EXPORT_PRIVATE bool SyncValidate(Isolate* isolate,
+ const ModuleWireBytes& bytes);
+
+V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> SyncCompileTranslatedAsmJs(
+ Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
+ Handle<Script> asm_js_script, Vector<const byte> asm_js_offset_table_bytes);
+
+V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> SyncCompile(
+ Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes);
+
+V8_EXPORT_PRIVATE MaybeHandle<WasmInstanceObject> SyncInstantiate(
+ Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
+ MaybeHandle<JSArrayBuffer> memory);
+
+V8_EXPORT_PRIVATE MaybeHandle<WasmInstanceObject> SyncCompileAndInstantiate(
+ Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
+ MaybeHandle<JSReceiver> imports, MaybeHandle<JSArrayBuffer> memory);
+
+V8_EXPORT_PRIVATE void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
+ const ModuleWireBytes& bytes);
+
+V8_EXPORT_PRIVATE void AsyncInstantiate(Isolate* isolate,
+ Handle<JSPromise> promise,
+ Handle<WasmModuleObject> module_object,
+ MaybeHandle<JSReceiver> imports);
+
+// Triggered by the WasmCompileLazy builtin.
+// Walks the stack (top three frames) to determine the wasm instance involved
+// and which function to compile.
+// Then triggers WasmCompiledModule::CompileLazy, taking care of correctly
+// patching the call site or indirect function tables.
+// Returns either the Code object that has been lazily compiled, or Illegal if
+// an error occurred. In the latter case, a pending exception has been set,
+// which will be triggered when returning from the runtime function, i.e. the
+// Illegal builtin will never be called.
+Handle<Code> CompileLazy(Isolate* isolate);
+
+// This class orchestrates the lazy compilation of wasm functions. It is
+// triggered by the WasmCompileLazy builtin.
+// It contains the logic for compiling and specializing wasm functions, and
+// patching the calling wasm code.
+// Once we support concurrent lazy compilation, this class will contain the
+// logic to actually orchestrate parallel execution of wasm compilation jobs.
+// TODO(clemensh): Implement concurrent lazy compilation.
+class LazyCompilationOrchestrator {
+ void CompileFunction(Isolate*, Handle<WasmInstanceObject>, int func_index);
- size_t FinishCompilationUnits(std::vector<Handle<Code>>& results,
- ErrorThrower* thrower);
-
- void SetFinisherIsRunning(bool value);
-
- MaybeHandle<Code> FinishCompilationUnit(ErrorThrower* thrower,
- int* func_index);
-
- void CompileInParallel(const ModuleWireBytes& wire_bytes,
- compiler::ModuleEnv* module_env,
- std::vector<Handle<Code>>& results,
- ErrorThrower* thrower);
-
- void CompileSequentially(const ModuleWireBytes& wire_bytes,
- compiler::ModuleEnv* module_env,
- std::vector<Handle<Code>>& results,
- ErrorThrower* thrower);
-
- void ValidateSequentially(const ModuleWireBytes& wire_bytes,
- compiler::ModuleEnv* module_env,
- ErrorThrower* thrower);
-
- MaybeHandle<WasmModuleObject> CompileToModuleObject(
- ErrorThrower* thrower, const ModuleWireBytes& wire_bytes,
- Handle<Script> asm_js_script,
- Vector<const byte> asm_js_offset_table_bytes);
-
- std::unique_ptr<WasmModule> ReleaseModule() { return std::move(module_); }
-
- private:
- MaybeHandle<WasmModuleObject> CompileToModuleObjectInternal(
- Isolate* isolate, ErrorThrower* thrower,
- const ModuleWireBytes& wire_bytes, Handle<Script> asm_js_script,
- Vector<const byte> asm_js_offset_table_bytes);
-
- Isolate* isolate_;
- std::unique_ptr<WasmModule> module_;
- const std::shared_ptr<Counters> async_counters_;
- std::vector<std::unique_ptr<compiler::WasmCompilationUnit>>
- compilation_units_;
- base::Mutex compilation_units_mutex_;
- CodeGenerationSchedule executed_units_;
- base::Mutex result_mutex_;
- const size_t num_background_tasks_;
- // This flag should only be set while holding result_mutex_.
- bool finisher_is_running_ = false;
- CancelableTaskManager background_task_manager_;
- size_t stopped_compilation_tasks_ = 0;
- base::Mutex tasks_mutex_;
- Handle<Code> centry_stub_;
-};
-
-class JSToWasmWrapperCache {
public:
- Handle<Code> CloneOrCompileJSToWasmWrapper(Isolate* isolate,
- wasm::WasmModule* module,
- Handle<Code> wasm_code,
- uint32_t index);
-
- private:
- // sig_map_ maps signatures to an index in code_cache_.
- wasm::SignatureMap sig_map_;
- std::vector<Handle<Code>> code_cache_;
-};
-
-// A helper class to simplify instantiating a module from a compiled module.
-// It closes over the {Isolate}, the {ErrorThrower}, the {WasmCompiledModule},
-// etc.
-class InstanceBuilder {
- public:
- InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
- Handle<WasmModuleObject> module_object,
- MaybeHandle<JSReceiver> ffi,
- MaybeHandle<JSArrayBuffer> memory,
- WeakCallbackInfo<void>::Callback instance_finalizer_callback);
-
- // Build an instance, in all of its glory.
- MaybeHandle<WasmInstanceObject> Build();
-
- private:
- // Represents the initialized state of a table.
- struct TableInstance {
- Handle<WasmTableObject> table_object; // WebAssembly.Table instance
- Handle<FixedArray> js_wrappers; // JSFunctions exported
- Handle<FixedArray> function_table; // internal code array
- Handle<FixedArray> signature_table; // internal sig array
- };
-
- // A pre-evaluated value to use in import binding.
- struct SanitizedImport {
- Handle<String> module_name;
- Handle<String> import_name;
- Handle<Object> value;
- };
-
- Isolate* isolate_;
- WasmModule* const module_;
- const std::shared_ptr<Counters> async_counters_;
- ErrorThrower* thrower_;
- Handle<WasmModuleObject> module_object_;
- MaybeHandle<JSReceiver> ffi_;
- MaybeHandle<JSArrayBuffer> memory_;
- Handle<JSArrayBuffer> globals_;
- Handle<WasmCompiledModule> compiled_module_;
- std::vector<TableInstance> table_instances_;
- std::vector<Handle<JSFunction>> js_wrappers_;
- JSToWasmWrapperCache js_to_wasm_cache_;
- WeakCallbackInfo<void>::Callback instance_finalizer_callback_;
- std::vector<SanitizedImport> sanitized_imports_;
-
- const std::shared_ptr<Counters>& async_counters() const {
- return async_counters_;
- }
- Counters* counters() const { return async_counters().get(); }
-
-// Helper routines to print out errors with imports.
-#define ERROR_THROWER_WITH_MESSAGE(TYPE) \
- void Report##TYPE(const char* error, uint32_t index, \
- Handle<String> module_name, Handle<String> import_name) { \
- thrower_->TYPE("Import #%d module=\"%s\" function=\"%s\" error: %s", \
- index, module_name->ToCString().get(), \
- import_name->ToCString().get(), error); \
- } \
- \
- MaybeHandle<Object> Report##TYPE(const char* error, uint32_t index, \
- Handle<String> module_name) { \
- thrower_->TYPE("Import #%d module=\"%s\" error: %s", index, \
- module_name->ToCString().get(), error); \
- return MaybeHandle<Object>(); \
- }
-
- ERROR_THROWER_WITH_MESSAGE(LinkError)
- ERROR_THROWER_WITH_MESSAGE(TypeError)
-
- // Look up an import value in the {ffi_} object.
- MaybeHandle<Object> LookupImport(uint32_t index, Handle<String> module_name,
- Handle<String> import_name);
-
- // Look up an import value in the {ffi_} object specifically for linking an
- // asm.js module. This only performs non-observable lookups, which allows
- // falling back to JavaScript proper (and hence re-executing all lookups) if
- // module instantiation fails.
- MaybeHandle<Object> LookupImportAsm(uint32_t index,
- Handle<String> import_name);
-
- uint32_t EvalUint32InitExpr(const WasmInitExpr& expr);
-
- // Load data segments into the memory.
- void LoadDataSegments(Address mem_addr, size_t mem_size);
-
- void WriteGlobalValue(WasmGlobal& global, Handle<Object> value);
-
- void SanitizeImports();
- // Process the imports, including functions, tables, globals, and memory, in
- // order, loading them from the {ffi_} object. Returns the number of imported
- // functions.
- int ProcessImports(Handle<FixedArray> code_table,
- Handle<WasmInstanceObject> instance);
-
- template <typename T>
- T* GetRawGlobalPtr(WasmGlobal& global);
-
- // Process initialization of globals.
- void InitGlobals();
-
- // Allocate memory for a module instance as a new JSArrayBuffer.
- Handle<JSArrayBuffer> AllocateMemory(uint32_t num_pages);
-
- bool NeedsWrappers() const;
-
- // Process the exports, creating wrappers for functions, tables, memories,
- // and globals.
- void ProcessExports(Handle<WasmInstanceObject> instance,
- Handle<WasmCompiledModule> compiled_module);
-
- void InitializeTables(Handle<WasmInstanceObject> instance,
- CodeSpecialization* code_specialization);
-
- void LoadTableSegments(Handle<FixedArray> code_table,
- Handle<WasmInstanceObject> instance);
+ Handle<Code> CompileLazy(Isolate*, Handle<WasmInstanceObject>,
+ Handle<Code> caller, int call_offset,
+ int exported_func_index, bool patch_caller);
};
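
The orchestration described in the comment above boils down to compile-on-first-call plus a cache. An illustrative sketch under that reading (toy types; the real orchestrator also patches call sites and indirect function tables):

    #include <functional>
    #include <iostream>
    #include <unordered_map>

    class LazyOrchestrator {
     public:
      using CompiledFn = std::function<int(int)>;

      int Call(int func_index, int arg) {
        auto it = cache_.find(func_index);
        if (it == cache_.end()) {
          std::cout << "lazily compiling function " << func_index << "\n";
          it = cache_.emplace(func_index, Compile(func_index)).first;
        }
        return it->second(arg);  // cache hit on every later call
      }

     private:
      // Stands in for actual code generation.
      static CompiledFn Compile(int func_index) {
        return [func_index](int arg) { return arg + func_index; };
      }
      std::unordered_map<int, CompiledFn> cache_;
    };

    int main() {
      LazyOrchestrator orchestrator;
      orchestrator.Call(7, 1);  // compiles
      orchestrator.Call(7, 2);  // cached
    }
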
// Encapsulates all the state and steps of an asynchronous compilation.
@@ -330,6 +91,10 @@ class AsyncCompileJob {
void Start();
+ std::shared_ptr<StreamingDecoder> CreateStreamingDecoder();
+
+ void Abort();
+
~AsyncCompileJob();
private:
@@ -346,6 +111,41 @@ class AsyncCompileJob {
class FinishCompile;
class CompileWrappers;
class FinishModule;
+ class AbortCompilation;
+
+ const std::shared_ptr<Counters>& async_counters() const {
+ return async_counters_;
+ }
+ Counters* counters() const { return async_counters().get(); }
+
+ void AsyncCompileFailed(ErrorThrower& thrower);
+
+ void AsyncCompileSucceeded(Handle<Object> result);
+
+ void StartForegroundTask();
+
+ void StartBackgroundTask();
+
+ void RestartBackgroundTasks();
+
+ // Switches to the compilation step {Step} and starts a foreground task to
+ // execute it.
+ template <typename Step, typename... Args>
+ void DoSync(Args&&... args);
+
+ // Switches to the compilation step {Step} and starts a background task to
+ // execute it.
+ template <typename Step, typename... Args>
+ void DoAsync(Args&&... args);
+
+ // Switches to the compilation step {Step} but does not start a task to
+ // execute it.
+ template <typename Step, typename... Args>
+ void NextStep(Args&&... args);
+
+ Isolate* isolate() { return isolate_; }
+
+ friend class AsyncStreamingProcessor;
Isolate* isolate_;
const std::shared_ptr<Counters> async_counters_;
@@ -355,6 +155,7 @@ class AsyncCompileJob {
Handle<JSPromise> module_promise_;
std::unique_ptr<ModuleCompiler> compiler_;
std::unique_ptr<compiler::ModuleEnv> module_env_;
+ std::unique_ptr<WasmModule> module_;
std::vector<DeferredHandles*> deferred_handles_;
Handle<WasmModuleObject> module_object_;
@@ -364,29 +165,29 @@ class AsyncCompileJob {
size_t outstanding_units_ = 0;
std::unique_ptr<CompileStep> step_;
CancelableTaskManager background_task_manager_;
-#if DEBUG
- // Counts the number of pending foreground tasks.
- int32_t num_pending_foreground_tasks_ = 0;
-#endif
-
- const std::shared_ptr<Counters>& async_counters() const {
- return async_counters_;
+ // The number of background tasks which stopped executing within a step.
+ base::AtomicNumber<size_t> stopped_tasks_{0};
+
+ // For async compilation the AsyncCompileJob is the only finisher. For
+  // streaming compilation the AsyncStreamingProcessor also has to finish
+  // before compilation can complete.
+ base::AtomicNumber<int32_t> outstanding_finishers_{1};
+
+ // Decrements the number of outstanding finishers. The last caller of this
+ // function should finish the asynchronous compilation, see the comment on
+ // {outstanding_finishers_}.
+ V8_WARN_UNUSED_RESULT bool DecrementAndCheckFinisherCount() {
+ return outstanding_finishers_.Decrement(1) == 0;
}
- Counters* counters() const { return async_counters().get(); }
-
- void AsyncCompileFailed(ErrorThrower& thrower);
-
- void AsyncCompileSucceeded(Handle<Object> result);
-
- template <typename Task, typename... Args>
- void DoSync(Args&&... args);
- void StartForegroundTask();
-
- void StartBackgroundTask();
+ // Counts the number of pending foreground tasks.
+ int32_t num_pending_foreground_tasks_ = 0;
- template <typename Task, typename... Args>
- void DoAsync(Args&&... args);
+ // The AsyncCompileJob owns the StreamingDecoder because the StreamingDecoder
+ // contains data which is needed by the AsyncCompileJob for streaming
+ // compilation. The AsyncCompileJob does not actively use the
+ // StreamingDecoder.
+ std::shared_ptr<StreamingDecoder> stream_;
};
} // namespace wasm
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 5280af7374..d7a0156a7b 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -237,11 +237,17 @@ class WasmSectionIterator {
}
};
+} // namespace
+
// The main logic for decoding the bytes of a module.
-class ModuleDecoder : public Decoder {
+class ModuleDecoderImpl : public Decoder {
public:
- ModuleDecoder(const byte* module_start, const byte* module_end,
- ModuleOrigin origin)
+ explicit ModuleDecoderImpl(ModuleOrigin origin)
+ : Decoder(nullptr, nullptr),
+ origin_(FLAG_assume_asmjs_origin ? kAsmJsOrigin : origin) {}
+
+ ModuleDecoderImpl(const byte* module_start, const byte* module_end,
+ ModuleOrigin origin)
: Decoder(module_start, module_end),
origin_(FLAG_assume_asmjs_origin ? kAsmJsOrigin : origin) {
if (end_ < start_) {
@@ -265,13 +271,9 @@ class ModuleDecoder : public Decoder {
}
// Files are named `HASH.{ok,failed}.wasm`.
size_t hash = base::hash_range(start_, end_);
- char buf[32] = {'\0'};
-#if V8_OS_WIN && _MSC_VER < 1900
-#define snprintf sprintf_s
-#endif
- snprintf(buf, sizeof(buf) - 1, "%016zx.%s.wasm", hash,
- result.ok() ? "ok" : "failed");
- std::string name(buf);
+ EmbeddedVector<char, 32> buf;
+ SNPrintF(buf, "%016zx.%s.wasm", hash, result.ok() ? "ok" : "failed");
+ std::string name(buf.start());
if (FILE* wasm_file = base::OS::FOpen((path + name).c_str(), "wb")) {
if (fwrite(start_, end_ - start_, 1, wasm_file) != 1) {
OFStream os(stderr);
@@ -316,12 +318,16 @@ class ModuleDecoder : public Decoder {
BYTES(kWasmVersion), BYTES(magic_version));
}
}
+#undef BYTES
}
void DecodeSection(SectionCode section_code, Vector<const uint8_t> bytes,
uint32_t offset, bool verify_functions = true) {
if (failed()) return;
Reset(bytes, offset);
+ TRACE("Section: %s\n", SectionName(section_code));
+ TRACE("Decode Section %p - %p\n", static_cast<const void*>(bytes.begin()),
+ static_cast<const void*>(bytes.end()));
// Check if the section is out-of-order.
if (section_code < next_section_) {
@@ -477,7 +483,8 @@ class ModuleDecoder : public Decoder {
consume_resizable_limits(
"memory", "pages", FLAG_wasm_max_mem_pages,
&module_->initial_pages, &module_->has_maximum_pages,
- kSpecMaxWasmMemoryPages, &module_->maximum_pages);
+ kSpecMaxWasmMemoryPages, &module_->maximum_pages,
+ &module_->has_shared_memory);
break;
}
case kExternalGlobal: {
@@ -545,7 +552,7 @@ class ModuleDecoder : public Decoder {
consume_resizable_limits(
"memory", "pages", FLAG_wasm_max_mem_pages, &module_->initial_pages,
&module_->has_maximum_pages, kSpecMaxWasmMemoryPages,
- &module_->maximum_pages);
+ &module_->maximum_pages, &module_->has_shared_memory);
}
}
@@ -561,6 +568,7 @@ class ModuleDecoder : public Decoder {
WasmGlobal* global = &module_->globals.back();
DecodeGlobalInModule(module_.get(), i + imported_globals, global);
}
+ if (ok()) CalculateGlobalOffsets(module_.get());
}
void DecodeExportSection() {
@@ -701,26 +709,42 @@ class ModuleDecoder : public Decoder {
}
void DecodeCodeSection(bool verify_functions) {
- const byte* pos = pc_;
+ uint32_t pos = pc_offset();
uint32_t functions_count = consume_u32v("functions count");
- if (functions_count != module_->num_declared_functions) {
- errorf(pos, "function body count %u mismatch (%u expected)",
- functions_count, module_->num_declared_functions);
- }
- for (uint32_t i = 0; i < functions_count; ++i) {
+ CheckFunctionsCount(functions_count, pos);
+ for (uint32_t i = 0; ok() && i < functions_count; ++i) {
uint32_t size = consume_u32v("body size");
uint32_t offset = pc_offset();
consume_bytes(size, "function body");
if (failed()) break;
- WasmFunction* function =
- &module_->functions[i + module_->num_imported_functions];
- function->code = {offset, size};
- if (verify_functions) {
- ModuleWireBytes bytes(start_, end_);
- VerifyFunctionBody(module_->signature_zone->allocator(),
- i + module_->num_imported_functions, bytes,
- module_.get(), function);
- }
+ DecodeFunctionBody(i, size, offset, verify_functions);
+ }
+ }
+
+ bool CheckFunctionsCount(uint32_t functions_count, uint32_t offset) {
+ if (functions_count != module_->num_declared_functions) {
+ Reset(nullptr, nullptr, offset);
+ errorf(nullptr, "function body count %u mismatch (%u expected)",
+ functions_count, module_->num_declared_functions);
+ return false;
+ }
+ return true;
+ }
+
+ void DecodeFunctionBody(uint32_t index, uint32_t length, uint32_t offset,
+ bool verify_functions) {
+ auto size_histogram = module_->is_wasm()
+ ? GetCounters()->wasm_wasm_function_size_bytes()
+ : GetCounters()->wasm_asm_function_size_bytes();
+ size_histogram->AddSample(length);
+ WasmFunction* function =
+ &module_->functions[index + module_->num_imported_functions];
+ function->code = {offset, length};
+ if (verify_functions) {
+ ModuleWireBytes bytes(start_, end_);
+ VerifyFunctionBody(module_->signature_zone->allocator(),
+ index + module_->num_imported_functions, bytes,
+ module_.get(), function);
}
}
@@ -881,6 +905,8 @@ class ModuleDecoder : public Decoder {
return consume_init_expr(nullptr, kWasmStmt);
}
+ WasmModule* module() { return module_.get(); }
+
bool IsWasm() { return origin_ == kWasmOrigin; }
Counters* GetCounters() {
@@ -902,7 +928,7 @@ class ModuleDecoder : public Decoder {
// We store next_section_ as uint8_t instead of SectionCode so that we can
// increment it. This static_assert should make sure that SectionCode does not
// get bigger than uint8_t accidentally.
- static_assert(sizeof(ModuleDecoder::next_section_) == sizeof(SectionCode),
+ static_assert(sizeof(ModuleDecoderImpl::next_section_) == sizeof(SectionCode),
"type mismatch");
Result<bool> intermediate_result_;
ModuleOrigin origin_;
@@ -1085,9 +1111,30 @@ class ModuleDecoder : public Decoder {
void consume_resizable_limits(const char* name, const char* units,
uint32_t max_initial, uint32_t* initial,
bool* has_max, uint32_t max_maximum,
- uint32_t* maximum) {
- uint32_t flags = consume_u32v("resizable limits flags");
+ uint32_t* maximum,
+ bool* has_shared_memory = nullptr) {
+ uint8_t flags = consume_u8("resizable limits flags");
const byte* pos = pc();
+
+ if (FLAG_experimental_wasm_threads) {
+ bool is_memory = (strcmp(name, "memory") == 0);
+ if (flags & 0xfc || (!is_memory && (flags & 0xfe))) {
+ errorf(pos - 1, "invalid %s limits flags", name);
+ }
+ if (flags == 3) {
+ DCHECK_NOT_NULL(has_shared_memory);
+ *has_shared_memory = true;
+ } else if (flags == 2) {
+ errorf(pos - 1,
+ "%s limits flags should have maximum defined if shared is true",
+ name);
+ }
+ } else {
+ if (flags & 0xfe) {
+ errorf(pos - 1, "invalid %s limits flags", name);
+ }
+ }
+
*initial = consume_u32v("initial size");
*has_max = false;
if (*initial > max_initial) {
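
A compact standalone restatement of the limits-flags decision table implemented above (cf. the MemoryFlags enum added in module-decoder.h: bit 0 = maximum present, bit 1 = shared; shared memories require a maximum). Names are illustrative:

    #include <cassert>
    #include <cstdint>

    enum class LimitsResult { kOk, kOkShared, kInvalidFlags, kSharedNeedsMaximum };

    LimitsResult CheckLimitsFlags(uint8_t flags, bool is_memory,
                                  bool threads_enabled) {
      if (!threads_enabled) {
        // Without threads, only bit 0 (maximum present) may be set.
        return (flags & 0xfe) ? LimitsResult::kInvalidFlags : LimitsResult::kOk;
      }
      if ((flags & 0xfc) || (!is_memory && (flags & 0xfe)))
        return LimitsResult::kInvalidFlags;  // shared only allowed for memories
      if (flags == 3) return LimitsResult::kOkShared;
      if (flags == 2) return LimitsResult::kSharedNeedsMaximum;
      return LimitsResult::kOk;  // flags 0 or 1
    }

    int main() {
      assert(CheckLimitsFlags(1, true, false) == LimitsResult::kOk);
      assert(CheckLimitsFlags(2, true, false) == LimitsResult::kInvalidFlags);
      assert(CheckLimitsFlags(3, true, true) == LimitsResult::kOkShared);
      assert(CheckLimitsFlags(2, true, true) == LimitsResult::kSharedNeedsMaximum);
      assert(CheckLimitsFlags(3, false, true) == LimitsResult::kInvalidFlags);
    }
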
@@ -1301,7 +1348,7 @@ ModuleResult DecodeWasmModule(Isolate* isolate, const byte* module_start,
size_counter->AddSample(static_cast<int>(size));
// Signatures are stored in zone memory, which have the same lifetime
// as the {module}.
- ModuleDecoder decoder(module_start, module_end, origin);
+ ModuleDecoderImpl decoder(module_start, module_end, origin);
ModuleResult result = decoder.DecodeModule(isolate, verify_functions);
// TODO(bradnelson): Improve histogram handling of size_t.
// TODO(titzer): this isn't accurate, since it doesn't count the data
@@ -1318,7 +1365,43 @@ ModuleResult DecodeWasmModule(Isolate* isolate, const byte* module_start,
return result;
}
-} // namespace
+ModuleDecoder::ModuleDecoder() = default;
+ModuleDecoder::~ModuleDecoder() = default;
+
+WasmModule* ModuleDecoder::module() const { return impl_->module(); }
+
+void ModuleDecoder::StartDecoding(Isolate* isolate, ModuleOrigin origin) {
+ DCHECK_NULL(impl_);
+ impl_.reset(new ModuleDecoderImpl(origin));
+ impl_->StartDecoding(isolate);
+}
+
+void ModuleDecoder::DecodeModuleHeader(Vector<const uint8_t> bytes,
+ uint32_t offset) {
+ impl_->DecodeModuleHeader(bytes, offset);
+}
+
+void ModuleDecoder::DecodeSection(SectionCode section_code,
+ Vector<const uint8_t> bytes, uint32_t offset,
+ bool verify_functions) {
+ impl_->DecodeSection(section_code, bytes, offset, verify_functions);
+}
+
+void ModuleDecoder::DecodeFunctionBody(uint32_t index, uint32_t length,
+ uint32_t offset, bool verify_functions) {
+ impl_->DecodeFunctionBody(index, length, offset, verify_functions);
+}
+
+bool ModuleDecoder::CheckFunctionsCount(uint32_t functions_count,
+ uint32_t offset) {
+ return impl_->CheckFunctionsCount(functions_count, offset);
+}
+
+ModuleResult ModuleDecoder::FinishDecoding(bool verify_functions) {
+ return impl_->FinishDecoding(verify_functions);
+}
+
+bool ModuleDecoder::ok() { return impl_->ok(); }
ModuleResult SyncDecodeWasmModule(Isolate* isolate, const byte* module_start,
const byte* module_end, bool verify_functions,
@@ -1337,13 +1420,13 @@ ModuleResult AsyncDecodeWasmModule(
FunctionSig* DecodeWasmSignatureForTesting(Zone* zone, const byte* start,
const byte* end) {
- ModuleDecoder decoder(start, end, kWasmOrigin);
+ ModuleDecoderImpl decoder(start, end, kWasmOrigin);
return decoder.DecodeFunctionSignature(zone, start);
}
WasmInitExpr DecodeWasmInitExprForTesting(const byte* start, const byte* end) {
AccountingAllocator allocator;
- ModuleDecoder decoder(start, end, kWasmOrigin);
+ ModuleDecoderImpl decoder(start, end, kWasmOrigin);
return decoder.DecodeInitExpr(start);
}
@@ -1358,9 +1441,14 @@ FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone,
size_t size = function_end - function_start;
if (function_start > function_end)
return FunctionResult::Error("start > end");
+ auto size_histogram = module->is_wasm()
+ ? counters->wasm_wasm_function_size_bytes()
+ : counters->wasm_asm_function_size_bytes();
+ // TODO(bradnelson): Improve histogram handling of ptrdiff_t.
+ size_histogram->AddSample(static_cast<int>(size));
if (size > kV8MaxWasmFunctionSize)
return FunctionResult::Error("size > maximum function size: %zu", size);
- ModuleDecoder decoder(function_start, function_end, kWasmOrigin);
+ ModuleDecoderImpl decoder(function_start, function_end, kWasmOrigin);
decoder.SetCounters(counters);
return decoder.DecodeSingleFunction(zone, wire_bytes, module,
base::make_unique<WasmFunction>());
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index 5239c95e68..b6cd869ae7 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -19,13 +19,18 @@ struct ModuleEnv;
namespace wasm {
-const uint32_t kWasmMagic = 0x6d736100;
-const uint32_t kWasmVersion = 0x01;
const uint8_t kWasmFunctionTypeForm = 0x60;
const uint8_t kWasmAnyFunctionTypeForm = 0x70;
const uint8_t kResizableMaximumFlag = 1;
const uint8_t kNoMaximumFlag = 0;
+enum MemoryFlags : uint8_t {
+ kNoMaximum = 0,
+ kMaximum = 1,
+ kSharedNoMaximum = 2,
+ kSharedAndMaximum = 3
+};
+
enum SectionCode : int8_t {
kUnknownSectionCode = 0, // code for unknown sections
kTypeSectionCode = 1, // Function signature declarations
@@ -143,6 +148,36 @@ AsmJsOffsetsResult DecodeAsmJsOffsets(const byte* module_start,
void DecodeLocalNames(const byte* module_start, const byte* module_end,
LocalNames* result);
+class ModuleDecoderImpl;
+
+class ModuleDecoder {
+ public:
+ ModuleDecoder();
+ ~ModuleDecoder();
+
+ void StartDecoding(Isolate* isolate,
+ ModuleOrigin origin = ModuleOrigin::kWasmOrigin);
+
+ void DecodeModuleHeader(Vector<const uint8_t> bytes, uint32_t offset);
+
+ void DecodeSection(SectionCode section_code, Vector<const uint8_t> bytes,
+ uint32_t offset, bool verify_functions = true);
+
+ bool CheckFunctionsCount(uint32_t functions_count, uint32_t offset);
+
+ void DecodeFunctionBody(uint32_t index, uint32_t size, uint32_t offset,
+ bool verify_functions = true);
+
+ ModuleResult FinishDecoding(bool verify_functions = true);
+
+ WasmModule* module() const;
+
+ bool ok();
+
+ private:
+ std::unique_ptr<ModuleDecoderImpl> impl_;
+};
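
The new ModuleDecoder is a classic pimpl wrapper around the renamed ModuleDecoderImpl, which is why it needs the out-of-line constructor and destructor defined in module-decoder.cc. A toy sketch of the idiom, collapsed into one translation unit:

    #include <memory>

    // --- what would live in the header ---
    class WidgetImpl;  // forward declaration only

    class Widget {
     public:
      Widget();
      ~Widget();  // must be out of line: ~unique_ptr needs the complete type
      int value() const;

     private:
      std::unique_ptr<WidgetImpl> impl_;
    };

    // --- what would live in the implementation file ---
    class WidgetImpl {
     public:
      int value() const { return 42; }
    };

    Widget::Widget() : impl_(new WidgetImpl()) {}
    Widget::~Widget() = default;
    int Widget::value() const { return impl_->value(); }

    int main() { return Widget().value() == 42 ? 0 : 1; }
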
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/streaming-decoder.cc b/deps/v8/src/wasm/streaming-decoder.cc
index c0c51c9d76..b48d11c902 100644
--- a/deps/v8/src/wasm/streaming-decoder.cc
+++ b/deps/v8/src/wasm/streaming-decoder.cc
@@ -15,31 +15,25 @@
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-result.h"
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wheader-hygiene"
-#endif
-
-using namespace v8::internal;
-using namespace v8::internal::wasm;
-
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic pop
-#endif
+namespace v8 {
+namespace internal {
+namespace wasm {
void StreamingDecoder::OnBytesReceived(Vector<const uint8_t> bytes) {
size_t current = 0;
- while (decoder()->ok() && current < bytes.size()) {
+ while (ok() && current < bytes.size()) {
size_t num_bytes =
state_->ReadBytes(this, bytes.SubVector(current, bytes.size()));
current += num_bytes;
+ module_offset_ += num_bytes;
if (state_->is_finished()) {
state_ = state_->Next(this);
}
}
total_size_ += bytes.size();
+ if (ok()) {
+ processor_->OnFinishedChunk();
+ }
}
size_t StreamingDecoder::DecodingState::ReadBytes(StreamingDecoder* streaming,
@@ -50,13 +44,36 @@ size_t StreamingDecoder::DecodingState::ReadBytes(StreamingDecoder* streaming,
return num_bytes;
}
-MaybeHandle<WasmModuleObject> StreamingDecoder::Finish() {
- UNIMPLEMENTED();
- return Handle<WasmModuleObject>::null();
+void StreamingDecoder::Finish() {
+ if (!ok()) {
+ return;
+ }
+
+ if (!state_->is_finishing_allowed()) {
+ // The byte stream ended too early, we report an error.
+ Error("unexpected end of stream");
+ return;
+ }
+
+ std::unique_ptr<uint8_t[]> bytes(new uint8_t[total_size_]);
+ uint8_t* cursor = bytes.get();
+ {
+#define BYTES(x) (x & 0xff), (x >> 8) & 0xff, (x >> 16) & 0xff, (x >> 24) & 0xff
+ uint8_t module_header[]{BYTES(kWasmMagic), BYTES(kWasmVersion)};
+#undef BYTES
+ memcpy(cursor, module_header, arraysize(module_header));
+ cursor += arraysize(module_header);
+ }
+ for (auto&& buffer : section_buffers_) {
+ DCHECK_LE(cursor - bytes.get() + buffer->length(), total_size_);
+ memcpy(cursor, buffer->bytes(), buffer->length());
+ cursor += buffer->length();
+ }
+ processor_->OnFinishedStream(std::move(bytes), total_size_);
}
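
Finish() re-synthesizes the 8-byte module header and copies the buffered sections behind it, since the header itself is never stored in a SectionBuffer. A standalone sketch of that reassembly (assumes a little-endian target, like the BYTES trick above):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    constexpr uint32_t kWasmMagic = 0x6d736100;  // "\0asm"
    constexpr uint32_t kWasmVersion = 0x01;

    std::vector<uint8_t> AssembleWireBytes(
        const std::vector<std::vector<uint8_t>>& sections, size_t total_size) {
      std::vector<uint8_t> bytes(total_size);
      uint8_t* cursor = bytes.data();
      const uint32_t header[] = {kWasmMagic, kWasmVersion};
      std::memcpy(cursor, header, sizeof(header));  // little-endian layout
      cursor += sizeof(header);
      for (const auto& section : sections) {
        std::memcpy(cursor, section.data(), section.size());
        cursor += section.size();
      }
      return bytes;
    }

    int main() {
      std::vector<std::vector<uint8_t>> sections = {{0x01, 0x04}, {0x60, 0x00}};
      // Total size covers the 8-byte header plus all streamed section bytes.
      std::vector<uint8_t> bytes = AssembleWireBytes(sections, 8 + 4);
      return bytes[0] == 0x00 && bytes[1] == 'a' ? 0 : 1;
    }
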
-bool StreamingDecoder::FinishForTesting() {
- return decoder_.ok() && state_->is_finishing_allowed();
+void StreamingDecoder::Abort() {
+ if (ok()) processor_->OnAbort();
}
// An abstract class to share code among the states which decode VarInts. This
@@ -64,7 +81,8 @@ bool StreamingDecoder::FinishForTesting() {
// code with the decoded value.
class StreamingDecoder::DecodeVarInt32 : public DecodingState {
public:
- explicit DecodeVarInt32(size_t max_value) : max_value_(max_value) {}
+ explicit DecodeVarInt32(size_t max_value, const char* field_name)
+ : max_value_(max_value), field_name_(field_name) {}
uint8_t* buffer() override { return byte_buffer_; }
size_t size() const override { return kMaxVarInt32Size; }
@@ -84,6 +102,7 @@ class StreamingDecoder::DecodeVarInt32 : public DecodingState {
// The maximum valid value decoded in this state. {Next} returns an error if
// this value is exceeded.
size_t max_value_;
+ const char* field_name_;
size_t value_ = 0;
size_t bytes_needed_ = 0;
};
@@ -106,30 +125,43 @@ class StreamingDecoder::DecodeModuleHeader : public DecodingState {
class StreamingDecoder::DecodeSectionID : public DecodingState {
public:
+ explicit DecodeSectionID(uint32_t module_offset)
+ : module_offset_(module_offset) {}
+
size_t size() const override { return 1; }
uint8_t* buffer() override { return &id_; }
bool is_finishing_allowed() const override { return true; }
uint8_t id() const { return id_; }
+ uint32_t module_offset() const { return module_offset_; }
+
std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
private:
uint8_t id_ = 0;
+ // The start offset of this section in the module.
+ uint32_t module_offset_;
};
class StreamingDecoder::DecodeSectionLength : public DecodeVarInt32 {
public:
- explicit DecodeSectionLength(uint8_t id)
- : DecodeVarInt32(kV8MaxWasmModuleSize), section_id_(id) {}
+ explicit DecodeSectionLength(uint8_t id, uint32_t module_offset)
+ : DecodeVarInt32(kV8MaxWasmModuleSize, "section length"),
+ section_id_(id),
+ module_offset_(module_offset) {}
uint8_t section_id() const { return section_id_; }
+ uint32_t module_offset() const { return module_offset_; }
+
std::unique_ptr<DecodingState> NextWithValue(
StreamingDecoder* streaming) override;
private:
uint8_t section_id_;
+ // The start offset of this section in the module.
+ uint32_t module_offset_;
};
class StreamingDecoder::DecodeSectionPayload : public DecodingState {
@@ -144,6 +176,8 @@ class StreamingDecoder::DecodeSectionPayload : public DecodingState {
std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
+ SectionBuffer* section_buffer() const { return section_buffer_; }
+
private:
SectionBuffer* section_buffer_;
};
@@ -151,7 +185,8 @@ class StreamingDecoder::DecodeSectionPayload : public DecodingState {
class StreamingDecoder::DecodeNumberOfFunctions : public DecodeVarInt32 {
public:
explicit DecodeNumberOfFunctions(SectionBuffer* section_buffer)
- : DecodeVarInt32(kV8MaxWasmFunctions), section_buffer_(section_buffer) {}
+ : DecodeVarInt32(kV8MaxWasmFunctions, "functions count"),
+ section_buffer_(section_buffer) {}
SectionBuffer* section_buffer() const { return section_buffer_; }
@@ -167,7 +202,7 @@ class StreamingDecoder::DecodeFunctionLength : public DecodeVarInt32 {
explicit DecodeFunctionLength(SectionBuffer* section_buffer,
size_t buffer_offset,
size_t num_remaining_functions)
- : DecodeVarInt32(kV8MaxWasmFunctionSize),
+ : DecodeVarInt32(kV8MaxWasmFunctionSize, "body size"),
section_buffer_(section_buffer),
buffer_offset_(buffer_offset),
// We are reading a new function, so one function less is remaining.
@@ -192,18 +227,21 @@ class StreamingDecoder::DecodeFunctionBody : public DecodingState {
public:
explicit DecodeFunctionBody(SectionBuffer* section_buffer,
size_t buffer_offset, size_t function_length,
- size_t num_remaining_functions)
+ size_t num_remaining_functions,
+ uint32_t module_offset)
: section_buffer_(section_buffer),
buffer_offset_(buffer_offset),
size_(function_length),
- num_remaining_functions_(num_remaining_functions) {}
+ num_remaining_functions_(num_remaining_functions),
+ module_offset_(module_offset) {}
+ size_t buffer_offset() const { return buffer_offset_; }
size_t size() const override { return size_; }
uint8_t* buffer() override {
return section_buffer_->bytes() + buffer_offset_;
}
size_t num_remaining_functions() const { return num_remaining_functions_; }
- size_t buffer_offset() const { return buffer_offset_; }
+ uint32_t module_offset() const { return module_offset_; }
SectionBuffer* section_buffer() const { return section_buffer_; }
std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
@@ -213,23 +251,24 @@ class StreamingDecoder::DecodeFunctionBody : public DecodingState {
size_t buffer_offset_;
size_t size_;
size_t num_remaining_functions_;
+ uint32_t module_offset_;
};
size_t StreamingDecoder::DecodeVarInt32::ReadBytes(
StreamingDecoder* streaming, Vector<const uint8_t> bytes) {
size_t bytes_read = std::min(bytes.size(), remaining());
memcpy(buffer() + offset(), &bytes.first(), bytes_read);
- streaming->decoder()->Reset(buffer(), buffer() + offset() + bytes_read);
- value_ = streaming->decoder()->consume_i32v();
+ Decoder decoder(buffer(), buffer() + offset() + bytes_read,
+ streaming->module_offset());
+ value_ = decoder.consume_u32v(field_name_);
// The number of bytes we actually needed to read.
- DCHECK_GT(streaming->decoder()->pc(), buffer());
- bytes_needed_ = static_cast<size_t>(streaming->decoder()->pc() - buffer());
-
- if (streaming->decoder()->failed()) {
- if (offset() + bytes_read < size()) {
- // We did not decode a full buffer, so we ignore errors. Maybe the
- // decoding will succeed when we have more bytes.
- streaming->decoder()->Reset(nullptr, nullptr);
+ DCHECK_GT(decoder.pc(), buffer());
+ bytes_needed_ = static_cast<size_t>(decoder.pc() - buffer());
+
+ if (decoder.failed()) {
+ if (offset() + bytes_read == size()) {
+ // We only report an error if we read all bytes.
+ streaming->Error(decoder.toResult(nullptr));
}
set_offset(offset() + bytes_read);
return bytes_read;
@@ -244,72 +283,70 @@ size_t StreamingDecoder::DecodeVarInt32::ReadBytes(
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeVarInt32::Next(StreamingDecoder* streaming) {
- if (streaming->decoder()->failed()) {
+ if (!streaming->ok()) {
return nullptr;
}
if (value() > max_value_) {
- streaming->decoder()->errorf(buffer(), "size > maximum function size: %zu",
- value());
- return nullptr;
+ std::ostringstream oss;
+ oss << "function size > maximum function size: " << value() << " < "
+ << max_value_;
+ return streaming->Error(oss.str());
}
return NextWithValue(streaming);
}
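
For reference, {consume_u32v} above parses a LEB128-encoded u32: seven payload bits per byte, least-significant group first, with the high bit marking continuation. A minimal sketch of that encoding (hypothetical helper, not the V8 decoder, which also tracks error positions):

#include <cstddef>
#include <cstdint>

// Returns the number of bytes consumed, or 0 if the varint is still
// incomplete within the first five bytes. A production decoder would
// additionally reject a varint that does not terminate by byte five.
size_t DecodeU32LEB(const uint8_t* data, size_t length, uint32_t* result) {
  uint32_t value = 0;
  for (size_t i = 0; i < length && i < 5; ++i) {
    value |= static_cast<uint32_t>(data[i] & 0x7f) << (7 * i);
    if ((data[i] & 0x80) == 0) {  // high bit clear: last byte
      *result = value;
      return i + 1;
    }
  }
  return 0;
}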
-#define BYTES(x) (x & 0xff), (x >> 8) & 0xff, (x >> 16) & 0xff, (x >> 24) & 0xff
-// Decode the module header. The error state of the decoder stores the result.
-void StreamingDecoder::DecodeModuleHeader::CheckHeader(Decoder* decoder) {
- // TODO(ahaas): Share code with the module-decoder.
- decoder->Reset(buffer(), buffer() + size());
- uint32_t magic_word = decoder->consume_u32("wasm magic");
- if (magic_word != kWasmMagic) {
- decoder->errorf(buffer(),
- "expected magic word %02x %02x %02x %02x, "
- "found %02x %02x %02x %02x",
- BYTES(kWasmMagic), BYTES(magic_word));
- }
- uint32_t magic_version = decoder->consume_u32("wasm version");
- if (magic_version != kWasmVersion) {
- decoder->errorf(buffer(),
- "expected version %02x %02x %02x %02x, "
- "found %02x %02x %02x %02x",
- BYTES(kWasmVersion), BYTES(magic_version));
- }
-}
-#undef BYTES
-
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeModuleHeader::Next(StreamingDecoder* streaming) {
- CheckHeader(streaming->decoder());
- return base::make_unique<DecodeSectionID>();
+ streaming->ProcessModuleHeader();
+ if (streaming->ok()) {
+ return base::make_unique<DecodeSectionID>(streaming->module_offset());
+ }
+ return nullptr;
}
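
The header check that used to live in CheckHeader is now performed behind ProcessModuleHeader. For reference, a minimal sketch of what it validates, assuming kWasmMagic (0x6d736100, i.e. "\0asm") and kWasmVersion (1) encode these bytes as little-endian u32 values:

#include <cstdint>
#include <cstring>

bool LooksLikeWasmHeader(const uint8_t* bytes, size_t length) {
  static const uint8_t kExpected[8] = {0x00, 0x61, 0x73, 0x6d,   // "\0asm"
                                       0x01, 0x00, 0x00, 0x00};  // version 1
  return length >= 8 && std::memcmp(bytes, kExpected, 8) == 0;
}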
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeSectionID::Next(StreamingDecoder* streaming) {
- return base::make_unique<DecodeSectionLength>(id());
+ return base::make_unique<DecodeSectionLength>(id(), module_offset());
}
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeSectionLength::NextWithValue(
StreamingDecoder* streaming) {
SectionBuffer* buf = streaming->CreateNewBuffer(
- section_id(), value(),
+ module_offset(), section_id(), value(),
Vector<const uint8_t>(buffer(), static_cast<int>(bytes_needed())));
+ if (!buf) return nullptr;
if (value() == 0) {
- // There is no payload, we go to the next section immediately.
- return base::make_unique<DecodeSectionID>();
- } else if (section_id() == SectionCode::kCodeSectionCode) {
- // We reached the code section. All functions of the code section are put
- // into the same SectionBuffer.
- return base::make_unique<DecodeNumberOfFunctions>(buf);
+ if (section_id() == SectionCode::kCodeSectionCode) {
+ return streaming->Error("Code section cannot have size 0");
+ } else {
+ streaming->ProcessSection(buf);
+ if (streaming->ok()) {
+ // There is no payload; we go to the next section immediately.
+ return base::make_unique<DecodeSectionID>(streaming->module_offset());
+ } else {
+ return nullptr;
+ }
+ }
} else {
- return base::make_unique<DecodeSectionPayload>(buf);
+ if (section_id() == SectionCode::kCodeSectionCode) {
+ // We reached the code section. All functions of the code section are put
+ // into the same SectionBuffer.
+ return base::make_unique<DecodeNumberOfFunctions>(buf);
+ } else {
+ return base::make_unique<DecodeSectionPayload>(buf);
+ }
}
}
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeSectionPayload::Next(StreamingDecoder* streaming) {
- return base::make_unique<DecodeSectionID>();
+ streaming->ProcessSection(section_buffer());
+ if (streaming->ok()) {
+ return base::make_unique<DecodeSectionID>(streaming->module_offset());
+ }
+ return nullptr;
}
std::unique_ptr<StreamingDecoder::DecodingState>
@@ -320,17 +357,18 @@ StreamingDecoder::DecodeNumberOfFunctions::NextWithValue(
memcpy(section_buffer_->bytes() + section_buffer_->payload_offset(),
buffer(), bytes_needed());
} else {
- streaming->decoder()->error("Invalid code section length");
- return base::make_unique<DecodeSectionID>();
+ return streaming->Error("Invalid code section length");
}
// {value} is the number of functions.
if (value() > 0) {
+ streaming->StartCodeSection(value());
+ if (!streaming->ok()) return nullptr;
return base::make_unique<DecodeFunctionLength>(
section_buffer(), section_buffer()->payload_offset() + bytes_needed(),
value());
} else {
- return base::make_unique<DecodeSectionID>();
+ return base::make_unique<DecodeSectionID>(streaming->module_offset());
}
}
@@ -341,49 +379,47 @@ StreamingDecoder::DecodeFunctionLength::NextWithValue(
if (section_buffer_->length() >= buffer_offset_ + bytes_needed()) {
memcpy(section_buffer_->bytes() + buffer_offset_, buffer(), bytes_needed());
} else {
- streaming->decoder()->error("Invalid code section length");
- return base::make_unique<DecodeSectionID>();
+ return streaming->Error("Invalid code section length");
}
// {value} is the length of the function.
if (value() == 0) {
- streaming->decoder()->errorf(buffer(), "Invalid function length (0)");
- return nullptr;
+ return streaming->Error("Invalid function length (0)");
} else if (buffer_offset() + bytes_needed() + value() >
section_buffer()->length()) {
- streaming->decoder()->errorf(buffer(), "not enough code section bytes");
+ streaming->Error("not enough code section bytes");
return nullptr;
}
return base::make_unique<DecodeFunctionBody>(
section_buffer(), buffer_offset() + bytes_needed(), value(),
- num_remaining_functions());
+ num_remaining_functions(), streaming->module_offset());
}
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeFunctionBody::Next(StreamingDecoder* streaming) {
- // TODO(ahaas): Start compilation of the function here.
+ streaming->ProcessFunctionBody(
+ Vector<const uint8_t>(buffer(), static_cast<int>(size())),
+ module_offset());
+ if (!streaming->ok()) {
+ return nullptr;
+ }
if (num_remaining_functions() != 0) {
return base::make_unique<DecodeFunctionLength>(
section_buffer(), buffer_offset() + size(), num_remaining_functions());
} else {
if (buffer_offset() + size() != section_buffer()->length()) {
- streaming->decoder()->Reset(
- section_buffer()->bytes(),
- section_buffer()->bytes() + section_buffer()->length());
- streaming->decoder()->errorf(
- section_buffer()->bytes() + buffer_offset() + size(),
- "not all code section bytes were used");
- return nullptr;
+ return streaming->Error("not all code section bytes were used");
}
- return base::make_unique<DecodeSectionID>();
+ return base::make_unique<DecodeSectionID>(streaming->module_offset());
}
}
-StreamingDecoder::StreamingDecoder(Isolate* isolate)
- : isolate_(isolate),
+StreamingDecoder::StreamingDecoder(
+ std::unique_ptr<StreamingProcessor> processor)
+ : processor_(std::move(processor)),
// A module always starts with a module header.
- state_(new DecodeModuleHeader()),
- decoder_(nullptr, nullptr) {
- USE(isolate_);
-}
+ state_(new DecodeModuleHeader()) {}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/streaming-decoder.h b/deps/v8/src/wasm/streaming-decoder.h
index 349e013e6c..2bf5f625d5 100644
--- a/deps/v8/src/wasm/streaming-decoder.h
+++ b/deps/v8/src/wasm/streaming-decoder.h
@@ -7,30 +7,72 @@
#include <vector>
#include "src/isolate.h"
-#include "src/wasm/decoder.h"
+#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
namespace wasm {
+// This class is an interface for the StreamingDecoder to start the processing
+// of the incoming module bytes.
+class V8_EXPORT_PRIVATE StreamingProcessor {
+ public:
+ virtual ~StreamingProcessor() = default;
+ // Process the first 8 bytes of a WebAssembly module. Returns true if the
+ // processing finished successfully and the decoding should continue.
+ virtual bool ProcessModuleHeader(Vector<const uint8_t> bytes,
+ uint32_t offset) = 0;
+
+ // Process all sections but the code section. Returns true if the processing
+ // finished successfully and the decoding should continue.
+ virtual bool ProcessSection(SectionCode section_code,
+ Vector<const uint8_t> bytes, uint32_t offset) = 0;
+
+ // Process the start of the code section. Returns true if the processing
+ // finished successfully and the decoding should continue.
+ virtual bool ProcessCodeSectionHeader(size_t num_functions,
+ uint32_t offset) = 0;
+
+ // Process a function body. Returns true if the processing finished
+ // successfully and the decoding should continue.
+ virtual bool ProcessFunctionBody(Vector<const uint8_t> bytes,
+ uint32_t offset) = 0;
+
+ // Report the end of a chunk.
+ virtual void OnFinishedChunk() = 0;
+ // Report the end of the stream. If the stream was successful, all
+ // received bytes are passed as a parameter. If there has been an error,
+ // an empty array is passed.
+ virtual void OnFinishedStream(std::unique_ptr<uint8_t[]> bytes,
+ size_t length) = 0;
+ // Report an error detected in the StreamingDecoder.
+ virtual void OnError(DecodeResult result) = 0;
+ // Report that the stream was aborted.
+ virtual void OnAbort() = 0;
+};
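
A minimal sketch of a StreamingProcessor implementation (hypothetical; the production processor lives in the compiler pipeline). It only counts what it sees, to illustrate the callback contract: every Process* hook returns whether decoding should continue.

class CountingProcessor : public StreamingProcessor {
 public:
  bool ProcessModuleHeader(Vector<const uint8_t> bytes,
                           uint32_t offset) override {
    return true;  // the StreamingDecoder has already validated the header
  }
  bool ProcessSection(SectionCode section_code, Vector<const uint8_t> bytes,
                      uint32_t offset) override {
    ++num_sections_;
    return true;
  }
  bool ProcessCodeSectionHeader(size_t num_functions,
                                uint32_t offset) override {
    return true;
  }
  bool ProcessFunctionBody(Vector<const uint8_t> bytes,
                           uint32_t offset) override {
    ++num_functions_;
    return true;  // returning false stops the decoder
  }
  void OnFinishedChunk() override {}
  void OnFinishedStream(std::unique_ptr<uint8_t[]> bytes,
                        size_t length) override {}
  void OnError(DecodeResult result) override { saw_error_ = true; }
  void OnAbort() override {}

 private:
  int num_sections_ = 0;
  int num_functions_ = 0;
  bool saw_error_ = false;
};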
+
// The StreamingDecoder takes a sequence of byte arrays, each received by a call
// of {OnBytesReceived}, and extracts the bytes which belong to section payloads
// and function bodies.
class V8_EXPORT_PRIVATE StreamingDecoder {
public:
- explicit StreamingDecoder(Isolate* isolate);
+ explicit StreamingDecoder(std::unique_ptr<StreamingProcessor> processor);
// The buffer passed into OnBytesReceived is owned by the caller.
void OnBytesReceived(Vector<const uint8_t> bytes);
- // Finishes the stream and returns compiled WasmModuleObject.
- MaybeHandle<WasmModuleObject> Finish();
+ void Finish();
- // Finishes the streaming and returns true if no error was detected.
- bool FinishForTesting();
+ void Abort();
+
+ // Notify the StreamingDecoder that there has been a compilation error.
+ void NotifyError() { ok_ = false; }
private:
+ // TODO(ahaas): Put the whole private state of the StreamingDecoder into the
+ // cc file (PIMPL design pattern).
+
// The SectionBuffer is the data object for the content of a single section.
// It stores all bytes of the section (including section id and section
// length), and the offset where the actual payload starts.
@@ -39,21 +81,33 @@ class V8_EXPORT_PRIVATE StreamingDecoder {
// id: The section id.
// payload_length: The length of the payload.
// length_bytes: The section length, as it is encoded in the module bytes.
- SectionBuffer(uint8_t id, size_t payload_length,
+ SectionBuffer(uint32_t module_offset, uint8_t id, size_t payload_length,
Vector<const uint8_t> length_bytes)
: // ID + length + payload
+ module_offset_(module_offset),
length_(1 + length_bytes.length() + payload_length),
bytes_(new uint8_t[length_]),
payload_offset_(1 + length_bytes.length()) {
bytes_[0] = id;
memcpy(bytes_.get() + 1, &length_bytes.first(), length_bytes.length());
}
+
+ SectionCode section_code() const {
+ return static_cast<SectionCode>(bytes_[0]);
+ }
+
+ uint32_t module_offset() const { return module_offset_; }
uint8_t* bytes() const { return bytes_.get(); }
size_t length() const { return length_; }
size_t payload_offset() const { return payload_offset_; }
size_t payload_length() const { return length_ - payload_offset_; }
+ Vector<const uint8_t> payload() const {
+ return Vector<const uint8_t>(bytes() + payload_offset(),
+ payload_length());
+ }
private:
+ uint32_t module_offset_;
size_t length_;
std::unique_ptr<uint8_t[]> bytes_;
size_t payload_offset_;
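
To make the layout concrete: for a section with a 3-byte payload whose size was encoded in a single LEB byte, the buffer holds (hypothetical values):

//   index:    0       1        2..4
//   bytes:  [ id ] [ 0x03 ] [ payload ]
//
//   payload_offset() == 2, payload_length() == 3, length() == 5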
@@ -127,20 +181,75 @@ class V8_EXPORT_PRIVATE StreamingDecoder {
class DecodeFunctionBody;
// Creates a buffer for the next section of the module.
- SectionBuffer* CreateNewBuffer(uint8_t id, size_t length,
+ SectionBuffer* CreateNewBuffer(uint32_t module_offset, uint8_t id,
+ size_t length,
Vector<const uint8_t> length_bytes) {
- section_buffers_.emplace_back(new SectionBuffer(id, length, length_bytes));
+ // Check the order of sections. Unknown sections can appear at any position.
+ if (id != kUnknownSectionCode) {
+ if (id < next_section_id_) {
+ Error("Unexpected section");
+ return nullptr;
+ }
+ next_section_id_ = id + 1;
+ }
+ section_buffers_.emplace_back(
+ new SectionBuffer(module_offset, id, length, length_bytes));
return section_buffers_.back().get();
}
- Decoder* decoder() { return &decoder_; }
+ std::unique_ptr<DecodingState> Error(DecodeResult result) {
+ if (ok_) processor_->OnError(std::move(result));
+ ok_ = false;
+ return std::unique_ptr<DecodingState>(nullptr);
+ }
+
+ std::unique_ptr<DecodingState> Error(std::string message) {
+ DecodeResult result(nullptr);
+ result.error(module_offset_ - 1, std::move(message));
+ return Error(std::move(result));
+ }
+
+ void ProcessModuleHeader() {
+ if (!ok_) return;
+ ok_ &= processor_->ProcessModuleHeader(
+ Vector<const uint8_t>(state_->buffer(),
+ static_cast<int>(state_->size())),
+ 0);
+ }
+
+ void ProcessSection(SectionBuffer* buffer) {
+ if (!ok_) return;
+ ok_ &= processor_->ProcessSection(
+ buffer->section_code(), buffer->payload(),
+ buffer->module_offset() +
+ static_cast<uint32_t>(buffer->payload_offset()));
+ }
+
+ void StartCodeSection(size_t num_functions) {
+ if (!ok_) return;
+ // The offset passed to {ProcessCodeSectionHeader} is an error offset and
+ // not the start offset of a buffer. Therefore we need the -1 here.
+ ok_ &= processor_->ProcessCodeSectionHeader(num_functions,
+ module_offset() - 1);
+ }
+
+ void ProcessFunctionBody(Vector<const uint8_t> bytes,
+ uint32_t module_offset) {
+ if (!ok_) return;
+ ok_ &= processor_->ProcessFunctionBody(bytes, module_offset);
+ }
+
+ bool ok() const { return ok_; }
+
+ uint32_t module_offset() const { return module_offset_; }
- Isolate* isolate_;
+ std::unique_ptr<StreamingProcessor> processor_;
+ bool ok_ = true;
std::unique_ptr<DecodingState> state_;
- // The decoder is an instance variable because we use it for error handling.
- Decoder decoder_;
std::vector<std::unique_ptr<SectionBuffer>> section_buffers_;
+ uint32_t module_offset_ = 0;
size_t total_size_ = 0;
+ uint8_t next_section_id_ = kFirstSectionInModule;
DISALLOW_COPY_AND_ASSIGN(StreamingDecoder);
};
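
Putting the pieces together, a hedged usage sketch of the new interface, assuming the hypothetical CountingProcessor sketched earlier and <algorithm> for std::min; the chunk boundaries are arbitrary, since the decoder reassembles sections across them:

void DecodeInChunks(Vector<const uint8_t> wire_bytes) {
  StreamingDecoder decoder(base::make_unique<CountingProcessor>());
  const int kChunkSize = 16;
  for (int i = 0; i < wire_bytes.length(); i += kChunkSize) {
    int len = std::min(kChunkSize, wire_bytes.length() - i);
    decoder.OnBytesReceived(
        Vector<const uint8_t>(wire_bytes.start() + i, len));
  }
  decoder.Finish();  // reports OnFinishedStream (or OnError) to the processor
}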
diff --git a/deps/v8/src/wasm/wasm-code-specialization.cc b/deps/v8/src/wasm/wasm-code-specialization.cc
index 52e565c0a7..33db8bb7d2 100644
--- a/deps/v8/src/wasm/wasm-code-specialization.cc
+++ b/deps/v8/src/wasm/wasm-code-specialization.cc
@@ -9,20 +9,9 @@
#include "src/source-position-table.h"
#include "src/wasm/decoder.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-opcodes.h"
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wheader-hygiene"
-#endif
-
-using namespace v8::internal;
-using namespace v8::internal::wasm;
-
-#if __clang__
-#pragma clang diagnostic pop
-#endif
-
namespace v8 {
namespace internal {
namespace wasm {
@@ -36,10 +25,6 @@ int ExtractDirectCallIndex(wasm::Decoder& decoder, const byte* pc) {
return static_cast<int>(call_idx);
}
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
namespace {
int AdvanceSourcePositionTableIterator(SourcePositionTableIterator& iterator,
@@ -88,16 +73,10 @@ CodeSpecialization::CodeSpecialization(Isolate* isolate, Zone* zone) {}
CodeSpecialization::~CodeSpecialization() {}
-void CodeSpecialization::RelocateMemoryReferences(Address old_start,
- uint32_t old_size,
- Address new_start,
- uint32_t new_size) {
- DCHECK(old_mem_start == nullptr && old_mem_size == 0 &&
- new_mem_start == nullptr && new_mem_size == 0);
- old_mem_start = old_start;
- old_mem_size = old_size;
- new_mem_start = new_start;
- new_mem_size = new_size;
+void CodeSpecialization::RelocateWasmContextReferences(Address new_context) {
+ DCHECK_NOT_NULL(new_context);
+ DCHECK_NULL(new_wasm_context_address);
+ new_wasm_context_address = new_context;
}
void CodeSpecialization::RelocateGlobals(Address old_start, Address new_start) {
@@ -146,37 +125,51 @@ bool CodeSpecialization::ApplyToWholeInstance(
changed |= ApplyToWasmCode(wasm_function, icache_flush_mode);
}
- // Patch all exported functions (if we shall relocate direct calls).
+ // Patch all exported functions (JS_TO_WASM_FUNCTION).
+ int reloc_mode = 0;
+ // We need to patch WASM_CONTEXT_REFERENCE to put the correct address.
+ if (new_wasm_context_address) {
+ reloc_mode |= RelocInfo::ModeMask(RelocInfo::WASM_CONTEXT_REFERENCE);
+ }
+ // Patch CODE_TARGET if we shall relocate direct calls. If we patch direct
+ // calls, the instance registered for that (relocate_direct_calls_instance)
+ // should match the instance we currently patch (instance).
if (!relocate_direct_calls_instance.is_null()) {
- // If we patch direct calls, the instance registered for that
- // (relocate_direct_calls_instance) should match the instance we currently
- // patch (instance).
- int wrapper_index = 0;
DCHECK_EQ(instance, *relocate_direct_calls_instance);
- for (auto exp : module->export_table) {
- if (exp.kind != kExternalFunction) continue;
- Code* export_wrapper =
- Code::cast(compiled_module->export_wrappers()->get(wrapper_index));
- DCHECK_EQ(Code::JS_TO_WASM_FUNCTION, export_wrapper->kind());
- // There must be exactly one call to WASM_FUNCTION or WASM_TO_JS_FUNCTION.
- for (RelocIterator it(export_wrapper,
- RelocInfo::ModeMask(RelocInfo::CODE_TARGET));
- ; it.next()) {
- DCHECK(!it.done());
- // Ignore calls to other builtins like ToNumber.
- if (!IsAtWasmDirectCallTarget(it)) continue;
- Code* new_code = Code::cast(code_table->get(exp.index));
- it.rinfo()->set_target_address(new_code->GetIsolate(),
- new_code->instruction_start(),
- UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
- break;
+ reloc_mode |= RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
+ }
+ if (!reloc_mode) return changed;
+ int wrapper_index = 0;
+ for (auto exp : module->export_table) {
+ if (exp.kind != kExternalFunction) continue;
+ Code* export_wrapper =
+ Code::cast(compiled_module->export_wrappers()->get(wrapper_index));
+ DCHECK_EQ(Code::JS_TO_WASM_FUNCTION, export_wrapper->kind());
+ for (RelocIterator it(export_wrapper, reloc_mode); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ switch (mode) {
+ case RelocInfo::WASM_CONTEXT_REFERENCE:
+ it.rinfo()->set_wasm_context_reference(export_wrapper->GetIsolate(),
+ new_wasm_context_address,
+ icache_flush_mode);
+ break;
+ case RelocInfo::CODE_TARGET: {
+ // Ignore calls to other builtins like ToNumber.
+ if (!IsAtWasmDirectCallTarget(it)) continue;
+ Code* new_code = Code::cast(code_table->get(exp.index));
+ it.rinfo()->set_target_address(
+ new_code->GetIsolate(), new_code->instruction_start(),
+ UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ } break;
+ default:
+ UNREACHABLE();
}
- changed = true;
- ++wrapper_index;
}
- DCHECK_EQ(code_table->length(), func_index);
- DCHECK_EQ(compiled_module->export_wrappers()->length(), wrapper_index);
+ changed = true;
+ ++wrapper_index;
}
+ DCHECK_EQ(code_table->length(), func_index);
+ DCHECK_EQ(compiled_module->export_wrappers()->length(), wrapper_index);
return changed;
}
@@ -185,8 +178,6 @@ bool CodeSpecialization::ApplyToWasmCode(Code* code,
DisallowHeapAllocation no_gc;
DCHECK_EQ(Code::WASM_FUNCTION, code->kind());
- bool reloc_mem_addr = old_mem_start != new_mem_start;
- bool reloc_mem_size = old_mem_size != new_mem_size;
bool reloc_globals = old_globals_start || new_globals_start;
bool patch_table_size = old_function_table_size || new_function_table_size;
bool reloc_direct_calls = !relocate_direct_calls_instance.is_null();
@@ -196,8 +187,6 @@ bool CodeSpecialization::ApplyToWasmCode(Code* code,
auto add_mode = [&reloc_mode](bool cond, RelocInfo::Mode mode) {
if (cond) reloc_mode |= RelocInfo::ModeMask(mode);
};
- add_mode(reloc_mem_addr, RelocInfo::WASM_MEMORY_REFERENCE);
- add_mode(reloc_mem_size, RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
add_mode(reloc_globals, RelocInfo::WASM_GLOBAL_REFERENCE);
add_mode(patch_table_size, RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
add_mode(reloc_direct_calls, RelocInfo::CODE_TARGET);
@@ -209,19 +198,6 @@ bool CodeSpecialization::ApplyToWasmCode(Code* code,
for (RelocIterator it(code, reloc_mode); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
switch (mode) {
- case RelocInfo::WASM_MEMORY_REFERENCE:
- DCHECK(reloc_mem_addr);
- it.rinfo()->update_wasm_memory_reference(code->GetIsolate(),
- old_mem_start, new_mem_start,
- icache_flush_mode);
- changed = true;
- break;
- case RelocInfo::WASM_MEMORY_SIZE_REFERENCE:
- DCHECK(reloc_mem_size);
- it.rinfo()->update_wasm_memory_size(code->GetIsolate(), old_mem_size,
- new_mem_size, icache_flush_mode);
- changed = true;
- break;
case RelocInfo::WASM_GLOBAL_REFERENCE:
DCHECK(reloc_globals);
it.rinfo()->update_wasm_global_reference(
@@ -281,3 +257,7 @@ bool CodeSpecialization::ApplyToWasmCode(Code* code,
return changed;
}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-code-specialization.h b/deps/v8/src/wasm/wasm-code-specialization.h
index abcc941c5f..4cf422b64f 100644
--- a/deps/v8/src/wasm/wasm-code-specialization.h
+++ b/deps/v8/src/wasm/wasm-code-specialization.h
@@ -28,9 +28,8 @@ class CodeSpecialization {
CodeSpecialization(Isolate*, Zone*);
~CodeSpecialization();
- // Update memory references.
- void RelocateMemoryReferences(Address old_start, uint32_t old_size,
- Address new_start, uint32_t new_size);
+ // Update WasmContext references.
+ void RelocateWasmContextReferences(Address new_context);
// Update references to global variables.
void RelocateGlobals(Address old_start, Address new_start);
// Update function table size.
@@ -49,10 +48,7 @@ class CodeSpecialization {
bool ApplyToWasmCode(Code*, ICacheFlushMode = FLUSH_ICACHE_IF_NEEDED);
private:
- Address old_mem_start = 0;
- uint32_t old_mem_size = 0;
- Address new_mem_start = 0;
- uint32_t new_mem_size = 0;
+ Address new_wasm_context_address = 0;
Address old_globals_start = 0;
Address new_globals_start = 0;
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 0770940484..79c784a0f7 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -17,20 +17,12 @@
#include "src/wasm/wasm-interpreter.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-objects-inl.h"
#include "src/zone/accounting-allocator.h"
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wheader-hygiene"
-#endif
-
-using namespace v8::internal;
-using namespace v8::internal::wasm;
-
-#if __clang__
-#pragma clang diagnostic pop
-#endif
+namespace v8 {
+namespace internal {
+namespace wasm {
namespace {
@@ -98,10 +90,6 @@ MaybeHandle<String> GetLocalName(Isolate* isolate,
return handle(String::cast(func_locals_names->get(local_index)));
}
-// Forward declaration.
-class InterpreterHandle;
-InterpreterHandle* GetInterpreterHandle(WasmDebugInfo* debug_info);
-
class InterpreterHandle {
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(InterpreterHandle);
Isolate* isolate_;
@@ -150,20 +138,16 @@ class InterpreterHandle {
static uint32_t GetMemSize(WasmDebugInfo* debug_info) {
DisallowHeapAllocation no_gc;
- WasmCompiledModule* compiled_module =
- debug_info->wasm_instance()->compiled_module();
- return compiled_module->has_embedded_mem_size()
- ? compiled_module->embedded_mem_size()
+ return debug_info->wasm_instance()->has_memory_object()
+ ? debug_info->wasm_instance()->wasm_context()->mem_size
: 0;
}
static byte* GetMemStart(WasmDebugInfo* debug_info) {
DisallowHeapAllocation no_gc;
- WasmCompiledModule* compiled_module =
- debug_info->wasm_instance()->compiled_module();
- return reinterpret_cast<byte*>(compiled_module->has_embedded_mem_start()
- ? compiled_module->embedded_mem_start()
- : 0);
+ return debug_info->wasm_instance()->has_memory_object()
+ ? debug_info->wasm_instance()->wasm_context()->mem_start
+ : nullptr;
}
static byte* GetGlobalsStart(WasmDebugInfo* debug_info) {
@@ -326,7 +310,12 @@ class InterpreterHandle {
WasmInterpreterEntryFrame* frame =
WasmInterpreterEntryFrame::cast(it.frame());
Handle<WasmInstanceObject> instance_obj(frame->wasm_instance(), isolate_);
- DCHECK_EQ(this, GetInterpreterHandle(instance_obj->debug_info()));
+ // Check that this is indeed the instance which is connected to this
+ // interpreter.
+ DCHECK_EQ(this, Managed<wasm::InterpreterHandle>::cast(
+ instance_obj->debug_info()->get(
+ WasmDebugInfo::kInterpreterHandleIndex))
+ ->get());
return instance_obj;
}
@@ -565,29 +554,35 @@ class InterpreterHandle {
}
};
-InterpreterHandle* GetOrCreateInterpreterHandle(
+} // namespace
+
+} // namespace wasm
+
+namespace {
+
+wasm::InterpreterHandle* GetOrCreateInterpreterHandle(
Isolate* isolate, Handle<WasmDebugInfo> debug_info) {
Handle<Object> handle(debug_info->get(WasmDebugInfo::kInterpreterHandleIndex),
isolate);
if (handle->IsUndefined(isolate)) {
- InterpreterHandle* cpp_handle = new InterpreterHandle(isolate, *debug_info);
- handle = Managed<InterpreterHandle>::New(isolate, cpp_handle);
+ handle = Managed<wasm::InterpreterHandle>::Allocate(isolate, isolate,
+ *debug_info);
debug_info->set(WasmDebugInfo::kInterpreterHandleIndex, *handle);
}
- return Handle<Managed<InterpreterHandle>>::cast(handle)->get();
+ return Handle<Managed<wasm::InterpreterHandle>>::cast(handle)->get();
}
-InterpreterHandle* GetInterpreterHandle(WasmDebugInfo* debug_info) {
+wasm::InterpreterHandle* GetInterpreterHandle(WasmDebugInfo* debug_info) {
Object* handle_obj = debug_info->get(WasmDebugInfo::kInterpreterHandleIndex);
DCHECK(!handle_obj->IsUndefined(debug_info->GetIsolate()));
- return Managed<InterpreterHandle>::cast(handle_obj)->get();
+ return Managed<wasm::InterpreterHandle>::cast(handle_obj)->get();
}
-InterpreterHandle* GetInterpreterHandleOrNull(WasmDebugInfo* debug_info) {
+wasm::InterpreterHandle* GetInterpreterHandleOrNull(WasmDebugInfo* debug_info) {
Object* handle_obj = debug_info->get(WasmDebugInfo::kInterpreterHandleIndex);
if (handle_obj->IsUndefined(debug_info->GetIsolate())) return nullptr;
- return Managed<InterpreterHandle>::cast(handle_obj)->get();
+ return Managed<wasm::InterpreterHandle>::cast(handle_obj)->get();
}
int GetNumFunctions(WasmInstanceObject* instance) {
@@ -657,14 +652,14 @@ Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<WasmInstanceObject> instance) {
return debug_info;
}
-WasmInterpreter* WasmDebugInfo::SetupForTesting(
+wasm::WasmInterpreter* WasmDebugInfo::SetupForTesting(
Handle<WasmInstanceObject> instance_obj) {
Handle<WasmDebugInfo> debug_info = WasmDebugInfo::New(instance_obj);
Isolate* isolate = instance_obj->GetIsolate();
- InterpreterHandle* cpp_handle = new InterpreterHandle(isolate, *debug_info);
- Handle<Object> handle = Managed<InterpreterHandle>::New(isolate, cpp_handle);
- debug_info->set(kInterpreterHandleIndex, *handle);
- return cpp_handle->interpreter();
+ auto interp_handle =
+ Managed<wasm::InterpreterHandle>::Allocate(isolate, isolate, *debug_info);
+ debug_info->set(kInterpreterHandleIndex, *interp_handle);
+ return interp_handle->get()->interpreter();
}
bool WasmDebugInfo::IsWasmDebugInfo(Object* object) {
@@ -691,9 +686,9 @@ WasmInstanceObject* WasmDebugInfo::wasm_instance() {
void WasmDebugInfo::SetBreakpoint(Handle<WasmDebugInfo> debug_info,
int func_index, int offset) {
Isolate* isolate = debug_info->GetIsolate();
- InterpreterHandle* handle = GetOrCreateInterpreterHandle(isolate, debug_info);
+ auto* handle = GetOrCreateInterpreterHandle(isolate, debug_info);
RedirectToInterpreter(debug_info, Vector<int>(&func_index, 1));
- const WasmFunction* func = &handle->module()->functions[func_index];
+ const wasm::WasmFunction* func = &handle->module()->functions[func_index];
handle->interpreter()->SetBreakpoint(func, offset, true);
}
@@ -753,12 +748,12 @@ void WasmDebugInfo::Unwind(Address frame_pointer) {
}
uint64_t WasmDebugInfo::NumInterpretedCalls() {
- auto handle = GetInterpreterHandleOrNull(this);
+ auto* handle = GetInterpreterHandleOrNull(this);
return handle ? handle->NumInterpretedCalls() : 0;
}
void WasmDebugInfo::UpdateMemory(JSArrayBuffer* new_memory) {
- InterpreterHandle* interp_handle = GetInterpreterHandleOrNull(this);
+ auto* interp_handle = GetInterpreterHandleOrNull(this);
if (!interp_handle) return;
interp_handle->UpdateMemory(new_memory);
}
@@ -766,14 +761,14 @@ void WasmDebugInfo::UpdateMemory(JSArrayBuffer* new_memory) {
// static
Handle<JSObject> WasmDebugInfo::GetScopeDetails(
Handle<WasmDebugInfo> debug_info, Address frame_pointer, int frame_index) {
- InterpreterHandle* interp_handle = GetInterpreterHandle(*debug_info);
+ auto* interp_handle = GetInterpreterHandle(*debug_info);
return interp_handle->GetScopeDetails(frame_pointer, frame_index, debug_info);
}
// static
Handle<JSObject> WasmDebugInfo::GetGlobalScopeObject(
Handle<WasmDebugInfo> debug_info, Address frame_pointer, int frame_index) {
- InterpreterHandle* interp_handle = GetInterpreterHandle(*debug_info);
+ auto* interp_handle = GetInterpreterHandle(*debug_info);
auto frame = interp_handle->GetInterpretedFrame(frame_pointer, frame_index);
return interp_handle->GetGlobalScopeObject(frame.get(), debug_info);
}
@@ -781,22 +776,21 @@ Handle<JSObject> WasmDebugInfo::GetGlobalScopeObject(
// static
Handle<JSObject> WasmDebugInfo::GetLocalScopeObject(
Handle<WasmDebugInfo> debug_info, Address frame_pointer, int frame_index) {
- InterpreterHandle* interp_handle = GetInterpreterHandle(*debug_info);
+ auto* interp_handle = GetInterpreterHandle(*debug_info);
auto frame = interp_handle->GetInterpretedFrame(frame_pointer, frame_index);
return interp_handle->GetLocalScopeObject(frame.get(), debug_info);
}
// static
Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
- Handle<WasmDebugInfo> debug_info, FunctionSig* sig) {
+ Handle<WasmDebugInfo> debug_info, wasm::FunctionSig* sig) {
Isolate* isolate = debug_info->GetIsolate();
DCHECK_EQ(debug_info->has_c_wasm_entries(),
debug_info->has_c_wasm_entry_map());
if (!debug_info->has_c_wasm_entries()) {
auto entries = isolate->factory()->NewFixedArray(4, TENURED);
debug_info->set_c_wasm_entries(*entries);
- auto managed_map =
- Managed<wasm::SignatureMap>::New(isolate, new wasm::SignatureMap());
+ auto managed_map = Managed<wasm::SignatureMap>::Allocate(isolate);
debug_info->set_c_wasm_entry_map(*managed_map);
}
Handle<FixedArray> entries(debug_info->c_wasm_entries(), isolate);
@@ -810,7 +804,12 @@ Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
debug_info->set_c_wasm_entries(*entries);
}
DCHECK(entries->get(index)->IsUndefined(isolate));
- Handle<Code> new_entry_code = compiler::CompileCWasmEntry(isolate, sig);
+ Address context_address = reinterpret_cast<Address>(
+ debug_info->wasm_instance()->has_memory_object()
+ ? debug_info->wasm_instance()->wasm_context()
+ : nullptr);
+ Handle<Code> new_entry_code =
+ compiler::CompileCWasmEntry(isolate, sig, context_address);
Handle<String> name = isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("c-wasm-entry"));
Handle<SharedFunctionInfo> shared =
@@ -826,3 +825,6 @@ Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
}
return handle(JSFunction::cast(entries->get(index)));
}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index 8c2547bf3e..93a84583b9 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -10,6 +10,7 @@
#include "include/v8config.h"
#include "src/base/bits.h"
+#include "src/trap-handler/trap-handler.h"
#include "src/utils.h"
#include "src/wasm/wasm-external-refs.h"
@@ -223,6 +224,10 @@ void float64_pow_wrapper(double* param0, double* param1) {
WriteDoubleValue(param0, Pow(x, y));
}
+void set_thread_in_wasm_flag() { trap_handler::SetThreadInWasm(); }
+
+void clear_thread_in_wasm_flag() { trap_handler::ClearThreadInWasm(); }
+
static WasmTrapCallbackForTesting wasm_trap_callback_for_testing = nullptr;
void set_trap_callback_for_testing(WasmTrapCallbackForTesting callback) {
diff --git a/deps/v8/src/wasm/wasm-external-refs.h b/deps/v8/src/wasm/wasm-external-refs.h
index 04337b99ec..e4e88de0db 100644
--- a/deps/v8/src/wasm/wasm-external-refs.h
+++ b/deps/v8/src/wasm/wasm-external-refs.h
@@ -61,6 +61,9 @@ uint32_t word64_popcnt_wrapper(uint64_t* input);
void float64_pow_wrapper(double* param0, double* param1);
+void set_thread_in_wasm_flag();
+void clear_thread_in_wasm_flag();
+
typedef void (*WasmTrapCallbackForTesting)();
void set_trap_callback_for_testing(WasmTrapCallbackForTesting callback);
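
A hypothetical RAII wrapper over the two new helpers, sketching how a runtime call made from wasm code could clear the flag on entry and restore it on every exit path:

class ClearThreadInWasmScope {
 public:
  ClearThreadInWasmScope() { clear_thread_in_wasm_flag(); }
  ~ClearThreadInWasmScope() { set_thread_in_wasm_flag(); }
};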
diff --git a/deps/v8/src/wasm/wasm-heap.cc b/deps/v8/src/wasm/wasm-heap.cc
new file mode 100644
index 0000000000..b7d13b067f
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-heap.cc
@@ -0,0 +1,101 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-heap.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+DisjointAllocationPool::DisjointAllocationPool(Address start, Address end) {
+ ranges_.push_back({start, end});
+}
+
+void DisjointAllocationPool::Merge(DisjointAllocationPool&& other) {
+ auto dest_it = ranges_.begin();
+ auto dest_end = ranges_.end();
+
+ for (auto src_it = other.ranges_.begin(), src_end = other.ranges_.end();
+ src_it != src_end;) {
+ if (dest_it == dest_end) {
+ // Everything else coming from src will be inserted
+ // at the back of ranges_ from now on.
+ ranges_.push_back(*src_it);
+ ++src_it;
+ continue;
+ }
+ // Before or adjacent to dest. Insert or merge, and advance
+ // just src.
+ if (dest_it->first >= src_it->second) {
+ if (dest_it->first == src_it->second) {
+ dest_it->first = src_it->first;
+ } else {
+ ranges_.insert(dest_it, {src_it->first, src_it->second});
+ }
+ ++src_it;
+ continue;
+ }
+ // Src is strictly after dest. Skip over this dest.
+ if (dest_it->second < src_it->first) {
+ ++dest_it;
+ continue;
+ }
+ // Src is adjacent from above. Merge and advance
+ // just src, because the next src, if any, is bound to be
+ // strictly above the newly-formed range.
+ DCHECK_EQ(dest_it->second, src_it->first);
+ dest_it->second = src_it->second;
+ ++src_it;
+ // Now that we merged, this new range may be adjacent to
+ // the next. Since we assume src to have come from the
+ // same original memory pool, it follows that the next src
+ // must be above or adjacent to the newly-merged range.
+ auto next_dest = dest_it;
+ ++next_dest;
+ if (next_dest != dest_end && dest_it->second == next_dest->first) {
+ dest_it->second = next_dest->second;
+ ranges_.erase(next_dest);
+ }
+
+ // src_it points now at the next, if any, src
+ DCHECK_IMPLIES(src_it != src_end, src_it->first >= dest_it->second);
+ }
+}
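
A worked example of the merge above, with hypothetical address ranges:

//   this:  {[0x10, 0x20)}
//   other: {[0x00, 0x10), [0x20, 0x30)}
//
//   this.Merge(std::move(other)) first extends [0x10, 0x20) down to
//   [0x00, 0x20), then coalesces the adjacent [0x20, 0x30), leaving
//   a single range {[0x00, 0x30)}.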
+
+DisjointAllocationPool DisjointAllocationPool::Extract(size_t size,
+ ExtractionMode mode) {
+ DisjointAllocationPool ret;
+ for (auto it = ranges_.begin(), end = ranges_.end(); it != end;) {
+ auto current = it;
+ ++it;
+ DCHECK_LT(current->first, current->second);
+ size_t current_size = reinterpret_cast<size_t>(current->second) -
+ reinterpret_cast<size_t>(current->first);
+ if (size == current_size) {
+ ret.ranges_.push_back(*current);
+ ranges_.erase(current);
+ return ret;
+ }
+ if (size < current_size) {
+ ret.ranges_.push_back({current->first, current->first + size});
+ current->first += size;
+ DCHECK_LT(current->first, current->second);
+ return ret;
+ }
+ if (mode != kContiguous) {
+ size -= current_size;
+ ret.ranges_.push_back(*current);
+ ranges_.erase(current);
+ }
+ }
+ if (size > 0) {
+ Merge(std::move(ret));
+ return {};
+ }
+ return ret;
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-heap.h b/deps/v8/src/wasm/wasm-heap.h
new file mode 100644
index 0000000000..60cbfb14ba
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-heap.h
@@ -0,0 +1,66 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_HEAP_H_
+#define V8_WASM_HEAP_H_
+
+#include <list>
+
+#include "src/base/macros.h"
+#include "src/vector.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// Sorted, disjoint memory ranges, each of the form [start, end). Adjacent
+// ranges are always coalesced, so there is never a pair [start, end),
+// [end, other_end); it would have been reduced to [start, other_end).
+using AddressRange = std::pair<Address, Address>;
+class V8_EXPORT_PRIVATE DisjointAllocationPool final {
+ public:
+ enum ExtractionMode : bool { kAny = false, kContiguous = true };
+ DisjointAllocationPool() {}
+
+ explicit DisjointAllocationPool(Address, Address);
+
+ DisjointAllocationPool(DisjointAllocationPool&& other) = default;
+ DisjointAllocationPool& operator=(DisjointAllocationPool&& other) = default;
+
+ // Merge the ranges of the parameter into this object. Ordering is
+ // preserved. The assumption is that the passed parameter is
+ // not intersecting this object - for example, it was obtained
+ // from a previous Allocate{Pool}.
+ void Merge(DisjointAllocationPool&&);
+
+ // Allocate a contiguous range of size {size}. Return an empty pool on
+ // failure.
+ DisjointAllocationPool Allocate(size_t size) {
+ return Extract(size, kContiguous);
+ }
+
+ // Allocate a sub-pool of size {size}. Return an empty pool on failure.
+ DisjointAllocationPool AllocatePool(size_t size) {
+ return Extract(size, kAny);
+ }
+
+ bool IsEmpty() const { return ranges_.empty(); }
+ const std::list<AddressRange>& ranges() const { return ranges_; }
+
+ private:
+ // Extract a total of {size} bytes. By default, the return may
+ // be more than one range. If kContiguous is passed, the return
+ // will be a single range. If the operation fails, this object is
+ // unchanged, and the returned pool satisfies {IsEmpty()}.
+ DisjointAllocationPool Extract(size_t size, ExtractionMode mode);
+
+ std::list<AddressRange> ranges_;
+
+ DISALLOW_COPY_AND_ASSIGN(DisjointAllocationPool);
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+#endif  // V8_WASM_HEAP_H_
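
A hedged usage sketch for the pool (hypothetical addresses; Address is a byte pointer in this tree, so pointer arithmetic applies):

void PoolExample() {
  Address base = reinterpret_cast<Address>(0x10000);
  DisjointAllocationPool pool(base, base + 0x1000);

  // Carve a contiguous 0x100-byte range out of the front of the pool.
  DisjointAllocationPool a = pool.Allocate(0x100);

  // AllocatePool may hand back several smaller ranges once fragmented.
  DisjointAllocationPool b = pool.AllocatePool(0x200);

  // Returning the memory coalesces adjacent ranges again; after both
  // merges the pool is back to a single [start, end) range.
  pool.Merge(std::move(a));
  pool.Merge(std::move(b));
}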
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index 8b81d73b4f..4269e18c8f 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -15,10 +15,11 @@
#include "src/wasm/decoder.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/memory-tracing.h"
#include "src/wasm/wasm-external-refs.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-objects-inl.h"
#include "src/zone/accounting-allocator.h"
#include "src/zone/zone-containers.h"
@@ -624,24 +625,11 @@ inline int32_t ExecuteGrowMemory(uint32_t delta_pages,
Isolate* isolate = instance->GetIsolate();
int32_t ret = WasmInstanceObject::GrowMemory(isolate, instance, delta_pages);
-#ifdef DEBUG
// Ensure the effects of GrowMemory have been observed by the interpreter.
// See {UpdateMemory}. In all cases, we are in agreement with the runtime
// object's view.
- uint32_t cached_size = mem_info->mem_size;
- byte* cached_start = mem_info->mem_start;
- uint32_t instance_size =
- instance->compiled_module()->has_embedded_mem_size()
- ? instance->compiled_module()->embedded_mem_size()
- : 0;
- byte* instance_start =
- instance->compiled_module()->has_embedded_mem_start()
- ? reinterpret_cast<byte*>(
- instance->compiled_module()->embedded_mem_start())
- : nullptr;
- CHECK_EQ(cached_size, instance_size);
- CHECK_EQ(cached_start, instance_start);
-#endif
+ DCHECK_EQ(mem_info->mem_size, instance->wasm_context()->mem_size);
+ DCHECK_EQ(mem_info->mem_start, instance->wasm_context()->mem_start);
return ret;
}
@@ -667,23 +655,24 @@ const char* OpcodeName(uint32_t val) {
Handle<HeapObject> UnwrapWasmToJSWrapper(Isolate* isolate,
Handle<Code> js_wrapper) {
DCHECK_EQ(Code::WASM_TO_JS_FUNCTION, js_wrapper->kind());
- int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(*js_wrapper, mask); !it.done(); it.next()) {
- HeapObject* obj = it.rinfo()->target_object();
- if (!obj->IsCallable()) continue;
-#ifdef DEBUG
- // There should only be this one reference to a callable object.
- for (it.next(); !it.done(); it.next()) {
- HeapObject* other = it.rinfo()->target_object();
- DCHECK(!other->IsCallable());
- }
-#endif
- return handle(obj, isolate);
+ Handle<FixedArray> deopt_data(js_wrapper->deoptimization_data(), isolate);
+ DCHECK_EQ(2, deopt_data->length());
+ intptr_t js_imports_table_loc = static_cast<intptr_t>(
+ HeapNumber::cast(deopt_data->get(0))->value_as_bits());
+ Handle<FixedArray> js_imports_table(
+ reinterpret_cast<FixedArray**>(js_imports_table_loc));
+ int index = 0;
+ CHECK(deopt_data->get(1)->ToInt32(&index));
+ DCHECK_GT(js_imports_table->length(), index);
+ Handle<Object> obj(js_imports_table->get(index), isolate);
+ if (obj->IsCallable()) {
+ return Handle<HeapObject>::cast(obj);
+ } else {
+ // If we did not find a callable object, this is an illegal JS import and
+ // obj must be undefined.
+ DCHECK(obj->IsUndefined(isolate));
+ return Handle<HeapObject>::null();
}
- // If we did not find a callable object, then there must be a reference to
- // the WasmThrowTypeError runtime function.
- // TODO(clemensh): Check that this is the case.
- return Handle<HeapObject>::null();
}
class SideTable;
@@ -1454,7 +1443,8 @@ class ThreadImpl {
}
template <typename ctype, typename mtype>
- bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len) {
+ bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len,
+ MachineRepresentation rep) {
MemoryAccessOperand<false> operand(decoder, code->at(pc), sizeof(ctype));
uint32_t index = Pop().to<uint32_t>();
if (!BoundsCheck<mtype>(cached_instance_info_->mem_size, operand.offset,
@@ -1467,12 +1457,20 @@ class ThreadImpl {
Push(result);
len = 1 + operand.length;
+
+ if (FLAG_wasm_trace_memory) {
+ tracing::TraceMemoryOperation(
+ tracing::kWasmInterpreted, false, rep, operand.offset + index,
+ code->function->func_index, static_cast<int>(pc),
+ cached_instance_info_->mem_start);
+ }
+
return true;
}
template <typename ctype, typename mtype>
- bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc,
- int& len) {
+ bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len,
+ MachineRepresentation rep) {
MemoryAccessOperand<false> operand(decoder, code->at(pc), sizeof(ctype));
WasmValue val = Pop();
@@ -1491,6 +1489,14 @@ class ThreadImpl {
} else if (std::is_same<double, ctype>::value) {
possible_nondeterminism_ |= std::isnan(val.to<double>());
}
+
+ if (FLAG_wasm_trace_memory) {
+ tracing::TraceMemoryOperation(
+ tracing::kWasmInterpreted, true, rep, operand.offset + index,
+ code->function->func_index, static_cast<int>(pc),
+ cached_instance_info_->mem_start);
+ }
+
return true;
}
@@ -1812,43 +1818,47 @@ class ThreadImpl {
break;
}
-#define LOAD_CASE(name, ctype, mtype) \
- case kExpr##name: { \
- if (!ExecuteLoad<ctype, mtype>(&decoder, code, pc, len)) return; \
- break; \
- }
-
- LOAD_CASE(I32LoadMem8S, int32_t, int8_t);
- LOAD_CASE(I32LoadMem8U, int32_t, uint8_t);
- LOAD_CASE(I32LoadMem16S, int32_t, int16_t);
- LOAD_CASE(I32LoadMem16U, int32_t, uint16_t);
- LOAD_CASE(I64LoadMem8S, int64_t, int8_t);
- LOAD_CASE(I64LoadMem8U, int64_t, uint8_t);
- LOAD_CASE(I64LoadMem16S, int64_t, int16_t);
- LOAD_CASE(I64LoadMem16U, int64_t, uint16_t);
- LOAD_CASE(I64LoadMem32S, int64_t, int32_t);
- LOAD_CASE(I64LoadMem32U, int64_t, uint32_t);
- LOAD_CASE(I32LoadMem, int32_t, int32_t);
- LOAD_CASE(I64LoadMem, int64_t, int64_t);
- LOAD_CASE(F32LoadMem, float, float);
- LOAD_CASE(F64LoadMem, double, double);
+#define LOAD_CASE(name, ctype, mtype, rep) \
+ case kExpr##name: { \
+ if (!ExecuteLoad<ctype, mtype>(&decoder, code, pc, len, \
+ MachineRepresentation::rep)) \
+ return; \
+ break; \
+ }
+
+ LOAD_CASE(I32LoadMem8S, int32_t, int8_t, kWord8);
+ LOAD_CASE(I32LoadMem8U, int32_t, uint8_t, kWord8);
+ LOAD_CASE(I32LoadMem16S, int32_t, int16_t, kWord16);
+ LOAD_CASE(I32LoadMem16U, int32_t, uint16_t, kWord16);
+ LOAD_CASE(I64LoadMem8S, int64_t, int8_t, kWord8);
+ LOAD_CASE(I64LoadMem8U, int64_t, uint8_t, kWord8);
+ LOAD_CASE(I64LoadMem16S, int64_t, int16_t, kWord16);
+ LOAD_CASE(I64LoadMem16U, int64_t, uint16_t, kWord16);
+ LOAD_CASE(I64LoadMem32S, int64_t, int32_t, kWord32);
+ LOAD_CASE(I64LoadMem32U, int64_t, uint32_t, kWord32);
+ LOAD_CASE(I32LoadMem, int32_t, int32_t, kWord32);
+ LOAD_CASE(I64LoadMem, int64_t, int64_t, kWord64);
+ LOAD_CASE(F32LoadMem, float, float, kFloat32);
+ LOAD_CASE(F64LoadMem, double, double, kFloat64);
#undef LOAD_CASE
-#define STORE_CASE(name, ctype, mtype) \
- case kExpr##name: { \
- if (!ExecuteStore<ctype, mtype>(&decoder, code, pc, len)) return; \
- break; \
- }
-
- STORE_CASE(I32StoreMem8, int32_t, int8_t);
- STORE_CASE(I32StoreMem16, int32_t, int16_t);
- STORE_CASE(I64StoreMem8, int64_t, int8_t);
- STORE_CASE(I64StoreMem16, int64_t, int16_t);
- STORE_CASE(I64StoreMem32, int64_t, int32_t);
- STORE_CASE(I32StoreMem, int32_t, int32_t);
- STORE_CASE(I64StoreMem, int64_t, int64_t);
- STORE_CASE(F32StoreMem, float, float);
- STORE_CASE(F64StoreMem, double, double);
+#define STORE_CASE(name, ctype, mtype, rep) \
+ case kExpr##name: { \
+ if (!ExecuteStore<ctype, mtype>(&decoder, code, pc, len, \
+ MachineRepresentation::rep)) \
+ return; \
+ break; \
+ }
+
+ STORE_CASE(I32StoreMem8, int32_t, int8_t, kWord8);
+ STORE_CASE(I32StoreMem16, int32_t, int16_t, kWord16);
+ STORE_CASE(I64StoreMem8, int64_t, int8_t, kWord8);
+ STORE_CASE(I64StoreMem16, int64_t, int16_t, kWord16);
+ STORE_CASE(I64StoreMem32, int64_t, int32_t, kWord32);
+ STORE_CASE(I32StoreMem, int32_t, int32_t, kWord32);
+ STORE_CASE(I64StoreMem, int64_t, int64_t, kWord64);
+ STORE_CASE(F32StoreMem, float, float, kFloat32);
+ STORE_CASE(F64StoreMem, double, double, kFloat64);
#undef STORE_CASE
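
For reference, a single LOAD_CASE entry above expands roughly as follows, showing that the new fourth argument is only threaded through to the --wasm-trace-memory tracing hook:

//   case kExprI32LoadMem8S: {
//     if (!ExecuteLoad<int32_t, int8_t>(&decoder, code, pc, len,
//                                       MachineRepresentation::kWord8))
//       return;
//     break;
//   }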
#define ASMJS_LOAD_CASE(name, ctype, mtype, defval) \
@@ -2004,6 +2014,7 @@ class ThreadImpl {
return;
PAUSE_IF_BREAK_FLAG(AfterReturn);
}
+#undef PAUSE_IF_BREAK_FLAG
}
state_ = WasmInterpreter::PAUSED;
@@ -2701,6 +2712,11 @@ WasmInterpreter::HeapObjectsScope::~HeapObjectsScope() {
}
#undef TRACE
+#undef FOREACH_INTERNAL_OPCODE
+#undef WASM_CTYPES
+#undef FOREACH_SIMPLE_BINOP
+#undef FOREACH_OTHER_BINOP
+#undef FOREACH_OTHER_UNOP
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index ce2e3f1341..6a017365aa 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -14,12 +14,14 @@
#include "src/objects.h"
#include "src/parsing/parse-info.h"
+#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-api.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-limits.h"
+#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
using v8::internal::wasm::ErrorThrower;
@@ -48,11 +50,6 @@ Local<String> v8_str(Isolate* isolate, const char* str) {
i::MaybeHandle<i::WasmModuleObject> GetFirstArgumentAsModule(
const v8::FunctionCallbackInfo<v8::Value>& args, ErrorThrower* thrower) {
- if (args.Length() < 1) {
- thrower->TypeError("Argument 0 must be a WebAssembly.Module");
- return {};
- }
-
i::Handle<i::Object> arg0 = Utils::OpenHandle(*args[0]);
if (!arg0->IsWasmModuleObject()) {
thrower->TypeError("Argument 0 must be a WebAssembly.Module");
@@ -66,11 +63,6 @@ i::MaybeHandle<i::WasmModuleObject> GetFirstArgumentAsModule(
i::wasm::ModuleWireBytes GetFirstArgumentAsBytes(
const v8::FunctionCallbackInfo<v8::Value>& args, ErrorThrower* thrower) {
- if (args.Length() < 1) {
- thrower->TypeError("Argument 0 must be a buffer source");
- return i::wasm::ModuleWireBytes(nullptr, nullptr);
- }
-
const uint8_t* start = nullptr;
size_t length = 0;
v8::Local<v8::Value> source = args[0];
@@ -122,6 +114,22 @@ void WebAssemblyCompileStreaming(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+
+ if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) {
+ // Manually create a promise and reject it.
+ Local<Context> context = isolate->GetCurrentContext();
+ ASSIGN(Promise::Resolver, resolver, Promise::Resolver::New(context));
+ v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+ return_value.Set(resolver->GetPromise());
+ i::wasm::ScheduledErrorThrower thrower(i_isolate,
+ "WebAssembly.compileStreaming()");
+ thrower.CompileError("Wasm code generation disallowed by embedder");
+ auto maybe = resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
+ CHECK_IMPLIES(!maybe.FromMaybe(false),
+ i_isolate->has_scheduled_exception());
+ return;
+ }
+
MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks);
DCHECK_NOT_NULL(i_isolate->wasm_compile_streaming_callback());
i_isolate->wasm_compile_streaming_callback()(args);
@@ -136,6 +144,10 @@ void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(isolate);
i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.compile()");
+ if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) {
+ thrower.CompileError("Wasm code generation disallowed by embedder");
+ }
+
Local<Context> context = isolate->GetCurrentContext();
ASSIGN(Promise::Resolver, resolver, Promise::Resolver::New(context));
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
@@ -180,6 +192,11 @@ void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(isolate);
i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module()");
+ if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) {
+ thrower.CompileError("Wasm code generation disallowed by embedder");
+ return;
+ }
+
auto bytes = GetFirstArgumentAsBytes(args, &thrower);
if (thrower.error()) {
@@ -393,16 +410,6 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<Promise> module_promise = resolver->GetPromise();
args.GetReturnValue().Set(module_promise);
- if (args.Length() < 1) {
- thrower.TypeError(
- "Argument 0 must be provided and must be either a buffer source or a "
- "WebAssembly.Module object");
- auto maybe = resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
- CHECK_IMPLIES(!maybe.FromMaybe(false),
- i_isolate->has_scheduled_exception());
- return;
- }
-
Local<Value> first_arg_value = args[0];
i::Handle<i::Object> first_arg = Utils::OpenHandle(*first_arg_value);
if (!first_arg->IsJSObject()) {
@@ -470,12 +477,12 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module()");
- if (args.Length() < 1 || !args[0]->IsObject()) {
+ if (!args[0]->IsObject()) {
thrower.TypeError("Argument 0 must be a table descriptor");
return;
}
Local<Context> context = isolate->GetCurrentContext();
- Local<v8::Object> descriptor = args[0]->ToObject(context).ToLocalChecked();
+ Local<v8::Object> descriptor = Local<Object>::Cast(args[0]);
// The descriptor's 'element'.
{
v8::MaybeLocal<v8::Value> maybe =
@@ -523,12 +530,12 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Memory()");
- if (args.Length() < 1 || !args[0]->IsObject()) {
+ if (!args[0]->IsObject()) {
thrower.TypeError("Argument 0 must be a memory descriptor");
return;
}
Local<Context> context = isolate->GetCurrentContext();
- Local<v8::Object> descriptor = args[0]->ToObject(context).ToLocalChecked();
+ Local<v8::Object> descriptor = Local<Object>::Cast(args[0]);
// The descriptor's 'initial'.
int64_t initial = 0;
if (!GetIntegerProperty(isolate, &thrower, context, descriptor,
@@ -571,7 +578,7 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
size_t size = static_cast<size_t>(i::wasm::WasmModule::kPageSize) *
static_cast<size_t>(initial);
i::Handle<i::JSArrayBuffer> buffer = i::wasm::NewArrayBuffer(
- i_isolate, size, i::FLAG_wasm_guard_pages,
+ i_isolate, size, internal::trap_handler::UseTrapHandler(),
is_shared_memory ? i::SharedFlag::kShared : i::SharedFlag::kNotShared);
if (buffer.is_null()) {
thrower.RangeError("could not allocate memory");
@@ -590,17 +597,16 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(Utils::ToLocal(memory_obj));
}
-#define NAME_OF_WasmMemoryObject "WebAssembly.Memory"
-#define NAME_OF_WasmModuleObject "WebAssembly.Module"
-#define NAME_OF_WasmInstanceObject "WebAssembly.Instance"
-#define NAME_OF_WasmTableObject "WebAssembly.Table"
+constexpr const char* kName_WasmMemoryObject = "WebAssembly.Memory";
+constexpr const char* kName_WasmInstanceObject = "WebAssembly.Instance";
+constexpr const char* kName_WasmTableObject = "WebAssembly.Table";
#define EXTRACT_THIS(var, WasmType) \
i::Handle<i::WasmType> var; \
{ \
i::Handle<i::Object> this_arg = Utils::OpenHandle(*args.This()); \
if (!this_arg->Is##WasmType()) { \
- thrower.TypeError("Receiver is not a " NAME_OF_##WasmType); \
+ thrower.TypeError("Receiver is not a %s", kName_##WasmType); \
return; \
} \
var = i::Handle<i::WasmType>::cast(this_arg); \
@@ -639,27 +645,24 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<Context> context = isolate->GetCurrentContext();
EXTRACT_THIS(receiver, WasmTableObject);
- int64_t new_size64 = 0;
- if (args.Length() > 0 && !args[0]->IntegerValue(context).To(&new_size64)) {
- return;
- }
+ int64_t grow_by = 0;
+ if (!args[0]->IntegerValue(context).To(&grow_by)) return;
i::Handle<i::FixedArray> old_array(receiver->functions(), i_isolate);
int old_size = old_array->length();
- new_size64 += old_size;
int64_t max_size64 = receiver->maximum_length()->Number();
if (max_size64 < 0 || max_size64 > i::FLAG_wasm_max_table_size) {
max_size64 = i::FLAG_wasm_max_table_size;
}
- if (new_size64 < old_size || new_size64 > max_size64) {
- thrower.RangeError(new_size64 < old_size ? "trying to shrink table"
- : "maximum table size exceeded");
+ if (grow_by < 0 || grow_by > max_size64 - old_size) {
+ thrower.RangeError(grow_by < 0 ? "trying to shrink table"
+ : "maximum table size exceeded");
return;
}
- int new_size = static_cast<int>(new_size64);
- receiver->grow(i_isolate, static_cast<uint32_t>(new_size - old_size));
+ int new_size = static_cast<int>(old_size + grow_by);
+ receiver->Grow(i_isolate, static_cast<uint32_t>(new_size - old_size));
if (new_size != old_size) {
i::Handle<i::FixedArray> new_array =
@@ -685,7 +688,7 @@ void WebAssemblyTableGet(const v8::FunctionCallbackInfo<v8::Value>& args) {
EXTRACT_THIS(receiver, WasmTableObject);
i::Handle<i::FixedArray> array(receiver->functions(), i_isolate);
int64_t i = 0;
- if (args.Length() > 0 && !args[0]->IntegerValue(context).To(&i)) return;
+ if (!args[0]->IntegerValue(context).To(&i)) return;
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
if (i < 0 || i >= array->length()) {
thrower.RangeError("index out of bounds");
@@ -705,11 +708,6 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<Context> context = isolate->GetCurrentContext();
EXTRACT_THIS(receiver, WasmTableObject);
- if (args.Length() < 2) {
- thrower.TypeError("Argument 1 must be null or a function");
- return;
- }
-
// Parameter 0.
int64_t index;
if (!args[0]->IntegerValue(context).To(&index)) return;
@@ -724,10 +722,15 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- i::wasm::TableSet(&thrower, i_isolate, receiver, index,
- value->IsNull(i_isolate)
- ? i::Handle<i::JSFunction>::null()
- : i::Handle<i::JSFunction>::cast(value));
+ if (index < 0 || index >= receiver->functions()->length()) {
+ thrower.RangeError("index out of bounds");
+ return;
+ }
+
+ i::WasmTableObject::Set(i_isolate, receiver, static_cast<int32_t>(index),
+ value->IsNull(i_isolate)
+ ? i::Handle<i::JSFunction>::null()
+ : i::Handle<i::JSFunction>::cast(value));
}
// WebAssembly.Memory.grow(num) -> num
@@ -741,10 +744,8 @@ void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
EXTRACT_THIS(receiver, WasmMemoryObject);
int64_t delta_size = 0;
- if (args.Length() < 1 || !args[0]->IntegerValue(context).To(&delta_size)) {
- thrower.TypeError("Argument 0 required, must be numeric value of pages");
- return;
- }
+ if (!args[0]->IntegerValue(context).To(&delta_size)) return;
+
int64_t max_size64 = receiver->maximum_pages();
if (max_size64 < 0 ||
max_size64 > static_cast<int64_t>(i::FLAG_wasm_max_mem_pages)) {
@@ -769,9 +770,17 @@ void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
thrower.RangeError("Unable to grow instance memory.");
return;
}
- bool free_memory = (delta_size != 0);
if (!old_buffer->is_shared()) {
- i::wasm::DetachWebAssemblyMemoryBuffer(i_isolate, old_buffer, free_memory);
+ // When delta_size == 0, or when guard pages are enabled, the same backing
+ // store is used. To be spec-compliant, the buffer associated with the
+ // memory object still needs to be detached: set up a new buffer over the
+ // same backing store, detach the old buffer, and do not free the memory.
+ bool free_memory = delta_size != 0 && !old_buffer->has_guard_region();
+ if ((!free_memory && old_size != 0) || new_size64 == 0) {
+ i::WasmMemoryObject::SetupNewBufferWithSameBackingStore(
+ i_isolate, receiver, static_cast<uint32_t>(new_size64));
+ }
+ i::wasm::DetachMemoryBuffer(i_isolate, old_buffer, free_memory);
}
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(ret);
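Per the JS API, Memory.grow must detach the previous ArrayBuffer even when the backing store is reused (a zero delta, or growth inside a pre-reserved guard region). The hunk above therefore re-points the memory object at a fresh buffer over the same store before detaching the old one without freeing it. A rough sketch of that decision, assuming a guard region implies in-place growth:

// Sketch: whether the old backing store may be freed after a grow.
// The old JSArrayBuffer is detached either way, to stay spec-compliant.
bool ShouldFreeOldBackingStore(int64_t delta_pages, bool has_guard_region) {
  // With a guard region the store grows in place; with delta == 0 it is
  // reused unchanged. In both cases the memory must not be freed.
  return delta_pages != 0 && !has_guard_region;
}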
@@ -847,7 +856,7 @@ void InstallGetter(Isolate* isolate, Handle<JSObject> object,
Local<Function>(), attributes);
}
-void WasmJs::Install(Isolate* isolate) {
+void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
Handle<JSGlobalObject> global = isolate->global_object();
Handle<Context> context(global->native_context(), isolate);
// Install the JS API once only.
@@ -867,11 +876,11 @@ void WasmJs::Install(Isolate* isolate) {
cons->shared()->set_instance_class_name(*name);
Handle<JSObject> webassembly = factory->NewJSObject(cons, TENURED);
PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
- JSObject::AddProperty(global, name, webassembly, attributes);
+
PropertyAttributes ro_attributes =
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
- JSObject::AddProperty(webassembly, factory->to_string_tag_symbol(),
- v8_str(isolate, "WebAssembly"), ro_attributes);
+ JSObject::AddProperty(webassembly, factory->to_string_tag_symbol(), name,
+ ro_attributes);
InstallFunc(isolate, webassembly, "compile", WebAssemblyCompile, 1);
InstallFunc(isolate, webassembly, "validate", WebAssemblyValidate, 1);
InstallFunc(isolate, webassembly, "instantiate", WebAssemblyInstantiate, 1);
@@ -883,6 +892,11 @@ void WasmJs::Install(Isolate* isolate) {
WebAssemblyInstantiateStreaming, 1);
}
+ // Expose the API on the global object if configured to do so.
+ if (exposed_on_global_object) {
+ JSObject::AddProperty(global, name, webassembly, attributes);
+ }
+
// Setup Module
Handle<JSFunction> module_constructor =
InstallFunc(isolate, webassembly, "Module", WebAssemblyModule, 1);
@@ -965,5 +979,9 @@ void WasmJs::Install(Isolate* isolate) {
JSObject::AddProperty(webassembly, isolate->factory()->RuntimeError_string(),
runtime_error, attributes);
}
+
+#undef ASSIGN
+#undef EXTRACT_THIS
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-js.h b/deps/v8/src/wasm/wasm-js.h
index 0ef2219b1f..926bd7647a 100644
--- a/deps/v8/src/wasm/wasm-js.h
+++ b/deps/v8/src/wasm/wasm-js.h
@@ -14,7 +14,8 @@ namespace internal {
// Exposes a WebAssembly API to JavaScript through the V8 API.
class WasmJs {
public:
- V8_EXPORT_PRIVATE static void Install(Isolate* isolate);
+ V8_EXPORT_PRIVATE static void Install(Isolate* isolate,
+ bool exposed_on_global_object);
// WebAssembly.Table.
static bool IsWasmTableObject(Isolate* isolate, Handle<Object> value);
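The added flag lets an embedder build the WebAssembly namespace object without publishing it on the global object. A hedged usage sketch (assumes src/wasm/wasm-js.h is included inside the V8 tree):

// Sketch: install the wasm JS API but keep the WebAssembly property off
// the global object, e.g. when the embedder exposes it selectively.
void InstallHiddenWasmApi(v8::internal::Isolate* isolate) {
  v8::internal::WasmJs::Install(isolate, /*exposed_on_global_object=*/false);
}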
diff --git a/deps/v8/src/wasm/wasm-memory.cc b/deps/v8/src/wasm/wasm-memory.cc
new file mode 100644
index 0000000000..4ddda98189
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-memory.cc
@@ -0,0 +1,134 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-memory.h"
+#include "src/objects-inl.h"
+#include "src/wasm/wasm-limits.h"
+#include "src/wasm/wasm-module.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+void* TryAllocateBackingStore(Isolate* isolate, size_t size,
+ bool enable_guard_regions, void*& allocation_base,
+ size_t& allocation_length) {
+ // TODO(eholk): Right now enable_guard_regions has no effect on 32-bit
+ // systems. It may be safer to fail instead, given that other code might do
+ // things that would be unsafe if they expected guard pages where there
+ // weren't any.
+ if (enable_guard_regions) {
+ // TODO(eholk): On Windows we want to make sure we don't commit the guard
+ // pages yet.
+
+ // We always allocate the largest possible offset into the heap, so the
+ // addressable memory after the guard page can be made inaccessible.
+ allocation_length = RoundUp(kWasmMaxHeapOffset, base::OS::CommitPageSize());
+ DCHECK_EQ(0, size % base::OS::CommitPageSize());
+
+ // Reserve makes the whole region inaccessible by default.
+ allocation_base =
+ isolate->array_buffer_allocator()->Reserve(allocation_length);
+ if (allocation_base == nullptr) {
+ return nullptr;
+ }
+
+ void* memory = allocation_base;
+
+ // Make the part we care about accessible.
+ isolate->array_buffer_allocator()->SetProtection(
+ memory, size, v8::ArrayBuffer::Allocator::Protection::kReadWrite);
+
+ reinterpret_cast<v8::Isolate*>(isolate)
+ ->AdjustAmountOfExternalAllocatedMemory(size);
+
+ return memory;
+ } else {
+ void* memory =
+ size == 0 ? nullptr : isolate->array_buffer_allocator()->Allocate(size);
+ allocation_base = memory;
+ allocation_length = size;
+ return memory;
+ }
+}
+
+Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* allocation_base,
+ size_t allocation_length,
+ void* backing_store, size_t size,
+ bool is_external,
+ bool enable_guard_regions,
+ SharedFlag shared) {
+ Handle<JSArrayBuffer> buffer =
+ isolate->factory()->NewJSArrayBuffer(shared, TENURED);
+ DCHECK_GE(kMaxInt, size);
+ if (shared == SharedFlag::kShared) DCHECK(FLAG_experimental_wasm_threads);
+ JSArrayBuffer::Setup(buffer, isolate, is_external, allocation_base,
+ allocation_length, backing_store, static_cast<int>(size),
+ shared);
+ buffer->set_is_neuterable(false);
+ buffer->set_is_growable(true);
+ buffer->set_has_guard_region(enable_guard_regions);
+ return buffer;
+}
+
+Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
+ bool enable_guard_regions,
+ SharedFlag shared) {
+ // Check against kMaxInt, since the byte length is stored as int in the
+ // JSArrayBuffer. Note that wasm_max_mem_pages can be raised from the command
+ // line, and we don't want to fail a CHECK then.
+ if (size > FLAG_wasm_max_mem_pages * WasmModule::kPageSize ||
+ size > kMaxInt) {
+ // TODO(titzer): lift restriction on maximum memory allocated here.
+ return Handle<JSArrayBuffer>::null();
+ }
+
+ void* allocation_base = nullptr; // Set by TryAllocateBackingStore
+ size_t allocation_length = 0; // Set by TryAllocateBackingStore
+ // Do not reserve memory until a non-zero memory size is requested.
+ void* memory =
+ (size == 0) ? nullptr
+ : TryAllocateBackingStore(isolate, size, enable_guard_regions,
+ allocation_base, allocation_length);
+
+ if (size > 0 && memory == nullptr) {
+ return Handle<JSArrayBuffer>::null();
+ }
+
+#if DEBUG
+ // Double check the API allocator actually zero-initialized the memory.
+ const byte* bytes = reinterpret_cast<const byte*>(memory);
+ for (size_t i = 0; i < size; ++i) {
+ DCHECK_EQ(0, bytes[i]);
+ }
+#endif
+
+ constexpr bool is_external = false;
+ return SetupArrayBuffer(isolate, allocation_base, allocation_length, memory,
+ size, is_external, enable_guard_regions, shared);
+}
+
+void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
+ bool free_memory) {
+ const bool is_external = buffer->is_external();
+ DCHECK(!buffer->is_neuterable());
+ if (!is_external) {
+ buffer->set_is_external(true);
+ isolate->heap()->UnregisterArrayBuffer(*buffer);
+ if (free_memory) {
+ // We need to free the memory before neutering the buffer because
+ // FreeBackingStore reads buffer->allocation_base(), which is nulled out
+ // by Neuter. This means there is a dangling pointer until we neuter the
+ // buffer. Since there is no way for the user to directly call
+ // FreeBackingStore, we can ensure this is safe.
+ buffer->FreeBackingStore();
+ }
+ }
+ buffer->set_is_neuterable(true);
+ buffer->Neuter();
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
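Taken together, the new file gives buffer allocation, setup, and detachment a home of their own. A minimal usage sketch of the exported helpers, with error handling elided (assumes src/wasm/wasm-memory.h is included inside the V8 tree):

// Sketch: allocate a one-page wasm memory buffer, then detach it while
// keeping the backing store alive, as Memory.grow does when reusing it.
void AllocateAndDetach(v8::internal::Isolate* isolate) {
  namespace wasm = v8::internal::wasm;
  v8::internal::Handle<v8::internal::JSArrayBuffer> buf =
      wasm::NewArrayBuffer(isolate, 64 * 1024, /*enable_guard_regions=*/false);
  if (buf.is_null()) return;  // allocation failed or exceeded the limit
  wasm::DetachMemoryBuffer(isolate, buf, /*free_memory=*/false);
}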
diff --git a/deps/v8/src/wasm/wasm-memory.h b/deps/v8/src/wasm/wasm-memory.h
new file mode 100644
index 0000000000..1054795f70
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-memory.h
@@ -0,0 +1,32 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_MEMORY_H_
+#define V8_WASM_MEMORY_H_
+
+#include "src/flags.h"
+#include "src/handles.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+Handle<JSArrayBuffer> NewArrayBuffer(
+ Isolate*, size_t size, bool enable_guard_regions,
+ SharedFlag shared = SharedFlag::kNotShared);
+
+Handle<JSArrayBuffer> SetupArrayBuffer(
+ Isolate*, void* allocation_base, size_t allocation_length,
+ void* backing_store, size_t size, bool is_external,
+ bool enable_guard_regions, SharedFlag shared = SharedFlag::kNotShared);
+
+void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
+ bool free_memory);
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif  // V8_WASM_MEMORY_H_
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index 0c2976757f..997496bb29 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -226,7 +226,8 @@ WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
start_function_index_(-1),
min_memory_size_(16),
max_memory_size_(0),
- has_max_memory_size_(false) {}
+ has_max_memory_size_(false),
+ has_shared_memory_(false) {}
WasmFunctionBuilder* WasmModuleBuilder::AddFunction(FunctionSig* sig) {
functions_.push_back(new (zone_) WasmFunctionBuilder(this));
@@ -325,6 +326,8 @@ void WasmModuleBuilder::SetMaxMemorySize(uint32_t value) {
max_memory_size_ = value;
}
+void WasmModuleBuilder::SetHasSharedMemory() { has_shared_memory_ = true; }
+
void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// == Emit magic =============================================================
buffer.write_u32(kWasmMagic);
@@ -396,8 +399,13 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
{
size_t start = EmitSection(kMemorySectionCode, buffer);
buffer.write_u8(1); // memory count
- buffer.write_u8(has_max_memory_size_ ? kResizableMaximumFlag
- : kNoMaximumFlag);
+ if (has_shared_memory_) {
+ buffer.write_u8(has_max_memory_size_ ? MemoryFlags::kSharedAndMaximum
+ : MemoryFlags::kSharedNoMaximum);
+ } else {
+ buffer.write_u8(has_max_memory_size_ ? MemoryFlags::kMaximum
+ : MemoryFlags::kNoMaximum);
+ }
buffer.write_u32v(min_memory_size_);
if (has_max_memory_size_) {
buffer.write_u32v(max_memory_size_);
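The MemoryFlags names mirror the limits encoding of the wasm binary format, where bit 0 marks a maximum and bit 1 (from the threads proposal) marks shared memory. A hedged sketch of that encoding; the numeric values are taken from the spec drafts of the time, not from this diff:

#include <cstdint>

// Sketch: limits flag byte as used in the memory section.
// 0x00 = no maximum, 0x01 = maximum, 0x02/0x03 = shared variants.
uint8_t EncodeMemoryFlags(bool has_maximum, bool is_shared) {
  uint8_t flags = 0;
  if (has_maximum) flags |= 0x01;
  if (is_shared) flags |= 0x02;
  return flags;
}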
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index 2e00318043..898f996cd3 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -10,7 +10,6 @@
#include "src/wasm/leb-helper.h"
#include "src/wasm/local-decl-encoder.h"
-#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
@@ -236,6 +235,7 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
void AddExport(Vector<const char> name, WasmFunctionBuilder* builder);
void SetMinMemorySize(uint32_t value);
void SetMaxMemorySize(uint32_t value);
+ void SetHasSharedMemory();
// Writing methods.
void WriteTo(ZoneBuffer& buffer) const;
@@ -295,6 +295,7 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
uint32_t min_memory_size_;
uint32_t max_memory_size_;
bool has_max_memory_size_;
+ bool has_shared_memory_;
};
inline FunctionSig* WasmFunctionBuilder::signature() {
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index 4adc9ef375..2c8266592a 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -5,6 +5,8 @@
#include <functional>
#include <memory>
+#include "src/api.h"
+#include "src/assembler-inl.h"
#include "src/code-stubs.h"
#include "src/debug/interface-types.h"
#include "src/frames-inl.h"
@@ -14,30 +16,19 @@
#include "src/snapshot/snapshot.h"
#include "src/v8.h"
+#include "src/compiler/wasm-compiler.h"
#include "src/wasm/compilation-manager.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-code-specialization.h"
#include "src/wasm/wasm-js.h"
-#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wheader-hygiene"
-#endif
-
-using namespace v8::internal;
-using namespace v8::internal::wasm;
-namespace base = v8::base;
-
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic pop
-#endif
+namespace v8 {
+namespace internal {
+namespace wasm {
#define TRACE(...) \
do { \
@@ -54,299 +45,61 @@ namespace base = v8::base;
if (FLAG_trace_wasm_compiler) PrintF(__VA_ARGS__); \
} while (false)
-namespace {
-
-
-void* TryAllocateBackingStore(Isolate* isolate, size_t size,
- bool enable_guard_regions, void*& allocation_base,
- size_t& allocation_length) {
- // TODO(eholk): Right now enable_guard_regions has no effect on 32-bit
- // systems. It may be safer to fail instead, given that other code might do
- // things that would be unsafe if they expected guard pages where there
- // weren't any.
- if (enable_guard_regions && kGuardRegionsSupported) {
- // TODO(eholk): On Windows we want to make sure we don't commit the guard
- // pages yet.
-
- // We always allocate the largest possible offset into the heap, so the
- // addressable memory after the guard page can be made inaccessible.
- allocation_length = RoundUp(kWasmMaxHeapOffset, base::OS::CommitPageSize());
- DCHECK_EQ(0, size % base::OS::CommitPageSize());
-
- // AllocateGuarded makes the whole region inaccessible by default.
- allocation_base =
- isolate->array_buffer_allocator()->Reserve(allocation_length);
- if (allocation_base == nullptr) {
- return nullptr;
- }
-
- void* memory = allocation_base;
-
- // Make the part we care about accessible.
- isolate->array_buffer_allocator()->SetProtection(
- memory, size, v8::ArrayBuffer::Allocator::Protection::kReadWrite);
-
- reinterpret_cast<v8::Isolate*>(isolate)
- ->AdjustAmountOfExternalAllocatedMemory(size);
-
- return memory;
- } else {
- void* memory =
- size == 0 ? nullptr : isolate->array_buffer_allocator()->Allocate(size);
- allocation_base = memory;
- allocation_length = size;
- return memory;
- }
-}
-
-static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
- DisallowHeapAllocation no_gc;
- JSObject** p = reinterpret_cast<JSObject**>(data.GetParameter());
- WasmInstanceObject* owner = reinterpret_cast<WasmInstanceObject*>(*p);
- Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
- // If a link to shared memory instances exists, update the list of memory
- // instances before the instance is destroyed.
- WasmCompiledModule* compiled_module = owner->compiled_module();
- TRACE("Finalizing %d {\n", compiled_module->instance_id());
- DCHECK(compiled_module->has_weak_wasm_module());
- WeakCell* weak_wasm_module = compiled_module->ptr_to_weak_wasm_module();
-
- if (trap_handler::UseTrapHandler()) {
- Handle<FixedArray> code_table = compiled_module->code_table();
- for (int i = 0; i < code_table->length(); ++i) {
- Handle<Code> code = code_table->GetValueChecked<Code>(isolate, i);
- int index = code->trap_handler_index()->value();
- if (index >= 0) {
- trap_handler::ReleaseHandlerData(index);
- code->set_trap_handler_index(Smi::FromInt(-1));
- }
- }
- }
-
- // Since the order of finalizers is not guaranteed, it can be the case
- // that {instance->compiled_module()->module()}, which is a
- // {Managed<WasmModule>} has been collected earlier in this GC cycle.
- // Weak references to this instance won't be cleared until
- // the next GC cycle, so we need to manually break some links (such as
- // the weak references from {WasmMemoryObject::instances}.
- if (owner->has_memory_object()) {
- Handle<WasmMemoryObject> memory(owner->memory_object(), isolate);
- Handle<WasmInstanceObject> instance(owner, isolate);
- WasmMemoryObject::RemoveInstance(isolate, memory, instance);
- }
-
- // weak_wasm_module may have been cleared, meaning the module object
- // was GC-ed. In that case, there won't be any new instances created,
- // and we don't need to maintain the links between instances.
- if (!weak_wasm_module->cleared()) {
- WasmModuleObject* wasm_module =
- WasmModuleObject::cast(weak_wasm_module->value());
- WasmCompiledModule* current_template = wasm_module->compiled_module();
-
- TRACE("chain before {\n");
- TRACE_CHAIN(current_template);
- TRACE("}\n");
-
- DCHECK(!current_template->has_weak_prev_instance());
- WeakCell* next = compiled_module->maybe_ptr_to_weak_next_instance();
- WeakCell* prev = compiled_module->maybe_ptr_to_weak_prev_instance();
-
- if (current_template == compiled_module) {
- if (next == nullptr) {
- WasmCompiledModule::Reset(isolate, compiled_module);
- } else {
- WasmCompiledModule* next_compiled_module =
- WasmCompiledModule::cast(next->value());
- WasmModuleObject::cast(wasm_module)
- ->set_compiled_module(next_compiled_module);
- DCHECK_NULL(prev);
- next_compiled_module->reset_weak_prev_instance();
- }
- } else {
- DCHECK(!(prev == nullptr && next == nullptr));
- // the only reason prev or next would be cleared is if the
- // respective objects got collected, but if that happened,
- // we would have relinked the list.
- if (prev != nullptr) {
- DCHECK(!prev->cleared());
- if (next == nullptr) {
- WasmCompiledModule::cast(prev->value())->reset_weak_next_instance();
- } else {
- WasmCompiledModule::cast(prev->value())
- ->set_ptr_to_weak_next_instance(next);
- }
- }
- if (next != nullptr) {
- DCHECK(!next->cleared());
- if (prev == nullptr) {
- WasmCompiledModule::cast(next->value())->reset_weak_prev_instance();
- } else {
- WasmCompiledModule::cast(next->value())
- ->set_ptr_to_weak_prev_instance(prev);
- }
- }
- }
- TRACE("chain after {\n");
- TRACE_CHAIN(wasm_module->compiled_module());
- TRACE("}\n");
- }
- compiled_module->reset_weak_owning_instance();
- GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
- TRACE("}\n");
-}
-
-int AdvanceSourcePositionTableIterator(SourcePositionTableIterator& iterator,
- int offset) {
- DCHECK(!iterator.done());
- int byte_pos;
- do {
- byte_pos = iterator.source_position().ScriptOffset();
- iterator.Advance();
- } while (!iterator.done() && iterator.code_offset() <= offset);
- return byte_pos;
-}
-
-void RecordLazyCodeStats(Code* code, Counters* counters) {
- counters->wasm_lazily_compiled_functions()->Increment();
- counters->wasm_generated_code_size()->Increment(code->body_size());
- counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
-}
-
-compiler::ModuleEnv CreateModuleEnvFromCompiledModule(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
- DisallowHeapAllocation no_gc;
- WasmModule* module = compiled_module->module();
-
- std::vector<GlobalHandleAddress> function_tables;
- std::vector<GlobalHandleAddress> signature_tables;
- std::vector<SignatureMap*> signature_maps;
-
- int num_function_tables = static_cast<int>(module->function_tables.size());
- for (int i = 0; i < num_function_tables; ++i) {
- FixedArray* ft = compiled_module->ptr_to_function_tables();
- FixedArray* st = compiled_module->ptr_to_signature_tables();
-
- // TODO(clemensh): defer these handles for concurrent compilation.
- function_tables.push_back(WasmCompiledModule::GetTableValue(ft, i));
- signature_tables.push_back(WasmCompiledModule::GetTableValue(st, i));
- signature_maps.push_back(&module->function_tables[i].map);
- }
-
- std::vector<Handle<Code>> empty_code;
-
- compiler::ModuleEnv result = {
- module, // --
- function_tables, // --
- signature_tables, // --
- signature_maps, // --
- empty_code, // --
- BUILTIN_CODE(isolate, WasmCompileLazy), // --
- reinterpret_cast<uintptr_t>( // --
- compiled_module->GetEmbeddedMemStartOrNull()), // --
- compiled_module->GetEmbeddedMemSizeOrZero(), // --
- reinterpret_cast<uintptr_t>( // --
- compiled_module->GetGlobalsStartOrNull()) // --
- };
- return result;
-}
-
-} // namespace
-
// static
-const WasmExceptionSig wasm::WasmException::empty_sig_(0, 0, nullptr);
-
-Handle<JSArrayBuffer> wasm::SetupArrayBuffer(
- Isolate* isolate, void* allocation_base, size_t allocation_length,
- void* backing_store, size_t size, bool is_external,
- bool enable_guard_regions, SharedFlag shared) {
- Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer(shared);
- DCHECK_GE(kMaxInt, size);
- if (shared == SharedFlag::kShared) DCHECK(FLAG_experimental_wasm_threads);
- JSArrayBuffer::Setup(buffer, isolate, is_external, allocation_base,
- allocation_length, backing_store, static_cast<int>(size),
- shared);
- buffer->set_is_neuterable(false);
- buffer->set_is_growable(true);
- buffer->set_has_guard_region(enable_guard_regions);
- return buffer;
-}
-
-Handle<JSArrayBuffer> wasm::NewArrayBuffer(Isolate* isolate, size_t size,
- bool enable_guard_regions,
- SharedFlag shared) {
- // Check against kMaxInt, since the byte length is stored as int in the
- // JSArrayBuffer. Note that wasm_max_mem_pages can be raised from the command
- // line, and we don't want to fail a CHECK then.
- if (size > FLAG_wasm_max_mem_pages * WasmModule::kPageSize ||
- size > kMaxInt) {
- // TODO(titzer): lift restriction on maximum memory allocated here.
- return Handle<JSArrayBuffer>::null();
- }
-
- enable_guard_regions = enable_guard_regions && kGuardRegionsSupported;
-
- void* allocation_base = nullptr; // Set by TryAllocateBackingStore
- size_t allocation_length = 0; // Set by TryAllocateBackingStore
- // Do not reserve memory till non zero memory is encountered.
- void* memory =
- (size == 0) ? nullptr
- : TryAllocateBackingStore(isolate, size, enable_guard_regions,
- allocation_base, allocation_length);
+const WasmExceptionSig WasmException::empty_sig_(0, 0, nullptr);
- if (size > 0 && memory == nullptr) {
- return Handle<JSArrayBuffer>::null();
- }
+// static
+constexpr const char* WasmException::kRuntimeIdStr;
-#if DEBUG
- // Double check the API allocator actually zero-initialized the memory.
- const byte* bytes = reinterpret_cast<const byte*>(memory);
- for (size_t i = 0; i < size; ++i) {
- DCHECK_EQ(0, bytes[i]);
- }
-#endif
+// static
+constexpr const char* WasmException::kRuntimeValuesStr;
- constexpr bool is_external = false;
- return SetupArrayBuffer(isolate, allocation_base, allocation_length, memory,
- size, is_external, enable_guard_regions, shared);
-}
+void UnpackAndRegisterProtectedInstructions(Isolate* isolate,
+ Handle<FixedArray> code_table) {
+ DisallowHeapAllocation no_gc;
+ std::vector<trap_handler::ProtectedInstructionData> unpacked;
-void wasm::UnpackAndRegisterProtectedInstructions(
- Isolate* isolate, Handle<FixedArray> code_table) {
for (int i = 0; i < code_table->length(); ++i) {
- Handle<Code> code;
+ Object* maybe_code = code_table->get(i);
// This is sometimes undefined when we're called from cctests.
- if (!code_table->GetValue<Code>(isolate, i).ToHandle(&code)) {
+ if (maybe_code->IsUndefined(isolate)) continue;
+ Code* code = Code::cast(maybe_code);
+
+ if (code->kind() != Code::WASM_FUNCTION) {
continue;
}
- if (code->kind() != Code::WASM_FUNCTION) {
+ if (code->trap_handler_index()->value() != trap_handler::kInvalidIndex) {
+ // This function has already been registered.
continue;
}
- const intptr_t base = reinterpret_cast<intptr_t>(code->entry());
+ byte* base = code->entry();
- Zone zone(isolate->allocator(), "Wasm Module");
- ZoneVector<trap_handler::ProtectedInstructionData> unpacked(&zone);
const int mode_mask =
RelocInfo::ModeMask(RelocInfo::WASM_PROTECTED_INSTRUCTION_LANDING);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
trap_handler::ProtectedInstructionData data;
- data.instr_offset = it.rinfo()->data();
- data.landing_offset = reinterpret_cast<intptr_t>(it.rinfo()->pc()) - base;
+ data.instr_offset = static_cast<uint32_t>(it.rinfo()->data());
+ data.landing_offset = static_cast<uint32_t>(it.rinfo()->pc() - base);
+ // Check that no over-/underflow happened.
+ DCHECK_EQ(it.rinfo()->data(), data.instr_offset);
+ DCHECK_EQ(it.rinfo()->pc() - base, data.landing_offset);
unpacked.emplace_back(data);
}
- if (unpacked.size() > 0) {
- int size = code->CodeSize();
- const int index = RegisterHandlerData(reinterpret_cast<void*>(base), size,
- unpacked.size(), &unpacked[0]);
- // TODO(eholk): if index is negative, fail.
- DCHECK(index >= 0);
- code->set_trap_handler_index(Smi::FromInt(index));
- }
+ if (unpacked.empty()) continue;
+
+ int size = code->CodeSize();
+ const int index = RegisterHandlerData(reinterpret_cast<void*>(base), size,
+ unpacked.size(), &unpacked[0]);
+ unpacked.clear();
+ // TODO(eholk): if index is negative, fail.
+ DCHECK_LE(0, index);
+ code->set_trap_handler_index(Smi::FromInt(index));
}
}
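For context, each ProtectedInstructionData pair maps a potentially faulting memory access to the landing pad the trap handler resumes at; RegisterHandlerData hands the whole batch to the trap handler once per code object. A hedged sketch of the shape of that record (the field names appear in the hunk above, the struct layout is assumed):

#include <cstdint>
#include <vector>

// Sketch: per-instruction trap-handler metadata, collected per code object.
struct ProtectedInstructionData {
  uint32_t instr_offset;    // offset of the protected (faulting) instruction
  uint32_t landing_offset;  // offset of the landing pad to resume at
};

using ProtectedInstructions = std::vector<ProtectedInstructionData>;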
-std::ostream& wasm::operator<<(std::ostream& os, const WasmFunctionName& name) {
+std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name) {
os << "#" << name.function_->func_index;
if (name.function_->name.is_set()) {
if (name.name_.start()) {
@@ -359,25 +112,11 @@ std::ostream& wasm::operator<<(std::ostream& os, const WasmFunctionName& name) {
return os;
}
-WasmInstanceObject* wasm::GetOwningWasmInstance(Code* code) {
- DisallowHeapAllocation no_gc;
- DCHECK(code->kind() == Code::WASM_FUNCTION ||
- code->kind() == Code::WASM_INTERPRETER_ENTRY);
- FixedArray* deopt_data = code->deoptimization_data();
- DCHECK_EQ(code->kind() == Code::WASM_INTERPRETER_ENTRY ? 1 : 2,
- deopt_data->length());
- Object* weak_link = deopt_data->get(0);
- DCHECK(weak_link->IsWeakCell());
- WeakCell* cell = WeakCell::cast(weak_link);
- if (cell->cleared()) return nullptr;
- return WasmInstanceObject::cast(cell->value());
-}
-
WasmModule::WasmModule(std::unique_ptr<Zone> owned)
: signature_zone(std::move(owned)) {}
-WasmFunction* wasm::GetWasmFunctionForImportWrapper(Isolate* isolate,
- Handle<Object> target) {
+WasmFunction* GetWasmFunctionForExport(Isolate* isolate,
+ Handle<Object> target) {
if (target->IsJSFunction()) {
Handle<JSFunction> func = Handle<JSFunction>::cast(target);
if (func->code()->kind() == Code::JS_TO_WASM_FUNCTION) {
@@ -390,9 +129,9 @@ WasmFunction* wasm::GetWasmFunctionForImportWrapper(Isolate* isolate,
return nullptr;
}
-Handle<Code> wasm::UnwrapImportWrapper(Handle<Object> import_wrapper) {
- Handle<JSFunction> func = Handle<JSFunction>::cast(import_wrapper);
- Handle<Code> export_wrapper_code = handle(func->code());
+Handle<Code> UnwrapExportWrapper(Handle<JSFunction> export_wrapper) {
+ Handle<Code> export_wrapper_code = handle(export_wrapper->code());
+ DCHECK_EQ(export_wrapper_code->kind(), Code::JS_TO_WASM_FUNCTION);
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
for (RelocIterator it(*export_wrapper_code, mask);; it.next()) {
DCHECK(!it.done());
@@ -415,9 +154,9 @@ Handle<Code> wasm::UnwrapImportWrapper(Handle<Object> import_wrapper) {
UNREACHABLE();
}
-void wasm::UpdateDispatchTables(Isolate* isolate,
- Handle<FixedArray> dispatch_tables, int index,
- WasmFunction* function, Handle<Code> code) {
+void UpdateDispatchTables(Isolate* isolate, Handle<FixedArray> dispatch_tables,
+ int index, WasmFunction* function,
+ Handle<Code> code) {
DCHECK_EQ(0, dispatch_tables->length() % 4);
for (int i = 0; i < dispatch_tables->length(); i += 4) {
int table_index = Smi::ToInt(dispatch_tables->get(i + 1));
@@ -441,41 +180,7 @@ void wasm::UpdateDispatchTables(Isolate* isolate,
}
}
-
-void wasm::TableSet(ErrorThrower* thrower, Isolate* isolate,
- Handle<WasmTableObject> table, int64_t index,
- Handle<JSFunction> function) {
- Handle<FixedArray> array(table->functions(), isolate);
-
- if (index < 0 || index >= array->length()) {
- thrower->RangeError("index out of bounds");
- return;
- }
- int index32 = static_cast<int>(index);
-
- Handle<FixedArray> dispatch_tables(table->dispatch_tables(), isolate);
-
- WasmFunction* wasm_function = nullptr;
- Handle<Code> code = Handle<Code>::null();
- Handle<Object> value = handle(isolate->heap()->null_value());
-
- if (!function.is_null()) {
- wasm_function = GetWasmFunctionForImportWrapper(isolate, function);
- code = UnwrapImportWrapper(function);
- value = Handle<Object>::cast(function);
- }
-
- UpdateDispatchTables(isolate, dispatch_tables, index32, wasm_function, code);
- array->set(index32, *value);
-}
-
-Handle<Script> wasm::GetScript(Handle<JSObject> instance) {
- WasmCompiledModule* compiled_module =
- WasmInstanceObject::cast(*instance)->compiled_module();
- return handle(compiled_module->script());
-}
-
-bool wasm::IsWasmCodegenAllowed(Isolate* isolate, Handle<Context> context) {
+bool IsWasmCodegenAllowed(Isolate* isolate, Handle<Context> context) {
// TODO(wasm): Once wasm has its own CSP policy, we should introduce a
// separate callback that includes information about the module about to be
// compiled. For the time being, pass an empty string as placeholder for the
@@ -486,75 +191,8 @@ bool wasm::IsWasmCodegenAllowed(Isolate* isolate, Handle<Context> context) {
v8::Utils::ToLocal(isolate->factory()->empty_string()));
}
-void wasm::DetachWebAssemblyMemoryBuffer(Isolate* isolate,
- Handle<JSArrayBuffer> buffer,
- bool free_memory) {
- const bool is_external = buffer->is_external();
- DCHECK(!buffer->is_neuterable());
- if (!is_external) {
- buffer->set_is_external(true);
- isolate->heap()->UnregisterArrayBuffer(*buffer);
- if (free_memory) {
- // We need to free the memory before neutering the buffer because
- // FreeBackingStore reads buffer->allocation_base(), which is nulled out
- // by Neuter. This means there is a dangling pointer until we neuter the
- // buffer. Since there is no way for the user to directly call
- // FreeBackingStore, we can ensure this is safe.
- buffer->FreeBackingStore();
- }
- }
- buffer->set_is_neuterable(true);
- buffer->Neuter();
-}
-
-void testing::ValidateInstancesChain(Isolate* isolate,
- Handle<WasmModuleObject> module_obj,
- int instance_count) {
- CHECK_GE(instance_count, 0);
- DisallowHeapAllocation no_gc;
- WasmCompiledModule* compiled_module = module_obj->compiled_module();
- CHECK_EQ(JSObject::cast(compiled_module->ptr_to_weak_wasm_module()->value()),
- *module_obj);
- Object* prev = nullptr;
- int found_instances = compiled_module->has_weak_owning_instance() ? 1 : 0;
- WasmCompiledModule* current_instance = compiled_module;
- while (current_instance->has_weak_next_instance()) {
- CHECK((prev == nullptr && !current_instance->has_weak_prev_instance()) ||
- current_instance->ptr_to_weak_prev_instance()->value() == prev);
- CHECK_EQ(current_instance->ptr_to_weak_wasm_module()->value(), *module_obj);
- CHECK(current_instance->ptr_to_weak_owning_instance()
- ->value()
- ->IsWasmInstanceObject());
- prev = current_instance;
- current_instance = WasmCompiledModule::cast(
- current_instance->ptr_to_weak_next_instance()->value());
- ++found_instances;
- CHECK_LE(found_instances, instance_count);
- }
- CHECK_EQ(found_instances, instance_count);
-}
-
-void testing::ValidateModuleState(Isolate* isolate,
- Handle<WasmModuleObject> module_obj) {
- DisallowHeapAllocation no_gc;
- WasmCompiledModule* compiled_module = module_obj->compiled_module();
- CHECK(compiled_module->has_weak_wasm_module());
- CHECK_EQ(compiled_module->ptr_to_weak_wasm_module()->value(), *module_obj);
- CHECK(!compiled_module->has_weak_prev_instance());
- CHECK(!compiled_module->has_weak_next_instance());
- CHECK(!compiled_module->has_weak_owning_instance());
-}
-
-void testing::ValidateOrphanedInstance(Isolate* isolate,
- Handle<WasmInstanceObject> instance) {
- DisallowHeapAllocation no_gc;
- WasmCompiledModule* compiled_module = instance->compiled_module();
- CHECK(compiled_module->has_weak_wasm_module());
- CHECK(compiled_module->ptr_to_weak_wasm_module()->cleared());
-}
-
-Handle<JSArray> wasm::GetImports(Isolate* isolate,
- Handle<WasmModuleObject> module_object) {
+Handle<JSArray> GetImports(Isolate* isolate,
+ Handle<WasmModuleObject> module_object) {
Handle<WasmCompiledModule> compiled_module(module_object->compiled_module(),
isolate);
Factory* factory = isolate->factory();
@@ -623,8 +261,8 @@ Handle<JSArray> wasm::GetImports(Isolate* isolate,
return array_object;
}
-Handle<JSArray> wasm::GetExports(Isolate* isolate,
- Handle<WasmModuleObject> module_object) {
+Handle<JSArray> GetExports(Isolate* isolate,
+ Handle<WasmModuleObject> module_object) {
Handle<WasmCompiledModule> compiled_module(module_object->compiled_module(),
isolate);
Factory* factory = isolate->factory();
@@ -686,10 +324,9 @@ Handle<JSArray> wasm::GetExports(Isolate* isolate,
return array_object;
}
-Handle<JSArray> wasm::GetCustomSections(Isolate* isolate,
- Handle<WasmModuleObject> module_object,
- Handle<String> name,
- ErrorThrower* thrower) {
+Handle<JSArray> GetCustomSections(Isolate* isolate,
+ Handle<WasmModuleObject> module_object,
+ Handle<String> name, ErrorThrower* thrower) {
Handle<WasmCompiledModule> compiled_module(module_object->compiled_module(),
isolate);
Factory* factory = isolate->factory();
@@ -751,15 +388,15 @@ Handle<JSArray> wasm::GetCustomSections(Isolate* isolate,
return array_object;
}
-Handle<FixedArray> wasm::DecodeLocalNames(
+Handle<FixedArray> DecodeLocalNames(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
Handle<SeqOneByteString> wire_bytes(compiled_module->module_bytes(), isolate);
LocalNames decoded_locals;
{
DisallowHeapAllocation no_gc;
- wasm::DecodeLocalNames(wire_bytes->GetChars(),
- wire_bytes->GetChars() + wire_bytes->length(),
- &decoded_locals);
+ DecodeLocalNames(wire_bytes->GetChars(),
+ wire_bytes->GetChars() + wire_bytes->length(),
+ &decoded_locals);
}
Handle<FixedArray> locals_names =
isolate->factory()->NewFixedArray(decoded_locals.max_function_index + 1);
@@ -778,379 +415,7 @@ Handle<FixedArray> wasm::DecodeLocalNames(
return locals_names;
}
-bool wasm::SyncValidate(Isolate* isolate, const ModuleWireBytes& bytes) {
- if (bytes.start() == nullptr || bytes.length() == 0) return false;
- ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
- bytes.end(), true, kWasmOrigin);
- return result.ok();
-}
-
-MaybeHandle<WasmModuleObject> wasm::SyncCompileTranslatedAsmJs(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
- Handle<Script> asm_js_script,
- Vector<const byte> asm_js_offset_table_bytes) {
- ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
- bytes.end(), false, kAsmJsOrigin);
- if (result.failed()) {
- thrower->CompileFailed("Wasm decoding failed", result);
- return {};
- }
-
- // Transfer ownership to the {WasmModuleWrapper} generated in
- // {CompileToModuleObject}.
- Handle<Code> centry_stub = CEntryStub(isolate, 1).GetCode();
- ModuleCompiler compiler(isolate, std::move(result.val), centry_stub);
- return compiler.CompileToModuleObject(thrower, bytes, asm_js_script,
- asm_js_offset_table_bytes);
-}
-
-MaybeHandle<WasmModuleObject> wasm::SyncCompile(Isolate* isolate,
- ErrorThrower* thrower,
- const ModuleWireBytes& bytes) {
- if (!IsWasmCodegenAllowed(isolate, isolate->native_context())) {
- thrower->CompileError("Wasm code generation disallowed in this context");
- return {};
- }
-
- // TODO(titzer): only make a copy of the bytes if SharedArrayBuffer
- std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
- memcpy(copy.get(), bytes.start(), bytes.length());
- ModuleWireBytes bytes_copy(copy.get(), copy.get() + bytes.length());
-
- ModuleResult result = SyncDecodeWasmModule(
- isolate, bytes_copy.start(), bytes_copy.end(), false, kWasmOrigin);
- if (result.failed()) {
- thrower->CompileFailed("Wasm decoding failed", result);
- return {};
- }
-
- // Transfer ownership to the {WasmModuleWrapper} generated in
- // {CompileToModuleObject}.
- Handle<Code> centry_stub = CEntryStub(isolate, 1).GetCode();
- ModuleCompiler compiler(isolate, std::move(result.val), centry_stub);
- return compiler.CompileToModuleObject(thrower, bytes_copy, Handle<Script>(),
- Vector<const byte>());
-}
-
-MaybeHandle<WasmInstanceObject> wasm::SyncInstantiate(
- Isolate* isolate, ErrorThrower* thrower,
- Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
- MaybeHandle<JSArrayBuffer> memory) {
- InstanceBuilder builder(isolate, thrower, module_object, imports, memory,
- &InstanceFinalizer);
- return builder.Build();
-}
-
-MaybeHandle<WasmInstanceObject> wasm::SyncCompileAndInstantiate(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
- MaybeHandle<JSReceiver> imports, MaybeHandle<JSArrayBuffer> memory) {
- MaybeHandle<WasmModuleObject> module =
- wasm::SyncCompile(isolate, thrower, bytes);
- DCHECK_EQ(thrower->error(), module.is_null());
- if (module.is_null()) return {};
-
- return wasm::SyncInstantiate(isolate, thrower, module.ToHandleChecked(),
- Handle<JSReceiver>::null(),
- Handle<JSArrayBuffer>::null());
-}
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-void RejectPromise(Isolate* isolate, Handle<Context> context,
- ErrorThrower& thrower, Handle<JSPromise> promise) {
- v8::Local<v8::Promise::Resolver> resolver =
- v8::Utils::PromiseToLocal(promise).As<v8::Promise::Resolver>();
- auto maybe = resolver->Reject(v8::Utils::ToLocal(context),
- v8::Utils::ToLocal(thrower.Reify()));
- CHECK_IMPLIES(!maybe.FromMaybe(false), isolate->has_scheduled_exception());
-}
-
-void ResolvePromise(Isolate* isolate, Handle<Context> context,
- Handle<JSPromise> promise, Handle<Object> result) {
- v8::Local<v8::Promise::Resolver> resolver =
- v8::Utils::PromiseToLocal(promise).As<v8::Promise::Resolver>();
- auto maybe = resolver->Resolve(v8::Utils::ToLocal(context),
- v8::Utils::ToLocal(result));
- CHECK_IMPLIES(!maybe.FromMaybe(false), isolate->has_scheduled_exception());
-}
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-void wasm::AsyncInstantiate(Isolate* isolate, Handle<JSPromise> promise,
- Handle<WasmModuleObject> module_object,
- MaybeHandle<JSReceiver> imports) {
- ErrorThrower thrower(isolate, nullptr);
- MaybeHandle<WasmInstanceObject> instance_object = SyncInstantiate(
- isolate, &thrower, module_object, imports, Handle<JSArrayBuffer>::null());
- if (thrower.error()) {
- RejectPromise(isolate, handle(isolate->context()), thrower, promise);
- return;
- }
- ResolvePromise(isolate, handle(isolate->context()), promise,
- instance_object.ToHandleChecked());
-}
-
-void wasm::AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
- const ModuleWireBytes& bytes) {
- if (!FLAG_wasm_async_compilation) {
- ErrorThrower thrower(isolate, "WasmCompile");
- // Compile the module.
- MaybeHandle<WasmModuleObject> module_object =
- SyncCompile(isolate, &thrower, bytes);
- if (thrower.error()) {
- RejectPromise(isolate, handle(isolate->context()), thrower, promise);
- return;
- }
- Handle<WasmModuleObject> module = module_object.ToHandleChecked();
- ResolvePromise(isolate, handle(isolate->context()), promise, module);
- return;
- }
-
- // Make a copy of the wire bytes in case the user program changes them
- // during asynchronous compilation.
- std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
- memcpy(copy.get(), bytes.start(), bytes.length());
- isolate->wasm_compilation_manager()->StartAsyncCompileJob(
- isolate, std::move(copy), bytes.length(), handle(isolate->context()),
- promise);
-}
-
-Handle<Code> wasm::CompileLazy(Isolate* isolate) {
- HistogramTimerScope lazy_time_scope(
- isolate->counters()->wasm_lazy_compilation_time());
-
- // Find the wasm frame which triggered the lazy compile, to get the wasm
- // instance.
- StackFrameIterator it(isolate);
- // First frame: C entry stub.
- DCHECK(!it.done());
- DCHECK_EQ(StackFrame::EXIT, it.frame()->type());
- it.Advance();
- // Second frame: WasmCompileLazy builtin.
- DCHECK(!it.done());
- Handle<Code> lazy_compile_code(it.frame()->LookupCode(), isolate);
- DCHECK_EQ(Builtins::kWasmCompileLazy, lazy_compile_code->builtin_index());
- Handle<WasmInstanceObject> instance;
- Handle<FixedArray> exp_deopt_data;
- int func_index = -1;
- if (lazy_compile_code->deoptimization_data()->length() > 0) {
- // Then it's an indirect call or via JS->wasm wrapper.
- DCHECK_LE(2, lazy_compile_code->deoptimization_data()->length());
- exp_deopt_data = handle(lazy_compile_code->deoptimization_data(), isolate);
- auto* weak_cell = WeakCell::cast(exp_deopt_data->get(0));
- instance = handle(WasmInstanceObject::cast(weak_cell->value()), isolate);
- func_index = Smi::ToInt(exp_deopt_data->get(1));
- }
- it.Advance();
- // Third frame: The calling wasm code or js-to-wasm wrapper.
- DCHECK(!it.done());
- DCHECK(it.frame()->is_js_to_wasm() || it.frame()->is_wasm_compiled());
- Handle<Code> caller_code = handle(it.frame()->LookupCode(), isolate);
- if (it.frame()->is_js_to_wasm()) {
- DCHECK(!instance.is_null());
- } else if (instance.is_null()) {
- // Then this is a direct call (otherwise we would have attached the instance
- // via deopt data to the lazy compile stub). Just use the instance of the
- // caller.
- instance = handle(wasm::GetOwningWasmInstance(*caller_code), isolate);
- }
- int offset =
- static_cast<int>(it.frame()->pc() - caller_code->instruction_start());
- // Only patch the caller code if this is *no* indirect call.
- // exp_deopt_data will be null if the called function is not exported at all,
- // and its length will be <= 2 if all entries in tables were already patched.
- // Note that this check is conservative: If the first call to an exported
- // function is direct, we will just patch the export tables, and only on the
- // second call we will patch the caller.
- bool patch_caller = caller_code->kind() == Code::JS_TO_WASM_FUNCTION ||
- exp_deopt_data.is_null() || exp_deopt_data->length() <= 2;
-
- Handle<Code> compiled_code = WasmCompiledModule::CompileLazy(
- isolate, instance, caller_code, offset, func_index, patch_caller);
- if (!exp_deopt_data.is_null() && exp_deopt_data->length() > 2) {
- // See EnsureExportedLazyDeoptData: exp_deopt_data[2...(len-1)] are pairs of
- // <export_table, index> followed by undefined values.
- // Use this information here to patch all export tables.
- DCHECK_EQ(0, exp_deopt_data->length() % 2);
- for (int idx = 2, end = exp_deopt_data->length(); idx < end; idx += 2) {
- if (exp_deopt_data->get(idx)->IsUndefined(isolate)) break;
- FixedArray* exp_table = FixedArray::cast(exp_deopt_data->get(idx));
- int exp_index = Smi::ToInt(exp_deopt_data->get(idx + 1));
- DCHECK(exp_table->get(exp_index) == *lazy_compile_code);
- exp_table->set(exp_index, *compiled_code);
- }
- // After processing, remove the list of exported entries, such that we don't
- // do the patching redundantly.
- Handle<FixedArray> new_deopt_data =
- isolate->factory()->CopyFixedArrayUpTo(exp_deopt_data, 2, TENURED);
- lazy_compile_code->set_deoptimization_data(*new_deopt_data);
- }
-
- return compiled_code;
-}
-
-void LazyCompilationOrchestrator::CompileFunction(
- Isolate* isolate, Handle<WasmInstanceObject> instance, int func_index) {
- Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
- isolate);
- if (Code::cast(compiled_module->code_table()->get(func_index))->kind() ==
- Code::WASM_FUNCTION) {
- return;
- }
-
- compiler::ModuleEnv module_env =
- CreateModuleEnvFromCompiledModule(isolate, compiled_module);
-
- const uint8_t* module_start = compiled_module->module_bytes()->GetChars();
-
- const WasmFunction* func = &module_env.module->functions[func_index];
- wasm::FunctionBody body{func->sig, func->code.offset(),
- module_start + func->code.offset(),
- module_start + func->code.end_offset()};
- // TODO(wasm): Refactor this to only get the name if it is really needed for
- // tracing / debugging.
- std::string func_name;
- {
- wasm::WasmName name = Vector<const char>::cast(
- compiled_module->GetRawFunctionName(func_index));
- // Copy to std::string, because the underlying string object might move on
- // the heap.
- func_name.assign(name.start(), static_cast<size_t>(name.length()));
- }
- ErrorThrower thrower(isolate, "WasmLazyCompile");
- compiler::WasmCompilationUnit unit(isolate, &module_env, body,
- CStrVector(func_name.c_str()), func_index,
- CEntryStub(isolate, 1).GetCode());
- unit.ExecuteCompilation();
- MaybeHandle<Code> maybe_code = unit.FinishCompilation(&thrower);
-
- // If there is a pending error, something really went wrong. The module was
- // verified before starting execution with lazy compilation.
- // This might be OOM, but then we cannot continue execution anyway.
- // TODO(clemensh): According to the spec, we can actually skip validation at
- // module creation time, and return a function that always traps here.
- CHECK(!thrower.error());
- Handle<Code> code = maybe_code.ToHandleChecked();
-
- Handle<FixedArray> deopt_data = isolate->factory()->NewFixedArray(2, TENURED);
- Handle<WeakCell> weak_instance = isolate->factory()->NewWeakCell(instance);
- // TODO(wasm): Introduce constants for the indexes in wasm deopt data.
- deopt_data->set(0, *weak_instance);
- deopt_data->set(1, Smi::FromInt(func_index));
- code->set_deoptimization_data(*deopt_data);
-
- DCHECK_EQ(Builtins::kWasmCompileLazy,
- Code::cast(compiled_module->code_table()->get(func_index))
- ->builtin_index());
- compiled_module->code_table()->set(func_index, *code);
-
- // Now specialize the generated code for this instance.
- Zone specialization_zone(isolate->allocator(), ZONE_NAME);
- CodeSpecialization code_specialization(isolate, &specialization_zone);
- code_specialization.RelocateDirectCalls(instance);
- code_specialization.ApplyToWasmCode(*code, SKIP_ICACHE_FLUSH);
- Assembler::FlushICache(isolate, code->instruction_start(),
- code->instruction_size());
- RecordLazyCodeStats(*code, isolate->counters());
-}
-
-Handle<Code> LazyCompilationOrchestrator::CompileLazy(
- Isolate* isolate, Handle<WasmInstanceObject> instance, Handle<Code> caller,
- int call_offset, int exported_func_index, bool patch_caller) {
- struct NonCompiledFunction {
- int offset;
- int func_index;
- };
- std::vector<NonCompiledFunction> non_compiled_functions;
- int func_to_return_idx = exported_func_index;
- wasm::Decoder decoder(nullptr, nullptr);
- bool is_js_to_wasm = caller->kind() == Code::JS_TO_WASM_FUNCTION;
- Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
- isolate);
-
- if (is_js_to_wasm) {
- non_compiled_functions.push_back({0, exported_func_index});
- } else if (patch_caller) {
- DisallowHeapAllocation no_gc;
- SeqOneByteString* module_bytes = compiled_module->module_bytes();
- SourcePositionTableIterator source_pos_iterator(
- caller->SourcePositionTable());
- DCHECK_EQ(2, caller->deoptimization_data()->length());
- int caller_func_index = Smi::ToInt(caller->deoptimization_data()->get(1));
- const byte* func_bytes =
- module_bytes->GetChars() +
- compiled_module->module()->functions[caller_func_index].code.offset();
- for (RelocIterator it(*caller, RelocInfo::kCodeTargetMask); !it.done();
- it.next()) {
- Code* callee =
- Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- if (callee->builtin_index() != Builtins::kWasmCompileLazy) continue;
- // TODO(clemensh): Introduce safe_cast<T, bool> which (D)CHECKS
- // (depending on the bool) against limits of T and then static_casts.
- size_t offset_l = it.rinfo()->pc() - caller->instruction_start();
- DCHECK_GE(kMaxInt, offset_l);
- int offset = static_cast<int>(offset_l);
- int byte_pos =
- AdvanceSourcePositionTableIterator(source_pos_iterator, offset);
- int called_func_index =
- ExtractDirectCallIndex(decoder, func_bytes + byte_pos);
- non_compiled_functions.push_back({offset, called_func_index});
- // Call offset one instruction after the call. Remember the last called
- // function before that offset.
- if (offset < call_offset) func_to_return_idx = called_func_index;
- }
- }
-
- // TODO(clemensh): compile all functions in non_compiled_functions in
- // background, wait for func_to_return_idx.
- CompileFunction(isolate, instance, func_to_return_idx);
-
- if (is_js_to_wasm || patch_caller) {
- DisallowHeapAllocation no_gc;
- // Now patch the code object with all functions which are now compiled.
- int idx = 0;
- for (RelocIterator it(*caller, RelocInfo::kCodeTargetMask); !it.done();
- it.next()) {
- Code* callee =
- Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- if (callee->builtin_index() != Builtins::kWasmCompileLazy) continue;
- DCHECK_GT(non_compiled_functions.size(), idx);
- int called_func_index = non_compiled_functions[idx].func_index;
- // Check that the callee agrees with our assumed called_func_index.
- DCHECK_IMPLIES(callee->deoptimization_data()->length() > 0,
- Smi::ToInt(callee->deoptimization_data()->get(1)) ==
- called_func_index);
- if (is_js_to_wasm) {
- DCHECK_EQ(func_to_return_idx, called_func_index);
- } else {
- DCHECK_EQ(non_compiled_functions[idx].offset,
- it.rinfo()->pc() - caller->instruction_start());
- }
- ++idx;
- Handle<Code> callee_compiled(
- Code::cast(compiled_module->code_table()->get(called_func_index)));
- if (callee_compiled->builtin_index() == Builtins::kWasmCompileLazy) {
- DCHECK_NE(func_to_return_idx, called_func_index);
- continue;
- }
- DCHECK_EQ(Code::WASM_FUNCTION, callee_compiled->kind());
- it.rinfo()->set_target_address(isolate,
- callee_compiled->instruction_start());
- }
- DCHECK_EQ(non_compiled_functions.size(), idx);
- }
-
- Code* ret =
- Code::cast(compiled_module->code_table()->get(func_to_return_idx));
- DCHECK_EQ(Code::WASM_FUNCTION, ret->kind());
- return handle(ret, isolate);
-}
-
-const char* wasm::ExternalKindName(WasmExternalKind kind) {
+const char* ExternalKindName(WasmExternalKind kind) {
switch (kind) {
case kExternalFunction:
return "function";
@@ -1163,3 +428,11 @@ const char* wasm::ExternalKindName(WasmExternalKind kind) {
}
return "unknown";
}
+
+#undef TRACE
+#undef TRACE_CHAIN
+#undef TRACE_COMPILE
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 63edd5f865..a45d421ee8 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -7,13 +7,13 @@
#include <memory>
-#include "src/api.h"
#include "src/debug/debug-interface.h"
#include "src/globals.h"
#include "src/handles.h"
#include "src/managed.h"
#include "src/parsing/preparse-data.h"
+#include "src/wasm/decoder.h"
#include "src/wasm/signature-map.h"
#include "src/wasm/wasm-opcodes.h"
@@ -41,56 +41,6 @@ enum WasmExternalKind {
kExternalGlobal = 3
};
-// Representation of an initializer expression.
-struct WasmInitExpr {
- enum WasmInitKind {
- kNone,
- kGlobalIndex,
- kI32Const,
- kI64Const,
- kF32Const,
- kF64Const
- } kind;
-
- union {
- int32_t i32_const;
- int64_t i64_const;
- float f32_const;
- double f64_const;
- uint32_t global_index;
- } val;
-
- WasmInitExpr() : kind(kNone) {}
- explicit WasmInitExpr(int32_t v) : kind(kI32Const) { val.i32_const = v; }
- explicit WasmInitExpr(int64_t v) : kind(kI64Const) { val.i64_const = v; }
- explicit WasmInitExpr(float v) : kind(kF32Const) { val.f32_const = v; }
- explicit WasmInitExpr(double v) : kind(kF64Const) { val.f64_const = v; }
- WasmInitExpr(WasmInitKind kind, uint32_t global_index) : kind(kGlobalIndex) {
- val.global_index = global_index;
- }
-};
-
-// Reference to a string in the wire bytes.
-class WireBytesRef {
- public:
- WireBytesRef() : WireBytesRef(0, 0) {}
- WireBytesRef(uint32_t offset, uint32_t length)
- : offset_(offset), length_(length) {
- DCHECK_IMPLIES(offset_ == 0, length_ == 0);
- DCHECK_LE(offset_, offset_ + length_); // no uint32_t overflow.
- }
-
- uint32_t offset() const { return offset_; }
- uint32_t length() const { return length_; }
- uint32_t end_offset() const { return offset_ + length_; }
- bool is_empty() const { return length_ == 0; }
- bool is_set() const { return offset_ != 0; }
-
- private:
- uint32_t offset_;
- uint32_t length_;
-};
-
// Static representation of a wasm function.
struct WasmFunction {
FunctionSig* sig; // signature of the function.
@@ -119,9 +69,14 @@ typedef FunctionSig WasmExceptionSig;
struct WasmException {
explicit WasmException(const WasmExceptionSig* sig = &empty_sig_)
: sig(sig) {}
+ FunctionSig* ToFunctionSig() const { return const_cast<FunctionSig*>(sig); }
const WasmExceptionSig* sig; // type signature of the exception.
+ // Used to hold data on runtime exceptions.
+ static constexpr const char* kRuntimeIdStr = "WasmExceptionRuntimeId";
+ static constexpr const char* kRuntimeValuesStr = "WasmExceptionValues";
+
private:
static const WasmExceptionSig empty_sig_;
};
@@ -184,13 +139,16 @@ struct V8_EXPORT_PRIVATE WasmModule {
static const uint32_t kPageSize = 0x10000; // Page size, 64kb.
static const uint32_t kMinMemPages = 1; // Minimum memory size = 64kb
+ static constexpr int kInvalidExceptionTag = -1;
+
std::unique_ptr<Zone> signature_zone;
uint32_t initial_pages = 0; // initial size of the memory in 64k pages
uint32_t maximum_pages = 0; // maximum size of the memory in 64k pages
+ bool has_shared_memory = false; // true if memory is a SharedArrayBuffer
bool has_maximum_pages = false; // true if there is a maximum memory size
- bool has_memory = false; // true if the memory was defined or imported
- bool mem_export = false; // true if the memory is exported
- int start_function_index = -1; // start function, >= 0 if any
+ bool has_memory = false; // true if the memory was defined or imported
+ bool mem_export = false; // true if the memory is exported
+ int start_function_index = -1; // start function, >= 0 if any
std::vector<WasmGlobal> globals;
uint32_t globals_size = 0;
@@ -272,12 +230,13 @@ struct V8_EXPORT_PRIVATE ModuleWireBytes {
function->code.end_offset());
}
+ Vector<const byte> module_bytes() const { return module_bytes_; }
const byte* start() const { return module_bytes_.start(); }
const byte* end() const { return module_bytes_.end(); }
size_t length() const { return module_bytes_.length(); }
private:
- const Vector<const byte> module_bytes_;
+ Vector<const byte> module_bytes_;
};
// A helper for printing out the names of functions.
@@ -295,11 +254,6 @@ std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name);
// If no debug info exists yet, it is created automatically.
Handle<WasmDebugInfo> GetDebugInfo(Handle<JSObject> wasm);
-// Get the script of the wasm module. If the origin of the module is asm.js, the
-// returned Script will be a JavaScript Script of Script::TYPE_NORMAL, otherwise
-// it's of type TYPE_WASM.
-Handle<Script> GetScript(Handle<JSObject> instance);
-
V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> CreateModuleObjectFromBytes(
Isolate* isolate, const byte* start, const byte* end, ErrorThrower* thrower,
ModuleOrigin origin, Handle<Script> asm_js_script,
@@ -321,117 +275,21 @@ V8_EXPORT_PRIVATE Handle<JSArray> GetCustomSections(
// function index, the inner one by the local index.
Handle<FixedArray> DecodeLocalNames(Isolate*, Handle<WasmCompiledModule>);
-// Assumed to be called with a code object associated to a wasm module instance.
-// Intended to be called from runtime functions.
-// Returns nullptr on failing to get owning instance.
-WasmInstanceObject* GetOwningWasmInstance(Code* code);
-
-Handle<JSArrayBuffer> NewArrayBuffer(
- Isolate*, size_t size, bool enable_guard_regions,
- SharedFlag shared = SharedFlag::kNotShared);
-
-Handle<JSArrayBuffer> SetupArrayBuffer(
- Isolate*, void* allocation_base, size_t allocation_length,
- void* backing_store, size_t size, bool is_external,
- bool enable_guard_regions, SharedFlag shared = SharedFlag::kNotShared);
-
-void DetachWebAssemblyMemoryBuffer(Isolate* isolate,
- Handle<JSArrayBuffer> buffer,
- bool free_memory);
-
+// If the target is an export wrapper, return the {WasmFunction*} corresponding
+// to the wrapped wasm function; in all other cases, return nullptr.
// The returned pointer is owned by the wasm instance target belongs to. The
// result is alive as long as the instance exists.
-WasmFunction* GetWasmFunctionForImportWrapper(Isolate* isolate,
- Handle<Object> target);
+WasmFunction* GetWasmFunctionForExport(Isolate* isolate, Handle<Object> target);
-Handle<Code> UnwrapImportWrapper(Handle<Object> import_wrapper);
-
-void TableSet(ErrorThrower* thrower, Isolate* isolate,
- Handle<WasmTableObject> table, int64_t index,
- Handle<JSFunction> function);
+// {export_wrapper} is known to be an export.
+Handle<Code> UnwrapExportWrapper(Handle<JSFunction> export_wrapper);
void UpdateDispatchTables(Isolate* isolate, Handle<FixedArray> dispatch_tables,
int index, WasmFunction* function, Handle<Code> code);
-//============================================================================
-//== Compilation and instantiation ===========================================
-//============================================================================
-V8_EXPORT_PRIVATE bool SyncValidate(Isolate* isolate,
- const ModuleWireBytes& bytes);
-
-V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> SyncCompileTranslatedAsmJs(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
- Handle<Script> asm_js_script, Vector<const byte> asm_js_offset_table_bytes);
-
-V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> SyncCompile(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes);
-
-V8_EXPORT_PRIVATE MaybeHandle<WasmInstanceObject> SyncInstantiate(
- Isolate* isolate, ErrorThrower* thrower,
- Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
- MaybeHandle<JSArrayBuffer> memory);
-
-V8_EXPORT_PRIVATE MaybeHandle<WasmInstanceObject> SyncCompileAndInstantiate(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
- MaybeHandle<JSReceiver> imports, MaybeHandle<JSArrayBuffer> memory);
-
-V8_EXPORT_PRIVATE void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
- const ModuleWireBytes& bytes);
-
-V8_EXPORT_PRIVATE void AsyncInstantiate(Isolate* isolate,
- Handle<JSPromise> promise,
- Handle<WasmModuleObject> module_object,
- MaybeHandle<JSReceiver> imports);
-
-#if V8_TARGET_ARCH_64_BIT
-const bool kGuardRegionsSupported = true;
-#else
-const bool kGuardRegionsSupported = false;
-#endif
-
-inline bool EnableGuardRegions() {
- return FLAG_wasm_guard_pages && kGuardRegionsSupported &&
- !FLAG_experimental_wasm_threads;
-}
-
-inline SharedFlag IsShared(Handle<JSArrayBuffer> buffer) {
- if (!buffer.is_null() && buffer->is_shared()) {
- DCHECK(FLAG_experimental_wasm_threads);
- return SharedFlag::kShared;
- }
- return SharedFlag::kNotShared;
-}
-
void UnpackAndRegisterProtectedInstructions(Isolate* isolate,
Handle<FixedArray> code_table);
-// Triggered by the WasmCompileLazy builtin.
-// Walks the stack (top three frames) to determine the wasm instance involved
-// and which function to compile.
-// Then triggers WasmCompiledModule::CompileLazy, taking care of correctly
-// patching the call site or indirect function tables.
-// Returns either the Code object that has been lazily compiled, or Illegal if
-// an error occurred. In the latter case, a pending exception has been set,
-// which will be triggered when returning from the runtime function, i.e. the
-// Illegal builtin will never be called.
-Handle<Code> CompileLazy(Isolate* isolate);
-
-// This class orchestrates the lazy compilation of wasm functions. It is
-// triggered by the WasmCompileLazy builtin.
-// It contains the logic for compiling and specializing wasm functions, and
-// patching the calling wasm code.
-// Once we support concurrent lazy compilation, this class will contain the
-// logic to actually orchestrate parallel execution of wasm compilation jobs.
-// TODO(clemensh): Implement concurrent lazy compilation.
-class LazyCompilationOrchestrator {
- void CompileFunction(Isolate*, Handle<WasmInstanceObject>, int func_index);
-
- public:
- Handle<Code> CompileLazy(Isolate*, Handle<WasmInstanceObject>,
- Handle<Code> caller, int call_offset,
- int exported_func_index, bool patch_caller);
-};
-
const char* ExternalKindName(WasmExternalKind);
// TruncatedUserString makes it easy to output names up to a certain length, and
@@ -470,21 +328,6 @@ class TruncatedUserString {
char buffer_[kMaxLen];
};
-namespace testing {
-void ValidateInstancesChain(Isolate* isolate,
- Handle<WasmModuleObject> module_obj,
- int instance_count);
-void ValidateModuleState(Isolate* isolate, Handle<WasmModuleObject> module_obj);
-void ValidateOrphanedInstance(Isolate* isolate,
- Handle<WasmInstanceObject> instance);
-} // namespace testing
-
-void ResolvePromise(Isolate* isolate, Handle<Context> context,
- Handle<JSPromise> promise, Handle<Object> result);
-
-void RejectPromise(Isolate* isolate, Handle<Context> context,
- ErrorThrower& thrower, Handle<JSPromise> promise);
-
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
new file mode 100644
index 0000000000..c435fc7913
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -0,0 +1,210 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_OBJECTS_INL_H_
+#define V8_WASM_OBJECTS_INL_H_
+
+#include "src/heap/heap-inl.h"
+#include "src/wasm/wasm-objects.h"
+
+namespace v8 {
+namespace internal {
+
+// Has to be the last include (doesn't have include guards)
+#include "src/objects/object-macros.h"
+
+CAST_ACCESSOR(WasmInstanceObject)
+CAST_ACCESSOR(WasmMemoryObject)
+CAST_ACCESSOR(WasmModuleObject)
+CAST_ACCESSOR(WasmTableObject)
+
+#define OPTIONAL_ACCESSORS(holder, name, type, offset) \
+ bool holder::has_##name() { \
+ return !READ_FIELD(this, offset)->IsUndefined(GetIsolate()); \
+ } \
+ ACCESSORS(holder, name, type, offset)
+
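To make the macro above concrete: for the OPTIONAL_ACCESSORS(WasmMemoryObject, instances, WeakFixedArray, kInstancesOffset) instantiation below, the generated predicate looks roughly like the following sketch (the paired getter/setter come from V8's ACCESSORS macro and are omitted here):

    // Approximate expansion: "optional" means the field may hold undefined.
    bool WasmMemoryObject::has_instances() {
      return !READ_FIELD(this, kInstancesOffset)->IsUndefined(GetIsolate());
    }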
+// WasmModuleObject
+ACCESSORS(WasmModuleObject, compiled_module, WasmCompiledModule,
+ kCompiledModuleOffset)
+
+// WasmTableObject
+ACCESSORS(WasmTableObject, functions, FixedArray, kFunctionsOffset)
+ACCESSORS(WasmTableObject, maximum_length, Object, kMaximumLengthOffset)
+ACCESSORS(WasmTableObject, dispatch_tables, FixedArray, kDispatchTablesOffset)
+
+// WasmMemoryObject
+ACCESSORS(WasmMemoryObject, array_buffer, JSArrayBuffer, kArrayBufferOffset)
+SMI_ACCESSORS(WasmMemoryObject, maximum_pages, kMaximumPagesOffset)
+OPTIONAL_ACCESSORS(WasmMemoryObject, instances, WeakFixedArray,
+ kInstancesOffset)
+ACCESSORS(WasmMemoryObject, wasm_context, Managed<WasmContext>,
+ kWasmContextOffset)
+
+// WasmInstanceObject
+ACCESSORS(WasmInstanceObject, compiled_module, WasmCompiledModule,
+ kCompiledModuleOffset)
+ACCESSORS(WasmInstanceObject, exports_object, JSObject, kExportsObjectOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, memory_object, WasmMemoryObject,
+ kMemoryObjectOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, memory_buffer, JSArrayBuffer,
+ kMemoryBufferOffset)
+ACCESSORS(WasmInstanceObject, globals_buffer, JSArrayBuffer,
+ kGlobalsBufferOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, debug_info, WasmDebugInfo,
+ kDebugInfoOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, function_tables, FixedArray,
+ kFunctionTablesOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, signature_tables, FixedArray,
+ kSignatureTablesOffset)
+ACCESSORS(WasmInstanceObject, directly_called_instances, FixedArray,
+ kDirectlyCalledInstancesOffset)
+ACCESSORS(WasmInstanceObject, js_imports_table, FixedArray,
+ kJsImportsTableOffset)
+
+// WasmSharedModuleData
+ACCESSORS(WasmSharedModuleData, module_bytes, SeqOneByteString,
+ kModuleBytesOffset)
+ACCESSORS(WasmSharedModuleData, script, Script, kScriptOffset)
+OPTIONAL_ACCESSORS(WasmSharedModuleData, asm_js_offset_table, ByteArray,
+ kAsmJsOffsetTableOffset)
+OPTIONAL_ACCESSORS(WasmSharedModuleData, breakpoint_infos, FixedArray,
+ kBreakPointInfosOffset)
+
+OPTIONAL_ACCESSORS(WasmSharedModuleData, lazy_compilation_orchestrator, Foreign,
+ kLazyCompilationOrchestratorOffset)
+
+OPTIONAL_ACCESSORS(WasmDebugInfo, locals_names, FixedArray, kLocalsNamesOffset)
+OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entries, FixedArray,
+ kCWasmEntriesOffset)
+OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entry_map, Managed<wasm::SignatureMap>,
+ kCWasmEntryMapOffset)
+
+#undef OPTIONAL_ACCESSORS
+
+#define FORWARD_SHARED(type, name) \
+ type WasmCompiledModule::name() { return shared()->name(); }
+FORWARD_SHARED(SeqOneByteString*, module_bytes)
+FORWARD_SHARED(wasm::WasmModule*, module)
+FORWARD_SHARED(Script*, script)
+FORWARD_SHARED(bool, is_asm_js)
+#undef FORWARD_SHARED
+
+#define WCM_OBJECT_OR_WEAK(TYPE, NAME, ID, TYPE_CHECK, SETTER_MODIFIER) \
+ Handle<TYPE> WasmCompiledModule::NAME() const { \
+ return handle(ptr_to_##NAME()); \
+ } \
+ \
+ MaybeHandle<TYPE> WasmCompiledModule::maybe_##NAME() const { \
+ if (has_##NAME()) return NAME(); \
+ return MaybeHandle<TYPE>(); \
+ } \
+ \
+ TYPE* WasmCompiledModule::maybe_ptr_to_##NAME() const { \
+ Object* obj = get(ID); \
+ if (!(TYPE_CHECK)) return nullptr; \
+ return TYPE::cast(obj); \
+ } \
+ \
+ TYPE* WasmCompiledModule::ptr_to_##NAME() const { \
+ Object* obj = get(ID); \
+ DCHECK(TYPE_CHECK); \
+ return TYPE::cast(obj); \
+ } \
+ \
+ bool WasmCompiledModule::has_##NAME() const { \
+ Object* obj = get(ID); \
+ return TYPE_CHECK; \
+ } \
+ \
+ void WasmCompiledModule::reset_##NAME() { set_undefined(ID); } \
+ \
+ void WasmCompiledModule::set_##NAME(Handle<TYPE> value) { \
+ set_ptr_to_##NAME(*value); \
+ } \
+ void WasmCompiledModule::set_ptr_to_##NAME(TYPE* value) { set(ID, value); }
+
+#define WCM_OBJECT(TYPE, NAME) \
+ WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, obj->Is##TYPE(), public)
+
+#define WCM_CONST_OBJECT(TYPE, NAME) \
+ WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, obj->Is##TYPE(), private)
+
+#define WCM_WASM_OBJECT(TYPE, NAME) \
+ WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, TYPE::Is##TYPE(obj), private)
+
+#define WCM_SMALL_CONST_NUMBER(TYPE, NAME) \
+ TYPE WasmCompiledModule::NAME() const { \
+ return static_cast<TYPE>(Smi::ToInt(get(kID_##NAME))); \
+ } \
+ \
+ void WasmCompiledModule::set_##NAME(TYPE value) { \
+ set(kID_##NAME, Smi::FromInt(value)); \
+ }
+
+#define WCM_WEAK_LINK(TYPE, NAME) \
+ WCM_OBJECT_OR_WEAK(WeakCell, weak_##NAME, kID_##NAME, obj->IsWeakCell(), \
+ public) \
+ \
+ Handle<TYPE> WasmCompiledModule::NAME() const { \
+ return handle(TYPE::cast(weak_##NAME()->value())); \
+ }
+
+#define WCM_LARGE_NUMBER(TYPE, NAME) \
+ TYPE WasmCompiledModule::NAME() const { \
+ Object* value = get(kID_##NAME); \
+ DCHECK(value->IsMutableHeapNumber()); \
+ return static_cast<TYPE>(HeapNumber::cast(value)->value()); \
+ } \
+ \
+ void WasmCompiledModule::set_##NAME(TYPE value) { \
+ Object* number = get(kID_##NAME); \
+ DCHECK(number->IsMutableHeapNumber()); \
+ HeapNumber::cast(number)->set_value(static_cast<double>(value)); \
+ } \
+ \
+ void WasmCompiledModule::recreate_##NAME(Handle<WasmCompiledModule> obj, \
+ Factory* factory, TYPE init_val) { \
+ Handle<HeapNumber> number = factory->NewHeapNumber( \
+ static_cast<double>(init_val), MutableMode::MUTABLE, TENURED); \
+ obj->set(kID_##NAME, *number); \
+ } \
+ bool WasmCompiledModule::has_##NAME() const { \
+ return get(kID_##NAME)->IsMutableHeapNumber(); \
+ }
+
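WCM_LARGE_NUMBER exists because values such as a size_t address do not fit in a Smi, so they are boxed in a MutableHeapNumber instead. For the globals_start property declared later in this patch, the generated getter expands roughly to this sketch:

    // Approximate expansion of WCM_LARGE_NUMBER(size_t, globals_start).
    size_t WasmCompiledModule::globals_start() const {
      Object* value = get(kID_globals_start);
      DCHECK(value->IsMutableHeapNumber());
      return static_cast<size_t>(HeapNumber::cast(value)->value());
    }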
+#define DEFINITION(KIND, TYPE, NAME) WCM_##KIND(TYPE, NAME)
+WCM_PROPERTY_TABLE(DEFINITION)
+#undef DEFINITION
+
+#undef WCM_CONST_OBJECT
+#undef WCM_LARGE_NUMBER
+#undef WCM_OBJECT_OR_WEAK
+#undef WCM_SMALL_CONST_NUMBER
+#undef WCM_WEAK_LINK
+
+uint32_t WasmTableObject::current_length() { return functions()->length(); }
+
+bool WasmTableObject::has_maximum_length() {
+ return maximum_length()->Number() >= 0;
+}
+
+bool WasmMemoryObject::has_maximum_pages() { return maximum_pages() >= 0; }
+
+Address WasmCompiledModule::GetGlobalsStartOrNull() const {
+ return has_globals_start() ? reinterpret_cast<Address>(globals_start())
+ : nullptr;
+}
+
+void WasmCompiledModule::ReplaceCodeTableForTesting(
+ Handle<FixedArray> testing_table) {
+ set_code_table(testing_table);
+}
+
+#include "src/objects/object-macros-undef.h"
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_OBJECTS_INL_H_
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index 779a2d8430..012aa6644b 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -11,9 +11,12 @@
#include "src/debug/debug-interface.h"
#include "src/objects-inl.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-code-specialization.h"
+#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-text.h"
#define TRACE(...) \
@@ -21,24 +24,13 @@
if (FLAG_trace_wasm_instances) PrintF(__VA_ARGS__); \
} while (false)
-#define TRACE_CHAIN(instance) \
- do { \
- instance->PrintInstancesChain(); \
- } while (false)
-
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wheader-hygiene"
-#endif
+namespace v8 {
+namespace internal {
-using namespace v8::internal;
-using namespace v8::internal::wasm;
-
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic pop
-#endif
+// Import a few often used types from the wasm namespace.
+using GlobalHandleAddress = wasm::GlobalHandleAddress;
+using WasmFunction = wasm::WasmFunction;
+using WasmModule = wasm::WasmModule;
namespace {
@@ -151,11 +143,12 @@ bool IsBreakablePosition(Handle<WasmCompiledModule> compiled_module,
DisallowHeapAllocation no_gc;
AccountingAllocator alloc;
Zone tmp(&alloc, ZONE_NAME);
- BodyLocalDecls locals(&tmp);
+ wasm::BodyLocalDecls locals(&tmp);
const byte* module_start = compiled_module->module_bytes()->GetChars();
WasmFunction& func = compiled_module->module()->functions[func_index];
- BytecodeIterator iterator(module_start + func.code.offset(),
- module_start + func.code.end_offset(), &locals);
+ wasm::BytecodeIterator iterator(module_start + func.code.offset(),
+ module_start + func.code.end_offset(),
+ &locals);
DCHECK_LT(0, locals.encoded_size);
for (uint32_t offset : iterator.offsets()) {
if (offset > static_cast<uint32_t>(offset_in_func)) break;
@@ -180,6 +173,17 @@ Handle<WasmModuleObject> WasmModuleObject::New(
return module_object;
}
+void WasmModuleObject::ValidateStateForTesting(
+ Isolate* isolate, Handle<WasmModuleObject> module_obj) {
+ DisallowHeapAllocation no_gc;
+ WasmCompiledModule* compiled_module = module_obj->compiled_module();
+ CHECK(compiled_module->has_weak_wasm_module());
+ CHECK_EQ(compiled_module->ptr_to_weak_wasm_module()->value(), *module_obj);
+ CHECK(!compiled_module->has_weak_prev_instance());
+ CHECK(!compiled_module->has_weak_next_instance());
+ CHECK(!compiled_module->has_weak_owning_instance());
+}
+
Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate, uint32_t initial,
int64_t maximum,
Handle<FixedArray>* js_functions) {
@@ -228,7 +232,7 @@ Handle<FixedArray> WasmTableObject::AddDispatchTable(
return new_dispatch_tables;
}
-void WasmTableObject::grow(Isolate* isolate, uint32_t count) {
+void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
Handle<FixedArray> dispatch_tables(this->dispatch_tables());
DCHECK_EQ(0, dispatch_tables->length() % 4);
uint32_t old_size = functions()->length();
@@ -256,7 +260,8 @@ void WasmTableObject::grow(Isolate* isolate, uint32_t count) {
// Patch the code of the respective instance.
{
DisallowHeapAllocation no_gc;
- CodeSpecialization code_specialization(isolate, &specialization_zone);
+ wasm::CodeSpecialization code_specialization(isolate,
+ &specialization_zone);
WasmInstanceObject* instance =
WasmInstanceObject::cast(dispatch_tables->get(i));
WasmCompiledModule* compiled_module = instance->compiled_module();
@@ -282,15 +287,38 @@ void WasmTableObject::grow(Isolate* isolate, uint32_t count) {
}
}
+void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
+ int32_t index, Handle<JSFunction> function) {
+ Handle<FixedArray> array(table->functions(), isolate);
+
+ Handle<FixedArray> dispatch_tables(table->dispatch_tables(), isolate);
+
+ WasmFunction* wasm_function = nullptr;
+ Handle<Code> code = Handle<Code>::null();
+ Handle<Object> value = isolate->factory()->null_value();
+
+ if (!function.is_null()) {
+ wasm_function = wasm::GetWasmFunctionForExport(isolate, function);
+ // The verification that {function} is an export was done
+ // by the caller.
+ DCHECK_NOT_NULL(wasm_function);
+ code = wasm::UnwrapExportWrapper(function);
+ value = Handle<Object>::cast(function);
+ }
+
+ UpdateDispatchTables(isolate, dispatch_tables, index, wasm_function, code);
+ array->set(index, *value);
+}
+
namespace {
Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
Handle<JSArrayBuffer> old_buffer,
uint32_t pages, uint32_t maximum_pages) {
+ if (!old_buffer->is_growable()) return Handle<JSArrayBuffer>::null();
Address old_mem_start = nullptr;
uint32_t old_size = 0;
if (!old_buffer.is_null()) {
- if (!old_buffer->is_growable()) return Handle<JSArrayBuffer>::null();
old_mem_start = static_cast<Address>(old_buffer->backing_store());
CHECK(old_buffer->byte_length()->ToUint32(&old_size));
}
@@ -301,46 +329,51 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
if (old_pages > maximum_pages || pages > maximum_pages - old_pages) {
return Handle<JSArrayBuffer>::null();
}
-
- // TODO(gdeepti): Change the protection here instead of allocating a new
- // buffer before guard regions are turned on, see issue #5886.
const bool enable_guard_regions = old_buffer.is_null()
- ? EnableGuardRegions()
+ ? trap_handler::UseTrapHandler()
: old_buffer->has_guard_region();
size_t new_size =
static_cast<size_t>(old_pages + pages) * WasmModule::kPageSize;
- Handle<JSArrayBuffer> new_buffer =
- NewArrayBuffer(isolate, new_size, enable_guard_regions);
- if (new_buffer.is_null()) return new_buffer;
- Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
- memcpy(new_mem_start, old_mem_start, old_size);
- return new_buffer;
+ if (enable_guard_regions && old_size != 0) {
+ DCHECK_NOT_NULL(old_buffer->backing_store());
+ if (new_size > FLAG_wasm_max_mem_pages * WasmModule::kPageSize ||
+ new_size > kMaxInt) {
+ return Handle<JSArrayBuffer>::null();
+ }
+ isolate->array_buffer_allocator()->SetProtection(
+ old_mem_start, new_size,
+ v8::ArrayBuffer::Allocator::Protection::kReadWrite);
+ reinterpret_cast<v8::Isolate*>(isolate)
+ ->AdjustAmountOfExternalAllocatedMemory(pages * WasmModule::kPageSize);
+ Handle<Object> length_obj = isolate->factory()->NewNumberFromSize(new_size);
+ old_buffer->set_byte_length(*length_obj);
+ return old_buffer;
+ } else {
+ Handle<JSArrayBuffer> new_buffer;
+ new_buffer = wasm::NewArrayBuffer(isolate, new_size, enable_guard_regions);
+ if (new_buffer.is_null() || old_size == 0) return new_buffer;
+ Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
+ memcpy(new_mem_start, old_mem_start, old_size);
+ return new_buffer;
+ }
}
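The early-out above is written to avoid unsigned overflow: it never computes old_pages + pages directly. A self-contained restatement of the same check (hedged sketch, hypothetical helper name):

    #include <cstdint>

    // True iff old_pages + pages <= maximum_pages, evaluated without the
    // addition, which could wrap around in uint32_t.
    bool CanGrow(uint32_t old_pages, uint32_t pages, uint32_t maximum_pages) {
      return old_pages <= maximum_pages && pages <= maximum_pages - old_pages;
    }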
// May GC, because SetSpecializationMemInfoFrom may GC
void SetInstanceMemory(Isolate* isolate, Handle<WasmInstanceObject> instance,
Handle<JSArrayBuffer> buffer) {
instance->set_memory_buffer(*buffer);
- WasmCompiledModule::SetSpecializationMemInfoFrom(
- isolate->factory(), handle(instance->compiled_module()), buffer);
if (instance->has_debug_info()) {
instance->debug_info()->UpdateMemory(*buffer);
}
}
-void UncheckedUpdateInstanceMemory(Isolate* isolate,
- Handle<WasmInstanceObject> instance,
- Address old_mem_start, uint32_t old_size) {
- DCHECK(instance->has_memory_buffer());
- Handle<JSArrayBuffer> mem_buffer(instance->memory_buffer());
- uint32_t new_size = mem_buffer->byte_length()->Number();
- Address new_mem_start = static_cast<Address>(mem_buffer->backing_store());
+void UpdateWasmContext(WasmContext* wasm_context,
+ Handle<JSArrayBuffer> buffer) {
+ uint32_t new_mem_size = buffer->byte_length()->Number();
+ Address new_mem_start = static_cast<Address>(buffer->backing_store());
DCHECK_NOT_NULL(new_mem_start);
- Zone specialization_zone(isolate->allocator(), ZONE_NAME);
- CodeSpecialization code_specialization(isolate, &specialization_zone);
- code_specialization.RelocateMemoryReferences(old_mem_start, old_size,
- new_mem_start, new_size);
- code_specialization.ApplyToWholeInstance(*instance);
+ wasm_context->mem_start = new_mem_start;
+ wasm_context->mem_size = new_mem_size;
}
} // namespace
@@ -352,20 +385,28 @@ Handle<WasmMemoryObject> WasmMemoryObject::New(Isolate* isolate,
isolate->native_context()->wasm_memory_constructor());
auto memory_obj = Handle<WasmMemoryObject>::cast(
isolate->factory()->NewJSObject(memory_ctor, TENURED));
+ auto wasm_context = Managed<WasmContext>::Allocate(isolate);
if (buffer.is_null()) {
- const bool enable_guard_regions = EnableGuardRegions();
- buffer = SetupArrayBuffer(isolate, nullptr, 0, nullptr, 0, false,
- enable_guard_regions);
+ const bool enable_guard_regions = trap_handler::UseTrapHandler();
+ buffer = wasm::SetupArrayBuffer(isolate, nullptr, 0, nullptr, 0, false,
+ enable_guard_regions);
+ wasm_context->get()->mem_size = 0;
+ wasm_context->get()->mem_start = nullptr;
+ } else {
+ CHECK(buffer->byte_length()->ToUint32(&wasm_context->get()->mem_size));
+ wasm_context->get()->mem_start =
+ static_cast<Address>(buffer->backing_store());
}
memory_obj->set_array_buffer(*buffer);
memory_obj->set_maximum_pages(maximum);
+ memory_obj->set_wasm_context(*wasm_context);
return memory_obj;
}
uint32_t WasmMemoryObject::current_pages() {
uint32_t byte_length;
CHECK(array_buffer()->byte_length()->ToUint32(&byte_length));
- return byte_length / wasm::WasmModule::kPageSize;
+ return byte_length / WasmModule::kPageSize;
}
void WasmMemoryObject::AddInstance(Isolate* isolate,
@@ -388,6 +429,32 @@ void WasmMemoryObject::RemoveInstance(Isolate* isolate,
}
}
+void WasmMemoryObject::SetupNewBufferWithSameBackingStore(
+ Isolate* isolate, Handle<WasmMemoryObject> memory_object, uint32_t size) {
+  // In case of Memory.Grow(0), or Memory.Grow(delta) with guard pages enabled,
+  // set up a new buffer and update the memory object and the instances
+  // associated with it, as the current buffer will be detached.
+ Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer());
+ Handle<JSArrayBuffer> new_buffer;
+
+ constexpr bool is_external = false;
+ new_buffer = wasm::SetupArrayBuffer(
+ isolate, old_buffer->allocation_base(), old_buffer->allocation_length(),
+ old_buffer->backing_store(), size * WasmModule::kPageSize, is_external,
+ old_buffer->has_guard_region());
+ if (memory_object->has_instances()) {
+ Handle<WeakFixedArray> instances(memory_object->instances(), isolate);
+ for (int i = 0; i < instances->Length(); i++) {
+ Object* elem = instances->Get(i);
+ if (!elem->IsWasmInstanceObject()) continue;
+ Handle<WasmInstanceObject> instance(WasmInstanceObject::cast(elem),
+ isolate);
+ SetInstanceMemory(isolate, instance, new_buffer);
+ }
+ }
+ memory_object->set_array_buffer(*new_buffer);
+}
+
// static
int32_t WasmMemoryObject::Grow(Isolate* isolate,
Handle<WasmMemoryObject> memory_object,
@@ -399,13 +466,6 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
Handle<JSArrayBuffer> new_buffer;
// Return current size if grow by 0.
if (pages == 0) {
- // Even for pages == 0, we need to attach a new JSArrayBuffer with the same
- // backing store and neuter the old one to be spec compliant.
- new_buffer = SetupArrayBuffer(
- isolate, old_buffer->allocation_base(),
- old_buffer->allocation_length(), old_buffer->backing_store(),
- old_size, old_buffer->is_external(), old_buffer->has_guard_region());
- memory_object->set_array_buffer(*new_buffer);
DCHECK_EQ(0, old_size % WasmModule::kPageSize);
return old_size / WasmModule::kPageSize;
}
@@ -420,8 +480,13 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
new_buffer = GrowMemoryBuffer(isolate, old_buffer, pages, maximum_pages);
if (new_buffer.is_null()) return -1;
+ // Verify that the values we will change are actually the ones we expect.
+ DCHECK_EQ(memory_object->wasm_context()->get()->mem_size, old_size);
+ DCHECK_EQ(memory_object->wasm_context()->get()->mem_start,
+ static_cast<Address>(old_buffer->backing_store()));
+ UpdateWasmContext(memory_object->wasm_context()->get(), new_buffer);
+
if (memory_object->has_instances()) {
- Address old_mem_start = static_cast<Address>(old_buffer->backing_store());
Handle<WeakFixedArray> instances(memory_object->instances(), isolate);
for (int i = 0; i < instances->Length(); i++) {
Object* elem = instances->Get(i);
@@ -429,10 +494,8 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
Handle<WasmInstanceObject> instance(WasmInstanceObject::cast(elem),
isolate);
SetInstanceMemory(isolate, instance, new_buffer);
- UncheckedUpdateInstanceMemory(isolate, instance, old_mem_start, old_size);
}
}
-
memory_object->set_array_buffer(*new_buffer);
DCHECK_EQ(0, old_size % WasmModule::kPageSize);
return old_size / WasmModule::kPageSize;
@@ -442,6 +505,11 @@ WasmModuleObject* WasmInstanceObject::module_object() {
return *compiled_module()->wasm_module();
}
+WasmContext* WasmInstanceObject::wasm_context() {
+ DCHECK(has_memory_object());
+ return memory_object()->wasm_context()->get();
+}
+
WasmModule* WasmInstanceObject::module() { return compiled_module()->module(); }
Handle<WasmDebugInfo> WasmInstanceObject::GetOrCreateDebugInfo(
@@ -477,28 +545,9 @@ int32_t WasmInstanceObject::GrowMemory(Isolate* isolate,
Handle<WasmInstanceObject> instance,
uint32_t pages) {
if (pages == 0) return instance->GetMemorySize();
- if (instance->has_memory_object()) {
- return WasmMemoryObject::Grow(
- isolate, handle(instance->memory_object(), isolate), pages);
- }
-
- // No other instances to grow, grow just the one.
- uint32_t old_size = 0;
- Address old_mem_start = nullptr;
- Handle<JSArrayBuffer> old_buffer;
- if (instance->has_memory_buffer()) {
- old_buffer = handle(instance->memory_buffer(), isolate);
- old_size = old_buffer->byte_length()->Number();
- old_mem_start = static_cast<Address>(old_buffer->backing_store());
- }
- uint32_t maximum_pages = instance->GetMaxMemoryPages();
- Handle<JSArrayBuffer> buffer =
- GrowMemoryBuffer(isolate, old_buffer, pages, maximum_pages);
- if (buffer.is_null()) return -1;
- SetInstanceMemory(isolate, instance, buffer);
- UncheckedUpdateInstanceMemory(isolate, instance, old_mem_start, old_size);
- DCHECK_EQ(0, old_size % WasmModule::kPageSize);
- return old_size / WasmModule::kPageSize;
+ DCHECK(instance->has_memory_object());
+ return WasmMemoryObject::Grow(
+ isolate, handle(instance->memory_object(), isolate), pages);
}
uint32_t WasmInstanceObject::GetMaxMemoryPages() {
@@ -518,6 +567,54 @@ uint32_t WasmInstanceObject::GetMaxMemoryPages() {
return FLAG_wasm_max_mem_pages;
}
+WasmInstanceObject* WasmInstanceObject::GetOwningInstance(Code* code) {
+ DisallowHeapAllocation no_gc;
+ DCHECK(code->kind() == Code::WASM_FUNCTION ||
+ code->kind() == Code::WASM_INTERPRETER_ENTRY);
+ FixedArray* deopt_data = code->deoptimization_data();
+ DCHECK_EQ(code->kind() == Code::WASM_INTERPRETER_ENTRY ? 1 : 2,
+ deopt_data->length());
+ Object* weak_link = deopt_data->get(0);
+ DCHECK(weak_link->IsWeakCell());
+ WeakCell* cell = WeakCell::cast(weak_link);
+ if (cell->cleared()) return nullptr;
+ return WasmInstanceObject::cast(cell->value());
+}
+
+void WasmInstanceObject::ValidateInstancesChainForTesting(
+ Isolate* isolate, Handle<WasmModuleObject> module_obj, int instance_count) {
+ CHECK_GE(instance_count, 0);
+ DisallowHeapAllocation no_gc;
+ WasmCompiledModule* compiled_module = module_obj->compiled_module();
+ CHECK_EQ(JSObject::cast(compiled_module->ptr_to_weak_wasm_module()->value()),
+ *module_obj);
+ Object* prev = nullptr;
+ int found_instances = compiled_module->has_weak_owning_instance() ? 1 : 0;
+ WasmCompiledModule* current_instance = compiled_module;
+ while (current_instance->has_weak_next_instance()) {
+ CHECK((prev == nullptr && !current_instance->has_weak_prev_instance()) ||
+ current_instance->ptr_to_weak_prev_instance()->value() == prev);
+ CHECK_EQ(current_instance->ptr_to_weak_wasm_module()->value(), *module_obj);
+ CHECK(current_instance->ptr_to_weak_owning_instance()
+ ->value()
+ ->IsWasmInstanceObject());
+ prev = current_instance;
+ current_instance = WasmCompiledModule::cast(
+ current_instance->ptr_to_weak_next_instance()->value());
+ ++found_instances;
+ CHECK_LE(found_instances, instance_count);
+ }
+ CHECK_EQ(found_instances, instance_count);
+}
+
+void WasmInstanceObject::ValidateOrphanedInstanceForTesting(
+ Isolate* isolate, Handle<WasmInstanceObject> instance) {
+ DisallowHeapAllocation no_gc;
+ WasmCompiledModule* compiled_module = instance->compiled_module();
+ CHECK(compiled_module->has_weak_wasm_module());
+ CHECK(compiled_module->ptr_to_weak_wasm_module()->cleared());
+}
+
bool WasmExportedFunction::IsWasmExportedFunction(Object* object) {
if (!object->IsJSFunction()) return false;
Handle<JSFunction> js_function(JSFunction::cast(object));
@@ -610,13 +707,13 @@ WasmSharedModuleData* WasmSharedModuleData::cast(Object* object) {
return reinterpret_cast<WasmSharedModuleData*>(object);
}
-wasm::WasmModule* WasmSharedModuleData::module() {
+WasmModule* WasmSharedModuleData::module() {
// We populate the kModuleWrapper field with a Foreign holding the
// address to the address of a WasmModule. This is because we can
// handle both cases when the WasmModule's lifetime is managed through
// a Managed<WasmModule> object, as well as cases when it's managed
// by the embedder. CcTests fall into the latter case.
- return *(reinterpret_cast<wasm::WasmModule**>(
+ return *(reinterpret_cast<WasmModule**>(
Foreign::cast(get(kModuleWrapperIndex))->foreign_address()));
}
@@ -673,8 +770,8 @@ void WasmSharedModuleData::ReinitializeAfterDeserialization(
const byte* end = start + module_bytes->length();
// TODO(titzer): remember the module origin in the compiled_module
// For now, we assume serialized modules did not originate from asm.js.
- ModuleResult result =
- SyncDecodeWasmModule(isolate, start, end, false, kWasmOrigin);
+ wasm::ModuleResult result =
+ SyncDecodeWasmModule(isolate, start, end, false, wasm::kWasmOrigin);
CHECK(result.ok());
CHECK_NOT_NULL(result.val);
// Take ownership of the WasmModule and immediately transfer it to the
@@ -682,8 +779,8 @@ void WasmSharedModuleData::ReinitializeAfterDeserialization(
module = result.val.release();
}
- Handle<WasmModuleWrapper> module_wrapper =
- WasmModuleWrapper::New(isolate, module);
+ Handle<wasm::WasmModuleWrapper> module_wrapper =
+ wasm::WasmModuleWrapper::From(isolate, module);
shared->set(kModuleWrapperIndex, *module_wrapper);
DCHECK(WasmSharedModuleData::IsWasmSharedModuleData(*shared));
@@ -813,17 +910,16 @@ void WasmSharedModuleData::PrepareForLazyCompilation(
Handle<WasmSharedModuleData> shared) {
if (shared->has_lazy_compilation_orchestrator()) return;
Isolate* isolate = shared->GetIsolate();
- LazyCompilationOrchestrator* orch = new LazyCompilationOrchestrator();
- Handle<Managed<LazyCompilationOrchestrator>> orch_handle =
- Managed<LazyCompilationOrchestrator>::New(isolate, orch);
+ auto orch_handle =
+ Managed<wasm::LazyCompilationOrchestrator>::Allocate(isolate);
shared->set_lazy_compilation_orchestrator(*orch_handle);
}
Handle<WasmCompiledModule> WasmCompiledModule::New(
Isolate* isolate, Handle<WasmSharedModuleData> shared,
Handle<FixedArray> code_table, Handle<FixedArray> export_wrappers,
- const std::vector<wasm::GlobalHandleAddress>& function_tables,
- const std::vector<wasm::GlobalHandleAddress>& signature_tables) {
+ const std::vector<GlobalHandleAddress>& function_tables,
+ const std::vector<GlobalHandleAddress>& signature_tables) {
DCHECK_EQ(function_tables.size(), signature_tables.size());
Handle<FixedArray> ret =
isolate->factory()->NewFixedArray(PropertyIndices::Count, TENURED);
@@ -881,18 +977,10 @@ Handle<WasmCompiledModule> WasmCompiledModule::Clone(
ret->reset_weak_next_instance();
ret->reset_weak_prev_instance();
ret->reset_weak_exported_functions();
- if (ret->has_embedded_mem_start()) {
- WasmCompiledModule::recreate_embedded_mem_start(ret, isolate->factory(),
- ret->embedded_mem_start());
- }
if (ret->has_globals_start()) {
WasmCompiledModule::recreate_globals_start(ret, isolate->factory(),
ret->globals_start());
}
- if (ret->has_embedded_mem_size()) {
- WasmCompiledModule::recreate_embedded_mem_size(ret, isolate->factory(),
- ret->embedded_mem_size());
- }
return ret;
}
@@ -924,20 +1012,10 @@ void WasmCompiledModule::Reset(Isolate* isolate,
Object* undefined = *isolate->factory()->undefined_value();
Object* fct_obj = compiled_module->ptr_to_code_table();
if (fct_obj != nullptr && fct_obj != undefined) {
- uint32_t old_mem_size = compiled_module->GetEmbeddedMemSizeOrZero();
- // We use default_mem_size throughout, as the mem size of an uninstantiated
- // module, because if we can statically prove a memory access is over
- // bounds, we'll codegen a trap. See {WasmGraphBuilder::BoundsCheckMem}
- uint32_t default_mem_size = compiled_module->default_mem_size();
- Address old_mem_start = compiled_module->GetEmbeddedMemStartOrNull();
-
// Patch code to update memory references, global references, and function
// table references.
Zone specialization_zone(isolate->allocator(), ZONE_NAME);
- CodeSpecialization code_specialization(isolate, &specialization_zone);
-
- code_specialization.RelocateMemoryReferences(old_mem_start, old_mem_size,
- nullptr, default_mem_size);
+ wasm::CodeSpecialization code_specialization(isolate, &specialization_zone);
if (compiled_module->has_globals_start()) {
Address globals_start =
@@ -998,7 +1076,6 @@ void WasmCompiledModule::Reset(Isolate* isolate,
}
}
}
- compiled_module->ResetSpecializationMemInfoIfNeeded();
}
void WasmCompiledModule::InitId() {
@@ -1009,32 +1086,6 @@ void WasmCompiledModule::InitId() {
#endif
}
-void WasmCompiledModule::ResetSpecializationMemInfoIfNeeded() {
- DisallowHeapAllocation no_gc;
- if (has_embedded_mem_start()) {
- set_embedded_mem_size(default_mem_size());
- set_embedded_mem_start(0);
- }
-}
-
-void WasmCompiledModule::SetSpecializationMemInfoFrom(
- Factory* factory, Handle<WasmCompiledModule> compiled_module,
- Handle<JSArrayBuffer> buffer) {
- DCHECK(!buffer.is_null());
- size_t start_address = reinterpret_cast<size_t>(buffer->backing_store());
- uint32_t size = static_cast<uint32_t>(buffer->byte_length()->Number());
- if (!compiled_module->has_embedded_mem_start()) {
- DCHECK(!compiled_module->has_embedded_mem_size());
- WasmCompiledModule::recreate_embedded_mem_start(compiled_module, factory,
- start_address);
- WasmCompiledModule::recreate_embedded_mem_size(compiled_module, factory,
- size);
- } else {
- compiled_module->set_embedded_mem_start(start_address);
- compiled_module->set_embedded_mem_size(size);
- }
-}
-
void WasmCompiledModule::SetGlobalsStartAddressFrom(
Factory* factory, Handle<WasmCompiledModule> compiled_module,
Handle<JSArrayBuffer> buffer) {
@@ -1050,7 +1101,7 @@ void WasmCompiledModule::SetGlobalsStartAddressFrom(
MaybeHandle<String> WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
- WireBytesRef ref) {
+ wasm::WireBytesRef ref) {
// TODO(wasm): cache strings from modules if it's a performance win.
Handle<SeqOneByteString> module_bytes(compiled_module->module_bytes(),
isolate);
@@ -1104,7 +1155,15 @@ bool WasmCompiledModule::IsWasmCompiledModule(Object* obj) {
#define WCM_CHECK_LARGE_NUMBER(TYPE, NAME) \
WCM_CHECK_TYPE(NAME, obj->IsUndefined(isolate) || obj->IsMutableHeapNumber())
WCM_PROPERTY_TABLE(WCM_CHECK)
+#undef WCM_CHECK_TYPE
+#undef WCM_CHECK_OBJECT
+#undef WCM_CHECK_CONST_OBJECT
+#undef WCM_CHECK_WASM_OBJECT
+#undef WCM_CHECK_WEAK_LINK
+#undef WCM_CHECK_SMALL_NUMBER
#undef WCM_CHECK
+#undef WCM_CHECK_SMALL_CONST_NUMBER
+#undef WCM_CHECK_LARGE_NUMBER
// All checks passed.
return true;
@@ -1116,7 +1175,7 @@ void WasmCompiledModule::PrintInstancesChain() {
for (WasmCompiledModule* current = this; current != nullptr;) {
PrintF("->%d", current->instance_id());
if (!current->has_weak_next_instance()) break;
- CHECK(!current->ptr_to_weak_next_instance()->cleared());
+ DCHECK(!current->ptr_to_weak_next_instance()->cleared());
current =
WasmCompiledModule::cast(current->ptr_to_weak_next_instance()->value());
}
@@ -1276,7 +1335,7 @@ Handle<ByteArray> GetDecodedAsmJsOffsetTable(
DCHECK(table_type == Encoded || table_type == Decoded);
if (table_type == Decoded) return offset_table;
- AsmJsOffsetsResult asm_offsets;
+ wasm::AsmJsOffsetsResult asm_offsets;
{
DisallowHeapAllocation no_gc;
const byte* bytes_start = offset_table->GetDataStartAddress();
@@ -1309,10 +1368,11 @@ Handle<ByteArray> GetDecodedAsmJsOffsetTable(
int idx = 0;
std::vector<WasmFunction>& wasm_funs = compiled_module->module()->functions;
for (int func = 0; func < num_functions; ++func) {
- std::vector<AsmJsOffsetEntry>& func_asm_offsets = asm_offsets.val[func];
+ std::vector<wasm::AsmJsOffsetEntry>& func_asm_offsets =
+ asm_offsets.val[func];
if (func_asm_offsets.empty()) continue;
int func_offset = wasm_funs[num_imported_functions + func].code.offset();
- for (AsmJsOffsetEntry& e : func_asm_offsets) {
+ for (wasm::AsmJsOffsetEntry& e : func_asm_offsets) {
// Byte offsets must be strictly monotonically increasing:
DCHECK_IMPLIES(idx > 0, func_offset + e.byte_offset >
decoded_table->get_int(idx - kOTESize));
@@ -1329,16 +1389,24 @@ Handle<ByteArray> GetDecodedAsmJsOffsetTable(
} // namespace
-int WasmCompiledModule::GetAsmJsSourcePosition(
+int WasmCompiledModule::GetSourcePosition(
Handle<WasmCompiledModule> compiled_module, uint32_t func_index,
uint32_t byte_offset, bool is_at_number_conversion) {
Isolate* isolate = compiled_module->GetIsolate();
+ const WasmModule* module = compiled_module->module();
+
+ if (!module->is_asm_js()) {
+    // For non-asm.js modules, we just add the function's start offset
+    // to make a module-relative position.
+ return byte_offset + compiled_module->GetFunctionOffset(func_index);
+ }
+
+ // asm.js modules have an additional offset table that must be searched.
Handle<ByteArray> offset_table =
GetDecodedAsmJsOffsetTable(compiled_module, isolate);
- DCHECK_LT(func_index, compiled_module->module()->functions.size());
- uint32_t func_code_offset =
- compiled_module->module()->functions[func_index].code.offset();
+ DCHECK_LT(func_index, module->functions.size());
+ uint32_t func_code_offset = module->functions[func_index].code.offset();
uint32_t total_offset = func_code_offset + byte_offset;
// Binary search for the total byte offset.
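The binary search mentioned here relies on the strict monotonicity asserted earlier in this hunk. A standalone sketch of the same lookup over a hypothetical decoded table (the names are illustrative, not V8's):

    #include <algorithm>
    #include <cstdint>
    #include <iterator>
    #include <vector>

    struct OffsetEntry {
      uint32_t byte_offset;  // module-relative wasm byte offset
      int source_position;   // corresponding asm.js source position
    };

    // Entries are sorted by byte_offset, so the last entry at or before
    // total_offset is found with a binary search.
    int LookupSourcePosition(const std::vector<OffsetEntry>& table,
                             uint32_t total_offset) {
      auto it = std::upper_bound(
          table.begin(), table.end(), total_offset,
          [](uint32_t off, const OffsetEntry& e) {
            return off < e.byte_offset;
          });
      if (it == table.begin()) return 0;  // before the first mapped offset
      return std::prev(it)->source_position;
    }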
@@ -1437,9 +1505,10 @@ bool WasmCompiledModule::GetPossibleBreakpoints(
WasmFunction& func = functions[func_idx];
if (func.code.length() == 0) continue;
- BodyLocalDecls locals(&tmp);
- BytecodeIterator iterator(module_start + func.code.offset(),
- module_start + func.code.end_offset(), &locals);
+ wasm::BodyLocalDecls locals(&tmp);
+ wasm::BytecodeIterator iterator(module_start + func.code.offset(),
+ module_start + func.code.end_offset(),
+ &locals);
DCHECK_LT(0u, locals.encoded_size);
for (uint32_t offset : iterator.offsets()) {
uint32_t total_offset = func.code.offset() + offset;
@@ -1512,8 +1581,13 @@ Handle<Code> WasmCompiledModule::CompileLazy(
isolate->set_context(*instance->compiled_module()->native_context());
Object* orch_obj =
instance->compiled_module()->shared()->lazy_compilation_orchestrator();
- LazyCompilationOrchestrator* orch =
- Managed<LazyCompilationOrchestrator>::cast(orch_obj)->get();
+ auto* orch =
+ Managed<wasm::LazyCompilationOrchestrator>::cast(orch_obj)->get();
return orch->CompileLazy(isolate, instance, caller, offset, func_index,
patch_caller);
}
+
+#undef TRACE
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index 79d2db865c..86a7913d7a 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -7,13 +7,13 @@
#include "src/debug/debug.h"
#include "src/debug/interface-types.h"
+#include "src/managed.h"
#include "src/objects.h"
#include "src/objects/script.h"
#include "src/trap-handler/trap-handler.h"
+#include "src/wasm/decoder.h"
#include "src/wasm/wasm-limits.h"
-#include "src/wasm/wasm-module.h"
-#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
// Has to be the last include (doesn't have include guards)
@@ -24,9 +24,11 @@ namespace internal {
namespace wasm {
class InterpretedFrame;
class WasmInterpreter;
-
+struct WasmModule;
+class SignatureMap;
typedef Address GlobalHandleAddress;
-
+using ValueType = MachineRepresentation;
+using FunctionSig = Signature<ValueType>;
} // namespace wasm
class WasmCompiledModule;
@@ -50,6 +52,17 @@ class WasmInstanceObject;
static const int k##name##Offset = \
kSize + (k##name##Index - kFieldCount) * kPointerSize;
+// Wasm context used to store the mem_size and mem_start address of the linear
+// memory. These variables can be accessed at the C++ level at graph build time
+// (e.g., initialized during instance building, changed at runtime by
+// grow_memory). The address of the WasmContext is provided to the wasm entry
+// functions using a RelocatableIntPtrConstant, and is then passed as a
+// parameter to the other wasm functions.
+struct WasmContext {
+ byte* mem_start;
+ uint32_t mem_size;
+};
+
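The update path for this struct appears earlier in the patch as UpdateWasmContext; its essence (a minimal sketch over the two fields above) is a pair of assignments, after which compiled wasm code reads the new bounds through the context pointer:

    // Sketch: refresh the context after a successful memory growth.
    void UpdateContext(WasmContext* ctx, byte* new_start, uint32_t new_size) {
      ctx->mem_start = new_start;
      ctx->mem_size = new_size;
    }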
// Representation of a WebAssembly.Module JavaScript-level object.
class WasmModuleObject : public JSObject {
public:
@@ -68,6 +81,9 @@ class WasmModuleObject : public JSObject {
static Handle<WasmModuleObject> New(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module);
+
+ static void ValidateStateForTesting(Isolate* isolate,
+ Handle<WasmModuleObject> module);
};
// Representation of a WebAssembly.Table JavaScript-level object.
@@ -92,9 +108,9 @@ class WasmTableObject : public JSObject {
DEF_OFFSET(MaximumLength)
DEF_OFFSET(DispatchTables)
- inline uint32_t current_length() { return functions()->length(); }
- inline bool has_maximum_length() { return maximum_length()->Number() >= 0; }
- void grow(Isolate* isolate, uint32_t count);
+ inline uint32_t current_length();
+ inline bool has_maximum_length();
+ void Grow(Isolate* isolate, uint32_t count);
static Handle<WasmTableObject> New(Isolate* isolate, uint32_t initial,
int64_t maximum,
@@ -103,6 +119,9 @@ class WasmTableObject : public JSObject {
Isolate* isolate, Handle<WasmTableObject> table,
Handle<WasmInstanceObject> instance, int table_index,
Handle<FixedArray> function_table, Handle<FixedArray> signature_table);
+
+ static void Set(Isolate* isolate, Handle<WasmTableObject> table,
+ int32_t index, Handle<JSFunction> function);
};
// Representation of a WebAssembly.Memory JavaScript-level object.
@@ -113,11 +132,13 @@ class WasmMemoryObject : public JSObject {
DECL_ACCESSORS(array_buffer, JSArrayBuffer)
DECL_INT_ACCESSORS(maximum_pages)
DECL_OPTIONAL_ACCESSORS(instances, WeakFixedArray)
+ DECL_ACCESSORS(wasm_context, Managed<WasmContext>)
enum { // --
kArrayBufferIndex,
kMaximumPagesIndex,
kInstancesIndex,
+ kWasmContextIndex,
kFieldCount
};
@@ -125,6 +146,7 @@ class WasmMemoryObject : public JSObject {
DEF_OFFSET(ArrayBuffer)
DEF_OFFSET(MaximumPages)
DEF_OFFSET(Instances)
+ DEF_OFFSET(WasmContext)
// Add an instance to the internal (weak) list. amortized O(n).
static void AddInstance(Isolate* isolate, Handle<WasmMemoryObject> memory,
@@ -133,13 +155,15 @@ class WasmMemoryObject : public JSObject {
static void RemoveInstance(Isolate* isolate, Handle<WasmMemoryObject> memory,
Handle<WasmInstanceObject> object);
uint32_t current_pages();
- inline bool has_maximum_pages() { return maximum_pages() >= 0; }
+ inline bool has_maximum_pages();
static Handle<WasmMemoryObject> New(Isolate* isolate,
Handle<JSArrayBuffer> buffer,
int32_t maximum);
static int32_t Grow(Isolate*, Handle<WasmMemoryObject>, uint32_t pages);
+ static void SetupNewBufferWithSameBackingStore(
+ Isolate* isolate, Handle<WasmMemoryObject> memory_object, uint32_t size);
};
// A WebAssembly.Instance JavaScript-level object.
@@ -158,6 +182,7 @@ class WasmInstanceObject : public JSObject {
// FixedArray of all instances whose code was imported
DECL_OPTIONAL_ACCESSORS(directly_called_instances, FixedArray)
+ DECL_ACCESSORS(js_imports_table, FixedArray)
enum { // --
kCompiledModuleIndex,
@@ -169,6 +194,7 @@ class WasmInstanceObject : public JSObject {
kFunctionTablesIndex,
kSignatureTablesIndex,
kDirectlyCalledInstancesIndex,
+ kJsImportsTableIndex,
kFieldCount
};
@@ -182,8 +208,10 @@ class WasmInstanceObject : public JSObject {
DEF_OFFSET(FunctionTables)
DEF_OFFSET(SignatureTables)
DEF_OFFSET(DirectlyCalledInstances)
+ DEF_OFFSET(JsImportsTable)
WasmModuleObject* module_object();
+ WasmContext* wasm_context();
V8_EXPORT_PRIVATE wasm::WasmModule* module();
// Get the debug info associated with the given wasm object.
@@ -198,15 +226,27 @@ class WasmInstanceObject : public JSObject {
uint32_t pages);
uint32_t GetMaxMemoryPages();
+
+  // Assumed to be called with a code object associated with a wasm module
+  // instance. Intended to be called from runtime functions. Returns nullptr
+  // if the owning instance cannot be determined.
+ static WasmInstanceObject* GetOwningInstance(Code* code);
+
+ static void ValidateInstancesChainForTesting(
+ Isolate* isolate, Handle<WasmModuleObject> module_obj,
+ int instance_count);
+
+ static void ValidateOrphanedInstanceForTesting(
+ Isolate* isolate, Handle<WasmInstanceObject> instance);
};
// A WASM function that is wrapped and exported to JavaScript.
class WasmExportedFunction : public JSFunction {
public:
WasmInstanceObject* instance();
- int function_index();
+ V8_EXPORT_PRIVATE int function_index();
- static WasmExportedFunction* cast(Object* object);
+ V8_EXPORT_PRIVATE static WasmExportedFunction* cast(Object* object);
static bool IsWasmExportedFunction(Object* object);
static Handle<WasmExportedFunction> New(Isolate* isolate,
@@ -214,6 +254,8 @@ class WasmExportedFunction : public JSFunction {
MaybeHandle<String> maybe_name,
int func_index, int arity,
Handle<Code> export_wrapper);
+
+ Handle<Code> GetWasmCode();
};
// Information shared by all WasmCompiledModule objects for the same module.
@@ -274,8 +316,11 @@ class WasmSharedModuleData : public FixedArray {
// with all the information necessary for re-specializing them.
//
// We specialize wasm functions to their instance by embedding:
-// - raw interior pointers into the backing store of the array buffer
-// used as memory of a particular WebAssembly.Instance object.
+// - a raw pointer to the wasm_context, which contains the size of the
+//   memory and the pointer to the backing store of the array buffer
+//   used as memory of a particular WebAssembly.Instance object. This
+//   information is then used at runtime to access memory and to verify
+//   bounds-check limits.
// - bounds check limits, computed at compile time, relative to the
// size of the memory.
// - the objects representing the function tables and signature tables
@@ -307,35 +352,16 @@ class WasmCompiledModule : public FixedArray {
#define WCM_OBJECT_OR_WEAK(TYPE, NAME, ID, TYPE_CHECK, SETTER_MODIFIER) \
public: \
- Handle<TYPE> NAME() const { return handle(ptr_to_##NAME()); } \
- \
- MaybeHandle<TYPE> maybe_##NAME() const { \
- if (has_##NAME()) return NAME(); \
- return MaybeHandle<TYPE>(); \
- } \
- \
- TYPE* maybe_ptr_to_##NAME() const { \
- Object* obj = get(ID); \
- if (!(TYPE_CHECK)) return nullptr; \
- return TYPE::cast(obj); \
- } \
- \
- TYPE* ptr_to_##NAME() const { \
- Object* obj = get(ID); \
- DCHECK(TYPE_CHECK); \
- return TYPE::cast(obj); \
- } \
- \
- bool has_##NAME() const { \
- Object* obj = get(ID); \
- return TYPE_CHECK; \
- } \
- \
- void reset_##NAME() { set_undefined(ID); } \
+ inline Handle<TYPE> NAME() const; \
+ inline MaybeHandle<TYPE> maybe_##NAME() const; \
+ inline TYPE* maybe_ptr_to_##NAME() const; \
+ inline TYPE* ptr_to_##NAME() const; \
+ inline bool has_##NAME() const; \
+ inline void reset_##NAME(); \
\
SETTER_MODIFIER: \
- void set_##NAME(Handle<TYPE> value) { set_ptr_to_##NAME(*value); } \
- void set_ptr_to_##NAME(TYPE* value) { set(ID, value); }
+ inline void set_##NAME(Handle<TYPE> value); \
+ inline void set_ptr_to_##NAME(TYPE* value);
#define WCM_OBJECT(TYPE, NAME) \
WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, obj->Is##TYPE(), public)
@@ -346,43 +372,27 @@ class WasmCompiledModule : public FixedArray {
#define WCM_WASM_OBJECT(TYPE, NAME) \
WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, TYPE::Is##TYPE(obj), private)
-#define WCM_SMALL_CONST_NUMBER(TYPE, NAME) \
- public: \
- TYPE NAME() const { return static_cast<TYPE>(Smi::ToInt(get(kID_##NAME))); } \
- \
- private: \
- void set_##NAME(TYPE value) { set(kID_##NAME, Smi::FromInt(value)); }
+#define WCM_SMALL_CONST_NUMBER(TYPE, NAME) \
+ public: \
+ inline TYPE NAME() const; \
+ \
+ private: \
+ inline void set_##NAME(TYPE value);
#define WCM_WEAK_LINK(TYPE, NAME) \
WCM_OBJECT_OR_WEAK(WeakCell, weak_##NAME, kID_##NAME, obj->IsWeakCell(), \
public) \
\
public: \
- Handle<TYPE> NAME() const { \
- return handle(TYPE::cast(weak_##NAME()->value())); \
- }
+ inline Handle<TYPE> NAME() const;
#define WCM_LARGE_NUMBER(TYPE, NAME) \
public: \
- TYPE NAME() const { \
- Object* value = get(kID_##NAME); \
- DCHECK(value->IsMutableHeapNumber()); \
- return static_cast<TYPE>(HeapNumber::cast(value)->value()); \
- } \
- \
- void set_##NAME(TYPE value) { \
- Object* number = get(kID_##NAME); \
- DCHECK(number->IsMutableHeapNumber()); \
- HeapNumber::cast(number)->set_value(static_cast<double>(value)); \
- } \
- \
- static void recreate_##NAME(Handle<WasmCompiledModule> obj, \
- Factory* factory, TYPE init_val) { \
- Handle<HeapNumber> number = factory->NewHeapNumber( \
- static_cast<double>(init_val), MutableMode::MUTABLE, TENURED); \
- obj->set(kID_##NAME, *number); \
- } \
- bool has_##NAME() const { return get(kID_##NAME)->IsMutableHeapNumber(); }
+ inline TYPE NAME() const; \
+ inline void set_##NAME(TYPE value); \
+ inline static void recreate_##NAME(Handle<WasmCompiledModule> obj, \
+ Factory* factory, TYPE init_val); \
+ inline bool has_##NAME() const;
// Add values here if they are required for creating new instances or
// for deserialization, and if they are serializable.
@@ -399,9 +409,7 @@ class WasmCompiledModule : public FixedArray {
MACRO(OBJECT, FixedArray, signature_tables) \
MACRO(CONST_OBJECT, FixedArray, empty_function_tables) \
MACRO(CONST_OBJECT, FixedArray, empty_signature_tables) \
- MACRO(LARGE_NUMBER, size_t, embedded_mem_start) \
MACRO(LARGE_NUMBER, size_t, globals_start) \
- MACRO(LARGE_NUMBER, uint32_t, embedded_mem_size) \
MACRO(SMALL_CONST_NUMBER, uint32_t, initial_pages) \
MACRO(WEAK_LINK, WasmCompiledModule, next_instance) \
MACRO(WEAK_LINK, WasmCompiledModule, prev_instance) \
@@ -439,27 +447,10 @@ class WasmCompiledModule : public FixedArray {
Handle<WasmCompiledModule> module);
static void Reset(Isolate* isolate, WasmCompiledModule* module);
- Address GetEmbeddedMemStartOrNull() const {
- return has_embedded_mem_start()
- ? reinterpret_cast<Address>(embedded_mem_start())
- : nullptr;
- }
-
- Address GetGlobalsStartOrNull() const {
- return has_globals_start() ? reinterpret_cast<Address>(globals_start())
- : nullptr;
- }
-
- uint32_t GetEmbeddedMemSizeOrZero() const {
- return has_embedded_mem_size() ? embedded_mem_size() : 0;
- }
+ inline Address GetGlobalsStartOrNull() const;
uint32_t default_mem_size() const;
- void ResetSpecializationMemInfoIfNeeded();
- static void SetSpecializationMemInfoFrom(
- Factory* factory, Handle<WasmCompiledModule> compiled_module,
- Handle<JSArrayBuffer> buffer);
static void SetGlobalsStartAddressFrom(
Factory* factory, Handle<WasmCompiledModule> compiled_module,
Handle<JSArrayBuffer> buffer);
@@ -470,8 +461,7 @@ class WasmCompiledModule : public FixedArray {
public:
// Allow to call method on WasmSharedModuleData also on this object.
-#define FORWARD_SHARED(type, name) \
- type name() { return shared()->name(); }
+#define FORWARD_SHARED(type, name) inline type name();
FORWARD_SHARED(SeqOneByteString*, module_bytes)
FORWARD_SHARED(wasm::WasmModule*, module)
FORWARD_SHARED(Script*, script)
@@ -524,11 +514,11 @@ class WasmCompiledModule : public FixedArray {
// Returns true if the position is valid inside this module, false otherwise.
bool GetPositionInfo(uint32_t position, Script::PositionInfo* info);
- // Get the asm.js source position from a byte offset.
- // Must only be called if the associated wasm object was created from asm.js.
- static int GetAsmJsSourcePosition(Handle<WasmCompiledModule> compiled_module,
- uint32_t func_index, uint32_t byte_offset,
- bool is_at_number_conversion);
+ // Get the source position from a given function index and byte offset,
+ // for either asm.js or pure WASM modules.
+ static int GetSourcePosition(Handle<WasmCompiledModule> compiled_module,
+ uint32_t func_index, uint32_t byte_offset,
+ bool is_at_number_conversion);
// Compute the disassembly of a wasm function.
// Returns the disassembly string and a list of <byte_offset, line, column>
@@ -576,9 +566,7 @@ class WasmCompiledModule : public FixedArray {
Handle<Code> caller, int offset,
int func_index, bool patch_caller);
- void ReplaceCodeTableForTesting(Handle<FixedArray> testing_table) {
- set_code_table(testing_table);
- }
+ inline void ReplaceCodeTableForTesting(Handle<FixedArray> testing_table);
static void SetTableValue(Isolate* isolate, Handle<FixedArray> table,
int index, Address value);
@@ -689,76 +677,15 @@ class WasmDebugInfo : public FixedArray {
wasm::FunctionSig*);
};
-// TODO(titzer): these should be moved to wasm-objects-inl.h
-CAST_ACCESSOR(WasmInstanceObject)
-CAST_ACCESSOR(WasmMemoryObject)
-CAST_ACCESSOR(WasmModuleObject)
-CAST_ACCESSOR(WasmTableObject)
-
-#define OPTIONAL_ACCESSORS(holder, name, type, offset) \
- bool holder::has_##name() { \
- return !READ_FIELD(this, offset)->IsUndefined(GetIsolate()); \
- } \
- ACCESSORS(holder, name, type, offset)
-
-// WasmModuleObject
-ACCESSORS(WasmModuleObject, compiled_module, WasmCompiledModule,
- kCompiledModuleOffset)
-
-// WasmTableObject
-ACCESSORS(WasmTableObject, functions, FixedArray, kFunctionsOffset)
-ACCESSORS(WasmTableObject, maximum_length, Object, kMaximumLengthOffset)
-ACCESSORS(WasmTableObject, dispatch_tables, FixedArray, kDispatchTablesOffset)
-
-// WasmMemoryObject
-ACCESSORS(WasmMemoryObject, array_buffer, JSArrayBuffer, kArrayBufferOffset)
-SMI_ACCESSORS(WasmMemoryObject, maximum_pages, kMaximumPagesOffset)
-OPTIONAL_ACCESSORS(WasmMemoryObject, instances, WeakFixedArray,
- kInstancesOffset)
-
-// WasmInstanceObject
-ACCESSORS(WasmInstanceObject, compiled_module, WasmCompiledModule,
- kCompiledModuleOffset)
-ACCESSORS(WasmInstanceObject, exports_object, JSObject,
- kExportsObjectOffset)
-OPTIONAL_ACCESSORS(WasmInstanceObject, memory_object, WasmMemoryObject,
- kMemoryObjectOffset)
-OPTIONAL_ACCESSORS(WasmInstanceObject, memory_buffer, JSArrayBuffer,
- kMemoryBufferOffset)
-ACCESSORS(WasmInstanceObject, globals_buffer, JSArrayBuffer,
- kGlobalsBufferOffset)
-OPTIONAL_ACCESSORS(WasmInstanceObject, debug_info, WasmDebugInfo,
- kDebugInfoOffset)
-OPTIONAL_ACCESSORS(WasmInstanceObject, function_tables, FixedArray,
- kFunctionTablesOffset)
-OPTIONAL_ACCESSORS(WasmInstanceObject, signature_tables, FixedArray,
- kSignatureTablesOffset)
-ACCESSORS(WasmInstanceObject, directly_called_instances, FixedArray,
- kDirectlyCalledInstancesOffset)
-
-// WasmSharedModuleData
-ACCESSORS(WasmSharedModuleData, module_bytes, SeqOneByteString,
- kModuleBytesOffset)
-ACCESSORS(WasmSharedModuleData, script, Script, kScriptOffset)
-OPTIONAL_ACCESSORS(WasmSharedModuleData, asm_js_offset_table, ByteArray,
- kAsmJsOffsetTableOffset)
-OPTIONAL_ACCESSORS(WasmSharedModuleData, breakpoint_infos, FixedArray,
- kBreakPointInfosOffset)
-
-OPTIONAL_ACCESSORS(WasmSharedModuleData, lazy_compilation_orchestrator, Foreign,
- kLazyCompilationOrchestratorOffset)
-
-OPTIONAL_ACCESSORS(WasmDebugInfo, locals_names, FixedArray, kLocalsNamesOffset)
-OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entries, FixedArray,
- kCWasmEntriesOffset)
-OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entry_map, Managed<wasm::SignatureMap>,
- kCWasmEntryMapOffset)
-
-#undef OPTIONAL_ACCESSORS
#undef DECL_OOL_QUERY
#undef DECL_OOL_CAST
#undef DECL_GETTER
#undef DECL_OPTIONAL_ACCESSORS
+#undef WCM_CONST_OBJECT
+#undef WCM_LARGE_NUMBER
+#undef WCM_OBJECT_OR_WEAK
+#undef WCM_SMALL_CONST_NUMBER
+#undef WCM_WEAK_LINK
#include "src/objects/object-macros-undef.h"
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index 10bc69dfb2..5f2507996d 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -235,6 +235,8 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_S1x16_OP(AllTrue, "all_true")
// Atomic operations.
+ CASE_U32_OP(AtomicLoad, "atomic_load")
+ CASE_U32_OP(AtomicStore, "atomic_store")
CASE_U32_OP(AtomicAdd, "atomic_add")
CASE_U32_OP(AtomicSub, "atomic_sub")
CASE_U32_OP(AtomicAnd, "atomic_and")
@@ -248,6 +250,34 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
}
}
+#undef CASE_OP
+#undef CASE_I32_OP
+#undef CASE_I64_OP
+#undef CASE_F32_OP
+#undef CASE_F64_OP
+#undef CASE_F32x4_OP
+#undef CASE_I32x4_OP
+#undef CASE_I16x8_OP
+#undef CASE_I8x16_OP
+#undef CASE_S128_OP
+#undef CASE_S32x4_OP
+#undef CASE_S16x8_OP
+#undef CASE_S8x16_OP
+#undef CASE_S1x4_OP
+#undef CASE_S1x8_OP
+#undef CASE_S1x16_OP
+#undef CASE_INT_OP
+#undef CASE_FLOAT_OP
+#undef CASE_ALL_OP
+#undef CASE_SIMD_OP
+#undef CASE_SIMDI_OP
+#undef CASE_SIGN_OP
+#undef CASE_UNSIGNED_OP
+#undef CASE_ALL_SIGN_OP
+#undef CASE_CONVERT_OP
+#undef CASE_L32_OP
+#undef CASE_U32_OP
+
bool WasmOpcodes::IsPrefixOpcode(WasmOpcode opcode) {
switch (opcode) {
#define CHECK_PREFIX(name, opcode) case k##name##Prefix:
@@ -305,23 +335,23 @@ bool IsJSCompatibleSignature(const FunctionSig* sig) {
namespace {
#define DECLARE_SIG_ENUM(name, ...) kSigEnum_##name,
-
enum WasmOpcodeSig : byte {
kSigEnum_None,
FOREACH_SIGNATURE(DECLARE_SIG_ENUM)
};
+#undef DECLARE_SIG_ENUM
#define DECLARE_SIG(name, ...) \
constexpr ValueType kTypes_##name[] = {__VA_ARGS__}; \
constexpr FunctionSig kSig_##name( \
1, static_cast<int>(arraysize(kTypes_##name)) - 1, kTypes_##name);
-
FOREACH_SIGNATURE(DECLARE_SIG)
+#undef DECLARE_SIG
#define DECLARE_SIG_ENTRY(name, ...) &kSig_##name,
-
constexpr const FunctionSig* kSimpleExprSigs[] = {
nullptr, FOREACH_SIGNATURE(DECLARE_SIG_ENTRY)};
+#undef DECLARE_SIG_ENTRY
// The following constexpr functions are used to initialize the constant arrays
// defined below. They must have exactly one return statement, and no switch.
@@ -375,6 +405,8 @@ CONSTEXPR_IF_NOT_GCC_4 std::array<WasmOpcodeSig, 256> kSimdExprSigTable =
CONSTEXPR_IF_NOT_GCC_4 std::array<WasmOpcodeSig, 256> kAtomicExprSigTable =
base::make_array<256>(GetAtomicOpcodeSigIndex);
+#undef CONSTEXPR_IF_NOT_GCC_4
+
} // namespace
FunctionSig* WasmOpcodes::Signature(WasmOpcode opcode) {
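
The sig tables above are built at compile time by handing a per-index generator function to base::make_array. A minimal sketch of that idiom, assuming a local make_array stand-in rather than the real v8::base helper:

#include <array>
#include <cstddef>
#include <utility>

template <std::size_t N, typename F, std::size_t... I>
constexpr auto make_array_impl(F f, std::index_sequence<I...>)
    -> std::array<decltype(f(std::size_t{0})), N> {
  return {{f(I)...}};  // Expands to f(0), f(1), ..., f(N-1).
}

template <std::size_t N, typename F>
constexpr auto make_array(F f) {
  return make_array_impl<N>(f, std::make_index_sequence<N>{});
}

// Toy generator: even opcodes map to signature index 1, odd ones to 0.
constexpr int GetSigIndex(std::size_t opcode) {
  return opcode % 2 == 0 ? 1 : 0;
}

constexpr std::array<int, 256> kSigTable = make_array<256>(GetSigIndex);
static_assert(kSigTable[0] == 1, "table is computed at compile time");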
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index c936958600..2401e0446c 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -14,6 +14,10 @@ namespace v8 {
namespace internal {
namespace wasm {
+// Binary encoding of the module header.
+const uint32_t kWasmMagic = 0x6d736100;
+const uint32_t kWasmVersion = 0x01;
+
// Binary encoding of local types.
enum ValueTypeCode {
kLocalVoid = 0x40,
@@ -415,6 +419,12 @@ constexpr WasmCodePosition kNoCodePosition = -1;
V(S128StoreMem, 0xfd81, s_is)
#define FOREACH_ATOMIC_OPCODE(V) \
+ V(I32AtomicLoad, 0xfe10, i_i) \
+ V(I32AtomicLoad8U, 0xfe12, i_i) \
+ V(I32AtomicLoad16U, 0xfe13, i_i) \
+ V(I32AtomicStore, 0xfe17, i_ii) \
+ V(I32AtomicStore8U, 0xfe19, i_ii) \
+ V(I32AtomicStore16U, 0xfe1a, i_ii) \
V(I32AtomicAdd, 0xfe1e, i_ii) \
V(I32AtomicAdd8U, 0xfe20, i_ii) \
V(I32AtomicAdd16U, 0xfe21, i_ii) \
@@ -646,6 +656,36 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
}
}
};
+
+// Representation of an initializer expression.
+struct WasmInitExpr {
+ enum WasmInitKind {
+ kNone,
+ kGlobalIndex,
+ kI32Const,
+ kI64Const,
+ kF32Const,
+ kF64Const
+ } kind;
+
+ union {
+ int32_t i32_const;
+ int64_t i64_const;
+ float f32_const;
+ double f64_const;
+ uint32_t global_index;
+ } val;
+
+ WasmInitExpr() : kind(kNone) {}
+ explicit WasmInitExpr(int32_t v) : kind(kI32Const) { val.i32_const = v; }
+ explicit WasmInitExpr(int64_t v) : kind(kI64Const) { val.i64_const = v; }
+ explicit WasmInitExpr(float v) : kind(kF32Const) { val.f32_const = v; }
+ explicit WasmInitExpr(double v) : kind(kF64Const) { val.f64_const = v; }
+ WasmInitExpr(WasmInitKind kind, uint32_t global_index) : kind(kGlobalIndex) {
+ val.global_index = global_index;
+ }
+};
+
} // namespace wasm
} // namespace internal
} // namespace v8
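
The two header constants added above appear on disk as an eight-byte preamble: the magic "\0asm" followed by a little-endian version word. A minimal standalone sketch of checking that header (not V8's decoder API):

#include <cstddef>
#include <cstdint>
#include <cstring>

const uint32_t kWasmMagic = 0x6d736100;  // "\0asm" read as a little-endian u32.
const uint32_t kWasmVersion = 0x01;

bool HasWasmHeader(const uint8_t* bytes, std::size_t size) {
  if (size < 8) return false;
  uint32_t magic, version;
  std::memcpy(&magic, bytes, 4);  // Assumes a little-endian host.
  std::memcpy(&version, bytes + 4, 4);
  return magic == kWasmMagic && version == kWasmVersion;
}

int main() {
  const uint8_t header[] = {0x00, 0x61, 0x73, 0x6d, 0x01, 0x00, 0x00, 0x00};
  return HasWasmHeader(header, sizeof(header)) ? 0 : 1;
}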
diff --git a/deps/v8/src/wasm/wasm-result.h b/deps/v8/src/wasm/wasm-result.h
index 9ae8c33f2f..7744b42923 100644
--- a/deps/v8/src/wasm/wasm-result.h
+++ b/deps/v8/src/wasm/wasm-result.h
@@ -11,8 +11,8 @@
#include "src/base/compiler-specific.h"
#include "src/utils.h"
-#include "src/handles.h"
#include "src/globals.h"
+#include "src/handles.h"
namespace v8 {
namespace internal {
@@ -24,14 +24,15 @@ namespace wasm {
// Base class for Result<T>.
class V8_EXPORT_PRIVATE ResultBase {
protected:
- ResultBase(ResultBase&& other)
- : error_offset_(other.error_offset_),
- error_msg_(std::move(other.error_msg_)) {}
ResultBase() = default;
ResultBase& operator=(ResultBase&& other) = default;
public:
+ ResultBase(ResultBase&& other)
+ : error_offset_(other.error_offset_),
+ error_msg_(std::move(other.error_msg_)) {}
+
void error(uint32_t offset, std::string error_msg);
void PRINTF_FORMAT(2, 3) error(const char* format, ...) {
diff --git a/deps/v8/src/wasm/wasm-text.cc b/deps/v8/src/wasm/wasm-text.cc
index e596d6770a..e1fea08d31 100644
--- a/deps/v8/src/wasm/wasm-text.cc
+++ b/deps/v8/src/wasm/wasm-text.cc
@@ -14,20 +14,9 @@
#include "src/wasm/wasm-opcodes.h"
#include "src/zone/zone.h"
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wheader-hygiene"
-#endif
-
-using namespace v8;
-using namespace v8::internal;
-using namespace v8::internal::wasm;
-
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic pop
-#endif
+namespace v8 {
+namespace internal {
+namespace wasm {
namespace {
bool IsValidFunctionName(const Vector<const char> &name) {
@@ -43,10 +32,9 @@ bool IsValidFunctionName(const Vector<const char> &name) {
} // namespace
-void wasm::PrintWasmText(const WasmModule *module,
- const ModuleWireBytes &wire_bytes, uint32_t func_index,
- std::ostream &os,
- debug::WasmDisassembly::OffsetTable *offset_table) {
+void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
+ uint32_t func_index, std::ostream& os,
+ debug::WasmDisassembly::OffsetTable* offset_table) {
DCHECK_NOT_NULL(module);
DCHECK_GT(module->functions.size(), func_index);
const WasmFunction *fun = &module->functions[func_index];
@@ -181,6 +169,7 @@ void wasm::PrintWasmText(const WasmModule *module,
CASE_CONST(I64, i64, int64_t)
CASE_CONST(F32, f32, float)
CASE_CONST(F64, f64, double)
+#undef CASE_CONST
#define CASE_OPCODE(opcode, _, __) case kExpr##opcode:
FOREACH_LOAD_MEM_OPCODE(CASE_OPCODE)
@@ -201,6 +190,21 @@ void wasm::PrintWasmText(const WasmModule *module,
case kExprSelect:
os << WasmOpcodes::OpcodeName(opcode);
break;
+ case kAtomicPrefix: {
+ WasmOpcode atomic_opcode = i.prefixed_opcode();
+ switch (atomic_opcode) {
+ FOREACH_ATOMIC_OPCODE(CASE_OPCODE) {
+ MemoryAccessOperand<false> operand(&i, i.pc(), kMaxUInt32);
+ os << WasmOpcodes::OpcodeName(atomic_opcode)
+ << " offset=" << operand.offset
+ << " align=" << (1ULL << operand.alignment);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
       // This group is just printed by its internal opcode name, as these
       // opcodes should never be shown to end-users.
@@ -211,9 +215,9 @@ void wasm::PrintWasmText(const WasmModule *module,
FOREACH_SIMD_1_OPERAND_OPCODE(CASE_OPCODE)
FOREACH_SIMD_MASK_OPERAND_OPCODE(CASE_OPCODE)
FOREACH_SIMD_MEM_OPCODE(CASE_OPCODE)
- FOREACH_ATOMIC_OPCODE(CASE_OPCODE)
os << WasmOpcodes::OpcodeName(opcode);
break;
+#undef CASE_OPCODE
default:
UNREACHABLE();
@@ -225,3 +229,7 @@ void wasm::PrintWasmText(const WasmModule *module,
DCHECK_EQ(0, control_depth);
DCHECK(i.ok());
}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
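
The new kAtomicPrefix case above relies on wasm's prefix-opcode encoding: 0xfe introduces a second opcode byte that selects the atomic operation. A minimal sketch of that decoding over a raw byte stream, assuming illustrative reads rather than V8's iterator API:

#include <cstdint>
#include <cstdio>

const uint8_t kAtomicPrefix = 0xfe;

void PrintOpcode(const uint8_t* pc) {
  if (pc[0] == kAtomicPrefix) {
    // Full opcode is (prefix << 8) | second byte, e.g. 0xfe1e = I32AtomicAdd.
    uint16_t opcode = (static_cast<uint16_t>(pc[0]) << 8) | pc[1];
    std::printf("atomic opcode 0x%04x\n", opcode);
  } else {
    std::printf("plain opcode 0x%02x\n", pc[0]);
  }
}

int main() {
  const uint8_t code[] = {0xfe, 0x1e};  // I32AtomicAdd in the table above.
  PrintOpcode(code);
  return 0;
}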
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 36547d0c94..27ffff5375 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -56,13 +56,13 @@ void Assembler::emitw(uint16_t x) {
void Assembler::emit_code_target(Handle<Code> target, RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
RecordRelocInfo(rmode);
- int current = code_targets_.length();
+ int current = static_cast<int>(code_targets_.size());
if (current > 0 && !target.is_null() &&
- code_targets_.last().address() == target.address()) {
+ code_targets_.back().address() == target.address()) {
// Optimization if we keep jumping to the same code target.
emitl(current - 1);
} else {
- code_targets_.Add(target);
+ code_targets_.push_back(target);
emitl(current);
}
}
@@ -234,9 +234,9 @@ void Assembler::emit_vex_prefix(XMMRegister reg, XMMRegister vreg,
void Assembler::emit_vex_prefix(Register reg, Register vreg, Register rm,
VectorLength l, SIMDPrefix pp, LeadingOpcode mm,
VexW w) {
- XMMRegister ireg = {reg.code()};
- XMMRegister ivreg = {vreg.code()};
- XMMRegister irm = {rm.code()};
+ XMMRegister ireg = XMMRegister::from_code(reg.code());
+ XMMRegister ivreg = XMMRegister::from_code(vreg.code());
+ XMMRegister irm = XMMRegister::from_code(rm.code());
emit_vex_prefix(ireg, ivreg, irm, l, pp, mm, w);
}
@@ -258,8 +258,8 @@ void Assembler::emit_vex_prefix(XMMRegister reg, XMMRegister vreg,
void Assembler::emit_vex_prefix(Register reg, Register vreg, const Operand& rm,
VectorLength l, SIMDPrefix pp, LeadingOpcode mm,
VexW w) {
- XMMRegister ireg = {reg.code()};
- XMMRegister ivreg = {vreg.code()};
+ XMMRegister ireg = XMMRegister::from_code(reg.code());
+ XMMRegister ivreg = XMMRegister::from_code(vreg.code());
emit_vex_prefix(ireg, ivreg, rm, l, pp, mm, w);
}
@@ -462,7 +462,7 @@ void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
DCHECK(is_uint2(scale));
// Use SIB with no index register only for base rsp or r12. Otherwise we
// would skip the SIB byte entirely.
- DCHECK(!index.is(rsp) || base.is(rsp) || base.is(r12));
+ DCHECK(index != rsp || base == rsp || base == r12);
buf_[1] = (scale << 6) | (index.low_bits() << 3) | base.low_bits();
rex_ |= index.high_bit() << 1 | base.high_bit();
len_ = 2;
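
The emit_code_target change above keeps the same deduplication: consecutive emissions of the same target reuse the previous table slot instead of growing the vector. A minimal sketch of that bookkeeping, using int64_t addresses as stand-ins for Handle<Code>:

#include <cstdint>
#include <vector>

class CodeTargetTable {
 public:
  // Returns the index that would be emitted into the instruction stream.
  int Record(int64_t target_address) {
    int current = static_cast<int>(code_targets_.size());
    if (current > 0 && code_targets_.back() == target_address) {
      return current - 1;  // Same target as last time: reuse its slot.
    }
    code_targets_.push_back(target_address);
    return current;
  }

 private:
  std::vector<int64_t> code_targets_;
};

int main() {
  CodeTargetTable table;
  int a = table.Record(0x1000);  // index 0
  int b = table.Record(0x1000);  // deduplicated: index 0 again
  int c = table.Record(0x2000);  // index 1
  return (a == 0 && b == 0 && c == 1) ? 0 : 1;
}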
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 68699a71dc..d246e65f62 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -148,12 +148,12 @@ void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
Operand::Operand(Register base, int32_t disp) : rex_(0) {
len_ = 1;
- if (base.is(rsp) || base.is(r12)) {
+ if (base == rsp || base == r12) {
// SIB byte is needed to encode (rsp + offset) or (r12 + offset).
set_sib(times_1, rsp, base);
}
- if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
+ if (disp == 0 && base != rbp && base != r13) {
set_modrm(0, base);
} else if (is_int8(disp)) {
set_modrm(1, base);
@@ -169,10 +169,10 @@ Operand::Operand(Register base,
Register index,
ScaleFactor scale,
int32_t disp) : rex_(0) {
- DCHECK(!index.is(rsp));
+ DCHECK(index != rsp);
len_ = 1;
set_sib(scale, index, base);
- if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
+ if (disp == 0 && base != rbp && base != r13) {
// This call to set_modrm doesn't overwrite the REX.B (or REX.X) bits
// possibly set by set_sib.
set_modrm(0, rsp);
@@ -189,7 +189,7 @@ Operand::Operand(Register base,
Operand::Operand(Register index,
ScaleFactor scale,
int32_t disp) : rex_(0) {
- DCHECK(!index.is(rsp));
+ DCHECK(index != rsp);
len_ = 1;
set_modrm(0, rsp);
set_sib(scale, index, rbp);
@@ -300,7 +300,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
// Implementation of Assembler.
Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
- : AssemblerBase(isolate_data, buffer, buffer_size), code_targets_(100) {
+ : AssemblerBase(isolate_data, buffer, buffer_size) {
// Clear the buffer in debug mode unless it was provided by the
   // caller, in which case we can't be sure it's okay to overwrite
// existing code in it.
@@ -310,6 +310,7 @@ Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
}
#endif
+ code_targets_.reserve(100);
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
}
@@ -654,7 +655,7 @@ void Assembler::immediate_arithmetic_op(byte subcode,
emit(0x83);
emit_modrm(subcode, dst);
emit(src.value_);
- } else if (dst.is(rax)) {
+ } else if (dst == rax) {
emit(0x05 | (subcode << 3));
emit(src);
} else {
@@ -692,7 +693,7 @@ void Assembler::immediate_arithmetic_op_16(byte subcode,
emit(0x83);
emit_modrm(subcode, dst);
emit(src.value_);
- } else if (dst.is(rax)) {
+ } else if (dst == rax) {
emit(0x05 | (subcode << 3));
emitw(src.value_);
} else {
@@ -2111,8 +2112,8 @@ void Assembler::xchgw(Register reg, const Operand& op) {
void Assembler::emit_xchg(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
- if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
- Register other = src.is(rax) ? dst : src;
+ if (src == rax || dst == rax) { // Single-byte encoding
+ Register other = src == rax ? dst : src;
emit_rex(other, size);
emit(0x90 | other.low_bits());
} else if (dst.low_bits() == 4) {
@@ -2234,7 +2235,7 @@ void Assembler::emit_test(Register reg, Immediate mask, int size) {
} else {
emit_rex(reg, size);
}
- if (reg.is(rax)) {
+ if (reg == rax) {
emit(byte_operand ? 0xA8 : 0xA9);
} else {
emit(byte_operand ? 0xF6 : 0xF7);
@@ -4110,7 +4111,7 @@ void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1,
void Assembler::vmovd(XMMRegister dst, Register src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- XMMRegister isrc = {src.code()};
+ XMMRegister isrc = XMMRegister::from_code(src.code());
emit_vex_prefix(dst, xmm0, isrc, kL128, k66, k0F, kW0);
emit(0x6e);
emit_sse_operand(dst, src);
@@ -4129,7 +4130,7 @@ void Assembler::vmovd(XMMRegister dst, const Operand& src) {
void Assembler::vmovd(Register dst, XMMRegister src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- XMMRegister idst = {dst.code()};
+ XMMRegister idst = XMMRegister::from_code(dst.code());
emit_vex_prefix(src, xmm0, idst, kL128, k66, k0F, kW0);
emit(0x7e);
emit_sse_operand(src, dst);
@@ -4139,7 +4140,7 @@ void Assembler::vmovd(Register dst, XMMRegister src) {
void Assembler::vmovq(XMMRegister dst, Register src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- XMMRegister isrc = {src.code()};
+ XMMRegister isrc = XMMRegister::from_code(src.code());
emit_vex_prefix(dst, xmm0, isrc, kL128, k66, k0F, kW1);
emit(0x6e);
emit_sse_operand(dst, src);
@@ -4158,7 +4159,7 @@ void Assembler::vmovq(XMMRegister dst, const Operand& src) {
void Assembler::vmovq(Register dst, XMMRegister src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- XMMRegister idst = {dst.code()};
+ XMMRegister idst = XMMRegister::from_code(dst.code());
emit_vex_prefix(src, xmm0, idst, kL128, k66, k0F, kW1);
emit(0x7e);
emit_sse_operand(src, dst);
@@ -4474,7 +4475,7 @@ void Assembler::bmi2l(SIMDPrefix pp, byte op, Register reg, Register vreg,
void Assembler::rorxq(Register dst, Register src, byte imm8) {
DCHECK(IsEnabled(BMI2));
DCHECK(is_uint8(imm8));
- Register vreg = {0}; // VEX.vvvv unused
+ Register vreg = Register::from_code<0>(); // VEX.vvvv unused
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, vreg, src, kLZ, kF2, k0F3A, kW1);
emit(0xF0);
@@ -4486,7 +4487,7 @@ void Assembler::rorxq(Register dst, Register src, byte imm8) {
void Assembler::rorxq(Register dst, const Operand& src, byte imm8) {
DCHECK(IsEnabled(BMI2));
DCHECK(is_uint8(imm8));
- Register vreg = {0}; // VEX.vvvv unused
+ Register vreg = Register::from_code<0>(); // VEX.vvvv unused
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, vreg, src, kLZ, kF2, k0F3A, kW1);
emit(0xF0);
@@ -4498,7 +4499,7 @@ void Assembler::rorxq(Register dst, const Operand& src, byte imm8) {
void Assembler::rorxl(Register dst, Register src, byte imm8) {
DCHECK(IsEnabled(BMI2));
DCHECK(is_uint8(imm8));
- Register vreg = {0}; // VEX.vvvv unused
+ Register vreg = Register::from_code<0>(); // VEX.vvvv unused
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, vreg, src, kLZ, kF2, k0F3A, kW0);
emit(0xF0);
@@ -4510,7 +4511,7 @@ void Assembler::rorxl(Register dst, Register src, byte imm8) {
void Assembler::rorxl(Register dst, const Operand& src, byte imm8) {
DCHECK(IsEnabled(BMI2));
DCHECK(is_uint8(imm8));
- Register vreg = {0}; // VEX.vvvv unused
+ Register vreg = Register::from_code<0>(); // VEX.vvvv unused
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, vreg, src, kLZ, kF2, k0F3A, kW0);
emit(0xF0);
@@ -4775,14 +4776,13 @@ void Assembler::pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle) {
}
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
- Register ireg = { reg.code() };
+ Register ireg = Register::from_code(reg.code());
emit_operand(ireg, adr);
}
void Assembler::emit_sse_operand(Register reg, const Operand& adr) {
- Register ireg = {reg.code()};
- emit_operand(ireg, adr);
+ emit_operand(reg, adr);
}
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index db4e7b8c45..c3720784a0 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -39,6 +39,7 @@
#include <deque>
#include <forward_list>
+#include <vector>
#include "src/assembler.h"
#include "src/x64/sse-instr.h"
@@ -96,85 +97,50 @@ const int kNumJSCallerSaved = 5;
// Number of registers for which space is reserved in safepoints.
const int kNumSafepointRegisters = 16;
-// CPU Registers.
-//
-// 1) We would prefer to use an enum, but enum values are assignment-
-// compatible with int, which has caused code-generation bugs.
-//
-// 2) We would prefer to use a class instead of a struct but we don't like
-// the register initialization to depend on the particular initialization
-// order (which appears to be different on OS X, Linux, and Windows for the
-// installed versions of C++ we tried). Using a struct permits C-style
-// "initialization". Also, the Register objects cannot be const as this
-// forces initialization stubs in MSVC, making us dependent on initialization
-// order.
-//
-// 3) By not using an enum, we are possibly preventing the compiler from
-// doing certain constant folds, which may significantly reduce the
-// code generated for some assembly instructions (because they boil down
-// to a few constants). If this is a problem, we could change the code
-// such that we use an enum in optimized mode, and the struct in debug
-// mode. This way we get the compile-time error checking in debug mode
-// and best performance in optimized code.
-//
-struct Register {
- enum Code {
-#define REGISTER_CODE(R) kCode_##R,
- GENERAL_REGISTERS(REGISTER_CODE)
+enum RegisterCode {
+#define REGISTER_CODE(R) kRegCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
- kAfterLast,
- kCode_no_reg = -1
- };
-
- static constexpr int kNumRegisters = Code::kAfterLast;
-
- static Register from_code(int code) {
- DCHECK(code >= 0);
- DCHECK(code < kNumRegisters);
- Register r = {code};
- return r;
- }
- bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
- bool is(Register reg) const { return reg_code == reg.reg_code; }
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
- int bit() const {
- DCHECK(is_valid());
- return 1 << reg_code;
- }
+ kRegAfterLast
+};
- bool is_byte_register() const { return reg_code <= 3; }
+class Register : public RegisterBase<Register, kRegAfterLast> {
+ public:
+ bool is_byte_register() const { return reg_code_ <= 3; }
// Return the high bit of the register code as a 0 or 1. Used often
// when constructing the REX prefix byte.
- int high_bit() const { return reg_code >> 3; }
+ int high_bit() const { return reg_code_ >> 3; }
// Return the 3 low bits of the register code. Used when encoding registers
// in modR/M, SIB, and opcode bytes.
- int low_bits() const { return reg_code & 0x7; }
+ int low_bits() const { return reg_code_ & 0x7; }
- // Unfortunately we can't make this private in a struct when initializing
- // by assignment.
- int reg_code;
+ private:
+ friend class RegisterBase<Register, kRegAfterLast>;
+ explicit constexpr Register(int code) : RegisterBase(code) {}
};
-#define DECLARE_REGISTER(R) constexpr Register R = {Register::kCode_##R};
+static_assert(IS_TRIVIALLY_COPYABLE(Register) &&
+ sizeof(Register) == sizeof(int),
+ "Register can efficiently be passed by value");
+
+#define DECLARE_REGISTER(R) \
+ constexpr Register R = Register::from_code<kRegCode_##R>();
GENERAL_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
-constexpr Register no_reg = {Register::kCode_no_reg};
+constexpr Register no_reg = Register::no_reg();
#ifdef _WIN64
// Windows calling convention
-constexpr Register arg_reg_1 = {Register::kCode_rcx};
-constexpr Register arg_reg_2 = {Register::kCode_rdx};
-constexpr Register arg_reg_3 = {Register::kCode_r8};
-constexpr Register arg_reg_4 = {Register::kCode_r9};
+constexpr Register arg_reg_1 = rcx;
+constexpr Register arg_reg_2 = rdx;
+constexpr Register arg_reg_3 = r8;
+constexpr Register arg_reg_4 = r9;
#else
// AMD64 calling convention
-constexpr Register arg_reg_1 = {Register::kCode_rdi};
-constexpr Register arg_reg_2 = {Register::kCode_rsi};
-constexpr Register arg_reg_3 = {Register::kCode_rdx};
-constexpr Register arg_reg_4 = {Register::kCode_rcx};
+constexpr Register arg_reg_1 = rdi;
+constexpr Register arg_reg_2 = rsi;
+constexpr Register arg_reg_3 = rdx;
+constexpr Register arg_reg_4 = rcx;
#endif // _WIN64
@@ -219,41 +185,31 @@ constexpr Register arg_reg_4 = {Register::kCode_rcx};
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
-struct XMMRegister {
- enum Code {
-#define REGISTER_CODE(R) kCode_##R,
- DOUBLE_REGISTERS(REGISTER_CODE)
+enum DoubleRegisterCode {
+#define REGISTER_CODE(R) kDoubleCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
- kAfterLast,
- kCode_no_reg = -1
- };
-
- static constexpr int kMaxNumRegisters = Code::kAfterLast;
-
- static XMMRegister from_code(int code) {
- XMMRegister result = {code};
- return result;
- }
-
- bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
- bool is(XMMRegister reg) const { return reg_code == reg.reg_code; }
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
+ kDoubleAfterLast
+};
+class XMMRegister : public RegisterBase<XMMRegister, kDoubleAfterLast> {
+ public:
// Return the high bit of the register code as a 0 or 1. Used often
// when constructing the REX prefix byte.
- int high_bit() const { return reg_code >> 3; }
+ int high_bit() const { return reg_code_ >> 3; }
// Return the 3 low bits of the register code. Used when encoding registers
// in modR/M, SIB, and opcode bytes.
- int low_bits() const { return reg_code & 0x7; }
+ int low_bits() const { return reg_code_ & 0x7; }
- // Unfortunately we can't make this private in a struct when initializing
- // by assignment.
- int reg_code;
+ private:
+ friend class RegisterBase<XMMRegister, kDoubleAfterLast>;
+ explicit constexpr XMMRegister(int code) : RegisterBase(code) {}
};
+static_assert(IS_TRIVIALLY_COPYABLE(XMMRegister) &&
+ sizeof(XMMRegister) == sizeof(int),
+ "XMMRegister can efficiently be passed by value");
+
typedef XMMRegister FloatRegister;
typedef XMMRegister DoubleRegister;
@@ -261,10 +217,10 @@ typedef XMMRegister DoubleRegister;
typedef XMMRegister Simd128Register;
#define DECLARE_REGISTER(R) \
- constexpr DoubleRegister R = {DoubleRegister::kCode_##R};
+ constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
DOUBLE_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
-constexpr DoubleRegister no_double_reg = {DoubleRegister::kCode_no_reg};
+constexpr DoubleRegister no_double_reg = DoubleRegister::no_reg();
enum Condition {
// any value < 0 is considered no_condition
@@ -1531,13 +1487,11 @@ class Assembler : public AssemblerBase {
#undef AVX_SP_3
void vpsrlq(XMMRegister dst, XMMRegister src, byte imm8) {
- XMMRegister iop = {2};
- vpd(0x73, iop, dst, src);
+ vpd(0x73, xmm2, dst, src);
emit(imm8);
}
void vpsllq(XMMRegister dst, XMMRegister src, byte imm8) {
- XMMRegister iop = {6};
- vpd(0x73, iop, dst, src);
+ vpd(0x73, xmm6, dst, src);
emit(imm8);
}
void vcvtss2sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1547,67 +1501,67 @@ class Assembler : public AssemblerBase {
vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
}
void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, Register src2) {
- XMMRegister isrc2 = {src2.code()};
+ XMMRegister isrc2 = XMMRegister::from_code(src2.code());
vinstr(0x2a, dst, src1, isrc2, kF2, k0F, kW0);
}
void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
vinstr(0x2a, dst, src1, src2, kF2, k0F, kW0);
}
void vcvtlsi2ss(XMMRegister dst, XMMRegister src1, Register src2) {
- XMMRegister isrc2 = {src2.code()};
+ XMMRegister isrc2 = XMMRegister::from_code(src2.code());
vinstr(0x2a, dst, src1, isrc2, kF3, k0F, kW0);
}
void vcvtlsi2ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
vinstr(0x2a, dst, src1, src2, kF3, k0F, kW0);
}
void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, Register src2) {
- XMMRegister isrc2 = {src2.code()};
+ XMMRegister isrc2 = XMMRegister::from_code(src2.code());
vinstr(0x2a, dst, src1, isrc2, kF3, k0F, kW1);
}
void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
vinstr(0x2a, dst, src1, src2, kF3, k0F, kW1);
}
void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, Register src2) {
- XMMRegister isrc2 = {src2.code()};
+ XMMRegister isrc2 = XMMRegister::from_code(src2.code());
vinstr(0x2a, dst, src1, isrc2, kF2, k0F, kW1);
}
void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
vinstr(0x2a, dst, src1, src2, kF2, k0F, kW1);
}
void vcvttss2si(Register dst, XMMRegister src) {
- XMMRegister idst = {dst.code()};
+ XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW0);
}
void vcvttss2si(Register dst, const Operand& src) {
- XMMRegister idst = {dst.code()};
+ XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW0);
}
void vcvttsd2si(Register dst, XMMRegister src) {
- XMMRegister idst = {dst.code()};
+ XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW0);
}
void vcvttsd2si(Register dst, const Operand& src) {
- XMMRegister idst = {dst.code()};
+ XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW0);
}
void vcvttss2siq(Register dst, XMMRegister src) {
- XMMRegister idst = {dst.code()};
+ XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW1);
}
void vcvttss2siq(Register dst, const Operand& src) {
- XMMRegister idst = {dst.code()};
+ XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW1);
}
void vcvttsd2siq(Register dst, XMMRegister src) {
- XMMRegister idst = {dst.code()};
+ XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW1);
}
void vcvttsd2siq(Register dst, const Operand& src) {
- XMMRegister idst = {dst.code()};
+ XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW1);
}
void vcvtsd2si(Register dst, XMMRegister src) {
- XMMRegister idst = {dst.code()};
+ XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x2d, idst, xmm0, src, kF2, k0F, kW0);
}
void vucomisd(XMMRegister dst, XMMRegister src) {
@@ -1664,11 +1618,11 @@ class Assembler : public AssemblerBase {
vpd(0x11, src, xmm0, dst);
}
void vmovmskps(Register dst, XMMRegister src) {
- XMMRegister idst = {dst.code()};
+ XMMRegister idst = XMMRegister::from_code(dst.code());
vps(0x50, idst, xmm0, src);
}
void vmovmskpd(Register dst, XMMRegister src) {
- XMMRegister idst = {dst.code()};
+ XMMRegister idst = XMMRegister::from_code(dst.code());
vpd(0x50, idst, xmm0, src);
}
void vcmpps(XMMRegister dst, XMMRegister src1, XMMRegister src2, int8_t cmp) {
@@ -1717,37 +1671,31 @@ class Assembler : public AssemblerBase {
vinstr(0xF0, dst, xmm0, src, kF2, k0F, kWIG);
}
void vpsllw(XMMRegister dst, XMMRegister src, int8_t imm8) {
- XMMRegister iop = {6};
- vinstr(0x71, iop, dst, src, k66, k0F, kWIG);
+ vinstr(0x71, xmm6, dst, src, k66, k0F, kWIG);
emit(imm8);
}
void vpsrlw(XMMRegister dst, XMMRegister src, int8_t imm8) {
- XMMRegister iop = {2};
- vinstr(0x71, iop, dst, src, k66, k0F, kWIG);
+ vinstr(0x71, xmm2, dst, src, k66, k0F, kWIG);
emit(imm8);
}
void vpsraw(XMMRegister dst, XMMRegister src, int8_t imm8) {
- XMMRegister iop = {4};
- vinstr(0x71, iop, dst, src, k66, k0F, kWIG);
+ vinstr(0x71, xmm4, dst, src, k66, k0F, kWIG);
emit(imm8);
}
void vpslld(XMMRegister dst, XMMRegister src, int8_t imm8) {
- XMMRegister iop = {6};
- vinstr(0x72, iop, dst, src, k66, k0F, kWIG);
+ vinstr(0x72, xmm6, dst, src, k66, k0F, kWIG);
emit(imm8);
}
void vpsrld(XMMRegister dst, XMMRegister src, int8_t imm8) {
- XMMRegister iop = {2};
- vinstr(0x72, iop, dst, src, k66, k0F, kWIG);
+ vinstr(0x72, xmm2, dst, src, k66, k0F, kWIG);
emit(imm8);
}
void vpsrad(XMMRegister dst, XMMRegister src, int8_t imm8) {
- XMMRegister iop = {4};
- vinstr(0x72, iop, dst, src, k66, k0F, kWIG);
+ vinstr(0x72, xmm4, dst, src, k66, k0F, kWIG);
emit(imm8);
}
void vpextrb(Register dst, XMMRegister src, int8_t imm8) {
- XMMRegister idst = {dst.code()};
+ XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x14, src, xmm0, idst, k66, k0F3A, kW0);
emit(imm8);
}
@@ -1756,7 +1704,7 @@ class Assembler : public AssemblerBase {
emit(imm8);
}
void vpextrw(Register dst, XMMRegister src, int8_t imm8) {
- XMMRegister idst = {dst.code()};
+ XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0xc5, idst, xmm0, src, k66, k0F, kW0);
emit(imm8);
}
@@ -1765,7 +1713,7 @@ class Assembler : public AssemblerBase {
emit(imm8);
}
void vpextrd(Register dst, XMMRegister src, int8_t imm8) {
- XMMRegister idst = {dst.code()};
+ XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x16, src, xmm0, idst, k66, k0F3A, kW0);
emit(imm8);
}
@@ -1774,7 +1722,7 @@ class Assembler : public AssemblerBase {
emit(imm8);
}
void vpinsrb(XMMRegister dst, XMMRegister src1, Register src2, int8_t imm8) {
- XMMRegister isrc = {src2.code()};
+ XMMRegister isrc = XMMRegister::from_code(src2.code());
vinstr(0x20, dst, src1, isrc, k66, k0F3A, kW0);
emit(imm8);
}
@@ -1784,7 +1732,7 @@ class Assembler : public AssemblerBase {
emit(imm8);
}
void vpinsrw(XMMRegister dst, XMMRegister src1, Register src2, int8_t imm8) {
- XMMRegister isrc = {src2.code()};
+ XMMRegister isrc = XMMRegister::from_code(src2.code());
vinstr(0xc4, dst, src1, isrc, k66, k0F, kW0);
emit(imm8);
}
@@ -1794,7 +1742,7 @@ class Assembler : public AssemblerBase {
emit(imm8);
}
void vpinsrd(XMMRegister dst, XMMRegister src1, Register src2, int8_t imm8) {
- XMMRegister isrc = {src2.code()};
+ XMMRegister isrc = XMMRegister::from_code(src2.code());
vinstr(0x22, dst, src1, isrc, k66, k0F3A, kW0);
emit(imm8);
}
@@ -1838,54 +1786,18 @@ class Assembler : public AssemblerBase {
void bextrl(Register dst, const Operand& src1, Register src2) {
bmi1l(0xf7, dst, src2, src1);
}
- void blsiq(Register dst, Register src) {
- Register ireg = {3};
- bmi1q(0xf3, ireg, dst, src);
- }
- void blsiq(Register dst, const Operand& src) {
- Register ireg = {3};
- bmi1q(0xf3, ireg, dst, src);
- }
- void blsil(Register dst, Register src) {
- Register ireg = {3};
- bmi1l(0xf3, ireg, dst, src);
- }
- void blsil(Register dst, const Operand& src) {
- Register ireg = {3};
- bmi1l(0xf3, ireg, dst, src);
- }
- void blsmskq(Register dst, Register src) {
- Register ireg = {2};
- bmi1q(0xf3, ireg, dst, src);
- }
- void blsmskq(Register dst, const Operand& src) {
- Register ireg = {2};
- bmi1q(0xf3, ireg, dst, src);
- }
- void blsmskl(Register dst, Register src) {
- Register ireg = {2};
- bmi1l(0xf3, ireg, dst, src);
- }
- void blsmskl(Register dst, const Operand& src) {
- Register ireg = {2};
- bmi1l(0xf3, ireg, dst, src);
- }
- void blsrq(Register dst, Register src) {
- Register ireg = {1};
- bmi1q(0xf3, ireg, dst, src);
- }
- void blsrq(Register dst, const Operand& src) {
- Register ireg = {1};
- bmi1q(0xf3, ireg, dst, src);
- }
- void blsrl(Register dst, Register src) {
- Register ireg = {1};
- bmi1l(0xf3, ireg, dst, src);
- }
- void blsrl(Register dst, const Operand& src) {
- Register ireg = {1};
- bmi1l(0xf3, ireg, dst, src);
- }
+ void blsiq(Register dst, Register src) { bmi1q(0xf3, rbx, dst, src); }
+ void blsiq(Register dst, const Operand& src) { bmi1q(0xf3, rbx, dst, src); }
+ void blsil(Register dst, Register src) { bmi1l(0xf3, rbx, dst, src); }
+ void blsil(Register dst, const Operand& src) { bmi1l(0xf3, rbx, dst, src); }
+ void blsmskq(Register dst, Register src) { bmi1q(0xf3, rdx, dst, src); }
+ void blsmskq(Register dst, const Operand& src) { bmi1q(0xf3, rdx, dst, src); }
+ void blsmskl(Register dst, Register src) { bmi1l(0xf3, rdx, dst, src); }
+ void blsmskl(Register dst, const Operand& src) { bmi1l(0xf3, rdx, dst, src); }
+ void blsrq(Register dst, Register src) { bmi1q(0xf3, rcx, dst, src); }
+ void blsrq(Register dst, const Operand& src) { bmi1q(0xf3, rcx, dst, src); }
+ void blsrl(Register dst, Register src) { bmi1l(0xf3, rcx, dst, src); }
+ void blsrl(Register dst, const Operand& src) { bmi1l(0xf3, rcx, dst, src); }
void tzcntq(Register dst, Register src);
void tzcntq(Register dst, const Operand& src);
void tzcntl(Register dst, Register src);
@@ -2511,7 +2423,7 @@ class Assembler : public AssemblerBase {
// are already bound.
std::deque<int> internal_reference_positions_;
- List< Handle<Code> > code_targets_;
+ std::vector<Handle<Code>> code_targets_;
// The following functions help with avoiding allocations of embedded heap
// objects during the code assembly phase. {RequestHeapObject} records the
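
Register and XMMRegister above now derive from a shared CRTP base instead of being C-style structs. A minimal sketch of what such a RegisterBase provides, as an illustrative reduction rather than V8's actual base class:

#include <cassert>

template <typename SubType, int kAfterLastRegister>
class RegisterBase {
 public:
  static constexpr int kNumRegisters = kAfterLastRegister;

  static constexpr SubType no_reg() { return SubType(-1); }

  // Compile-time construction with a static bounds check.
  template <int code>
  static constexpr SubType from_code() {
    static_assert(code >= 0 && code < kNumRegisters, "must be a valid code");
    return SubType(code);
  }

  // Runtime construction with a dynamic bounds check.
  static SubType from_code(int code) {
    assert(code >= 0 && code < kNumRegisters);
    return SubType(code);
  }

  bool is_valid() const { return reg_code_ >= 0; }
  int code() const {
    assert(is_valid());
    return reg_code_;
  }
  bool operator==(SubType other) const { return reg_code_ == other.reg_code_; }
  bool operator!=(SubType other) const { return reg_code_ != other.reg_code_; }

 protected:
  explicit constexpr RegisterBase(int code) : reg_code_(code) {}
  int reg_code_;
};

enum RegisterCode { kRegCode_rax, kRegCode_rcx, kRegAfterLast };

class Register : public RegisterBase<Register, kRegAfterLast> {
  friend class RegisterBase<Register, kRegAfterLast>;
  explicit constexpr Register(int code) : RegisterBase(code) {}
};

constexpr Register rax = Register::from_code<kRegCode_rax>();
static_assert(sizeof(Register) == sizeof(int),
              "Register can efficiently be passed by value");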
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index be3720323d..c24f54a0db 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -82,30 +82,30 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
int double_offset = offset();
// Account for return address and saved regs if input is rsp.
- if (input_reg.is(rsp)) double_offset += 3 * kRegisterSize;
+ if (input_reg == rsp) double_offset += 3 * kRegisterSize;
MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
MemOperand exponent_operand(MemOperand(input_reg,
double_offset + kDoubleSize / 2));
- Register scratch1;
+ Register scratch1 = no_reg;
Register scratch_candidates[3] = { rbx, rdx, rdi };
for (int i = 0; i < 3; i++) {
scratch1 = scratch_candidates[i];
- if (!final_result_reg.is(scratch1) && !input_reg.is(scratch1)) break;
+ if (final_result_reg != scratch1 && input_reg != scratch1) break;
}
// Since we must use rcx for shifts below, use some other register (rax)
   // to calculate the result if rcx is the requested return register.
- Register result_reg = final_result_reg.is(rcx) ? rax : final_result_reg;
+ Register result_reg = final_result_reg == rcx ? rax : final_result_reg;
   // Save rcx if it isn't the return register (and therefore volatile), or if
   // it is the return register, save the temp register we use in its stead
   // for the result.
- Register save_reg = final_result_reg.is(rcx) ? rax : rcx;
+ Register save_reg = final_result_reg == rcx ? rax : rcx;
__ pushq(scratch1);
__ pushq(save_reg);
- bool stash_exponent_copy = !input_reg.is(rsp);
+ bool stash_exponent_copy = input_reg != rsp;
__ movl(scratch1, mantissa_operand);
__ Movsd(kScratchDoubleReg, mantissa_operand);
__ movl(rcx, exponent_operand);
@@ -146,9 +146,9 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
if (stash_exponent_copy) {
__ addp(rsp, Immediate(kDoubleSize));
}
- if (!final_result_reg.is(result_reg)) {
- DCHECK(final_result_reg.is(rcx));
- __ movl(final_result_reg, result_reg);
+ if (final_result_reg != result_reg) {
+ DCHECK(final_result_reg == rcx);
+ __ movl(final_result_reg, result_reg);
}
__ popq(save_reg);
__ popq(scratch1);
@@ -188,7 +188,7 @@ void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
void MathPowStub::Generate(MacroAssembler* masm) {
const Register exponent = MathPowTaggedDescriptor::exponent();
- DCHECK(exponent.is(rdx));
+ DCHECK(exponent == rdx);
const Register scratch = rcx;
const XMMRegister double_result = xmm3;
const XMMRegister double_base = xmm2;
@@ -316,7 +316,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&call_runtime);
// Move base to the correct argument register. Exponent is already in xmm1.
__ Movsd(xmm0, double_base);
- DCHECK(double_exponent.is(xmm1));
+ DCHECK(double_exponent == xmm1);
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(2);
@@ -429,7 +429,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ movp(kCCallArg1, r15); // argv.
__ Move(kCCallArg2, ExternalReference::isolate_address(isolate()));
} else {
- DCHECK_LE(result_size(), 3);
+ DCHECK_LE(result_size(), 2);
// Pass a pointer to the result location as the first argument.
__ leap(kCCallArg0, StackSpaceOperand(kArgExtraStackSpace));
// Pass a pointer to the Arguments object as the second argument.
@@ -442,14 +442,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
if (result_size() > kMaxRegisterResultSize) {
// Read result values stored on stack. Result is stored
     // above the two Arguments object slots on Win64.
- DCHECK_LE(result_size(), 3);
+ DCHECK_LE(result_size(), 2);
__ movq(kReturnRegister0, StackSpaceOperand(kArgExtraStackSpace + 0));
__ movq(kReturnRegister1, StackSpaceOperand(kArgExtraStackSpace + 1));
- if (result_size() > 2) {
- __ movq(kReturnRegister2, StackSpaceOperand(kArgExtraStackSpace + 2));
- }
}
- // Result is in rax, rdx:rax or r8:rdx:rax - do not destroy these registers!
+ // Result is in rax or rdx:rax - do not destroy these registers!
// Check result for exception sentinel.
Label exception_returned;
@@ -610,9 +607,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ bind(&invoke);
__ PushStackHandler();
- // Fake a receiver (NULL).
- __ Push(Immediate(0)); // receiver
-
// Invoke the function by calling through JS entry trampoline builtin and
// pop the faked function when we return. We load the address from an
// external reference instead of inlining the call target address directly
@@ -969,44 +963,82 @@ void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
stub2.GetCode();
}
+RecordWriteStub::Mode RecordWriteStub::GetMode(Code* stub) {
+ byte first_instruction = stub->instruction_start()[0];
+ byte second_instruction = stub->instruction_start()[2];
+
+ if (first_instruction == kTwoByteJumpInstruction) {
+ return INCREMENTAL;
+ }
+
+ DCHECK(first_instruction == kTwoByteNopInstruction);
+
+ if (second_instruction == kTwoByteJumpInstruction) {
+ return INCREMENTAL_COMPACTION;
+ }
+
+ DCHECK(second_instruction == kTwoByteNopInstruction);
+
+ return STORE_BUFFER_ONLY;
+}
+
+void RecordWriteStub::Patch(Code* stub, Mode mode) {
+ switch (mode) {
+ case STORE_BUFFER_ONLY:
+ DCHECK(GetMode(stub) == INCREMENTAL ||
+ GetMode(stub) == INCREMENTAL_COMPACTION);
+ stub->instruction_start()[0] = kTwoByteNopInstruction;
+ stub->instruction_start()[2] = kTwoByteNopInstruction;
+ break;
+ case INCREMENTAL:
+ DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
+ stub->instruction_start()[0] = kTwoByteJumpInstruction;
+ break;
+ case INCREMENTAL_COMPACTION:
+ DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
+ stub->instruction_start()[0] = kTwoByteNopInstruction;
+ stub->instruction_start()[2] = kTwoByteJumpInstruction;
+ break;
+ }
+ DCHECK(GetMode(stub) == mode);
+ Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(), 7);
+}
 // Takes the input in 3 registers: address_, value_, and object_. A pointer to
// the value has just been written into the object, now this stub makes sure
// we keep the GC informed. The word in the object where the value has been
// written is in the address register.
void RecordWriteStub::Generate(MacroAssembler* masm) {
- Label skip_to_incremental_noncompacting;
- Label skip_to_incremental_compacting;
+ Label skip_to_incremental;
+ Label second_instr;
// The first two instructions are generated with labels so as to get the
// offset fixed up correctly by the bind(Label*) call. We patch it back and
   // forth between a compare instruction (a nop in this position) and the
// real branch when we start and stop incremental heap marking.
// See RecordWriteStub::Patch for details.
- __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
- __ jmp(&skip_to_incremental_compacting, Label::kFar);
+ __ jmp(&skip_to_incremental, Label::kNear);
+ __ bind(&second_instr);
+ __ jmp(&skip_to_incremental, Label::kNear);
if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
} else {
__ ret(0);
}
- __ bind(&skip_to_incremental_noncompacting);
- GenerateIncremental(masm, INCREMENTAL);
+ __ bind(&skip_to_incremental);
- __ bind(&skip_to_incremental_compacting);
- GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+ GenerateIncremental(masm, &second_instr);
// Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
// Will be checked in IncrementalMarking::ActivateGeneratedStub.
masm->set_byte_at(0, kTwoByteNopInstruction);
- masm->set_byte_at(2, kFiveByteNopInstruction);
+ masm->set_byte_at(2, kTwoByteNopInstruction);
}
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm,
+ Label* second_instr) {
regs_.Save(masm);
if (remembered_set_action() == EMIT_REMEMBERED_SET) {
@@ -1023,17 +1055,17 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
// First notify the incremental marker if necessary, then update the
// remembered set.
CheckNeedsToInformIncrementalMarker(
- masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+ masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker,
+ second_instr);
InformIncrementalMarker(masm);
regs_.Restore(masm);
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
__ bind(&dont_need_remembered_set);
}
CheckNeedsToInformIncrementalMarker(
- masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+ masm, kReturnOnNoNeedToInformIncrementalMarker, second_instr);
InformIncrementalMarker(masm);
regs_.Restore(masm);
__ ret(0);
@@ -1043,9 +1075,9 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
Register address =
- arg_reg_1.is(regs_.address()) ? kScratchRegister : regs_.address();
- DCHECK(!address.is(regs_.object()));
- DCHECK(!address.is(arg_reg_1));
+ arg_reg_1 == regs_.address() ? kScratchRegister : regs_.address();
+ DCHECK(address != regs_.object());
+ DCHECK(address != arg_reg_1);
__ Move(address, regs_.address());
__ Move(arg_reg_1, regs_.object());
// TODO(gc) Can we just set address arg2 in the beginning?
@@ -1067,9 +1099,8 @@ void RecordWriteStub::Activate(Code* code) {
}
void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode) {
+ MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
+ Label* second_instr) {
Label need_incremental;
Label need_incremental_pop_object;
@@ -1085,8 +1116,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
} else {
__ ret(0);
}
@@ -1097,24 +1127,22 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// Get the value from the slot.
__ movp(regs_.scratch0(), Operand(regs_.address(), 0));
- if (mode == INCREMENTAL_COMPACTION) {
- Label ensure_not_white;
+ Label ensure_not_white;
+  // If the second instruction is kTwoByteNopInstruction, we're in
+  // noncompacting mode.
+ __ cmpb(Operand(second_instr), Immediate(kTwoByteNopInstruction));
+ __ j(equal, &ensure_not_white, Label::kNear);
+ __ CheckPageFlag(regs_.scratch0(), // Contains value.
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kEvacuationCandidateMask, zero,
+ &ensure_not_white, Label::kNear);
- __ CheckPageFlag(regs_.scratch0(), // Contains value.
- regs_.scratch1(), // Scratch.
- MemoryChunk::kEvacuationCandidateMask,
- zero,
- &ensure_not_white,
- Label::kNear);
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kSkipEvacuationSlotsRecordingMask, zero,
+ &need_incremental);
- __ CheckPageFlag(regs_.object(),
- regs_.scratch1(), // Scratch.
- MemoryChunk::kSkipEvacuationSlotsRecordingMask,
- zero,
- &need_incremental);
-
- __ bind(&ensure_not_white);
- }
+ __ bind(&ensure_not_white);
// We need an extra register for this, so we push the object register
// temporarily.
@@ -1127,8 +1155,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
} else {
__ ret(0);
}
@@ -1536,7 +1563,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
ExternalReference scheduled_exception_address =
ExternalReference::scheduled_exception_address(isolate);
- DCHECK(rdx.is(function_address) || r8.is(function_address));
+ DCHECK(rdx == function_address || r8 == function_address);
// Allocate HandleScope in callee-save registers.
Register prev_next_address_reg = r14;
Register prev_limit_reg = rbx;
@@ -1794,7 +1821,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// It's okay if api_function_address == callback_arg
// but not arguments_arg
- DCHECK(!api_function_address.is(arguments_arg));
+ DCHECK(api_function_address != arguments_arg);
// v8::InvocationCallback's argument.
__ leap(arguments_arg, StackSpaceOperand(0));
@@ -1883,8 +1910,8 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// It's okay if api_function_address == getter_arg
// but not accessor_info_arg or name_arg
- DCHECK(!api_function_address.is(accessor_info_arg));
- DCHECK(!api_function_address.is(name_arg));
+ DCHECK(api_function_address != accessor_info_arg);
+ DCHECK(api_function_address != name_arg);
__ movp(scratch, FieldOperand(callback, AccessorInfo::kJsGetterOffset));
__ movp(api_function_address,
FieldOperand(scratch, Foreign::kForeignAddressOffset));
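
After this change the record-write stub encodes its mode in two two-byte instructions at offsets 0 and 2, each either a cmpb-al nop (0x3c) or a short jmp (0xeb); the five-byte variants are gone. A minimal standalone sketch of the resulting mode decoding (a stand-in mirroring GetMode above, not V8 code):

#include <cstdint>

enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

const uint8_t kTwoByteNopInstruction = 0x3c;   // cmpb al, #imm8
const uint8_t kTwoByteJumpInstruction = 0xeb;  // jmp #imm8

Mode DecodeMode(const uint8_t* stub_start) {
  if (stub_start[0] == kTwoByteJumpInstruction) return INCREMENTAL;
  if (stub_start[2] == kTwoByteJumpInstruction) return INCREMENTAL_COMPACTION;
  return STORE_BUFFER_ONLY;
}

int main() {
  // A jmp at offset 0 routes straight to the incremental path.
  uint8_t stub[4] = {kTwoByteJumpInstruction, 0x02, kTwoByteNopInstruction, 0};
  return DecodeMode(stub) == INCREMENTAL ? 0 : 1;
}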
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index 8f8097b0f4..bba64fcb4a 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -119,49 +119,9 @@ class RecordWriteStub: public PlatformCodeStub {
static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
static const byte kTwoByteJumpInstruction = 0xeb; // Jmp #imm8.
- static const byte kFiveByteNopInstruction = 0x3d; // Cmpl eax, #imm32.
- static const byte kFiveByteJumpInstruction = 0xe9; // Jmp #imm32.
+ static Mode GetMode(Code* stub);
- static Mode GetMode(Code* stub) {
- byte first_instruction = stub->instruction_start()[0];
- byte second_instruction = stub->instruction_start()[2];
-
- if (first_instruction == kTwoByteJumpInstruction) {
- return INCREMENTAL;
- }
-
- DCHECK(first_instruction == kTwoByteNopInstruction);
-
- if (second_instruction == kFiveByteJumpInstruction) {
- return INCREMENTAL_COMPACTION;
- }
-
- DCHECK(second_instruction == kFiveByteNopInstruction);
-
- return STORE_BUFFER_ONLY;
- }
-
- static void Patch(Code* stub, Mode mode) {
- switch (mode) {
- case STORE_BUFFER_ONLY:
- DCHECK(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
- stub->instruction_start()[0] = kTwoByteNopInstruction;
- stub->instruction_start()[2] = kFiveByteNopInstruction;
- break;
- case INCREMENTAL:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- stub->instruction_start()[0] = kTwoByteJumpInstruction;
- break;
- case INCREMENTAL_COMPACTION:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- stub->instruction_start()[0] = kTwoByteNopInstruction;
- stub->instruction_start()[2] = kFiveByteJumpInstruction;
- break;
- }
- DCHECK(GetMode(stub) == mode);
- Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(), 7);
- }
+ static void Patch(Code* stub, Mode mode);
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
@@ -171,50 +131,48 @@ class RecordWriteStub: public PlatformCodeStub {
// that must be preserved and one scratch register provided by the caller.
class RegisterAllocation {
public:
- RegisterAllocation(Register object,
- Register address,
- Register scratch0)
+ RegisterAllocation(Register object, Register address, Register scratch0)
: object_orig_(object),
address_orig_(address),
scratch0_orig_(scratch0),
object_(object),
address_(address),
- scratch0_(scratch0) {
+ scratch0_(scratch0),
+ scratch1_(no_reg) {
DCHECK(!AreAliased(scratch0, object, address, no_reg));
scratch1_ = GetRegThatIsNotRcxOr(object_, address_, scratch0_);
- if (scratch0.is(rcx)) {
+ if (scratch0 == rcx) {
scratch0_ = GetRegThatIsNotRcxOr(object_, address_, scratch1_);
}
- if (object.is(rcx)) {
+ if (object == rcx) {
object_ = GetRegThatIsNotRcxOr(address_, scratch0_, scratch1_);
}
- if (address.is(rcx)) {
+ if (address == rcx) {
address_ = GetRegThatIsNotRcxOr(object_, scratch0_, scratch1_);
}
DCHECK(!AreAliased(scratch0_, object_, address_, rcx));
}
void Save(MacroAssembler* masm) {
- DCHECK(!address_orig_.is(object_));
- DCHECK(object_.is(object_orig_) || address_.is(address_orig_));
+ DCHECK(address_orig_ != object_);
+ DCHECK(object_ == object_orig_ || address_ == address_orig_);
DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
DCHECK(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
DCHECK(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
// We don't have to save scratch0_orig_ because it was given to us as
// a scratch register. But if we had to switch to a different reg then
// we should save the new scratch0_.
- if (!scratch0_.is(scratch0_orig_)) masm->Push(scratch0_);
- if (!rcx.is(scratch0_orig_) &&
- !rcx.is(object_orig_) &&
- !rcx.is(address_orig_)) {
+ if (scratch0_ != scratch0_orig_) masm->Push(scratch0_);
+ if (rcx != scratch0_orig_ && rcx != object_orig_ &&
+ rcx != address_orig_) {
masm->Push(rcx);
}
masm->Push(scratch1_);
- if (!address_.is(address_orig_)) {
+ if (address_ != address_orig_) {
masm->Push(address_);
masm->movp(address_, address_orig_);
}
- if (!object_.is(object_orig_)) {
+ if (object_ != object_orig_) {
masm->Push(object_);
masm->movp(object_, object_orig_);
}
@@ -224,21 +182,20 @@ class RecordWriteStub: public PlatformCodeStub {
// These will have been preserved the entire time, so we just need to move
// them back. Only in one case is the orig_ reg different from the plain
// one, since only one of them can alias with rcx.
- if (!object_.is(object_orig_)) {
+ if (object_ != object_orig_) {
masm->movp(object_orig_, object_);
masm->Pop(object_);
}
- if (!address_.is(address_orig_)) {
+ if (address_ != address_orig_) {
masm->movp(address_orig_, address_);
masm->Pop(address_);
}
masm->Pop(scratch1_);
- if (!rcx.is(scratch0_orig_) &&
- !rcx.is(object_orig_) &&
- !rcx.is(address_orig_)) {
+ if (rcx != scratch0_orig_ && rcx != object_orig_ &&
+ rcx != address_orig_) {
masm->Pop(rcx);
}
- if (!scratch0_.is(scratch0_orig_)) masm->Pop(scratch0_);
+ if (scratch0_ != scratch0_orig_) masm->Pop(scratch0_);
}
// If we have to call into C then we need to save and restore all caller-
@@ -277,11 +234,10 @@ class RecordWriteStub: public PlatformCodeStub {
for (int i = 0; i < Register::kNumRegisters; i++) {
if (RegisterConfiguration::Default()->IsAllocatableGeneralCode(i)) {
Register candidate = Register::from_code(i);
- if (candidate.is(rcx)) continue;
- if (candidate.is(r1)) continue;
- if (candidate.is(r2)) continue;
- if (candidate.is(r3)) continue;
- return candidate;
+ if (candidate != rcx && candidate != r1 && candidate != r2 &&
+ candidate != r3) {
+ return candidate;
+ }
}
}
UNREACHABLE();
@@ -297,11 +253,10 @@ class RecordWriteStub: public PlatformCodeStub {
Major MajorKey() const final { return RecordWrite; }
void Generate(MacroAssembler* masm) override;
- void GenerateIncremental(MacroAssembler* masm, Mode mode);
+ void GenerateIncremental(MacroAssembler* masm, Label* second_instr);
void CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode);
+ MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
+ Label* second_instr);
void InformIncrementalMarker(MacroAssembler* masm);
void Activate(Code* code) override;
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 881b7cf7f2..f9b0cfd1a8 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -160,10 +160,10 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
DCHECK(index >= 0);
int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0;
- int displacement_to_last_argument = base_reg_.is(rsp) ?
- kPCOnStackSize : kFPOnStackSize + kPCOnStackSize;
+ int displacement_to_last_argument =
+ base_reg_ == rsp ? kPCOnStackSize : kFPOnStackSize + kPCOnStackSize;
displacement_to_last_argument += extra_displacement_to_last_argument_;
- if (argument_count_reg_.is(no_reg)) {
+ if (argument_count_reg_ == no_reg) {
// argument[0] is at base_reg_ + displacement_to_last_argument +
// (argument_count_immediate_ + receiver - 1) * kPointerSize.
DCHECK(argument_count_immediate_ + receiver > 0);
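
A minimal sketch of the offset arithmetic in GetArgumentOperand above, with the x64 constants written out (8-byte slots). The per-index term is extrapolated from the argument[0] comment and is an assumption, as is the whole stand-in:

#include <cassert>

const int kPointerSize = 8;
const int kPCOnStackSize = kPointerSize;  // return-address slot
const int kFPOnStackSize = kPointerSize;  // saved-frame-pointer slot

// Offset of argument[index] relative to the base register, assuming a
// receiver slot is present below the arguments.
int ArgumentOffset(bool base_is_rsp, int argument_count, int index) {
  int displacement_to_last_argument =
      base_is_rsp ? kPCOnStackSize : kFPOnStackSize + kPCOnStackSize;
  int receiver = 1;
  return displacement_to_last_argument +
         (argument_count + receiver - 1 - index) * kPointerSize;
}

int main() {
  // With rbp as base: skip saved fp + return pc, then receiver + one arg.
  assert(ArgumentOffset(false, 2, 0) == 16 + 2 * 8);
  return 0;
}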
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index 2097a5b9b1..1214142e6f 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -24,7 +24,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
- const int kDoubleRegsSize = kDoubleSize * XMMRegister::kMaxNumRegisters;
+ const int kDoubleRegsSize = kDoubleSize * XMMRegister::kNumRegisters;
__ subp(rsp, Immediate(kDoubleRegsSize));
const RegisterConfiguration* config = RegisterConfiguration::Default();
@@ -35,7 +35,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Movsd(Operand(rsp, offset), xmm_reg);
}
- const int kFloatRegsSize = kFloatSize * XMMRegister::kMaxNumRegisters;
+ const int kFloatRegsSize = kFloatSize * XMMRegister::kNumRegisters;
__ subp(rsp, Immediate(kFloatRegsSize));
for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
@@ -113,7 +113,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Fill in the float input registers.
int float_regs_offset = FrameDescription::float_registers_offset();
- for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
+ for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
int src_offset = i * kFloatSize;
int dst_offset = i * kFloatSize + float_regs_offset;
__ movl(rcx, Operand(rsp, src_offset));
@@ -123,7 +123,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
+ for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
int dst_offset = i * kDoubleSize + double_regs_offset;
__ popq(Operand(rbx, dst_offset));
}
@@ -196,8 +196,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Movsd(xmm_reg, Operand(rbx, src_offset));
}
- // Push state, pc, and continuation from the last output frame.
- __ Push(Operand(rbx, FrameDescription::state_offset()));
+ // Push pc and continuation from the last output frame.
__ PushQuad(Operand(rbx, FrameDescription::pc_offset()));
__ PushQuad(Operand(rbx, FrameDescription::continuation_offset()));
@@ -212,7 +211,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
Register r = Register::from_code(i);
// Do not restore rsp, simply pop the value into the next register
// and overwrite this afterwards.
- if (r.is(rsp)) {
+ if (r == rsp) {
DCHECK(i > 0);
r = Register::from_code(i - 1);
}
@@ -240,6 +239,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ bind(&done);
}
+bool Deoptimizer::PadTopOfStackRegister() { return false; }
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
if (kPCOnStackSize == 2 * kPointerSize) {
diff --git a/deps/v8/src/x64/eh-frame-x64.cc b/deps/v8/src/x64/eh-frame-x64.cc
index 8604332704..ec4fc11289 100644
--- a/deps/v8/src/x64/eh-frame-x64.cc
+++ b/deps/v8/src/x64/eh-frame-x64.cc
@@ -29,11 +29,11 @@ void EhFrameWriter::WriteInitialStateInCie() {
// static
int EhFrameWriter::RegisterToDwarfCode(Register name) {
switch (name.code()) {
- case Register::kCode_rbp:
+ case kRegCode_rbp:
return kRbpDwarfCode;
- case Register::kCode_rsp:
+ case kRegCode_rsp:
return kRspDwarfCode;
- case Register::kCode_rax:
+ case kRegCode_rax:
return kRaxDwarfCode;
default:
UNIMPLEMENTED();
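RegisterToDwarfCode translates V8's internal register codes into the DWARF register numbers fixed by the System V AMD64 ABI (rax = 0, rbp = 6, rsp = 7). A minimal stand-alone version of the same mapping; the kRegCode_* values below follow x86 instruction-encoding order and are an assumption, not taken from this diff:

    #include <cstdio>
    #include <stdexcept>

    // System V AMD64 DWARF register numbers (the subset V8's writer emits).
    constexpr int kRaxDwarfCode = 0;
    constexpr int kRbpDwarfCode = 6;
    constexpr int kRspDwarfCode = 7;

    // Assumed V8-style register codes, in x86 encoding order.
    enum RegCode { kRegCode_rax = 0, kRegCode_rsp = 4, kRegCode_rbp = 5 };

    int RegisterToDwarfCode(RegCode code) {
      switch (code) {
        case kRegCode_rbp: return kRbpDwarfCode;
        case kRegCode_rsp: return kRspDwarfCode;
        case kRegCode_rax: return kRaxDwarfCode;
        default: throw std::logic_error("register not handled");
      }
    }

    int main() {
      std::printf("rbp->%d rsp->%d rax->%d\n",
                  RegisterToDwarfCode(kRegCode_rbp),
                  RegisterToDwarfCode(kRegCode_rsp),
                  RegisterToDwarfCode(kRegCode_rax));
    }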
diff --git a/deps/v8/src/x64/frame-constants-x64.cc b/deps/v8/src/x64/frame-constants-x64.cc
index c31edc66ab..ab29aed277 100644
--- a/deps/v8/src/x64/frame-constants-x64.cc
+++ b/deps/v8/src/x64/frame-constants-x64.cc
@@ -18,6 +18,10 @@ Register JavaScriptFrame::fp_register() { return rbp; }
Register JavaScriptFrame::context_register() { return rsi; }
Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); }
+int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
+ return register_count;
+}
+
} // namespace internal
} // namespace v8
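The new InterpreterFrameConstants::RegisterStackSlotCount hook is the identity on x64: each interpreter register occupies exactly one stack slot. A port that must keep the stack pointer 16-byte aligned would round up instead; a sketch of that variant, an assumption about other ports rather than anything shown in this diff:

    // Round the slot count up to an even number so that 8-byte slots keep
    // the frame 16-byte aligned.
    int RegisterStackSlotCount(int register_count) {
      return (register_count + 1) & ~1;
    }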
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index b9e3bcc8df..a978a14aaa 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -91,27 +91,6 @@ void TypeofDescriptor::InitializePlatformSpecific(
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return rax; }
-void FastCloneRegExpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rdi, rax, rcx, rdx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rax, rbx, rcx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rax, rbx, rcx, rdx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rdi};
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index d92fe983a6..4255e583e3 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -8,6 +8,7 @@
#include "src/base/division-by-constant.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
+#include "src/callable.h"
#include "src/codegen.h"
#include "src/counters.h"
#include "src/debug/debug.h"
@@ -27,6 +28,15 @@ MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: TurboAssembler(isolate, buffer, size, create_code_object) {}
+TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
+ CodeObjectRequired create_code_object)
+ : Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
+ if (create_code_object == CodeObjectRequired::kYes) {
+ code_object_ =
+ Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
+ }
+}
+
static const int64_t kInvalidRootRegisterDelta = -1;
int64_t TurboAssembler::RootRegisterDelta(ExternalReference other) {
@@ -76,7 +86,7 @@ void MacroAssembler::Load(Register destination, ExternalReference source) {
}
}
// Safe code.
- if (destination.is(rax)) {
+ if (destination == rax) {
load_rax(source);
} else {
Move(kScratchRegister, source);
@@ -94,7 +104,7 @@ void MacroAssembler::Store(ExternalReference destination, Register source) {
}
}
// Safe code.
- if (source.is(rax)) {
+ if (source == rax) {
store_rax(destination);
} else {
Move(kScratchRegister, destination);
@@ -155,17 +165,6 @@ void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
(index << kPointerSizeLog2) - kRootRegisterBias));
}
-void MacroAssembler::LoadRootIndexed(Register destination,
- Register variable_offset,
- int fixed_offset) {
- DCHECK(root_array_available_);
- movp(destination,
- Operand(kRootRegister,
- variable_offset, times_pointer_size,
- (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
-}
-
-
void MacroAssembler::PushRoot(Heap::RootListIndex index) {
DCHECK(root_array_available_);
Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
@@ -185,12 +184,9 @@ void TurboAssembler::CompareRoot(const Operand& with,
cmpp(with, kScratchRegister);
}
-
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
- Register addr,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetFinalAction and_then) {
+ Register addr, Register scratch,
+ SaveFPRegsMode save_fp) {
if (emit_debug_code()) {
Label ok;
JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
@@ -200,7 +196,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
// Load store buffer top.
ExternalReference store_buffer =
ExternalReference::store_buffer_top(isolate());
- DCHECK(!scratch.is(kScratchRegister));
+ DCHECK(scratch != kScratchRegister);
Move(kScratchRegister, store_buffer);
movp(scratch, Operand(kScratchRegister, 0));
// Store pointer to buffer.
@@ -213,23 +209,13 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Label done;
// Check for end of buffer.
testp(scratch, Immediate(StoreBuffer::kStoreBufferMask));
- if (and_then == kReturnAtEnd) {
- Label buffer_overflowed;
- j(equal, &buffer_overflowed, Label::kNear);
- ret(0);
- bind(&buffer_overflowed);
- } else {
- DCHECK(and_then == kFallThroughAtEnd);
- j(not_equal, &done, Label::kNear);
- }
+ Label buffer_overflowed;
+ j(equal, &buffer_overflowed, Label::kNear);
+ ret(0);
+ bind(&buffer_overflowed);
StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
CallStub(&store_buffer_overflow);
- if (and_then == kReturnAtEnd) {
- ret(0);
- } else {
- DCHECK(and_then == kFallThroughAtEnd);
- bind(&done);
- }
+ ret(0);
}
@@ -242,16 +228,11 @@ void MacroAssembler::InNewSpace(Register object,
distance);
}
-
-void MacroAssembler::RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register dst,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check,
- PointersToHereCheck pointers_to_here_check_for_value) {
+void MacroAssembler::RecordWriteField(Register object, int offset,
+ Register value, Register dst,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -275,7 +256,7 @@ void MacroAssembler::RecordWriteField(
}
RecordWrite(object, dst, value, save_fp, remembered_set_action,
- OMIT_SMI_CHECK, pointers_to_here_check_for_value);
+ OMIT_SMI_CHECK);
bind(&done);
@@ -287,89 +268,65 @@ void MacroAssembler::RecordWriteField(
}
}
-
-void MacroAssembler::RecordWriteForMap(Register object,
- Register map,
- Register dst,
- SaveFPRegsMode fp_mode) {
- DCHECK(!object.is(kScratchRegister));
- DCHECK(!object.is(map));
- DCHECK(!object.is(dst));
- DCHECK(!map.is(dst));
- AssertNotSmi(object);
-
- if (emit_debug_code()) {
- Label ok;
- if (map.is(kScratchRegister)) pushq(map);
- CompareMap(map, isolate()->factory()->meta_map());
- if (map.is(kScratchRegister)) popq(map);
- j(equal, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
-
- if (!FLAG_incremental_marking) {
- return;
+void TurboAssembler::SaveRegisters(RegList registers) {
+ DCHECK(NumRegs(registers) > 0);
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ pushq(Register::from_code(i));
+ }
}
+}
- if (emit_debug_code()) {
- Label ok;
- if (map.is(kScratchRegister)) pushq(map);
- cmpp(map, FieldOperand(object, HeapObject::kMapOffset));
- if (map.is(kScratchRegister)) popq(map);
- j(equal, &ok, Label::kNear);
- int3();
- bind(&ok);
+void TurboAssembler::RestoreRegisters(RegList registers) {
+ DCHECK(NumRegs(registers) > 0);
+ for (int i = Register::kNumRegisters - 1; i >= 0; --i) {
+ if ((registers >> i) & 1u) {
+ popq(Register::from_code(i));
+ }
}
+}
- // Compute the address.
- leap(dst, FieldOperand(object, HeapObject::kMapOffset));
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
+ RegList registers = callable.descriptor().allocatable_registers();
- // First, check if a write barrier is even needed. The tests below
- // catch stores of smis and stores into the young generation.
- Label done;
+ SaveRegisters(registers);
- // A single check of the map's pages interesting flag suffices, since it is
- // only set during incremental collection, and then it's also guaranteed that
- // the from object's page's interesting flag is also set. This optimization
- // relies on the fact that maps can never be in new space.
- CheckPageFlag(map,
- map, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- zero,
- &done,
- Label::kNear);
+ Register object_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kObject));
+ Register slot_parameter(
+ callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
+ Register isolate_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kIsolate));
+ Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kRememberedSet));
+ Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
+ RecordWriteDescriptor::kFPMode));
- RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
- fp_mode);
- CallStub(&stub);
+ pushq(object);
+ pushq(address);
- bind(&done);
+ popq(slot_parameter);
+ popq(object_parameter);
- // Count number of write barriers in generated code.
- isolate()->counters()->write_barriers_static()->Increment();
- IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+ LoadAddress(isolate_parameter, ExternalReference::isolate_address(isolate()));
+ Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
+ Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
+ Call(callable.code(), RelocInfo::CODE_TARGET);
- // Clobber clobbered registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- Move(dst, kZapValue, Assembler::RelocInfoNone());
- Move(map, kZapValue, Assembler::RelocInfoNone());
- }
+ RestoreRegisters(registers);
}
-
-void MacroAssembler::RecordWrite(
- Register object,
- Register address,
- Register value,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check,
- PointersToHereCheck pointers_to_here_check_for_value) {
- DCHECK(!object.is(value));
- DCHECK(!object.is(address));
- DCHECK(!value.is(address));
+void MacroAssembler::RecordWrite(Register object, Register address,
+ Register value, SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ DCHECK(object != value);
+ DCHECK(object != address);
+ DCHECK(value != address);
AssertNotSmi(object);
if (remembered_set_action == OMIT_REMEMBERED_SET &&
@@ -394,14 +351,10 @@ void MacroAssembler::RecordWrite(
JumpIfSmi(value, &done);
}
- if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- zero,
- &done,
- Label::kNear);
- }
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
+ Label::kNear);
CheckPageFlag(object,
value, // Used as scratch.
@@ -410,9 +363,13 @@ void MacroAssembler::RecordWrite(
&done,
Label::kNear);
+#ifdef V8_CSA_WRITE_BARRIER
+ CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
+#else
RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
fp_mode);
CallStub(&stub);
+#endif
bind(&done);
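Under V8_CSA_WRITE_BARRIER the platform RecordWriteStub is replaced by a call to the kRecordWrite builtin. Because a builtin call clobbers its allocatable registers, CallRecordWriteStub brackets the call with SaveRegisters/RestoreRegisters, which walk a RegList bit set, pushing in ascending code order and popping in descending order. A stand-alone sketch of that traversal, with 16 registers assumed and printf standing in for pushq:

    #include <cstdint>
    #include <cstdio>

    using RegList = uint32_t;  // one bit per register code, as in V8

    // Mirrors TurboAssembler::SaveRegisters above: push every register whose
    // bit is set, lowest code first; RestoreRegisters pops in reverse order.
    void SaveRegisters(RegList registers) {
      for (int i = 0; i < 16; ++i) {
        if ((registers >> i) & 1u) std::printf("pushq r%d\n", i);
      }
    }

    int main() {
      SaveRegisters((1u << 0) | (1u << 2) | (1u << 11));  // e.g. rax, rdx, r11
    }

The pushq(object)/pushq(address) then popq(slot_parameter)/popq(object_parameter) sequence in CallRecordWriteStub uses the stack the same way: it routes both values into the descriptor-assigned parameter registers correctly even when the source and destination registers overlap.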
@@ -563,55 +520,81 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}
-#define REG(Name) \
- { Register::kCode_##Name }
+static constexpr Register saved_regs[] = {rax, rcx, rdx, rbx, rbp, rsi,
+ rdi, r8, r9, r10, r11};
-static const Register saved_regs[] = {
- REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
- REG(r9), REG(r10), REG(r11)
-};
+static constexpr int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
-#undef REG
+int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1,
+ Register exclusion2,
+ Register exclusion3) const {
+ int bytes = 0;
+ for (int i = 0; i < kNumberOfSavedRegs; i++) {
+ Register reg = saved_regs[i];
+ if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
+ bytes += kPointerSize;
+ }
+ }
-static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
+ // R12 to r15 are callee save on all platforms.
+ if (fp_mode == kSaveFPRegs) {
+ bytes += kDoubleSize * XMMRegister::kNumRegisters;
+ }
-void TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
- Register exclusion1, Register exclusion2,
- Register exclusion3) {
+ return bytes;
+}
+
+int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+ Register exclusion2, Register exclusion3) {
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
// restore them.
+ int bytes = 0;
for (int i = 0; i < kNumberOfSavedRegs; i++) {
Register reg = saved_regs[i];
- if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+ if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
pushq(reg);
+ bytes += kPointerSize;
}
}
+
// R12 to r15 are callee save on all platforms.
if (fp_mode == kSaveFPRegs) {
- subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
- for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
+ int delta = kDoubleSize * XMMRegister::kNumRegisters;
+ subp(rsp, Immediate(delta));
+ for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
Movsd(Operand(rsp, i * kDoubleSize), reg);
}
+ bytes += delta;
}
+
+ return bytes;
}
-void TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
- Register exclusion2, Register exclusion3) {
+int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+ Register exclusion2, Register exclusion3) {
+ int bytes = 0;
if (fp_mode == kSaveFPRegs) {
- for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
+ for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
Movsd(reg, Operand(rsp, i * kDoubleSize));
}
- addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
+ int delta = kDoubleSize * XMMRegister::kNumRegisters;
+ addp(rsp, Immediate(delta));
+ bytes += delta;
}
+
for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
Register reg = saved_regs[i];
- if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+ if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
popq(reg);
+ bytes += kPointerSize;
}
}
+
+ return bytes;
}
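PushCallerSaved and PopCallerSaved now report how far they moved rsp, and RequiredStackSizeForCallerSaved predicts that amount without emitting code, so callers can pre-compute displacements across the save area. A stand-alone sketch of the size computation with no exclusions; the constants follow this diff (11 entries in saved_regs, 16 XMM registers) plus the usual 8-byte pointer and double sizes:

    #include <cstdio>

    constexpr int kPointerSize = 8;
    constexpr int kDoubleSize = 8;
    constexpr int kNumberOfSavedRegs = 11;  // saved_regs[] above
    constexpr int kNumXMMRegisters = 16;

    // Mirrors RequiredStackSizeForCallerSaved with no excluded registers.
    int RequiredStackSize(bool save_fp) {
      int bytes = kNumberOfSavedRegs * kPointerSize;
      if (save_fp) bytes += kDoubleSize * kNumXMMRegisters;
      return bytes;
    }

    int main() {
      std::printf("gp only: %d bytes, gp+fp: %d bytes\n",
                  RequiredStackSize(false), RequiredStackSize(true));  // 88, 216
    }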
void TurboAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) {
@@ -934,7 +917,7 @@ void TurboAssembler::Move(Register dst, Smi* source) {
void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
STATIC_ASSERT(kSmiTag == 0);
- if (!dst.is(src)) {
+ if (dst != src) {
movl(dst, src);
}
shlp(dst, Immediate(kSmiShift));
@@ -942,7 +925,7 @@ void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
void TurboAssembler::SmiToInteger32(Register dst, Register src) {
STATIC_ASSERT(kSmiTag == 0);
- if (!dst.is(src)) {
+ if (dst != src) {
movp(dst, src);
}
@@ -967,7 +950,7 @@ void TurboAssembler::SmiToInteger32(Register dst, const Operand& src) {
void MacroAssembler::SmiToInteger64(Register dst, Register src) {
STATIC_ASSERT(kSmiTag == 0);
- if (!dst.is(src)) {
+ if (dst != src) {
movp(dst, src);
}
sarp(dst, Immediate(kSmiShift));
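The Smi helpers in this file assume the default x64 representation: a 32-bit payload stored in the upper half of a 64-bit word, with kSmiTag == 0 and kSmiShift == 32. A stand-alone model of the tag/untag pair, with constants assumed to match that configuration:

    #include <cstdint>
    #include <cstdio>

    constexpr int kSmiShift = 32;  // assumed: SmiValuesAre32Bits()

    // Integer32ToSmi: place the payload in the upper 32 bits (movl + shlp).
    int64_t Integer32ToSmi(int32_t value) {
      return static_cast<int64_t>(value) << kSmiShift;
    }

    // SmiToInteger64: arithmetic right shift recovers the signed payload (sarp).
    int64_t SmiToInteger64(int64_t smi) { return smi >> kSmiShift; }

    int main() {
      int64_t smi = Integer32ToSmi(-7);
      std::printf("smi bits %016llx -> %lld\n",
                  static_cast<unsigned long long>(smi),
                  static_cast<long long>(SmiToInteger64(smi)));
    }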
@@ -1009,7 +992,7 @@ void MacroAssembler::SmiCompare(Register dst, Smi* src) {
void MacroAssembler::Cmp(Register dst, Smi* src) {
- DCHECK(!dst.is(kScratchRegister));
+ DCHECK(dst != kScratchRegister);
if (src->value() == 0) {
testp(dst, dst);
} else {
@@ -1061,7 +1044,7 @@ void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
SmiToInteger64(dst, src);
return;
}
- if (!dst.is(src)) {
+ if (dst != src) {
movp(dst, src);
}
if (power < kSmiShift) {
@@ -1083,41 +1066,6 @@ Condition TurboAssembler::CheckSmi(const Operand& src) {
return zero;
}
-Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
- if (first.is(second)) {
- return CheckSmi(first);
- }
- STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
- if (SmiValuesAre32Bits()) {
- leal(kScratchRegister, Operand(first, second, times_1, 0));
- testb(kScratchRegister, Immediate(0x03));
- } else {
- DCHECK(SmiValuesAre31Bits());
- movl(kScratchRegister, first);
- orl(kScratchRegister, second);
- testb(kScratchRegister, Immediate(kSmiTagMask));
- }
- return zero;
-}
-
-Condition MacroAssembler::CheckEitherSmi(Register first,
- Register second,
- Register scratch) {
- if (first.is(second)) {
- return CheckSmi(first);
- }
- if (scratch.is(second)) {
- andl(scratch, first);
- } else {
- if (!scratch.is(first)) {
- movl(scratch, first);
- }
- andl(scratch, second);
- }
- testb(scratch, Immediate(kSmiTagMask));
- return zero;
-}
-
void TurboAssembler::JumpIfSmi(Register src, Label* on_smi,
Label::Distance near_jump) {
Condition smi = CheckSmi(src);
@@ -1138,22 +1086,14 @@ void MacroAssembler::JumpIfNotSmi(Operand src, Label* on_not_smi,
j(NegateCondition(smi), on_not_smi, near_jump);
}
-void MacroAssembler::JumpIfNotBothSmi(Register src1,
- Register src2,
- Label* on_not_both_smi,
- Label::Distance near_jump) {
- Condition both_smi = CheckBothSmi(src1, src2);
- j(NegateCondition(both_smi), on_not_both_smi, near_jump);
-}
-
void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
if (constant->value() == 0) {
- if (!dst.is(src)) {
+ if (dst != src) {
movp(dst, src);
}
return;
- } else if (dst.is(src)) {
- DCHECK(!dst.is(kScratchRegister));
+ } else if (dst == src) {
+ DCHECK(dst != kScratchRegister);
Register constant_reg = GetSmiConstant(constant);
addp(dst, constant_reg);
} else {
@@ -1180,11 +1120,11 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant,
Label* bailout_label,
Label::Distance near_jump) {
if (constant->value() == 0) {
- if (!dst.is(src)) {
+ if (dst != src) {
movp(dst, src);
}
- } else if (dst.is(src)) {
- DCHECK(!dst.is(kScratchRegister));
+ } else if (dst == src) {
+ DCHECK(dst != kScratchRegister);
Move(kScratchRegister, constant);
addp(dst, kScratchRegister);
if (constraints & SmiOperationConstraint::kBailoutOnNoOverflow) {
@@ -1216,11 +1156,11 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant,
void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
if (constant->value() == 0) {
- if (!dst.is(src)) {
+ if (dst != src) {
movp(dst, src);
}
- } else if (dst.is(src)) {
- DCHECK(!dst.is(kScratchRegister));
+ } else if (dst == src) {
+ DCHECK(dst != kScratchRegister);
Register constant_reg = GetSmiConstant(constant);
subp(dst, constant_reg);
} else {
@@ -1242,11 +1182,11 @@ void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant,
Label* bailout_label,
Label::Distance near_jump) {
if (constant->value() == 0) {
- if (!dst.is(src)) {
+ if (dst != src) {
movp(dst, src);
}
- } else if (dst.is(src)) {
- DCHECK(!dst.is(kScratchRegister));
+ } else if (dst == src) {
+ DCHECK(dst != kScratchRegister);
Move(kScratchRegister, constant);
subp(dst, kScratchRegister);
if (constraints & SmiOperationConstraint::kBailoutOnNoOverflow) {
@@ -1271,7 +1211,7 @@ void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant,
DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
DCHECK(constraints & SmiOperationConstraint::kBailoutOnOverflow);
if (constant->value() == Smi::kMinValue) {
- DCHECK(!dst.is(kScratchRegister));
+ DCHECK(dst != kScratchRegister);
movp(dst, src);
Move(kScratchRegister, constant);
subp(dst, kScratchRegister);
@@ -1292,7 +1232,7 @@ static void SmiAddHelper(MacroAssembler* masm,
T src2,
Label* on_not_smi_result,
Label::Distance near_jump) {
- if (dst.is(src1)) {
+ if (dst == src1) {
Label done;
masm->addp(dst, src2);
masm->j(no_overflow, &done, Label::kNear);
@@ -1314,7 +1254,7 @@ void MacroAssembler::SmiAdd(Register dst,
Label* on_not_smi_result,
Label::Distance near_jump) {
DCHECK_NOT_NULL(on_not_smi_result);
- DCHECK(!dst.is(src2));
+ DCHECK(dst != src2);
SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
@@ -1334,7 +1274,7 @@ void MacroAssembler::SmiAdd(Register dst,
Register src2) {
// No overflow checking. Use only when it's known that
// overflowing is impossible.
- if (!dst.is(src1)) {
+ if (dst != src1) {
if (emit_debug_code()) {
movp(kScratchRegister, src1);
addp(kScratchRegister, src2);
@@ -1355,7 +1295,7 @@ static void SmiSubHelper(MacroAssembler* masm,
T src2,
Label* on_not_smi_result,
Label::Distance near_jump) {
- if (dst.is(src1)) {
+ if (dst == src1) {
Label done;
masm->subp(dst, src2);
masm->j(no_overflow, &done, Label::kNear);
@@ -1376,7 +1316,7 @@ void MacroAssembler::SmiSub(Register dst,
Label* on_not_smi_result,
Label::Distance near_jump) {
DCHECK_NOT_NULL(on_not_smi_result);
- DCHECK(!dst.is(src2));
+ DCHECK(dst != src2);
SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
@@ -1397,7 +1337,7 @@ static void SmiSubNoOverflowHelper(MacroAssembler* masm,
T src2) {
// No overflow checking. Use only when it's known that
// overflowing is impossible (e.g., subtracting two positive smis).
- if (!dst.is(src1)) {
+ if (dst != src1) {
masm->movp(dst, src1);
}
masm->subp(dst, src2);
@@ -1406,7 +1346,7 @@ static void SmiSubNoOverflowHelper(MacroAssembler* masm,
void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
- DCHECK(!dst.is(src2));
+ DCHECK(dst != src2);
SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);
}
@@ -1417,43 +1357,6 @@ void MacroAssembler::SmiSub(Register dst,
SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2);
}
-void MacroAssembler::SelectNonSmi(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smis,
- Label::Distance near_jump) {
- DCHECK(!dst.is(kScratchRegister));
- DCHECK(!src1.is(kScratchRegister));
- DCHECK(!src2.is(kScratchRegister));
- DCHECK(!dst.is(src1));
- DCHECK(!dst.is(src2));
- // Both operands must not be smis.
-#ifdef DEBUG
- Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
- Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
-#endif
- STATIC_ASSERT(kSmiTag == 0);
- DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
- movl(kScratchRegister, Immediate(kSmiTagMask));
- andp(kScratchRegister, src1);
- testl(kScratchRegister, src2);
- // If non-zero then both are smis.
- j(not_zero, on_not_smis, near_jump);
-
- // Exactly one operand is a smi.
- DCHECK_EQ(1, static_cast<int>(kSmiTagMask));
- // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
- subp(kScratchRegister, Immediate(1));
- // If src1 is a smi, then scratch register all 1s, else it is all 0s.
- movp(dst, src1);
- xorp(dst, src2);
- andp(dst, kScratchRegister);
- // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
- xorp(dst, src1);
- // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
-}
-
-
SmiIndex MacroAssembler::SmiToIndex(Register dst,
Register src,
int shift) {
@@ -1461,7 +1364,7 @@ SmiIndex MacroAssembler::SmiToIndex(Register dst,
DCHECK(is_uint6(shift));
// There is a possible optimization if shift is in the range 60-63, but that
// will (and must) never happen.
- if (!dst.is(src)) {
+ if (dst != src) {
movp(dst, src);
}
if (shift < kSmiShift) {
@@ -1473,7 +1376,7 @@ SmiIndex MacroAssembler::SmiToIndex(Register dst,
} else {
DCHECK(SmiValuesAre31Bits());
DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
- if (!dst.is(src)) {
+ if (dst != src) {
movp(dst, src);
}
// We have to sign extend the index register to 64-bit as the SMI might
@@ -1508,64 +1411,6 @@ void TurboAssembler::Push(Smi* source) {
// ----------------------------------------------------------------------------
-void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(
- Register first_object, Register second_object, Register scratch1,
- Register scratch2, Label* on_fail, Label::Distance near_jump) {
- // Check that both objects are not smis.
- Condition either_smi = CheckEitherSmi(first_object, second_object);
- j(either_smi, on_fail, near_jump);
-
- // Load instance type for both strings.
- movp(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
- movp(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
- movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
- movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
-
- // Check that both are flat one-byte strings.
- DCHECK(kNotStringTag != 0);
- const int kFlatOneByteStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatOneByteStringTag =
- kStringTag | kOneByteStringTag | kSeqStringTag;
-
- andl(scratch1, Immediate(kFlatOneByteStringMask));
- andl(scratch2, Immediate(kFlatOneByteStringMask));
- // Interleave the bits to check both scratch1 and scratch2 in one test.
- const int kShift = 8;
- DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << kShift));
- shlp(scratch2, Immediate(kShift));
- orp(scratch1, scratch2);
- cmpl(scratch1,
- Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << kShift)));
- j(not_equal, on_fail, near_jump);
-}
-
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
- Register first_object_instance_type, Register second_object_instance_type,
- Register scratch1, Register scratch2, Label* on_fail,
- Label::Distance near_jump) {
- // Load instance type for both strings.
- movp(scratch1, first_object_instance_type);
- movp(scratch2, second_object_instance_type);
-
- // Check that both are flat one-byte strings.
- DCHECK(kNotStringTag != 0);
- const int kFlatOneByteStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatOneByteStringTag =
- kStringTag | kOneByteStringTag | kSeqStringTag;
-
- andl(scratch1, Immediate(kFlatOneByteStringMask));
- andl(scratch2, Immediate(kFlatOneByteStringMask));
- // Interleave the bits to check both scratch1 and scratch2 in one test.
- DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
- leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
- cmpl(scratch1,
- Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
- j(not_equal, on_fail, near_jump);
-}
-
-
template<class T>
static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
T operand_or_register,
@@ -1597,7 +1442,7 @@ void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
}
void TurboAssembler::Move(Register dst, Register src) {
- if (!dst.is(src)) {
+ if (dst != src) {
movp(dst, src);
}
}
@@ -2073,7 +1918,7 @@ void MacroAssembler::Pop(const Operand& dst) {
movp(scratch, Operand(rsp, 0));
movp(dst, scratch);
leal(rsp, Operand(rsp, 4));
- if (scratch.is(kRootRegister)) {
+ if (scratch == kRootRegister) {
// Restore kRootRegister.
InitializeRootRegister();
}
@@ -2131,7 +1976,7 @@ void TurboAssembler::Call(ExternalReference ext) {
LoadAddress(kScratchRegister, ext);
call(kScratchRegister);
#ifdef DEBUG
- CHECK_EQ(end_position, pc_offset());
+ DCHECK_EQ(end_position, pc_offset());
#endif
}
@@ -2151,7 +1996,7 @@ void TurboAssembler::Call(Address destination, RelocInfo::Mode rmode) {
Move(kScratchRegister, destination, rmode);
call(kScratchRegister);
#ifdef DEBUG
- CHECK_EQ(pc_offset(), end_position);
+ DCHECK_EQ(pc_offset(), end_position);
#endif
}
@@ -2162,7 +2007,7 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
call(code_object, rmode);
#ifdef DEBUG
- CHECK_EQ(end_position, pc_offset());
+ DCHECK_EQ(end_position, pc_offset());
#endif
}
@@ -2474,23 +2319,6 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
Immediate(static_cast<int8_t>(type)));
}
-void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
- Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
-
- CompareMap(obj, map);
- j(not_equal, fail);
-}
-
void TurboAssembler::SlowTruncateToIDelayed(Zone* zone, Register result_reg,
Register input_reg, int offset) {
CallStubDelayed(
@@ -2577,7 +2405,7 @@ void MacroAssembler::AssertFixedArray(Register object) {
void TurboAssembler::AssertZeroExtended(Register int32_register) {
if (emit_debug_code()) {
- DCHECK(!int32_register.is(kScratchRegister));
+ DCHECK(int32_register != kScratchRegister);
movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
cmpq(kScratchRegister, int32_register);
Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
@@ -2657,14 +2485,6 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
bind(&done);
}
-void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
- if (FLAG_native_code_counters && counter->Enabled()) {
- Operand counter_operand = ExternalOperand(ExternalReference(counter));
- movl(counter_operand, Immediate(value));
- }
-}
-
-
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
DCHECK(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
@@ -2787,7 +2607,7 @@ void MacroAssembler::InvokeFunction(Register function, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag) {
- DCHECK(function.is(rdi));
+ DCHECK(function == rdi);
movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
InvokeFunctionCode(rdi, new_target, expected, actual, flag);
}
@@ -2798,8 +2618,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
InvokeFlag flag) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- DCHECK(function.is(rdi));
- DCHECK_IMPLIES(new_target.is_valid(), new_target.is(rdx));
+ DCHECK(function == rdi);
+ DCHECK_IMPLIES(new_target.is_valid(), new_target == rdx);
// On function call, call into the debugger if necessary.
CheckDebugHook(function, new_target, expected, actual);
@@ -2863,14 +2683,14 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Set(rax, actual.immediate());
cmpp(expected.reg(), Immediate(actual.immediate()));
j(equal, &invoke, Label::kNear);
- DCHECK(expected.reg().is(rbx));
- } else if (!expected.reg().is(actual.reg())) {
+ DCHECK(expected.reg() == rbx);
+ } else if (expected.reg() != actual.reg()) {
// Both expected and actual are in (different) registers. This
// is the case when we invoke functions using call and apply.
cmpp(expected.reg(), actual.reg());
j(equal, &invoke, Label::kNear);
- DCHECK(actual.reg().is(rax));
- DCHECK(expected.reg().is(rbx));
+ DCHECK(actual.reg() == rax);
+ DCHECK(expected.reg() == rbx);
} else {
definitely_matches = true;
Move(rax, actual.reg());
@@ -3030,7 +2850,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
#endif
// Optionally save all XMM registers.
if (save_doubles) {
- int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
+ int space = XMMRegister::kNumRegisters * kDoubleSize +
arg_stack_space * kRegisterSize;
subp(rsp, Immediate(space));
int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
@@ -3135,168 +2955,6 @@ void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
}
-void MacroAssembler::LoadAllocationTopHelper(Register result,
- Register scratch,
- AllocationFlags flags) {
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
-
- // Just return if allocation top is already known.
- if ((flags & RESULT_CONTAINS_TOP) != 0) {
- // No use of scratch if allocation top is provided.
- DCHECK(!scratch.is_valid());
-#ifdef DEBUG
- // Assert that result actually contains top on entry.
- Operand top_operand = ExternalOperand(allocation_top);
- cmpp(result, top_operand);
- Check(equal, kUnexpectedAllocationTop);
-#endif
- return;
- }
-
- // Move address of new object to result. Use scratch register if available,
- // and keep address in scratch until call to UpdateAllocationTopHelper.
- if (scratch.is_valid()) {
- LoadAddress(scratch, allocation_top);
- movp(result, Operand(scratch, 0));
- } else {
- Load(result, allocation_top);
- }
-}
-
-
-void MacroAssembler::MakeSureDoubleAlignedHelper(Register result,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
- if (kPointerSize == kDoubleSize) {
- if (FLAG_debug_code) {
- testl(result, Immediate(kDoubleAlignmentMask));
- Check(zero, kAllocationIsNotDoubleAligned);
- }
- } else {
- // Align the next allocation. Storing the filler map without checking top
- // is safe in new-space because the limit of the heap is aligned there.
- DCHECK(kPointerSize * 2 == kDoubleSize);
- DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
- // Make sure scratch is not clobbered by this function as it might be
- // used in UpdateAllocationTopHelper later.
- DCHECK(!scratch.is(kScratchRegister));
- Label aligned;
- testl(result, Immediate(kDoubleAlignmentMask));
- j(zero, &aligned, Label::kNear);
- if ((flags & PRETENURE) != 0) {
- ExternalReference allocation_limit =
- AllocationUtils::GetAllocationLimitReference(isolate(), flags);
- cmpp(result, ExternalOperand(allocation_limit));
- j(above_equal, gc_required);
- }
- LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex);
- movp(Operand(result, 0), kScratchRegister);
- addp(result, Immediate(kDoubleSize / 2));
- bind(&aligned);
- }
-}
-
-
-void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
- Register scratch,
- AllocationFlags flags) {
- if (emit_debug_code()) {
- testp(result_end, Immediate(kObjectAlignmentMask));
- Check(zero, kUnalignedAllocationInNewSpace);
- }
-
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
-
- // Update new top.
- if (scratch.is_valid()) {
- // Scratch already contains address of allocation top.
- movp(Operand(scratch, 0), result_end);
- } else {
- Store(allocation_top, result_end);
- }
-}
-
-
-void MacroAssembler::Allocate(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
- DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- DCHECK(object_size <= kMaxRegularHeapObjectSize);
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- movl(result, Immediate(0x7091));
- if (result_end.is_valid()) {
- movl(result_end, Immediate(0x7191));
- }
- if (scratch.is_valid()) {
- movl(scratch, Immediate(0x7291));
- }
- }
- jmp(gc_required);
- return;
- }
- DCHECK(!result.is(result_end));
-
- // Load address of new object into result.
- LoadAllocationTopHelper(result, scratch, flags);
-
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
- }
-
- // Calculate new top and bail out if new space is exhausted.
- ExternalReference allocation_limit =
- AllocationUtils::GetAllocationLimitReference(isolate(), flags);
-
- Register top_reg = result_end.is_valid() ? result_end : result;
-
- if (!top_reg.is(result)) {
- movp(top_reg, result);
- }
- addp(top_reg, Immediate(object_size));
- Operand limit_operand = ExternalOperand(allocation_limit);
- cmpp(top_reg, limit_operand);
- j(above, gc_required);
-
- UpdateAllocationTopHelper(top_reg, scratch, flags);
-
- if (top_reg.is(result)) {
- subp(result, Immediate(object_size - kHeapObjectTag));
- } else {
- // Tag the result.
- DCHECK(kHeapObjectTag == 1);
- incp(result);
- }
-}
-
-void MacroAssembler::AllocateJSValue(Register result, Register constructor,
- Register value, Register scratch,
- Label* gc_required) {
- DCHECK(!result.is(constructor));
- DCHECK(!result.is(scratch));
- DCHECK(!result.is(value));
-
- // Allocate JSValue in new space.
- Allocate(JSValue::kSize, result, scratch, no_reg, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Initialize the JSValue.
- LoadGlobalFunctionInitialMap(constructor, scratch);
- movp(FieldOperand(result, HeapObject::kMapOffset), scratch);
- LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
- movp(FieldOperand(result, JSObject::kPropertiesOrHashOffset), scratch);
- movp(FieldOperand(result, JSObject::kElementsOffset), scratch);
- movp(FieldOperand(result, JSValue::kValueOffset), value);
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
-}
-
#ifdef _WIN64
static const int kRegisterPassedArguments = 4;
#else
@@ -3310,20 +2968,6 @@ void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
}
-void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
- Register map) {
- // Load the initial map. The global functions all have initial maps.
- movp(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (emit_debug_code()) {
- Label ok, fail;
- CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
- jmp(&ok);
- bind(&fail);
- Abort(kGlobalFunctionsMustHaveInitialMap);
- bind(&ok);
- }
-}
-
int TurboAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
// On Windows 64 stack slots are reserved by the caller for all arguments
// including the ones passed in registers, and space is always allocated for
@@ -3433,7 +3077,7 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met,
Label::Distance condition_met_distance) {
DCHECK(cc == zero || cc == not_zero);
- if (scratch.is(object)) {
+ if (scratch == object) {
andp(scratch, Immediate(~Page::kPageAlignmentMask));
} else {
movp(scratch, Immediate(~Page::kPageAlignmentMask));
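CheckPageFlag relies on V8 pages being power-of-two aligned: clearing the low bits of any pointer into a page yields the page header, where the MemoryChunk flags live. A stand-alone sketch of the masking step; the 512 KB page size is an assumption about V8 of this era, not something stated in this diff:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    constexpr uintptr_t kPageSize = uintptr_t{1} << 19;  // assumed 512 KB pages
    constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;

    // What CheckPageFlag's andp computes before testing the flag word.
    uintptr_t PageHeader(uintptr_t object) {
      return object & ~kPageAlignmentMask;
    }

    int main() {
      uintptr_t object = 0x7f0012385678;
      std::printf("object %" PRIxPTR " -> page %" PRIxPTR "\n", object,
                  PageHeader(object));
    }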
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 4addee4ed6..c3c92745fc 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -14,40 +14,36 @@ namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
-const Register kReturnRegister0 = {Register::kCode_rax};
-const Register kReturnRegister1 = {Register::kCode_rdx};
-const Register kReturnRegister2 = {Register::kCode_r8};
-const Register kJSFunctionRegister = {Register::kCode_rdi};
-const Register kContextRegister = {Register::kCode_rsi};
-const Register kAllocateSizeRegister = {Register::kCode_rdx};
-const Register kInterpreterAccumulatorRegister = {Register::kCode_rax};
-const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r12};
-const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r14};
-const Register kInterpreterDispatchTableRegister = {Register::kCode_r15};
-const Register kJavaScriptCallArgCountRegister = {Register::kCode_rax};
-const Register kJavaScriptCallNewTargetRegister = {Register::kCode_rdx};
-const Register kRuntimeCallFunctionRegister = {Register::kCode_rbx};
-const Register kRuntimeCallArgCountRegister = {Register::kCode_rax};
+constexpr Register kReturnRegister0 = rax;
+constexpr Register kReturnRegister1 = rdx;
+constexpr Register kReturnRegister2 = r8;
+constexpr Register kJSFunctionRegister = rdi;
+constexpr Register kContextRegister = rsi;
+constexpr Register kAllocateSizeRegister = rdx;
+constexpr Register kInterpreterAccumulatorRegister = rax;
+constexpr Register kInterpreterBytecodeOffsetRegister = r12;
+constexpr Register kInterpreterBytecodeArrayRegister = r14;
+constexpr Register kInterpreterDispatchTableRegister = r15;
+constexpr Register kJavaScriptCallArgCountRegister = rax;
+constexpr Register kJavaScriptCallNewTargetRegister = rdx;
+constexpr Register kRuntimeCallFunctionRegister = rbx;
+constexpr Register kRuntimeCallArgCountRegister = rax;
// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee save, and not used by the
// function calling convention.
-const Register kScratchRegister = {10}; // r10.
-const XMMRegister kScratchDoubleReg = {15}; // xmm15.
-const Register kRootRegister = {13}; // r13 (callee save).
+constexpr Register kScratchRegister = r10;
+constexpr XMMRegister kScratchDoubleReg = xmm15;
+constexpr Register kRootRegister = r13; // callee save
// Actual value of root register is offset from the root array's start
// to take advantage of negative 8-bit displacement values.
-const int kRootRegisterBias = 128;
+constexpr int kRootRegisterBias = 128;
// Convenience for platform-independent signatures.
typedef Operand MemOperand;
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-enum PointersToHereCheck {
- kPointersToHereMaybeInteresting,
- kPointersToHereAreAlwaysInteresting
-};
enum class SmiOperationConstraint {
kPreserveSourceRegister = 1 << 0,
@@ -86,13 +82,7 @@ struct SmiIndex {
class TurboAssembler : public Assembler {
public:
TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
- CodeObjectRequired create_code_object)
- : Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
- if (create_code_object == CodeObjectRequired::kYes) {
- code_object_ =
- Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
- }
- }
+ CodeObjectRequired create_code_object);
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() const { return has_frame_; }
@@ -414,18 +404,38 @@ class TurboAssembler : public Assembler {
addp(kRootRegister, Immediate(kRootRegisterBias));
}
+ void SaveRegisters(RegList registers);
+ void RestoreRegisters(RegList registers);
+
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode);
+
void MoveNumber(Register dst, double value);
void MoveNonSmi(Register dst, double value);
- // These functions do not arrange the registers in any particular order so
- // they are not useful for calls that can cause a GC. The caller can
- // exclude up to 3 registers that do not need to be saved and restored.
- void PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
- Register exclusion2 = no_reg,
- Register exclusion3 = no_reg);
- void PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ // Calculate how much stack space (in bytes) is required to store caller
+ // registers excluding those specified in the arguments.
+ int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg) const;
+
+ // PushCallerSaved and PopCallerSaved do not arrange the registers in any
+ // particular order so they are not useful for calls that can cause a GC.
+ // The caller can exclude up to 3 registers that do not need to be saved and
+ // restored.
+
+ // Push caller saved registers on the stack, and return the number of bytes
+ // by which the stack pointer is adjusted.
+ int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
+ // Restore caller saved registers from the stack, and return the number of
+ // bytes by which the stack pointer is adjusted.
+ int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
protected:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
@@ -494,9 +504,6 @@ class MacroAssembler : public TurboAssembler {
// Load a root value where the index (or part of it) is variable.
// The variable_offset register is added to the fixed_offset value
// to get the index into the root-array.
- void LoadRootIndexed(Register destination,
- Register variable_offset,
- int fixed_offset);
void PushRoot(Heap::RootListIndex index);
// Compare the object in a register to a value and jump if they are equal.
@@ -531,19 +538,12 @@ class MacroAssembler : public TurboAssembler {
// GC Support
- enum RememberedSetFinalAction {
- kReturnAtEnd,
- kFallThroughAtEnd
- };
-
// Record in the remembered set the fact that we have a pointer to new space
// at the address pointed to by the addr register. Only works if addr is not
// in new space.
void RememberedSetHelper(Register object, // Used for debug code.
- Register addr,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetFinalAction and_then);
+ Register addr, Register scratch,
+ SaveFPRegsMode save_fp);
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
@@ -579,43 +579,10 @@ class MacroAssembler : public TurboAssembler {
// The offset is the offset from the start of the object, not the offset from
// the tagged HeapObject pointer. For use with FieldOperand(reg, off).
void RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting);
-
- // As above, but the offset has the tag presubtracted. For use with
- // Operand(reg, off).
- void RecordWriteContextSlot(
- Register context,
- int offset,
- Register value,
- Register scratch,
+ Register object, int offset, Register value, Register scratch,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting) {
- RecordWriteField(context,
- offset + kHeapObjectTag,
- value,
- scratch,
- save_fp,
- remembered_set_action,
- smi_check,
- pointers_to_here_check_for_value);
- }
-
- void RecordWriteForMap(
- Register object,
- Register map,
- Register dst,
- SaveFPRegsMode save_fp);
+ SmiCheck smi_check = INLINE_SMI_CHECK);
// For page containing |object| mark region covering |address|
// dirty. |object| is the object being stored into, |value| is the
@@ -623,14 +590,9 @@ class MacroAssembler : public TurboAssembler {
// operation. RecordWrite filters out smis so it does not update
// the write barrier if the value is a smi.
void RecordWrite(
- Register object,
- Register address,
- Register value,
- SaveFPRegsMode save_fp,
+ Register object, Register address, Register value, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting);
+ SmiCheck smi_check = INLINE_SMI_CHECK);
// Frame restart support.
void MaybeDropFrames();
@@ -725,13 +687,6 @@ class MacroAssembler : public TurboAssembler {
// Functions performing a check on a known or potential smi. Returns
// a condition that is satisfied if the check is successful.
- // Are both values tagged smis.
- Condition CheckBothSmi(Register first, Register second);
-
- // Are either value a tagged smi.
- Condition CheckEitherSmi(Register first,
- Register second,
- Register scratch = kScratchRegister);
// Test-and-jump functions. Typically combines a check function
// above with a conditional jump.
@@ -744,12 +699,6 @@ class MacroAssembler : public TurboAssembler {
void JumpIfNotSmi(Operand src, Label* on_not_smi,
Label::Distance near_jump = Label::kFar);
- // Jump if either or both register are not smi values.
- void JumpIfNotBothSmi(Register src1,
- Register src2,
- Label* on_not_both_smi,
- Label::Distance near_jump = Label::kFar);
-
// Operations on tagged smi values.
// Smis represent a subset of integers. The subset is always equivalent to
@@ -822,14 +771,6 @@ class MacroAssembler : public TurboAssembler {
// Specialized operations
- // Select the non-smi register of two registers where exactly one is a
- // smi. If neither are smis, jump to the failure label.
- void SelectNonSmi(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smis,
- Label::Distance near_jump = Label::kFar);
-
// Converts, if necessary, a smi to a combination of number and
// multiplier to be used as a scaled index.
// The src register contains a *positive* smi value. The shift is the
@@ -843,16 +784,6 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// String macros.
- void JumpIfNotBothSequentialOneByteStrings(
- Register first_object, Register second_object, Register scratch1,
- Register scratch2, Label* on_not_both_flat_one_byte,
- Label::Distance near_jump = Label::kFar);
-
- void JumpIfBothInstanceTypesAreNotSequentialOneByte(
- Register first_object_instance_type, Register second_object_instance_type,
- Register scratch1, Register scratch2, Label* on_fail,
- Label::Distance near_jump = Label::kFar);
-
// Checks if the given register or operand is a unique name
void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name,
Label::Distance distance = Label::kFar);
@@ -974,18 +905,6 @@ class MacroAssembler : public TurboAssembler {
// Always use unsigned comparisons: above and below, not less and greater.
void CmpInstanceType(Register map, InstanceType type);
- // Compare an object's map with the specified map.
- void CompareMap(Register obj, Handle<Map> map);
-
- // Check if the map of an object is equal to a specified map and branch to
- // label if not. Skip the smi check if not required (object is known to be a
- // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
- // against maps that are ElementsKind transition maps of the specified map.
- void CheckMap(Register obj,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type);
-
void DoubleToI(Register result_reg, XMMRegister input_reg,
XMMRegister scratch, MinusZeroMode minus_zero_mode,
Label* lost_precision, Label* is_nan, Label* minus_zero,
@@ -1040,31 +959,6 @@ class MacroAssembler : public TurboAssembler {
void PopStackHandler();
// ---------------------------------------------------------------------------
- // Allocation support
-
- // Allocate an object in new space or old space. If the given space
- // is exhausted control continues at the gc_required label. The allocated
- // object is returned in result and end of the new object is returned in
- // result_end. The register scratch can be passed as no_reg in which case
- // an additional object reference will be added to the reloc info. The
- // returned pointers in result and result_end have not yet been tagged as
- // heap objects. If result_contains_top_on_entry is true the content of
- // result is known to be the allocation top on entry (could be result_end
- // from a previous call). If result_contains_top_on_entry is true scratch
- // should be no_reg as it is never used.
- void Allocate(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- // Allocate and initialize a JSValue wrapper with the specified {constructor}
- // and {value}.
- void AllocateJSValue(Register result, Register constructor, Register value,
- Register scratch, Label* gc_required);
-
- // ---------------------------------------------------------------------------
// Support functions.
// Machine code version of Map::GetConstructor().
@@ -1079,10 +973,6 @@ class MacroAssembler : public TurboAssembler {
// Load the native context slot with the current index.
void LoadNativeContextSlot(int index, Register dst);
- // Load the initial map from the global function. The registers
- // function and map can be the same.
- void LoadGlobalFunctionInitialMap(Register function, Register map);
-
// ---------------------------------------------------------------------------
// Runtime calls
@@ -1121,8 +1011,6 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// StatsCounter support
-
- void SetCounter(StatsCounter* counter, int value);
void IncrementCounter(StatsCounter* counter, int value);
void DecrementCounter(StatsCounter* counter, int value);
@@ -1157,25 +1045,6 @@ class MacroAssembler : public TurboAssembler {
void LeaveExitFrameEpilogue(bool restore_context);
- // Allocation support helpers.
- // Loads the top of new-space into the result register.
- // Otherwise the address of the new-space top is loaded into scratch (if
- // scratch is valid), and the new-space top is loaded into result.
- void LoadAllocationTopHelper(Register result,
- Register scratch,
- AllocationFlags flags);
-
- void MakeSureDoubleAlignedHelper(Register result,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- // Update allocation top with value in result_end register.
- // If scratch is valid, it contains the address of the allocation top.
- void UpdateAllocationTopHelper(Register result_end,
- Register scratch,
- AllocationFlags flags);
-
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object,
Register scratch,
diff --git a/deps/v8/src/zone/zone-list-inl.h b/deps/v8/src/zone/zone-list-inl.h
new file mode 100644
index 0000000000..efae3971a3
--- /dev/null
+++ b/deps/v8/src/zone/zone-list-inl.h
@@ -0,0 +1,164 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ZONE_ZONE_LIST_INL_H_
+#define V8_ZONE_ZONE_LIST_INL_H_
+
+#include "src/zone/zone.h"
+
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+template <typename T>
+void ZoneList<T>::Add(const T& element, Zone* zone) {
+ if (length_ < capacity_) {
+ data_[length_++] = element;
+ } else {
+ ZoneList<T>::ResizeAdd(element, ZoneAllocationPolicy(zone));
+ }
+}
+
+template <typename T>
+void ZoneList<T>::AddAll(const ZoneList<T>& other, Zone* zone) {
+ AddAll(other.ToVector(), zone);
+}
+
+template <typename T>
+void ZoneList<T>::AddAll(const Vector<T>& other, Zone* zone) {
+ int result_length = length_ + other.length();
+ if (capacity_ < result_length)
+ Resize(result_length, ZoneAllocationPolicy(zone));
+ if (std::is_fundamental<T>()) {
+ memcpy(data_ + length_, other.start(), sizeof(*data_) * other.length());
+ } else {
+ for (int i = 0; i < other.length(); i++) data_[length_ + i] = other.at(i);
+ }
+ length_ = result_length;
+}
+
+// Use two layers of inlining so that the non-inlined function can
+// use the same implementation as the inlined version.
+template <typename T>
+void ZoneList<T>::ResizeAdd(const T& element, ZoneAllocationPolicy alloc) {
+ ResizeAddInternal(element, alloc);
+}
+
+template <typename T>
+void ZoneList<T>::ResizeAddInternal(const T& element,
+ ZoneAllocationPolicy alloc) {
+ DCHECK(length_ >= capacity_);
+ // Grow the list capacity by 100%, but make sure to let it grow
+ // even when the capacity is zero (possible initial case).
+ int new_capacity = 1 + 2 * capacity_;
+ // Since the element reference could be an element of the list, copy
+ // it out of the old backing storage before resizing.
+ T temp = element;
+ Resize(new_capacity, alloc);
+ data_[length_++] = temp;
+}
+
+template <typename T>
+void ZoneList<T>::Resize(int new_capacity, ZoneAllocationPolicy alloc) {
+ DCHECK_LE(length_, new_capacity);
+ T* new_data = NewData(new_capacity, alloc);
+ MemCopy(new_data, data_, length_ * sizeof(T));
+ ZoneList<T>::DeleteData(data_);
+ data_ = new_data;
+ capacity_ = new_capacity;
+}
+
+template <typename T>
+Vector<T> ZoneList<T>::AddBlock(T value, int count, Zone* zone) {
+ int start = length_;
+ for (int i = 0; i < count; i++) Add(value, zone);
+ return Vector<T>(&data_[start], count);
+}
+
+template <typename T>
+void ZoneList<T>::Set(int index, const T& elm) {
+ DCHECK(index >= 0 && index <= length_);
+ data_[index] = elm;
+}
+
+template <typename T>
+void ZoneList<T>::InsertAt(int index, const T& elm, Zone* zone) {
+ DCHECK(index >= 0 && index <= length_);
+ Add(elm, zone);
+ for (int i = length_ - 1; i > index; --i) {
+ data_[i] = data_[i - 1];
+ }
+ data_[index] = elm;
+}
+
+template <typename T>
+T ZoneList<T>::Remove(int i) {
+ T element = at(i);
+ length_--;
+ while (i < length_) {
+ data_[i] = data_[i + 1];
+ i++;
+ }
+ return element;
+}
+
+template <typename T>
+void ZoneList<T>::Clear() {
+ DeleteData(data_);
+ // We don't call Initialize(0) since that requires passing a Zone,
+ // which we don't really need.
+ data_ = NULL;
+ capacity_ = 0;
+ length_ = 0;
+}
+
+template <typename T>
+void ZoneList<T>::Rewind(int pos) {
+ DCHECK(0 <= pos && pos <= length_);
+ length_ = pos;
+}
+
+template <typename T>
+template <class Visitor>
+void ZoneList<T>::Iterate(Visitor* visitor) {
+ for (int i = 0; i < length_; i++) visitor->Apply(&data_[i]);
+}
+
+template <typename T>
+bool ZoneList<T>::Contains(const T& elm) const {
+ for (int i = 0; i < length_; i++) {
+ if (data_[i] == elm) return true;
+ }
+ return false;
+}
+
+template <typename T>
+template <typename CompareFunction>
+void ZoneList<T>::Sort(CompareFunction cmp) {
+ ToVector().Sort(cmp, 0, length_);
+#ifdef DEBUG
+ for (int i = 1; i < length_; i++) {
+ DCHECK_LE(cmp(&data_[i - 1], &data_[i]), 0);
+ }
+#endif
+}
+
+template <typename T>
+template <typename CompareFunction>
+void ZoneList<T>::StableSort(CompareFunction cmp, size_t s, size_t l) {
+ ToVector().StableSort(cmp, s, l);
+#ifdef DEBUG
+ for (size_t i = s + 1; i < l; i++) {
+ DCHECK_LE(cmp(&data_[i - 1], &data_[i]), 0);
+ }
+#endif
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_ZONE_ZONE_LIST_INL_H_
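A note on the new file above: the `T temp = element;` copy in ResizeAddInternal is the subtle part. Add takes a `const T&`, so the argument may alias an element of the list's own backing store, and resizing first would free that storage before the element is written. Below is a minimal standalone sketch of the same `1 + 2 * capacity_` growth policy and copy-out; MiniList is illustrative, not V8 code, and like ZoneList's MemCopy-based Resize it assumes a trivially copyable T.

#include <cassert>
#include <cstdlib>
#include <cstring>

template <typename T>
class MiniList {
 public:
  MiniList() : data_(nullptr), capacity_(0), length_(0) {}
  ~MiniList() { std::free(data_); }
  void Add(const T& element) {
    if (length_ >= capacity_) {
      int new_capacity = 1 + 2 * capacity_;  // 0 -> 1 -> 3 -> 7 -> 15 ...
      T temp = element;  // Copy out first: `element` may alias data_[i].
      T* new_data = static_cast<T*>(std::malloc(new_capacity * sizeof(T)));
      if (data_ != nullptr) {
        std::memcpy(new_data, data_, length_ * sizeof(T));
        std::free(data_);
      }
      data_ = new_data;
      capacity_ = new_capacity;
      data_[length_++] = temp;
      return;
    }
    data_[length_++] = element;
  }
  const T& at(int i) const { return data_[i]; }
  int length() const { return length_; }

 private:
  T* data_;
  int capacity_;
  int length_;
};

int main() {
  MiniList<int> list;
  list.Add(7);
  // Self-referencing adds: without the copy-out, each resize could read
  // from freed storage while writing the new element.
  for (int i = 0; i < 100; i++) list.Add(list.at(0));
  assert(list.length() == 101 && list.at(100) == 7);
}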
diff --git a/deps/v8/src/zone/zone.h b/deps/v8/src/zone/zone.h
index c916972dcf..ba79cfa666 100644
--- a/deps/v8/src/zone/zone.h
+++ b/deps/v8/src/zone/zone.h
@@ -10,7 +10,6 @@
#include "src/base/hashmap.h"
#include "src/base/logging.h"
#include "src/globals.h"
-#include "src/list.h"
#include "src/splay-tree.h"
#include "src/zone/accounting-allocator.h"
@@ -144,63 +143,146 @@ class ZoneAllocationPolicy final {
Zone* zone_;
};
+template <typename T>
+class Vector;
+
// ZoneLists are growable lists with constant-time access to the
// elements. The list itself and all its elements are allocated in the
// Zone. ZoneLists cannot be deleted individually; you can delete all
// objects in the Zone by calling Zone::DeleteAll().
template <typename T>
-class ZoneList final : public List<T, ZoneAllocationPolicy> {
+class ZoneList final {
public:
// Construct a new ZoneList with the given capacity; the length is
// always zero. The capacity must be non-negative.
- ZoneList(int capacity, Zone* zone)
- : List<T, ZoneAllocationPolicy>(capacity, ZoneAllocationPolicy(zone)) {}
-
+ ZoneList(int capacity, Zone* zone) { Initialize(capacity, zone); }
// Construct a new ZoneList from a std::initializer_list
- ZoneList(std::initializer_list<T> list, Zone* zone)
- : List<T, ZoneAllocationPolicy>(static_cast<int>(list.size()),
- ZoneAllocationPolicy(zone)) {
+ ZoneList(std::initializer_list<T> list, Zone* zone) {
+ Initialize(static_cast<int>(list.size()), zone);
for (auto& i : list) Add(i, zone);
}
-
- void* operator new(size_t size, Zone* zone) { return zone->New(size); }
-
// Construct a new ZoneList by copying the elements of the given ZoneList.
- ZoneList(const ZoneList<T>& other, Zone* zone)
- : List<T, ZoneAllocationPolicy>(other.length(),
- ZoneAllocationPolicy(zone)) {
+ ZoneList(const ZoneList<T>& other, Zone* zone) {
+ Initialize(other.length(), zone);
AddAll(other, zone);
}
- // We add some convenience wrappers so that we can pass in a Zone
- // instead of a (less convenient) ZoneAllocationPolicy.
- void Add(const T& element, Zone* zone) {
- List<T, ZoneAllocationPolicy>::Add(element, ZoneAllocationPolicy(zone));
- }
- void AddAll(const List<T, ZoneAllocationPolicy>& other, Zone* zone) {
- List<T, ZoneAllocationPolicy>::AddAll(other, ZoneAllocationPolicy(zone));
- }
- void AddAll(const Vector<T>& other, Zone* zone) {
- List<T, ZoneAllocationPolicy>::AddAll(other, ZoneAllocationPolicy(zone));
- }
- void InsertAt(int index, const T& element, Zone* zone) {
- List<T, ZoneAllocationPolicy>::InsertAt(index, element,
- ZoneAllocationPolicy(zone));
+ INLINE(~ZoneList()) { DeleteData(data_); }
+
+ // Please the MSVC compiler. We should never have to execute this.
+ INLINE(void operator delete(void* p, ZoneAllocationPolicy allocator)) {
+ UNREACHABLE();
}
- Vector<T> AddBlock(T value, int count, Zone* zone) {
- return List<T, ZoneAllocationPolicy>::AddBlock(value, count,
- ZoneAllocationPolicy(zone));
+
+ void* operator new(size_t size, Zone* zone) { return zone->New(size); }
+
+ // Returns a reference to the element at index i. This reference is not safe
+ // to use after operations that can change the list's backing store
+ // (e.g. Add).
+ inline T& operator[](int i) const {
+ DCHECK_LE(0, i);
+ DCHECK_GT(static_cast<unsigned>(length_), static_cast<unsigned>(i));
+ return data_[i];
}
- void Allocate(int length, Zone* zone) {
- List<T, ZoneAllocationPolicy>::Allocate(length, ZoneAllocationPolicy(zone));
+ inline T& at(int i) const { return operator[](i); }
+ inline T& last() const { return at(length_ - 1); }
+ inline T& first() const { return at(0); }
+
+ typedef T* iterator;
+ inline iterator begin() const { return &data_[0]; }
+ inline iterator end() const { return &data_[length_]; }
+
+ INLINE(bool is_empty() const) { return length_ == 0; }
+ INLINE(int length() const) { return length_; }
+ INLINE(int capacity() const) { return capacity_; }
+
+ Vector<T> ToVector() const { return Vector<T>(data_, length_); }
+
+ Vector<const T> ToConstVector() const {
+ return Vector<const T>(data_, length_);
}
- void Initialize(int capacity, Zone* zone) {
- List<T, ZoneAllocationPolicy>::Initialize(capacity,
- ZoneAllocationPolicy(zone));
+
+ INLINE(void Initialize(int capacity, Zone* zone)) {
+ DCHECK_GE(capacity, 0);
+ data_ =
+ (capacity > 0) ? NewData(capacity, ZoneAllocationPolicy(zone)) : NULL;
+ capacity_ = capacity;
+ length_ = 0;
}
+ // Adds a copy of the given 'element' to the end of the list,
+ // expanding the list if necessary.
+ void Add(const T& element, Zone* zone);
+ // Add all the elements from the argument list to this list.
+ void AddAll(const ZoneList<T>& other, Zone* zone);
+ // Add all the elements from the vector to this list.
+ void AddAll(const Vector<T>& other, Zone* zone);
+ // Inserts the element at the specific index.
+ void InsertAt(int index, const T& element, Zone* zone);
+
+ // Adds 'count' elements with the value 'value' and returns a
+ // vector that allows access to the elements. The vector is valid
+ // until the next change is made to this list.
+ Vector<T> AddBlock(T value, int count, Zone* zone);
+
+ // Overwrites the element at the specific index.
+ void Set(int index, const T& element);
+
+ // Removes the i'th element without deleting it even if T is a
+ // pointer type; moves all elements above i "down". Returns the
+ // removed element. This function's complexity is linear in the
+ // size of the list.
+ T Remove(int i);
+
+ // Removes the last element without deleting it even if T is a
+ // pointer type. Returns the removed element.
+ INLINE(T RemoveLast()) { return Remove(length_ - 1); }
+
+ // Clears the list by freeing the storage memory. If you want to keep the
+ // memory, use Rewind(0) instead. Be aware that even if T is a
+ // pointer type, clearing the list doesn't delete the entries.
+ INLINE(void Clear());
+
+ // Drops all but the first 'pos' elements from the list.
+ INLINE(void Rewind(int pos));
+
+ inline bool Contains(const T& elm) const;
+
+ // Iterate through all list entries, starting at index 0.
+ template <class Visitor>
+ void Iterate(Visitor* visitor);
+
+ // Sort all list entries (using QuickSort)
+ template <typename CompareFunction>
+ void Sort(CompareFunction cmp);
+ template <typename CompareFunction>
+ void StableSort(CompareFunction cmp, size_t start, size_t length);
+
void operator delete(void* pointer) { UNREACHABLE(); }
void operator delete(void* pointer, Zone* zone) { UNREACHABLE(); }
+
+ private:
+ T* data_;
+ int capacity_;
+ int length_;
+
+ INLINE(T* NewData(int n, ZoneAllocationPolicy allocator)) {
+ return static_cast<T*>(allocator.New(n * sizeof(T)));
+ }
+ INLINE(void DeleteData(T* data)) { ZoneAllocationPolicy::Delete(data); }
+
+ // Increase the capacity of a full list, and add an element.
+ // List must be full already.
+ void ResizeAdd(const T& element, ZoneAllocationPolicy allocator);
+
+ // Inlined implementation of ResizeAdd, shared by inlined and
+ // non-inlined versions of ResizeAdd.
+ void ResizeAddInternal(const T& element, ZoneAllocationPolicy allocator);
+
+ // Resize the list.
+ void Resize(int new_capacity, ZoneAllocationPolicy allocator);
+
+ DISALLOW_COPY_AND_ASSIGN(ZoneList);
};
// A zone splay tree. The config type parameter encapsulates the
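The bounds DCHECK in the new operator[] above folds two conditions into one comparison: casting both sides to unsigned rejects a negative index as well, because a negative value converts to a huge unsigned one. A small self-contained sketch of the trick:

#include <cassert>

// Folds (i >= 0 && i < length) into a single compare: negative i wraps
// to a value far above any valid length.
static bool InBounds(int i, int length) {
  return static_cast<unsigned>(i) < static_cast<unsigned>(length);
}

int main() {
  assert(InBounds(0, 3));
  assert(InBounds(2, 3));
  assert(!InBounds(3, 3));
  assert(!InBounds(-1, 3));  // -1 wraps to UINT_MAX, which fails the <.
}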
diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn
index fdb281140f..3dae59bac2 100644
--- a/deps/v8/test/cctest/BUILD.gn
+++ b/deps/v8/test/cctest/BUILD.gn
@@ -6,6 +6,34 @@ import("../../gni/v8.gni")
v8_executable("cctest") {
testonly = true
+ deps = [
+ ":cctest_sources",
+ ]
+ configs = [
+ "../..:external_config",
+ "../..:internal_config_base",
+ ]
+
+ ldflags = []
+
+ # TODO(machenbach): Translate from gyp.
+ #["OS=="aix"", {
+ # "ldflags": [ "-Wl,-bbigtoc" ],
+ #}],
+
+ # crbug.com/676417: Suppress symbol import warning from linker.
+ if (is_win && is_component_build) {
+ ldflags += [
+ "/ignore:4217",
+ "/ignore:4049",
+ ]
+ remove_configs = [ "//build/config/win:default_incremental_linking" ]
+ configs += [ "//build/config/win:no_incremental_linking" ]
+ }
+}
+
+v8_source_set("cctest_sources") {
+ testonly = true
sources = [
"$target_gen_dir/resources.cc",
@@ -61,7 +89,6 @@ v8_executable("cctest") {
"compiler/test-run-wasm-machops.cc",
"compiler/value-helper.h",
"expression-type-collector-macros.h",
- "ffi/test-ffi.cc",
"gay-fixed.cc",
"gay-fixed.h",
"gay-precision.cc",
@@ -119,7 +146,6 @@ v8_executable("cctest") {
"test-bignum.cc",
"test-bit-vector.cc",
"test-circular-queue.cc",
- "test-code-cache.cc",
"test-code-layout.cc",
"test-code-stub-assembler.cc",
"test-compiler.cc",
@@ -150,7 +176,6 @@ v8_executable("cctest") {
"test-identity-map.cc",
"test-inobject-slack-tracking.cc",
"test-intl.cc",
- "test-list.cc",
"test-liveedit.cc",
"test-lockers.cc",
"test-log.cc",
@@ -200,13 +225,27 @@ v8_executable("cctest") {
"wasm/test-run-wasm-relocation.cc",
"wasm/test-run-wasm-simd.cc",
"wasm/test-run-wasm.cc",
+ "wasm/test-streaming-compilation.cc",
"wasm/test-wasm-breakpoints.cc",
"wasm/test-wasm-interpreter-entry.cc",
"wasm/test-wasm-stack.cc",
"wasm/test-wasm-trap-position.cc",
+ "wasm/wasm-run-utils.cc",
"wasm/wasm-run-utils.h",
]
+ if (use_jumbo_build) {
+ jumbo_excluded_sources = [
+ # TODO(mostynb@opera.com): figure out the jumbo issues with these source
+ # files, and include them in jumbo compilation units.
+ "interpreter/bytecode-expectations-printer.cc",
+ "interpreter/bytecode-expectations-printer.h",
+ "interpreter/test-bytecode-generator.cc",
+ "test-api.cc",
+ "test-api.h",
+ ]
+ }
+
if (v8_current_cpu == "arm") {
sources += [ ### gcmole(arch:arm) ###
"test-assembler-arm.cc",
@@ -318,14 +357,9 @@ v8_executable("cctest") {
"../..:internal_config_base",
]
- # TODO(machenbach): Translate from gyp.
- #["OS=="aix"", {
- # "ldflags": [ "-Wl,-bbigtoc" ],
- #}],
-
- deps = [
+ public_deps = [
":resources",
- "../..:v8_builtins_generators",
+ "../..:v8_initializers",
"../..:v8_libbase",
"../..:v8_libplatform",
"../..:wasm_module_runner",
@@ -334,6 +368,7 @@ v8_executable("cctest") {
]
defines = []
+ deps = []
if (is_component_build) {
# cctest can't be built against a shared library, so we
@@ -351,18 +386,6 @@ v8_executable("cctest") {
}
cflags = []
- ldflags = []
-
- # crbug.com/676417: Suppress symbol import warning from linker.
- if (is_win && is_component_build) {
- ldflags += [
- "/ignore:4217",
- "/ignore:4049",
- ]
- remove_configs = [ "//build/config/win:default_incremental_linking" ]
- configs += [ "//build/config/win:no_incremental_linking" ]
- }
-
if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64" ||
v8_current_cpu == "arm" || v8_current_cpu == "arm64" ||
v8_current_cpu == "s390" || v8_current_cpu == "s390x" ||
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
index 9e2e2ad875..ffcd865531 100644
--- a/deps/v8/test/cctest/cctest.gyp
+++ b/deps/v8/test/cctest/cctest.gyp
@@ -79,7 +79,6 @@
'cctest.cc',
'cctest.h',
'expression-type-collector-macros.h',
- 'ffi/test-ffi.cc',
'gay-fixed.cc',
'gay-fixed.h',
'gay-precision.cc',
@@ -137,7 +136,6 @@
'test-bignum-dtoa.cc',
'test-bit-vector.cc',
'test-circular-queue.cc',
- 'test-code-cache.cc',
'test-code-layout.cc',
'test-code-stub-assembler.cc',
'test-compiler.cc',
@@ -168,7 +166,6 @@
'test-identity-map.cc',
'test-intl.cc',
'test-inobject-slack-tracking.cc',
- 'test-list.cc',
'test-liveedit.cc',
'test-lockers.cc',
'test-log.cc',
@@ -209,6 +206,7 @@
'types-fuzz.h',
'unicode-helpers.h',
'wasm/test-c-wasm-entry.cc',
+ 'wasm/test-streaming-compilation.cc',
'wasm/test-run-wasm.cc',
'wasm/test-run-wasm-64.cc',
'wasm/test-run-wasm-asmjs.cc',
@@ -222,6 +220,7 @@
'wasm/test-wasm-interpreter-entry.cc',
'wasm/test-wasm-stack.cc',
'wasm/test-wasm-trap-position.cc',
+ 'wasm/wasm-run-utils.cc',
'wasm/wasm-run-utils.h',
],
'cctest_sources_ia32': [ ### gcmole(arch:ia32) ###
@@ -432,7 +431,7 @@
'dependencies': ['../../src/v8.gyp:v8'],
}],
['v8_use_snapshot=="true"', {
- 'dependencies': ['../../src/v8.gyp:v8_builtins_generators'],
+ 'dependencies': ['../../src/v8.gyp:v8_initializers'],
}],
],
},
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index 513885cb9a..155d7393a0 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -33,9 +33,10 @@
#include "include/libplatform/libplatform.h"
#include "include/v8-platform.h"
#include "src/debug/debug-interface.h"
+#include "src/factory.h"
#include "src/flags.h"
#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/objects.h"
#include "src/utils.h"
#include "src/v8.h"
#include "src/zone/accounting-allocator.h"
@@ -677,6 +678,10 @@ class TestPlatform : public v8::Platform {
return old_platform_->MonotonicallyIncreasingTime();
}
+ double CurrentClockTimeMillis() override {
+ return old_platform_->CurrentClockTimeMillis();
+ }
+
void CallIdleOnForegroundThread(v8::Isolate* isolate,
v8::IdleTask* task) override {
old_platform_->CallIdleOnForegroundThread(isolate, task);
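The new CurrentClockTimeMillis override follows the pattern TestPlatform uses throughout: keep a pointer to the platform being displaced and forward each virtual call to it, so a method added to the v8::Platform interface costs one line of delegation. A sketch of that wrapper shape with illustrative names (Clock, FixedClock, WrappedClock are not V8 API):

#include <cassert>

struct Clock {
  virtual ~Clock() = default;
  virtual double CurrentClockTimeMillis() = 0;
};

struct FixedClock : Clock {
  double CurrentClockTimeMillis() override { return 42.0; }
};

// The wrapper forwards to the instance it replaced, exactly the shape of
// TestPlatform::CurrentClockTimeMillis in the hunk above.
struct WrappedClock : Clock {
  explicit WrappedClock(Clock* old) : old_(old) {}
  double CurrentClockTimeMillis() override {
    return old_->CurrentClockTimeMillis();
  }
  Clock* old_;
};

int main() {
  FixedClock real;
  WrappedClock wrapper(&real);
  assert(wrapper.CurrentClockTimeMillis() == 42.0);
}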
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 2c9117ae80..c82ca85ab8 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -198,6 +198,15 @@
}], # 'no_snap == True and system == windows'
##############################################################################
+['byteorder == big', {
+ # Skip WASM atomic tests on big-endian machines.
+ # There is no support to emulate atomic WASM operations on big-endian
+ # platforms, since this would require bit swapping as a part of atomic
+ # operations.
+ 'test-run-wasm-atomics/*': [SKIP],
+}], # 'byteorder == big'
+
+##############################################################################
['system == windows', {
# BUG(3331). Fails on windows.
@@ -340,6 +349,11 @@
}], # 'arch == ppc or arch == ppc64 or arch == s390 or arch == s390x'
##############################################################################
+['variant == stress_incremental_marking', {
+ 'test-heap-profiler/SamplingHeapProfiler': [SKIP],
+}], # variant == stress_incremental_marking
+
+##############################################################################
['variant == wasm_traps', {
'test-accessors/*': [SKIP],
'test-api-interceptors/*': [SKIP],
diff --git a/deps/v8/test/cctest/compiler/code-assembler-tester.h b/deps/v8/test/cctest/compiler/code-assembler-tester.h
index f49e8c55c9..8bfdb72ea1 100644
--- a/deps/v8/test/cctest/compiler/code-assembler-tester.h
+++ b/deps/v8/test/cctest/compiler/code-assembler-tester.h
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#ifndef V8_TEST_CCTEST_COMPILER_CODE_ASSEMBLER_TESTER_H_
+#define V8_TEST_CCTEST_COMPILER_CODE_ASSEMBLER_TESTER_H_
+
#include "src/compiler/code-assembler.h"
#include "src/handles.h"
#include "src/interface-descriptors.h"
@@ -18,23 +21,21 @@ class CodeAssemblerTester {
explicit CodeAssemblerTester(Isolate* isolate)
: zone_(isolate->allocator(), ZONE_NAME),
scope_(isolate),
- state_(isolate, &zone_, VoidDescriptor(isolate),
- Code::ComputeFlags(Code::STUB), "test") {}
+ state_(isolate, &zone_, VoidDescriptor(isolate), Code::STUB, "test") {}
// Test generating code for a JS function (e.g. builtins).
CodeAssemblerTester(Isolate* isolate, int parameter_count,
Code::Kind kind = Code::BUILTIN)
: zone_(isolate->allocator(), ZONE_NAME),
scope_(isolate),
- state_(isolate, &zone_, parameter_count, Code::ComputeFlags(kind),
- "test") {}
+ state_(isolate, &zone_, parameter_count, kind, "test") {}
// This constructor is intended to be used for creating code objects with
// specific flags.
- CodeAssemblerTester(Isolate* isolate, Code::Flags flags)
+ CodeAssemblerTester(Isolate* isolate, Code::Kind kind)
: zone_(isolate->allocator(), ZONE_NAME),
scope_(isolate),
- state_(isolate, &zone_, 0, flags, "test") {}
+ state_(isolate, &zone_, 0, kind, "test") {}
CodeAssemblerState* state() { return &state_; }
@@ -54,3 +55,5 @@ class CodeAssemblerTester {
} // namespace compiler
} // namespace internal
} // namespace v8
+
+#endif // V8_TEST_CCTEST_COMPILER_CODE_ASSEMBLER_TESTER_H_
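This hunk adds the include guard the header had been missing; without one, including the header twice in a single translation unit would redefine CodeAssemblerTester. A compilable sketch of why the guard makes repeated inclusion harmless (DEMO_GUARD_H_ is an illustrative name, not the V8 convention applied to a real path):

#ifndef DEMO_GUARD_H_
#define DEMO_GUARD_H_
struct OnlyDefinedOnce { int x; };
#endif  // DEMO_GUARD_H_

// A "second inclusion" of the same header body: the guard macro is
// already defined, so the definition is skipped instead of colliding.
#ifndef DEMO_GUARD_H_
#define DEMO_GUARD_H_
struct OnlyDefinedOnce { int x; };  // Would otherwise be a redefinition.
#endif  // DEMO_GUARD_H_

int main() {
  OnlyDefinedOnce o{1};
  return o.x - 1;  // Returns 0.
}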
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.h b/deps/v8/test/cctest/compiler/codegen-tester.h
index ca20e6fb6d..ea1eee55f1 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.h
+++ b/deps/v8/test/cctest/compiler/codegen-tester.h
@@ -69,7 +69,7 @@ class RawMachineAssemblerTester : public HandleAndZoneScope,
CallDescriptor* call_descriptor = this->call_descriptor();
Graph* graph = this->graph();
CompilationInfo info(ArrayVector("testing"), main_isolate(), main_zone(),
- Code::ComputeFlags(Code::STUB));
+ Code::STUB);
code_ = Pipeline::GenerateCodeForTesting(&info, call_descriptor, graph,
schedule);
}
@@ -100,7 +100,7 @@ class BufferedRawMachineAssemblerTester
// parameters from memory. Thereby it is possible to pass 64 bit parameters
// to the IR graph.
Node* Parameter(size_t index) {
- CHECK(index < 4);
+ CHECK_GT(4, index);
return parameter_nodes_[index];
}
@@ -260,7 +260,7 @@ class BufferedRawMachineAssemblerTester<void>
// parameters from memory. Thereby it is possible to pass 64 bit parameters
// to the IR graph.
Node* Parameter(size_t index) {
- CHECK(index < 4);
+ CHECK_GT(4, index);
return parameter_nodes_[index];
}
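The switch from CHECK(index < 4) to CHECK_GT(4, index) is about diagnostics: a two-operand macro can report both values when the check fails, which a single boolean CHECK cannot. A minimal sketch of such a macro; the real ones live in V8's logging headers, so MY_CHECK_GT here is only an illustration:

#include <cstdio>
#include <cstdlib>

#define MY_CHECK_GT(a, b)                                                \
  do {                                                                   \
    if (!((a) > (b))) {                                                  \
      std::fprintf(stderr, "Check failed: %s > %s (%lld vs %lld)\n", #a, \
                   #b, static_cast<long long>(a),                        \
                   static_cast<long long>(b));                           \
      std::abort();                                                      \
    }                                                                    \
  } while (0)

int main() {
  unsigned index = 3;
  MY_CHECK_GT(4, index);  // Passes; a failure would print both operands.
}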
diff --git a/deps/v8/test/cctest/compiler/function-tester.cc b/deps/v8/test/cctest/compiler/function-tester.cc
index 3a396ed361..facbd8eb55 100644
--- a/deps/v8/test/cctest/compiler/function-tester.cc
+++ b/deps/v8/test/cctest/compiler/function-tester.cc
@@ -4,6 +4,7 @@
#include "test/cctest/compiler/function-tester.h"
+#include "src/api.h"
#include "src/ast/ast-numbering.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
@@ -42,7 +43,7 @@ FunctionTester::FunctionTester(Handle<Code> code, int param_count)
flags_(0) {
CHECK(!code.is_null());
Compile(function);
- function->ReplaceCode(*code);
+ function->set_code(*code);
}
FunctionTester::FunctionTester(Handle<Code> code) : FunctionTester(code, 0) {}
@@ -139,8 +140,8 @@ Handle<JSFunction> FunctionTester::ForMachineGraph(Graph* graph,
Handle<JSFunction> FunctionTester::Compile(Handle<JSFunction> function) {
Handle<SharedFunctionInfo> shared(function->shared());
ParseInfo parse_info(shared);
- CompilationInfo info(parse_info.zone(), function->GetIsolate(),
- parse_info.script(), shared, function);
+ CompilationInfo info(parse_info.zone(), function->GetIsolate(), shared,
+ function);
if (flags_ & CompilationInfo::kInliningEnabled) {
info.MarkAsInliningEnabled();
@@ -155,7 +156,7 @@ Handle<JSFunction> FunctionTester::Compile(Handle<JSFunction> function) {
CHECK(!code.is_null());
info.dependencies()->Commit(code);
info.context()->native_context()->AddOptimizedCode(*code);
- function->ReplaceCode(*code);
+ function->set_code(*code);
return function;
}
@@ -164,12 +165,12 @@ Handle<JSFunction> FunctionTester::Compile(Handle<JSFunction> function) {
Handle<JSFunction> FunctionTester::CompileGraph(Graph* graph) {
Handle<SharedFunctionInfo> shared(function->shared());
ParseInfo parse_info(shared);
- CompilationInfo info(parse_info.zone(), function->GetIsolate(),
- parse_info.script(), shared, function);
+ CompilationInfo info(parse_info.zone(), function->GetIsolate(), shared,
+ function);
Handle<Code> code = Pipeline::GenerateCodeForTesting(&info, graph);
CHECK(!code.is_null());
- function->ReplaceCode(*code);
+ function->set_code(*code);
return function;
}
diff --git a/deps/v8/test/cctest/compiler/graph-builder-tester.h b/deps/v8/test/cctest/compiler/graph-builder-tester.h
index a2436ad1d2..0de682e86e 100644
--- a/deps/v8/test/cctest/compiler/graph-builder-tester.h
+++ b/deps/v8/test/cctest/compiler/graph-builder-tester.h
@@ -281,7 +281,7 @@ class GraphBuilderTester : public HandleAndZoneScope,
CallDescriptor* desc =
Linkage::GetSimplifiedCDescriptor(zone, this->csig_);
CompilationInfo info(ArrayVector("testing"), main_isolate(), main_zone(),
- Code::ComputeFlags(Code::STUB));
+ Code::STUB);
code_ = Pipeline::GenerateCodeForTesting(&info, desc, graph());
#ifdef ENABLE_DISASSEMBLER
if (!code_.is_null() && FLAG_print_opt_code) {
diff --git a/deps/v8/test/cctest/compiler/test-code-assembler.cc b/deps/v8/test/cctest/compiler/test-code-assembler.cc
index 43834ace6d..8661a5eb5c 100644
--- a/deps/v8/test/cctest/compiler/test-code-assembler.cc
+++ b/deps/v8/test/cctest/compiler/test-code-assembler.cc
@@ -4,6 +4,8 @@
#include "src/code-factory.h"
#include "src/compiler/code-assembler.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/opcodes.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "test/cctest/compiler/code-assembler-tester.h"
@@ -289,7 +291,7 @@ TEST(VariableMergeBindFirst) {
m.Goto(&merge);
m.Bind(&merge);
CHECK(var1.value() != temp);
- CHECK(var1.value() != nullptr);
+ CHECK_NOT_NULL(var1.value());
m.Goto(&end);
m.Bind(&l2);
Node* temp2 = m.Int32Constant(2);
@@ -298,7 +300,7 @@ TEST(VariableMergeBindFirst) {
m.Goto(&merge);
m.Bind(&end);
CHECK(var1.value() != temp);
- CHECK(var1.value() != nullptr);
+ CHECK_NOT_NULL(var1.value());
}
TEST(VariableMergeSwitch) {
@@ -309,18 +311,23 @@ TEST(VariableMergeSwitch) {
Label l1(&m), l2(&m), default_label(&m);
Label* labels[] = {&l1, &l2};
int32_t values[] = {1, 2};
- Node* temp = m.Int32Constant(0);
- var1.Bind(temp);
+ Node* temp1 = m.Int32Constant(0);
+ var1.Bind(temp1);
m.Switch(m.Int32Constant(2), &default_label, values, labels, 2);
m.Bind(&l1);
- DCHECK_EQ(temp, var1.value());
- m.Return(temp);
+ CHECK_EQ(temp1, var1.value());
+ m.Return(temp1);
m.Bind(&l2);
- DCHECK_EQ(temp, var1.value());
- m.Return(temp);
+ CHECK_EQ(temp1, var1.value());
+ Node* temp2 = m.Int32Constant(7);
+ var1.Bind(temp2);
+ m.Goto(&default_label);
m.Bind(&default_label);
- DCHECK_EQ(temp, var1.value());
- m.Return(temp);
+ CHECK_EQ(IrOpcode::kPhi, var1.value()->opcode());
+ CHECK_EQ(2, var1.value()->op()->ValueInputCount());
+ CHECK_EQ(temp1, NodeProperties::GetValueInput(var1.value(), 0));
+ CHECK_EQ(temp2, NodeProperties::GetValueInput(var1.value(), 1));
+ m.Return(temp1);
}
TEST(SplitEdgeBranchMerge) {
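The rewritten VariableMergeSwitch above now binds a second value on the l2 path before jumping to default_label, then asserts the merged value is a Phi whose two inputs are the incoming bindings (it also upgrades debug-only DCHECKs to CHECKs, so release runs verify this too). Conceptually a phi simply yields the value that flowed in from whichever predecessor was taken; a plain-C++ sketch of that semantics:

#include <cassert>

// With two predecessors, a phi degenerates to a select on which edge
// was actually taken into the merge block.
static int Phi(bool came_from_first_pred, int input0, int input1) {
  return came_from_first_pred ? input0 : input1;
}

int main() {
  const int temp1 = 0;  // Bound before the Switch.
  const int temp2 = 7;  // Bound on the l2 path before Goto(default).
  assert(Phi(true, temp1, temp2) == 0);
  assert(Phi(false, temp1, temp2) == 7);
}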
diff --git a/deps/v8/test/cctest/compiler/test-code-generator.cc b/deps/v8/test/cctest/compiler/test-code-generator.cc
index d4856d5f45..3de36ac986 100644
--- a/deps/v8/test/cctest/compiler/test-code-generator.cc
+++ b/deps/v8/test/cctest/compiler/test-code-generator.cc
@@ -2,34 +2,155 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/assembler-inl.h"
+#include "src/base/utils/random-number-generator.h"
#include "src/codegen.h"
#include "src/compilation-info.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/instruction.h"
#include "src/compiler/linkage.h"
#include "src/isolate.h"
+#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/function-tester.h"
namespace v8 {
namespace internal {
namespace compiler {
-class CodeGeneratorTester : public InitializedHandleScope {
+namespace {
+
+int GetSlotSizeInBytes(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kFloat32:
+ return kPointerSize;
+ case MachineRepresentation::kFloat64:
+ return kDoubleSize;
+ case MachineRepresentation::kSimd128:
+ return kSimd128Size;
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+
+} // namespace
+
+// Wrapper around the CodeGenerator with the ability to randomly generate moves
+// and swaps which can then be executed. The `slots` map represents how many
+// slots should be allocated per representation. Parallel moves will then be
+// generated by randomly picking slots. Constants can be provided so that
+// parallel moves may use them.
+//
+// At the moment, only the following representations are tested:
+// - kTagged
+// - kFloat32
+// - kFloat64
+// - kSimd128
+// There is no need to test using Word32 or Word64 as they are the same as
+// Tagged as far as the code generator is concerned.
+class CodeGeneratorTester : public HandleAndZoneScope {
public:
- CodeGeneratorTester()
- : zone_(main_isolate()->allocator(), ZONE_NAME),
- info_(ArrayVector("test"), main_isolate(), &zone_,
- Code::ComputeFlags(Code::STUB)),
- descriptor_(Linkage::GetJSCallDescriptor(&zone_, false, 0,
- CallDescriptor::kNoFlags)),
+ CodeGeneratorTester(std::map<MachineRepresentation, int> slots =
+ std::map<MachineRepresentation, int>{},
+ std::initializer_list<Constant> constants = {})
+ : info_(ArrayVector("test"), main_isolate(), main_zone(), Code::STUB),
+ descriptor_(Linkage::GetStubCallDescriptor(
+ main_isolate(), main_zone(), VoidDescriptor(main_isolate()), 0,
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), 0)),
linkage_(descriptor_),
- blocks_(&zone_),
- sequence_(main_isolate(), &zone_, &blocks_),
+ blocks_(main_zone()),
+ sequence_(main_isolate(), main_zone(), &blocks_),
+ rng_(CcTest::random_number_generator()),
frame_(descriptor_->CalculateFixedFrameSize()),
- generator_(&zone_, &frame_, &linkage_, &sequence_, &info_,
+ generator_(main_zone(), &frame_, &linkage_, &sequence_, &info_,
base::Optional<OsrHelper>(), kNoSourcePosition, nullptr) {
- info_.set_prologue_offset(generator_.tasm()->pc_offset());
+ // Keep track of all supported representations depending on what kind of
+ // stack slots are supported.
+ for (const auto& slot : slots) {
+ supported_reps_.push_back(slot.first);
+ }
+ // Allocate new slots until we run out of them.
+ while (std::any_of(slots.cbegin(), slots.cend(),
+ [](const std::pair<MachineRepresentation, int>& entry) {
+ // True if there are slots left to allocate for this
+ // representation.
+ return entry.second > 0;
+ })) {
+ // Pick a random MachineRepresentation from supported_reps_.
+ MachineRepresentation rep = CreateRandomMachineRepresentation();
+ auto entry = slots.find(rep);
+ DCHECK(entry != slots.end());
+ // We may have picked a representation for which all slots have already
+ // been allocated.
+ if (entry->second > 0) {
+ // Keep a map of (MachineRepresentation . std::vector<int>) with
+ // allocated slots to pick from for each representation.
+ RegisterSlot(rep, frame_.AllocateSpillSlot(GetSlotSizeInBytes(rep)));
+ entry->second--;
+ }
+ }
+ for (auto constant : constants) {
+ int virtual_register = AllocateConstant(constant);
+ // Associate constants with their compatible representations.
+ // TODO(all): Test all types of constants.
+ switch (constant.type()) {
+ // Integer constants are always moved to a tagged location, whatever
+ // their sizes.
+ case Constant::kInt32:
+ case Constant::kInt64:
+ RegisterConstant(MachineRepresentation::kTagged, virtual_register);
+ break;
+ // FP constants may be moved to a tagged location using a heap number,
+ // or directly to a location of the same size.
+ case Constant::kFloat32:
+ RegisterConstant(MachineRepresentation::kTagged, virtual_register);
+ RegisterConstant(MachineRepresentation::kFloat32, virtual_register);
+ break;
+ case Constant::kFloat64:
+ RegisterConstant(MachineRepresentation::kTagged, virtual_register);
+ RegisterConstant(MachineRepresentation::kFloat64, virtual_register);
+ break;
+ default:
+ break;
+ }
+ }
+ // Force a frame to be created.
+ generator_.frame_access_state()->MarkHasFrame(true);
+ generator_.AssembleConstructFrame();
+ // TODO(all): Generate a stack check here so that we fail gracefully if the
+ // frame is too big.
+ }
+
+ int AllocateConstant(Constant constant) {
+ int virtual_register = sequence_.NextVirtualRegister();
+ sequence_.AddConstant(virtual_register, constant);
+ return virtual_register;
+ }
+
+ // Register a constant referenced by `virtual_register` as compatible with
+ // `rep`.
+ void RegisterConstant(MachineRepresentation rep, int virtual_register) {
+ auto entry = constants_.find(rep);
+ if (entry == constants_.end()) {
+ std::vector<int> vregs = {virtual_register};
+ constants_.emplace(rep, vregs);
+ } else {
+ entry->second.push_back(virtual_register);
+ }
+ }
+
+ void RegisterSlot(MachineRepresentation rep, int slot) {
+ auto entry = allocated_slots_.find(rep);
+ if (entry == allocated_slots_.end()) {
+ std::vector<int> slots = {slot};
+ allocated_slots_.emplace(rep, slots);
+ } else {
+ entry->second.push_back(slot);
+ }
}
enum PushTypeFlag {
@@ -38,6 +159,124 @@ class CodeGeneratorTester : public InitializedHandleScope {
kScalarPush = CodeGenerator::kScalarPush
};
+ enum OperandConstraint {
+ kNone,
+ // Restrict operands to non-constants. This is useful when generating a
+ // destination.
+ kCannotBeConstant
+ };
+
+ // Generate parallel moves at random. Note that they may not be compatible
+ // between each other as this doesn't matter to the code generator.
+ ParallelMove* GenerateRandomMoves(int size) {
+ ParallelMove* parallel_move = new (main_zone()) ParallelMove(main_zone());
+
+ for (int i = 0; i < size;) {
+ MachineRepresentation rep = CreateRandomMachineRepresentation();
+ MoveOperands mo(CreateRandomOperand(kNone, rep),
+ CreateRandomOperand(kCannotBeConstant, rep));
+ // It isn't valid to call `AssembleMove` and `AssembleSwap` with redundant
+ // moves.
+ if (mo.IsRedundant()) continue;
+ parallel_move->AddMove(mo.source(), mo.destination());
+ // Iterate only when a move was created.
+ i++;
+ }
+
+ return parallel_move;
+ }
+
+ ParallelMove* GenerateRandomSwaps(int size) {
+ ParallelMove* parallel_move = new (main_zone()) ParallelMove(main_zone());
+
+ for (int i = 0; i < size;) {
+ MachineRepresentation rep = CreateRandomMachineRepresentation();
+ InstructionOperand lhs = CreateRandomOperand(kCannotBeConstant, rep);
+ InstructionOperand rhs = CreateRandomOperand(kCannotBeConstant, rep);
+ MoveOperands mo(lhs, rhs);
+ // It isn't valid to call `AssembleMove` and `AssembleSwap` with redundant
+ // moves.
+ if (mo.IsRedundant()) continue;
+ // Canonicalize the swap: the register operand has to be the left hand
+ // side.
+ if (lhs.IsStackSlot() || lhs.IsFPStackSlot()) {
+ std::swap(lhs, rhs);
+ }
+ parallel_move->AddMove(lhs, rhs);
+ // Iterate only when a swap was created.
+ i++;
+ }
+
+ return parallel_move;
+ }
+
+ MachineRepresentation CreateRandomMachineRepresentation() {
+ int index = rng_->NextInt(static_cast<int>(supported_reps_.size()));
+ return supported_reps_[index];
+ }
+
+ InstructionOperand CreateRandomOperand(OperandConstraint constraint,
+ MachineRepresentation rep) {
+ // Only generate a Constant if the operand is a source and we have a
+ // constant with a compatible representation in stock.
+ bool generate_constant = (constraint != kCannotBeConstant) &&
+ (constants_.find(rep) != constants_.end());
+ switch (rng_->NextInt(generate_constant ? 3 : 2)) {
+ case 0:
+ return CreateRandomStackSlotOperand(rep);
+ case 1:
+ return CreateRandomRegisterOperand(rep);
+ case 2:
+ return CreateRandomConstant(rep);
+ }
+ UNREACHABLE();
+ }
+
+ InstructionOperand CreateRandomRegisterOperand(MachineRepresentation rep) {
+ int code;
+ const RegisterConfiguration* conf = RegisterConfiguration::Default();
+ switch (rep) {
+ case MachineRepresentation::kFloat32: {
+ int index = rng_->NextInt(conf->num_allocatable_float_registers());
+ code = conf->RegisterConfiguration::GetAllocatableFloatCode(index);
+ break;
+ }
+ case MachineRepresentation::kFloat64: {
+ int index = rng_->NextInt(conf->num_allocatable_double_registers());
+ code = conf->RegisterConfiguration::GetAllocatableDoubleCode(index);
+ break;
+ }
+ case MachineRepresentation::kSimd128: {
+ int index = rng_->NextInt(conf->num_allocatable_simd128_registers());
+ code = conf->RegisterConfiguration::GetAllocatableSimd128Code(index);
+ break;
+ }
+ case MachineRepresentation::kTagged: {
+ // Pick an allocatable register that is not the return register.
+ do {
+ int index = rng_->NextInt(conf->num_allocatable_general_registers());
+ code = conf->RegisterConfiguration::GetAllocatableGeneralCode(index);
+ } while (code == kReturnRegister0.code());
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return AllocatedOperand(LocationOperand::REGISTER, rep, code);
+ }
+
+ InstructionOperand CreateRandomStackSlotOperand(MachineRepresentation rep) {
+ int index = rng_->NextInt(static_cast<int>(allocated_slots_[rep].size()));
+ return AllocatedOperand(LocationOperand::STACK_SLOT, rep,
+ allocated_slots_[rep][index]);
+ }
+
+ InstructionOperand CreateRandomConstant(MachineRepresentation rep) {
+ int index = rng_->NextInt(static_cast<int>(constants_[rep].size()));
+ return ConstantOperand(constants_[rep][index]);
+ }
+
void CheckAssembleTailCallGaps(Instruction* instr,
int first_unused_stack_slot,
CodeGeneratorTester::PushTypeFlag push_type) {
@@ -64,7 +303,24 @@ class CodeGeneratorTester : public InitializedHandleScope {
generator_.AssembleTailCallAfterGap(instr, first_unused_stack_slot);
}
+ void CheckAssembleMove(InstructionOperand* source,
+ InstructionOperand* destination) {
+ int start = generator_.tasm()->pc_offset();
+ generator_.AssembleMove(source, destination);
+ CHECK(generator_.tasm()->pc_offset() > start);
+ }
+
+ void CheckAssembleSwap(InstructionOperand* source,
+ InstructionOperand* destination) {
+ int start = generator_.tasm()->pc_offset();
+ generator_.AssembleSwap(source, destination);
+ CHECK(generator_.tasm()->pc_offset() > start);
+ }
+
Handle<Code> Finalize() {
+ InstructionOperand zero = ImmediateOperand(ImmediateOperand::INLINE, 0);
+ generator_.AssembleReturn(&zero);
+
generator_.FinishCode();
generator_.safepoints()->Emit(generator_.tasm(),
frame_.GetTotalFrameSlotCount());
@@ -79,24 +335,138 @@ class CodeGeneratorTester : public InitializedHandleScope {
}
}
- Zone* zone() { return &zone_; }
+ void Run() {
+ HandleScope scope(main_isolate());
+ Handle<Code> code = Finalize();
+ if (FLAG_print_code) {
+ code->Print();
+ }
+ FunctionTester ft(code);
+ ft.Call();
+ }
+
+ v8::base::RandomNumberGenerator* rng() const { return rng_; }
private:
- Zone zone_;
CompilationInfo info_;
CallDescriptor* descriptor_;
Linkage linkage_;
ZoneVector<InstructionBlock*> blocks_;
InstructionSequence sequence_;
+ std::vector<MachineRepresentation> supported_reps_;
+ std::map<MachineRepresentation, std::vector<int>> allocated_slots_;
+ std::map<MachineRepresentation, std::vector<int>> constants_;
+ v8::base::RandomNumberGenerator* rng_;
Frame frame_;
CodeGenerator generator_;
};
+// The following fuzz tests will assemble a lot of moves, wrap them in
+// executable native code and run them. At this time, we only check that
+// something is actually generated, and that it runs on hardware or the
+// simulator.
+
+// TODO(all): It would be great to record the data on the stack after all moves
+// are executed so that we could test the functionality in an architecture
+// independent way. We would also have to make sure we generate moves compatible
+// with each other as the gap-resolver tests do.
+
+TEST(FuzzAssembleMove) {
+ // Test small and potentially large ranges separately. Note that the number of
+ // slots affects how much stack is allocated when running the generated code.
+ // This means we have to be careful not to exceed the stack limit, which is
+ // lower on Windows.
+ for (auto n : {64, 500}) {
+ std::map<MachineRepresentation, int> slots = {
+ {MachineRepresentation::kTagged, n},
+ {MachineRepresentation::kFloat32, n},
+ {MachineRepresentation::kFloat64, n}};
+ if (CpuFeatures::SupportsWasmSimd128()) {
+ // Generate fewer 128-bit slots.
+ slots.emplace(MachineRepresentation::kSimd128, n / 4);
+ }
+ CodeGeneratorTester c(
+ slots,
+ {Constant(0), Constant(1), Constant(2), Constant(3), Constant(4),
+ Constant(5), Constant(6), Constant(7),
+ Constant(static_cast<float>(0.1)), Constant(static_cast<float>(0.2)),
+ Constant(static_cast<float>(0.3)), Constant(static_cast<float>(0.4)),
+ Constant(static_cast<double>(0.5)), Constant(static_cast<double>(0.6)),
+ Constant(static_cast<double>(0.7)),
+ Constant(static_cast<double>(0.8))});
+ ParallelMove* moves = c.GenerateRandomMoves(1000);
+ for (const auto m : *moves) {
+ c.CheckAssembleMove(&m->source(), &m->destination());
+ }
+ c.Run();
+ }
+}
+
+TEST(FuzzAssembleSwap) {
+ // Test small and potentially large ranges separately. Note that the number of
+ // slots affects how much stack is allocated when running the generated code.
+ // This means we have to be careful not to exceed the stack limit, which is
+ // lower on Windows.
+ for (auto n : {64, 500}) {
+ std::map<MachineRepresentation, int> slots = {
+ {MachineRepresentation::kTagged, n},
+ {MachineRepresentation::kFloat32, n},
+ {MachineRepresentation::kFloat64, n}};
+ if (CpuFeatures::SupportsWasmSimd128()) {
+ // Generate fewer 128-bit slots.
+ slots.emplace(MachineRepresentation::kSimd128, n / 4);
+ }
+ CodeGeneratorTester c(slots);
+ ParallelMove* moves = c.GenerateRandomSwaps(1000);
+ for (const auto m : *moves) {
+ c.CheckAssembleSwap(&m->source(), &m->destination());
+ }
+ c.Run();
+ }
+}
+
+TEST(FuzzAssembleMoveAndSwap) {
+ // Test small and potentially large ranges separately. Note that the number of
+ // slots affects how much stack is allocated when running the generated code.
+ // This means we have to be careful not to exceed the stack limit, which is
+ // lower on Windows.
+ for (auto n : {64, 500}) {
+ std::map<MachineRepresentation, int> slots = {
+ {MachineRepresentation::kTagged, n},
+ {MachineRepresentation::kFloat32, n},
+ {MachineRepresentation::kFloat64, n}};
+ if (CpuFeatures::SupportsWasmSimd128()) {
+ // Generate fewer 128-bit slots.
+ slots.emplace(MachineRepresentation::kSimd128, n / 4);
+ }
+ CodeGeneratorTester c(
+ slots,
+ {Constant(0), Constant(1), Constant(2), Constant(3), Constant(4),
+ Constant(5), Constant(6), Constant(7),
+ Constant(static_cast<float>(0.1)), Constant(static_cast<float>(0.2)),
+ Constant(static_cast<float>(0.3)), Constant(static_cast<float>(0.4)),
+ Constant(static_cast<double>(0.5)), Constant(static_cast<double>(0.6)),
+ Constant(static_cast<double>(0.7)),
+ Constant(static_cast<double>(0.8))});
+ for (int i = 0; i < 1000; i++) {
+ // Randomly alternate between swaps and moves.
+ if (c.rng()->NextInt(2) == 0) {
+ MoveOperands* move = c.GenerateRandomMoves(1)->at(0);
+ c.CheckAssembleMove(&move->source(), &move->destination());
+ } else {
+ MoveOperands* move = c.GenerateRandomSwaps(1)->at(0);
+ c.CheckAssembleSwap(&move->source(), &move->destination());
+ }
+ }
+ c.Run();
+ }
+}
+
TEST(AssembleTailCallGap) {
const RegisterConfiguration* conf = RegisterConfiguration::Default();
// This test assumes at least 4 registers are allocatable.
- CHECK(conf->num_allocatable_general_registers() >= 4);
+ CHECK_LE(4, conf->num_allocatable_general_registers());
auto r0 = AllocatedOperand(LocationOperand::REGISTER,
MachineRepresentation::kTagged,
@@ -142,14 +512,22 @@ TEST(AssembleTailCallGap) {
{
// Generate a series of register pushes only.
CodeGeneratorTester c;
- Instruction* instr = Instruction::New(c.zone(), kArchNop);
- instr->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION, c.zone())
+ Instruction* instr = Instruction::New(c.main_zone(), kArchNop);
+ instr
+ ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
+ c.main_zone())
->AddMove(r3, slot_0);
- instr->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION, c.zone())
+ instr
+ ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
+ c.main_zone())
->AddMove(r2, slot_1);
- instr->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION, c.zone())
+ instr
+ ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
+ c.main_zone())
->AddMove(r1, slot_2);
- instr->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION, c.zone())
+ instr
+ ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
+ c.main_zone())
->AddMove(r0, slot_3);
c.CheckAssembleTailCallGaps(instr, first_slot + 4,
@@ -160,14 +538,22 @@ TEST(AssembleTailCallGap) {
{
// Generate a series of stack pushes only.
CodeGeneratorTester c;
- Instruction* instr = Instruction::New(c.zone(), kArchNop);
- instr->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION, c.zone())
+ Instruction* instr = Instruction::New(c.main_zone(), kArchNop);
+ instr
+ ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
+ c.main_zone())
->AddMove(slot_minus_4, slot_0);
- instr->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION, c.zone())
+ instr
+ ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
+ c.main_zone())
->AddMove(slot_minus_3, slot_1);
- instr->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION, c.zone())
+ instr
+ ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
+ c.main_zone())
->AddMove(slot_minus_2, slot_2);
- instr->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION, c.zone())
+ instr
+ ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
+ c.main_zone())
->AddMove(slot_minus_1, slot_3);
c.CheckAssembleTailCallGaps(instr, first_slot + 4,
@@ -178,14 +564,22 @@ TEST(AssembleTailCallGap) {
{
// Generate a mix of stack and register pushes.
CodeGeneratorTester c;
- Instruction* instr = Instruction::New(c.zone(), kArchNop);
- instr->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION, c.zone())
+ Instruction* instr = Instruction::New(c.main_zone(), kArchNop);
+ instr
+ ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
+ c.main_zone())
->AddMove(slot_minus_2, slot_0);
- instr->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION, c.zone())
+ instr
+ ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
+ c.main_zone())
->AddMove(r1, slot_1);
- instr->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION, c.zone())
+ instr
+ ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
+ c.main_zone())
->AddMove(slot_minus_1, slot_2);
- instr->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION, c.zone())
+ instr
+ ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
+ c.main_zone())
->AddMove(r0, slot_3);
c.CheckAssembleTailCallGaps(instr, first_slot + 4,
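GenerateRandomMoves and GenerateRandomSwaps above share a generate-and-filter loop: the counter advances only when the candidate is not redundant, so the ParallelMove always ends up with exactly `size` useful entries. A standalone sketch of that loop shape, with plain slot indices standing in for InstructionOperands:

#include <cassert>
#include <cstdlib>
#include <utility>
#include <vector>

// Index i advances only when a non-redundant candidate is kept, so the
// result always holds exactly `size` moves.
std::vector<std::pair<int, int>> RandomMoves(int size, int num_slots) {
  std::vector<std::pair<int, int>> moves;
  for (int i = 0; i < size;) {
    int src = std::rand() % num_slots;
    int dst = std::rand() % num_slots;
    if (src == dst) continue;  // Redundant move: retry, don't count it.
    moves.emplace_back(src, dst);
    i++;
  }
  return moves;
}

int main() {
  std::srand(0);  // Deterministic for the sketch.
  std::vector<std::pair<int, int>> moves = RandomMoves(100, 8);
  assert(moves.size() == 100u);
  for (const auto& m : moves) assert(m.first != m.second);
}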
diff --git a/deps/v8/test/cctest/compiler/test-instruction.cc b/deps/v8/test/cctest/compiler/test-instruction.cc
index 2d2546690b..a8d34434d3 100644
--- a/deps/v8/test/cctest/compiler/test-instruction.cc
+++ b/deps/v8/test/cctest/compiler/test-instruction.cc
@@ -205,7 +205,7 @@ TEST(InstructionIsGapAt) {
R.code->AddInstruction(g);
R.code->EndBlock(R.RpoFor(b0));
- CHECK(R.code->instructions().size() == 2);
+ CHECK_EQ(2, R.code->instructions().size());
}
@@ -232,7 +232,7 @@ TEST(InstructionIsGapAt2) {
R.code->AddInstruction(g1);
R.code->EndBlock(R.RpoFor(b1));
- CHECK(R.code->instructions().size() == 4);
+ CHECK_EQ(4, R.code->instructions().size());
}
@@ -250,7 +250,7 @@ TEST(InstructionAddGapMove) {
R.code->AddInstruction(g);
R.code->EndBlock(R.RpoFor(b0));
- CHECK(R.code->instructions().size() == 2);
+ CHECK_EQ(2, R.code->instructions().size());
int index = 0;
for (auto instr : R.code->instructions()) {
diff --git a/deps/v8/test/cctest/compiler/test-js-constant-cache.cc b/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
index 94b846fa14..ee53f26245 100644
--- a/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
+++ b/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
@@ -5,14 +5,7 @@
#include "src/assembler.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
-#include "src/factory.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/factory.h -> src/objects-inl.h
-#include "src/objects-inl.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/feedback-vector.h ->
-// src/feedback-vector-inl.h
-#include "src/feedback-vector-inl.h"
+#include "src/factory-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
diff --git a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
index 8e24bac8fc..3944afee42 100644
--- a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
@@ -10,13 +10,9 @@
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/typer.h"
-#include "src/factory.h"
+#include "src/factory-inl.h"
#include "src/isolate.h"
-#include "src/objects-inl.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/feedback-vector.h ->
-// src/feedback-vector-inl.h
-#include "src/feedback-vector-inl.h"
+#include "src/objects.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/test-linkage.cc b/deps/v8/test/cctest/compiler/test-linkage.cc
index 16a6a85ab7..57af474848 100644
--- a/deps/v8/test/cctest/compiler/test-linkage.cc
+++ b/deps/v8/test/cctest/compiler/test-linkage.cc
@@ -33,11 +33,13 @@ static Handle<JSFunction> Compile(const char* source) {
Handle<String> source_code = isolate->factory()
->NewStringFromUtf8(CStrVector(source))
.ToHandleChecked();
- Handle<SharedFunctionInfo> shared = Compiler::GetSharedFunctionInfoForScript(
- source_code, Handle<String>(), 0, 0, v8::ScriptOriginOptions(),
- Handle<Object>(), Handle<Context>(isolate->native_context()), NULL, NULL,
- v8::ScriptCompiler::kNoCompileOptions, NOT_NATIVES_CODE,
- Handle<FixedArray>());
+ Handle<SharedFunctionInfo> shared =
+ Compiler::GetSharedFunctionInfoForScript(
+ source_code, MaybeHandle<String>(), 0, 0, v8::ScriptOriginOptions(),
+ MaybeHandle<Object>(), Handle<Context>(isolate->native_context()),
+ NULL, NULL, v8::ScriptCompiler::kNoCompileOptions, NOT_NATIVES_CODE,
+ MaybeHandle<FixedArray>())
+ .ToHandleChecked();
return isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared, isolate->native_context());
}
@@ -47,9 +49,8 @@ TEST(TestLinkageCreate) {
HandleAndZoneScope handles;
Handle<JSFunction> function = Compile("a + b");
Handle<SharedFunctionInfo> shared(function->shared());
- Handle<Script> script(Script::cast(shared->script()));
- CompilationInfo info(handles.main_zone(), function->GetIsolate(), script,
- shared, function);
+ CompilationInfo info(handles.main_zone(), function->GetIsolate(), shared,
+ function);
CallDescriptor* descriptor = Linkage::ComputeIncoming(info.zone(), &info);
CHECK(descriptor);
}
@@ -65,9 +66,8 @@ TEST(TestLinkageJSFunctionIncoming) {
Handle<JSFunction>::cast(v8::Utils::OpenHandle(
*v8::Local<v8::Function>::Cast(CompileRun(sources[i]))));
Handle<SharedFunctionInfo> shared(function->shared());
- Handle<Script> script(Script::cast(shared->script()));
- CompilationInfo info(handles.main_zone(), function->GetIsolate(), script,
- shared, function);
+ CompilationInfo info(handles.main_zone(), function->GetIsolate(), shared,
+ function);
CallDescriptor* descriptor = Linkage::ComputeIncoming(info.zone(), &info);
CHECK(descriptor);
@@ -83,9 +83,8 @@ TEST(TestLinkageJSCall) {
HandleAndZoneScope handles;
Handle<JSFunction> function = Compile("a + c");
Handle<SharedFunctionInfo> shared(function->shared());
- Handle<Script> script(Script::cast(shared->script()));
- CompilationInfo info(handles.main_zone(), function->GetIsolate(), script,
- shared, function);
+ CompilationInfo info(handles.main_zone(), function->GetIsolate(), shared,
+ function);
for (int i = 0; i < 32; i++) {
CallDescriptor* descriptor = Linkage::GetJSCallDescriptor(
@@ -108,8 +107,7 @@ TEST(TestLinkageStubCall) {
Isolate* isolate = CcTest::InitIsolateOnce();
Zone zone(isolate->allocator(), ZONE_NAME);
Callable callable = Builtins::CallableFor(isolate, Builtins::kToNumber);
- CompilationInfo info(ArrayVector("test"), isolate, &zone,
- Code::ComputeFlags(Code::STUB));
+ CompilationInfo info(ArrayVector("test"), isolate, &zone, Code::STUB);
CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
isolate, &zone, callable.descriptor(), 0, CallDescriptor::kNoFlags,
Operator::kNoProperties);
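GetSharedFunctionInfoForScript now returns a MaybeHandle, so the test threads failure through the return type and unwraps with ToHandleChecked(). A small sketch of the Maybe pattern; Maybe and ParseDigit here are illustrative stand-ins, not the V8 types:

#include <cassert>
#include <cstdlib>

// Failure travels in the return type. ToChecked() mirrors
// ToHandleChecked(): crash deterministically when a value was required.
template <typename T>
class Maybe {
 public:
  Maybe() : has_value_(false), value_() {}
  explicit Maybe(T value) : has_value_(true), value_(value) {}
  T ToChecked() const {
    if (!has_value_) std::abort();
    return value_;
  }
  bool To(T* out) const {
    if (has_value_) *out = value_;
    return has_value_;
  }

 private:
  bool has_value_;
  T value_;
};

static Maybe<int> ParseDigit(char c) {
  if (c < '0' || c > '9') return Maybe<int>();
  return Maybe<int>(c - '0');
}

int main() {
  assert(ParseDigit('7').ToChecked() == 7);
  int v = 0;
  assert(!ParseDigit('x').To(&v));  // Failure is observable, not a crash.
}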
diff --git a/deps/v8/test/cctest/compiler/test-multiple-return.cc b/deps/v8/test/cctest/compiler/test-multiple-return.cc
index a016564fe0..25e5527f03 100644
--- a/deps/v8/test/cctest/compiler/test-multiple-return.cc
+++ b/deps/v8/test/cctest/compiler/test-multiple-return.cc
@@ -82,7 +82,7 @@ TEST(ReturnThreeValues) {
m.Return(add, sub, mul);
CompilationInfo info(ArrayVector("testing"), handles.main_isolate(),
- handles.main_zone(), Code::ComputeFlags(Code::STUB));
+ handles.main_zone(), Code::STUB);
Handle<Code> code =
Pipeline::GenerateCodeForTesting(&info, desc, m.graph(), m.Export());
#ifdef ENABLE_DISASSEMBLER
diff --git a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
index 9189c7c502..a1dde0dc41 100644
--- a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
+++ b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
@@ -4,6 +4,7 @@
#include <utility>
+#include "src/api.h"
#include "src/compilation-info.h"
#include "src/compiler/pipeline.h"
#include "src/debug/debug-interface.h"
@@ -118,11 +119,10 @@ class BytecodeGraphTester {
Zone zone(function->GetIsolate()->allocator(), ZONE_NAME);
Handle<SharedFunctionInfo> shared(function->shared());
- Handle<Script> script(Script::cast(shared->script()));
- CompilationInfo compilation_info(&zone, function->GetIsolate(), script,
- shared, function);
+ CompilationInfo compilation_info(&zone, function->GetIsolate(), shared,
+ function);
Handle<Code> code = Pipeline::GenerateCodeForTesting(&compilation_info);
- function->ReplaceCode(*code);
+ function->set_code(*code);
return function;
}
@@ -2994,6 +2994,19 @@ TEST(BytecodeGraphBuilderDebuggerStatement) {
CHECK_EQ(2, delegate.debug_break_count);
}
+#undef SHARD_TEST_BY_2
+#undef SHARD_TEST_BY_4
+#undef SPACE
+#undef REPEAT_2
+#undef REPEAT_4
+#undef REPEAT_8
+#undef REPEAT_16
+#undef REPEAT_32
+#undef REPEAT_64
+#undef REPEAT_128
+#undef REPEAT_256
+#undef REPEAT_127
+
} // namespace compiler
} // namespace internal
} // namespace v8
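The trailing #undef block added above matters for jumbo (unity) builds, where many .cc files are concatenated into one translation unit: without the #undefs, file-local helper macros like REPEAT_2 would leak into whichever test file is compiled next. A tiny sketch of the idea:

#include <cstring>

// In a jumbo build this "file" and the next are pasted into one
// translation unit, so the macro must not outlive this file's text.
#define REPEAT_2(x) x x
static const char* kText = REPEAT_2("ab ");  // Expands to "ab " "ab ".
#undef REPEAT_2

int main() { return std::strcmp(kText, "ab ab ") == 0 ? 0 : 1; }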
diff --git a/deps/v8/test/cctest/compiler/test-run-intrinsics.cc b/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
index 1015ca0989..3d6f1fbe0d 100644
--- a/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
+++ b/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
@@ -107,16 +107,6 @@ TEST(StringAdd) {
}
-TEST(StringCharCodeAt) {
- FunctionTester T("(function(a,b) { return %_StringCharCodeAt(a,b); })",
- flags);
-
- T.CheckCall(T.Val('e'), T.Val("huge fan!"), T.Val(3));
- T.CheckCall(T.Val('f'), T.Val("\xE2\x9D\x8A fan!"), T.Val(2));
- T.CheckCall(T.nan(), T.Val("not a fan!"), T.Val(23));
-}
-
-
TEST(StringCompare) {
FunctionTester T("(function(a,b) { return %_StringCompare(a,b); })", flags);
diff --git a/deps/v8/test/cctest/compiler/test-run-jsobjects.cc b/deps/v8/test/cctest/compiler/test-run-jsobjects.cc
index ce6756d6db..0c2912cbef 100644
--- a/deps/v8/test/cctest/compiler/test-run-jsobjects.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jsobjects.cc
@@ -4,14 +4,7 @@
#include "src/factory.h"
#include "src/isolate.h"
-#include "src/objects.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/factory.h -> src/objects-inl.h
#include "src/objects-inl.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/feedback-vector.h ->
-// src/feedback-vector-inl.h
-#include "src/feedback-vector-inl.h"
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index a429973f0c..a2cc262fba 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -6902,16 +6902,17 @@ TEST(Regression6640) {
int32_t old_value = 0;
int32_t new_value = 1;
- Node* c = m.RelocatableInt32Constant(old_value,
- RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ Node* c = m.RelocatableInt32Constant(
+ old_value, RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
m.Return(m.Word32Equal(c, c));
// Patch the code.
Handle<Code> code = m.GetCode();
- for (RelocIterator it(*code, 1 << RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ for (RelocIterator it(*code,
+ 1 << RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
!it.done(); it.next()) {
- it.rinfo()->update_wasm_memory_size(code->GetIsolate(), old_value,
- new_value, FLUSH_ICACHE_IF_NEEDED);
+ it.rinfo()->update_wasm_function_table_size_reference(
+ code->GetIsolate(), old_value, new_value, FLUSH_ICACHE_IF_NEEDED);
}
CHECK(m.Call());
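The RelocIterator's second argument is a bitmask of relocation modes, which is why the updated test passes 1 << RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE: only entries of that mode are visited and patched. A sketch of the mask test with made-up mode values (RelocInfo's real mode numbering differs):

#include <cassert>

enum Mode { kCodeTarget = 0, kWasmFunctionTableSize = 1, kWasmContext = 2 };

// A mode is visited iff its bit is set in the mask.
static bool Visits(int mode_mask, Mode mode) {
  return (mode_mask & (1 << mode)) != 0;
}

int main() {
  const int mask = 1 << kWasmFunctionTableSize;
  assert(Visits(mask, kWasmFunctionTableSize));
  assert(!Visits(mask, kCodeTarget));
  assert(!Visits(mask, kWasmContext));
}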
diff --git a/deps/v8/test/cctest/compiler/test-run-native-calls.cc b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
index b95c530076..70a0455f20 100644
--- a/deps/v8/test/cctest/compiler/test-run-native-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
@@ -18,6 +18,7 @@
namespace v8 {
namespace internal {
namespace compiler {
+namespace test_run_native_calls {
const auto GetRegConfig = RegisterConfiguration::Default;
@@ -247,7 +248,7 @@ class Int32Signature : public MachineSignature {
public:
explicit Int32Signature(int param_count)
: MachineSignature(1, param_count, kIntTypes) {
- CHECK(param_count <= kMaxParamCount);
+ CHECK_GE(kMaxParamCount, param_count);
}
};
@@ -256,7 +257,7 @@ Handle<Code> CompileGraph(const char* name, CallDescriptor* desc, Graph* graph,
Schedule* schedule = nullptr) {
Isolate* isolate = CcTest::InitIsolateOnce();
CompilationInfo info(ArrayVector("testing"), isolate, graph->zone(),
- Code::ComputeFlags(Code::STUB));
+ Code::STUB);
Handle<Code> code =
Pipeline::GenerateCodeForTesting(&info, desc, graph, schedule);
CHECK(!code.is_null());
@@ -323,7 +324,7 @@ class ArgsBuffer {
public:
explicit Sig(int param_count)
: MachineSignature(1, param_count, MachTypes()) {
- CHECK(param_count <= kMaxParamCount);
+ CHECK_GE(kMaxParamCount, param_count);
}
};
@@ -1225,6 +1226,8 @@ TEST(RunStackSlotFloat64) {
double magic = 3456.375;
TestStackSlot(MachineType::Float64(), magic);
}
+
+} // namespace test_run_native_calls
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/test-run-stubs.cc b/deps/v8/test/cctest/compiler/test-run-stubs.cc
index b0ddc71d87..d42a803c3e 100644
--- a/deps/v8/test/cctest/compiler/test-run-stubs.cc
+++ b/deps/v8/test/cctest/compiler/test-run-stubs.cc
@@ -24,8 +24,7 @@ class StubTester {
public:
StubTester(Isolate* isolate, Zone* zone, CodeStub* stub)
: zone_(zone),
- info_(ArrayVector("test"), isolate, zone,
- Code::ComputeFlags(Code::HANDLER)),
+ info_(ArrayVector("test"), isolate, zone, Code::STUB),
interface_descriptor_(stub->GetCallInterfaceDescriptor()),
descriptor_(Linkage::GetStubCallDescriptor(
isolate, zone, interface_descriptor_,
@@ -38,8 +37,7 @@ class StubTester {
StubTester(Isolate* isolate, Zone* zone, Builtins::Name name)
: zone_(zone),
- info_(ArrayVector("test"), isolate, zone,
- Code::ComputeFlags(Code::HANDLER)),
+ info_(ArrayVector("test"), isolate, zone, Code::STUB),
interface_descriptor_(
Builtins::CallableFor(isolate, name).descriptor()),
descriptor_(Linkage::GetStubCallDescriptor(
@@ -108,8 +106,7 @@ TEST(RunStringLengthStub) {
Isolate* isolate = scope.main_isolate();
Zone* zone = scope.main_zone();
- StringLengthStub stub(isolate);
- StubTester tester(isolate, zone, &stub);
+ StubTester tester(isolate, zone, Builtins::kLoadIC_StringLength);
// Actually call through to the stub, verifying its result.
const char* testString = "Und das Lamm schrie HURZ!";
diff --git a/deps/v8/test/cctest/compiler/test-run-wasm-machops.cc b/deps/v8/test/cctest/compiler/test-run-wasm-machops.cc
index 93acc0889a..7f63484ba9 100644
--- a/deps/v8/test/cctest/compiler/test-run-wasm-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-wasm-machops.cc
@@ -10,6 +10,7 @@
#include "src/base/utils/random-number-generator.h"
#include "src/codegen.h"
#include "src/objects-inl.h"
+#include "src/wasm/wasm-objects.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/graph-builder-tester.h"
@@ -19,29 +20,6 @@ namespace v8 {
namespace internal {
namespace compiler {
-static void UpdateMemoryReferences(Handle<Code> code, Address old_base,
- Address new_base, uint32_t old_size,
- uint32_t new_size) {
- Isolate* isolate = CcTest::i_isolate();
- bool modified = false;
- int mode_mask = RelocInfo::ModeMask(RelocInfo::WASM_MEMORY_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (RelocInfo::IsWasmMemoryReference(mode)) {
- it.rinfo()->update_wasm_memory_reference(isolate, old_base, new_base);
- } else {
- DCHECK(RelocInfo::IsWasmMemorySizeReference(mode));
- it.rinfo()->update_wasm_memory_size(isolate, old_size, new_size);
- }
- modified = true;
- }
- if (modified) {
- Assembler::FlushICache(isolate, code->instruction_start(),
- code->instruction_size());
- }
-}
-
static void UpdateFunctionTableSizeReferences(Handle<Code> code,
uint32_t old_size,
uint32_t new_size) {
@@ -70,17 +48,19 @@ static void RunLoadStoreRelocation(MachineType rep) {
CType new_buffer[kNumElems];
byte* raw = reinterpret_cast<byte*>(buffer);
byte* new_raw = reinterpret_cast<byte*>(new_buffer);
+ WasmContext wasm_context = {raw, sizeof(buffer)};
for (size_t i = 0; i < sizeof(buffer); i++) {
raw[i] = static_cast<byte>((i + sizeof(CType)) ^ 0xAA);
new_raw[i] = static_cast<byte>((i + sizeof(CType)) ^ 0xAA);
}
uint32_t OK = 0x29000;
RawMachineAssemblerTester<uint32_t> m;
- Node* base = m.RelocatableIntPtrConstant(reinterpret_cast<intptr_t>(raw),
- RelocInfo::WASM_MEMORY_REFERENCE);
- Node* base1 = m.RelocatableIntPtrConstant(
- reinterpret_cast<intptr_t>(raw + sizeof(CType)),
- RelocInfo::WASM_MEMORY_REFERENCE);
+ Node* wasm_context_node =
+ m.RelocatableIntPtrConstant(reinterpret_cast<uintptr_t>(&wasm_context),
+ RelocInfo::WASM_CONTEXT_REFERENCE);
+ Node* offset = m.Int32Constant(offsetof(WasmContext, mem_start));
+ Node* base = m.Load(MachineType::UintPtr(), wasm_context_node, offset);
+ Node* base1 = m.IntPtrAdd(base, m.Int32Constant(sizeof(CType)));
Node* index = m.Int32Constant(0);
Node* load = m.Load(rep, base, index);
m.Store(rep.representation(), base1, index, load, kNoWriteBarrier);
@@ -88,10 +68,8 @@ static void RunLoadStoreRelocation(MachineType rep) {
CHECK(buffer[0] != buffer[1]);
CHECK_EQ(OK, m.Call());
CHECK(buffer[0] == buffer[1]);
- m.GenerateCode();
- Handle<Code> code = m.GetCode();
- UpdateMemoryReferences(code, raw, new_raw, sizeof(buffer),
- sizeof(new_buffer));
+ wasm_context.mem_size = sizeof(new_buffer);
+ wasm_context.mem_start = new_raw;
CHECK(new_buffer[0] != new_buffer[1]);
CHECK_EQ(OK, m.Call());
CHECK(new_buffer[0] == new_buffer[1]);
@@ -115,19 +93,24 @@ static void RunLoadStoreRelocationOffset(MachineType rep) {
const int kNumElems = 4;
CType buffer[kNumElems];
CType new_buffer[kNumElems + 1];
+ WasmContext wasm_context;
for (int32_t x = 0; x < kNumElems; x++) {
int32_t y = kNumElems - x - 1;
// initialize the buffer with raw data.
byte* raw = reinterpret_cast<byte*>(buffer);
+ wasm_context = {raw, sizeof(buffer)};
for (size_t i = 0; i < sizeof(buffer); i++) {
raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
}
RawMachineAssemblerTester<int32_t> m;
int32_t OK = 0x29000 + x;
- Node* base = m.RelocatableIntPtrConstant(reinterpret_cast<intptr_t>(buffer),
- RelocInfo::WASM_MEMORY_REFERENCE);
+ Node* wasm_context_node =
+ m.RelocatableIntPtrConstant(reinterpret_cast<uintptr_t>(&wasm_context),
+ RelocInfo::WASM_CONTEXT_REFERENCE);
+ Node* offset = m.Int32Constant(offsetof(WasmContext, mem_start));
+ Node* base = m.Load(MachineType::UintPtr(), wasm_context_node, offset);
Node* index0 = m.IntPtrConstant(x * sizeof(buffer[0]));
Node* load = m.Load(rep, base, index0);
Node* index1 = m.IntPtrConstant(y * sizeof(buffer[0]));
@@ -137,7 +120,6 @@ static void RunLoadStoreRelocationOffset(MachineType rep) {
CHECK(buffer[x] != buffer[y]);
CHECK_EQ(OK, m.Call());
CHECK(buffer[x] == buffer[y]);
- m.GenerateCode();
// Initialize new buffer and set old_buffer to 0
byte* new_raw = reinterpret_cast<byte*>(new_buffer);
@@ -146,10 +128,8 @@ static void RunLoadStoreRelocationOffset(MachineType rep) {
new_raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
}
- // Perform relocation on generated code
- Handle<Code> code = m.GetCode();
- UpdateMemoryReferences(code, raw, new_raw, sizeof(buffer),
- sizeof(new_buffer));
+ wasm_context.mem_size = sizeof(new_buffer);
+ wasm_context.mem_start = new_raw;
CHECK(new_buffer[x] != new_buffer[y]);
CHECK_EQ(OK, m.Call());
@@ -172,9 +152,13 @@ TEST(RunLoadStoreRelocationOffset) {
TEST(Uint32LessThanMemoryRelocation) {
RawMachineAssemblerTester<uint32_t> m;
RawMachineLabel within_bounds, out_of_bounds;
+ WasmContext wasm_context = {reinterpret_cast<Address>(1234), 0x200};
Node* index = m.Int32Constant(0x200);
- Node* limit =
- m.RelocatableInt32Constant(0x200, RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ Node* wasm_context_node =
+ m.RelocatableIntPtrConstant(reinterpret_cast<uintptr_t>(&wasm_context),
+ RelocInfo::WASM_CONTEXT_REFERENCE);
+ Node* offset = m.Int32Constant(offsetof(WasmContext, mem_size));
+ Node* limit = m.Load(MachineType::Uint32(), wasm_context_node, offset);
Node* cond = m.AddNode(m.machine()->Uint32LessThan(), index, limit);
m.Branch(cond, &within_bounds, &out_of_bounds);
m.Bind(&within_bounds);
@@ -183,11 +167,7 @@ TEST(Uint32LessThanMemoryRelocation) {
m.Return(m.Int32Constant(0xdeadbeef));
// Check that index is out of bounds with current size
CHECK_EQ(0xdeadbeef, m.Call());
- m.GenerateCode();
-
- Handle<Code> code = m.GetCode();
- UpdateMemoryReferences(code, reinterpret_cast<Address>(1234),
- reinterpret_cast<Address>(1234), 0x200, 0x400);
+ wasm_context.mem_size = 0x400;
// Check that after limit is increased, index is within bounds.
CHECK_EQ(0xacedu, m.Call());
}
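
The hunks above replace code patching (the deleted UpdateMemoryReferences walk over RelocInfo) with an indirection through a WasmContext struct: generated code loads the memory base and size from the struct on every call, so the test can grow or move memory by mutating the struct instead of rewriting immediates. A minimal sketch of that pattern, using only names that appear in the diff (mem_start, new_base, and new_size stand in for the test's buffers); illustrative, not a standalone test:

// The test owns a WasmContext; generated code finds memory through it
// instead of through patched immediates.
WasmContext wasm_context = {mem_start, mem_size};

RawMachineAssemblerTester<uint32_t> m;
// The context address is the only relocatable constant in the code...
Node* ctx = m.RelocatableIntPtrConstant(
    reinterpret_cast<uintptr_t>(&wasm_context),
    RelocInfo::WASM_CONTEXT_REFERENCE);
// ...and the memory base is reloaded through it on every call.
Node* base = m.Load(MachineType::UintPtr(), ctx,
                    m.Int32Constant(offsetof(WasmContext, mem_start)));

// Growing or moving memory is now an ordinary store to the struct; no
// RelocIterator walk and no icache flush are needed between calls:
wasm_context.mem_start = new_base;
wasm_context.mem_size = new_size;
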
diff --git a/deps/v8/test/cctest/compiler/value-helper.h b/deps/v8/test/cctest/compiler/value-helper.h
index 14bfaf9fe9..87cdb585da 100644
--- a/deps/v8/test/cctest/compiler/value-helper.h
+++ b/deps/v8/test/cctest/compiler/value-helper.h
@@ -332,9 +332,10 @@ class ValueHelper {
// Helper macros that can be used in FOR_INT32_INPUTS(i) { ... *i ... }
// Watch out, these macros aren't hygienic; they pollute your scope. Thanks STL.
-#define FOR_INPUTS(ctype, itype, var) \
- std::vector<ctype> var##_vec = ValueHelper::itype##_vector(); \
- for (std::vector<ctype>::iterator var = var##_vec.begin(); \
+#define FOR_INPUTS(ctype, itype, var) \
+ std::vector<ctype> var##_vec = \
+ ::v8::internal::compiler::ValueHelper::itype##_vector(); \
+ for (std::vector<ctype>::iterator var = var##_vec.begin(); \
var != var##_vec.end(); ++var)
#define FOR_INT32_INPUTS(var) FOR_INPUTS(int32_t, int32, var)
@@ -362,10 +363,10 @@ static inline void CheckFloatEq(volatile float x, volatile float y) {
}
}
-#define CHECK_FLOAT_EQ(lhs, rhs) \
- do { \
- volatile float tmp = lhs; \
- CheckFloatEq(tmp, rhs); \
+#define CHECK_FLOAT_EQ(lhs, rhs) \
+ do { \
+ volatile float tmp = lhs; \
+ ::v8::internal::compiler::CheckFloatEq(tmp, rhs); \
} while (0)
static inline void CheckDoubleEq(volatile double x, volatile double y) {
@@ -377,10 +378,10 @@ static inline void CheckDoubleEq(volatile double x, volatile double y) {
}
}
-#define CHECK_DOUBLE_EQ(lhs, rhs) \
- do { \
- volatile double tmp = lhs; \
- CheckDoubleEq(tmp, rhs); \
+#define CHECK_DOUBLE_EQ(lhs, rhs) \
+ do { \
+ volatile double tmp = lhs; \
+ ::v8::internal::compiler::CheckDoubleEq(tmp, rhs); \
} while (0)
} // namespace compiler
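
The fully qualified names added to these macros matter because the same patch wraps many test files in their own nested namespaces (see test_run_native_calls and test_spaces elsewhere in this diff). A sketch of what the leading :: guards against; the expansion comment is illustrative:

namespace v8 {
namespace internal {
namespace compiler {
namespace test_run_native_calls {  // extra namespace, new in this patch

TEST(Example) {
  FOR_INT32_INPUTS(i) { /* ... *i ... */ }
  // Expands to:
  //   std::vector<int32_t> i_vec =
  //       ::v8::internal::compiler::ValueHelper::int32_vector();
  // The leading :: anchors lookup at the global namespace, so the macro
  // works no matter which namespace it is expanded in and cannot pick up
  // a shadowing local ValueHelper, CheckFloatEq, or CheckDoubleEq.
}

}  // namespace test_run_native_calls
}  // namespace compiler
}  // namespace internal
}  // namespace v8
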
diff --git a/deps/v8/test/cctest/ffi/OWNERS b/deps/v8/test/cctest/ffi/OWNERS
deleted file mode 100644
index f78789f5b5..0000000000
--- a/deps/v8/test/cctest/ffi/OWNERS
+++ /dev/null
@@ -1,4 +0,0 @@
-mattloring@google.com
-ofrobots@google.com
-
-# COMPONENT: Blink>JavaScript>Compiler
diff --git a/deps/v8/test/cctest/ffi/test-ffi.cc b/deps/v8/test/cctest/ffi/test-ffi.cc
deleted file mode 100644
index 9d87244e94..0000000000
--- a/deps/v8/test/cctest/ffi/test-ffi.cc
+++ /dev/null
@@ -1,222 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/api.h"
-#include "src/codegen.h"
-#include "src/ffi/ffi-compiler.h"
-#include "src/objects-inl.h"
-#include "test/cctest/cctest.h"
-
-namespace v8 {
-namespace internal {
-namespace ffi {
-
-static void hello_world() { printf("hello world from native code\n"); }
-
-TEST(Run_FFI_Hello) {
- Isolate* isolate = CcTest::InitIsolateOnce();
- HandleScope scope(isolate);
-
- Handle<String> name =
- isolate->factory()->InternalizeUtf8String("hello_world");
- Handle<Object> undefined = isolate->factory()->undefined_value();
-
- AccountingAllocator allocator;
- Zone zone(&allocator, ZONE_NAME);
- FFISignature::Builder sig_builder(&zone, 0, 0);
- NativeFunction func = {sig_builder.Build(),
- reinterpret_cast<uint8_t*>(hello_world)};
-
- Handle<JSFunction> jsfunc = CompileJSToNativeWrapper(isolate, name, func);
-
- Handle<Object> result =
- Execution::Call(isolate, jsfunc, undefined, 0, nullptr).ToHandleChecked();
-
- CHECK(result->IsUndefined(isolate));
-}
-
-static int add2(int x, int y) { return x + y; }
-
-TEST(Run_FFI_add2) {
- Isolate* isolate = CcTest::InitIsolateOnce();
- HandleScope scope(isolate);
-
- Handle<String> name = isolate->factory()->InternalizeUtf8String("add2");
- Handle<Object> undefined = isolate->factory()->undefined_value();
-
- AccountingAllocator allocator;
- Zone zone(&allocator, ZONE_NAME);
- FFISignature::Builder sig_builder(&zone, 1, 2);
- sig_builder.AddReturn(FFIType::kInt32);
- sig_builder.AddParam(FFIType::kInt32);
- sig_builder.AddParam(FFIType::kInt32);
- NativeFunction func = {sig_builder.Build(), reinterpret_cast<uint8_t*>(add2)};
-
- Handle<JSFunction> jsfunc = CompileJSToNativeWrapper(isolate, name, func);
-
- // Simple math should work.
- {
- Handle<Object> args[] = {isolate->factory()->NewNumber(1.0),
- isolate->factory()->NewNumber(41.0)};
- Handle<Object> result =
- Execution::Call(isolate, jsfunc, undefined, arraysize(args), args)
- .ToHandleChecked();
- CHECK_EQ(42.0, result->Number());
- }
-
- // Truncate floating point to integer.
- {
- Handle<Object> args[] = {isolate->factory()->NewNumber(1.9),
- isolate->factory()->NewNumber(41.0)};
- Handle<Object> result =
- Execution::Call(isolate, jsfunc, undefined, arraysize(args), args)
- .ToHandleChecked();
- CHECK_EQ(42.0, result->Number());
- }
-
- // INT_MAX + 1 should wrap.
- {
- Handle<Object> args[] = {isolate->factory()->NewNumber(kMaxInt),
- isolate->factory()->NewNumber(1)};
- Handle<Object> result =
- Execution::Call(isolate, jsfunc, undefined, arraysize(args), args)
- .ToHandleChecked();
- CHECK_EQ(kMinInt, result->Number());
- }
-
- // INT_MIN + -1 should wrap.
- {
- Handle<Object> args[] = {isolate->factory()->NewNumber(kMinInt),
- isolate->factory()->NewNumber(-1)};
- Handle<Object> result =
- Execution::Call(isolate, jsfunc, undefined, arraysize(args), args)
- .ToHandleChecked();
- CHECK_EQ(kMaxInt, result->Number());
- }
-
- // Numbers get truncated to the 32 least significant bits.
- {
- Handle<Object> args[] = {isolate->factory()->NewNumber(1ull << 40),
- isolate->factory()->NewNumber(-1)};
- Handle<Object> result =
- Execution::Call(isolate, jsfunc, undefined, arraysize(args), args)
- .ToHandleChecked();
- CHECK_EQ(-1, result->Number());
- }
-
- // String '57' converts to 57.
- {
- Handle<Object> args[] = {
- isolate->factory()->NewStringFromAsciiChecked("57"),
- isolate->factory()->NewNumber(41.0)};
- Handle<Object> result =
- Execution::Call(isolate, jsfunc, undefined, arraysize(args), args)
- .ToHandleChecked();
- CHECK_EQ(98.0, result->Number());
- }
-
- // String 'foo' converts to 0.
- {
- Handle<Object> args[] = {
- isolate->factory()->NewStringFromAsciiChecked("foo"),
- isolate->factory()->NewNumber(41.0)};
- Handle<Object> result =
- Execution::Call(isolate, jsfunc, undefined, arraysize(args), args)
- .ToHandleChecked();
- CHECK_EQ(41.0, result->Number());
- }
-
- // String '58o' converts to 0.
- {
- Handle<Object> args[] = {
- isolate->factory()->NewStringFromAsciiChecked("58o"),
- isolate->factory()->NewNumber(41.0)};
- Handle<Object> result =
- Execution::Call(isolate, jsfunc, undefined, arraysize(args), args)
- .ToHandleChecked();
- CHECK_EQ(41.0, result->Number());
- }
-
- // NaN converts to 0.
- {
- Handle<Object> args[] = {isolate->factory()->nan_value(),
- isolate->factory()->NewNumber(41.0)};
- Handle<Object> result =
- Execution::Call(isolate, jsfunc, undefined, arraysize(args), args)
- .ToHandleChecked();
- CHECK_EQ(41.0, result->Number());
- }
-
- // null converts to 0.
- {
- Handle<Object> args[] = {isolate->factory()->null_value(),
- isolate->factory()->NewNumber(41.0)};
- Handle<Object> result =
- Execution::Call(isolate, jsfunc, undefined, arraysize(args), args)
- .ToHandleChecked();
- CHECK_EQ(41.0, result->Number());
- }
-}
-
-static int add6(int a, int b, int c, int d, int e, int f) {
- return a + b + c + d + e + f;
-}
-
-TEST(Run_FFI_add6) {
- Isolate* isolate = CcTest::InitIsolateOnce();
- HandleScope scope(isolate);
-
- Handle<String> name = isolate->factory()->InternalizeUtf8String("add6");
- Handle<Object> undefined = isolate->factory()->undefined_value();
-
- AccountingAllocator allocator;
- Zone zone(&allocator, ZONE_NAME);
- FFISignature::Builder sig_builder(&zone, 1, 7);
- sig_builder.AddReturn(FFIType::kInt32);
- for (int i = 0; i < 7; i++) {
- sig_builder.AddParam(FFIType::kInt32);
- }
- NativeFunction func = {sig_builder.Build(), reinterpret_cast<uint8_t*>(add6)};
-
- Handle<JSFunction> jsfunc = CompileJSToNativeWrapper(isolate, name, func);
- Handle<Object> args[] = {
- isolate->factory()->NewNumber(1), isolate->factory()->NewNumber(2),
- isolate->factory()->NewNumber(3), isolate->factory()->NewNumber(4),
- isolate->factory()->NewNumber(5), isolate->factory()->NewNumber(6)};
-
- Handle<Object> result =
- Execution::Call(isolate, jsfunc, undefined, arraysize(args), args)
- .ToHandleChecked();
-
- CHECK_EQ(21.0, result->Number());
-
- {
- // Ensure builtin frames are generated
- FLAG_allow_natives_syntax = true;
- v8::Local<v8::Value> res = CompileRun(
- "var o = { valueOf: function() { %DebugTrace(); return 1; } }; o;");
- Handle<JSReceiver> param(v8::Utils::OpenHandle(v8::Object::Cast(*res)));
- Handle<Object> args[] = {param,
- isolate->factory()->NewNumber(2),
- isolate->factory()->NewNumber(3),
- isolate->factory()->NewNumber(4),
- isolate->factory()->NewNumber(5),
- isolate->factory()->NewNumber(6),
- isolate->factory()->NewNumber(21)};
-
- Handle<Object> result =
- Execution::Call(isolate, jsfunc, undefined, arraysize(args), args)
- .ToHandleChecked();
- CHECK_EQ(21.0, result->Number());
- CHECK_EQ(
- 1.0,
- res->NumberValue(
- reinterpret_cast<v8::Isolate*>(isolate)->GetCurrentContext())
- .ToChecked());
- }
-}
-
-} // namespace ffi
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/cctest/heap/heap-utils.cc b/deps/v8/test/cctest/heap/heap-utils.cc
index 1d936f7c5b..6e92b96da1 100644
--- a/deps/v8/test/cctest/heap/heap-utils.cc
+++ b/deps/v8/test/cctest/heap/heap-utils.cc
@@ -32,6 +32,7 @@ int FixedArrayLenFromSize(int size) {
std::vector<Handle<FixedArray>> FillOldSpacePageWithFixedArrays(Heap* heap,
int remainder) {
+ PauseAllocationObserversScope pause_observers(heap);
std::vector<Handle<FixedArray>> handles;
Isolate* isolate = heap->isolate();
const int kArraySize = 128;
@@ -203,7 +204,7 @@ void ForceEvacuationCandidate(Page* page) {
int remaining = static_cast<int>(limit - top);
space->heap()->CreateFillerObjectAt(top, remaining,
ClearRecordedSlots::kNo);
- space->SetTopAndLimit(nullptr, nullptr);
+ space->EmptyAllocationInfo();
}
}
diff --git a/deps/v8/test/cctest/heap/test-alloc.cc b/deps/v8/test/cctest/heap/test-alloc.cc
index d4abaa3a8a..06aec9ac6e 100644
--- a/deps/v8/test/cctest/heap/test-alloc.cc
+++ b/deps/v8/test/cctest/heap/test-alloc.cc
@@ -145,12 +145,12 @@ TEST(StressJS) {
// Force the creation of an initial map and set the code to
// something empty.
factory->NewJSObject(function);
- function->ReplaceCode(CcTest::i_isolate()->builtins()->builtin(
- Builtins::kEmptyFunction));
+ function->set_code(
+ CcTest::i_isolate()->builtins()->builtin(Builtins::kEmptyFunction));
// Patch the map to have an accessor for "get".
Handle<Map> map(function->initial_map());
Handle<DescriptorArray> instance_descriptors(map->instance_descriptors());
- CHECK(instance_descriptors->IsEmpty());
+ CHECK_EQ(0, instance_descriptors->number_of_descriptors());
PropertyAttributes attrs = NONE;
Handle<AccessorInfo> foreign = TestAccessorInfo(isolate, attrs);
diff --git a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
index 73286f3970..f3907a0bdc 100644
--- a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
+++ b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
@@ -335,6 +335,7 @@ UNINITIALIZED_TEST(ArrayBuffer_SemiSpaceCopyMultipleTasks) {
Page::FromAddress(buf2->address()));
heap::GcAndSweep(heap, OLD_SPACE);
}
+ isolate->Dispose();
}
TEST(ArrayBuffer_RetainedSizeIncreases) {
diff --git a/deps/v8/test/cctest/heap/test-compaction.cc b/deps/v8/test/cctest/heap/test-compaction.cc
index d98aa1fb56..e8c65d1110 100644
--- a/deps/v8/test/cctest/heap/test-compaction.cc
+++ b/deps/v8/test/cctest/heap/test-compaction.cc
@@ -5,13 +5,7 @@
#include "src/factory.h"
#include "src/heap/mark-compact.h"
#include "src/isolate.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/factory.h -> src/objects-inl.h
#include "src/objects-inl.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/feedback-vector.h ->
-// src/feedback-vector-inl.h
-#include "src/feedback-vector-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"
diff --git a/deps/v8/test/cctest/heap/test-concurrent-marking.cc b/deps/v8/test/cctest/heap/test-concurrent-marking.cc
index 36bf49c2ff..5ee99c6320 100644
--- a/deps/v8/test/cctest/heap/test-concurrent-marking.cc
+++ b/deps/v8/test/cctest/heap/test-concurrent-marking.cc
@@ -31,12 +31,20 @@ TEST(ConcurrentMarking) {
if (!i::FLAG_concurrent_marking) return;
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
- ConcurrentMarking::MarkingWorklist shared, bailout;
+ CcTest::CollectAllGarbage();
+ if (!heap->incremental_marking()->IsStopped()) return;
+ MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
+ if (collector->sweeping_in_progress()) {
+ collector->EnsureSweepingCompleted();
+ }
+
+ ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
WeakObjects weak_objects;
ConcurrentMarking* concurrent_marking =
- new ConcurrentMarking(heap, &shared, &bailout, &weak_objects);
+ new ConcurrentMarking(heap, &shared, &bailout, &on_hold, &weak_objects);
PublishSegment(&shared, heap->undefined_value());
concurrent_marking->ScheduleTasks();
+ concurrent_marking->WaitForTasks();
concurrent_marking->EnsureCompleted();
delete concurrent_marking;
}
@@ -45,19 +53,43 @@ TEST(ConcurrentMarkingReschedule) {
if (!i::FLAG_concurrent_marking) return;
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
- ConcurrentMarking::MarkingWorklist shared, bailout;
+ CcTest::CollectAllGarbage();
+ if (!heap->incremental_marking()->IsStopped()) return;
+ MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
+ if (collector->sweeping_in_progress()) {
+ collector->EnsureSweepingCompleted();
+ }
+
+ ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
WeakObjects weak_objects;
ConcurrentMarking* concurrent_marking =
- new ConcurrentMarking(heap, &shared, &bailout, &weak_objects);
+ new ConcurrentMarking(heap, &shared, &bailout, &on_hold, &weak_objects);
PublishSegment(&shared, heap->undefined_value());
concurrent_marking->ScheduleTasks();
+ concurrent_marking->WaitForTasks();
concurrent_marking->EnsureCompleted();
PublishSegment(&shared, heap->undefined_value());
concurrent_marking->RescheduleTasksIfNeeded();
+ concurrent_marking->WaitForTasks();
concurrent_marking->EnsureCompleted();
delete concurrent_marking;
}
+TEST(ConcurrentMarkingMarkedBytes) {
+ if (!i::FLAG_concurrent_marking) return;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = CcTest::heap();
+ HandleScope sc(isolate);
+ Handle<FixedArray> root = isolate->factory()->NewFixedArray(1000000);
+ CcTest::CollectAllGarbage();
+ if (!heap->incremental_marking()->IsStopped()) return;
+ heap::SimulateIncrementalMarking(heap, false);
+ heap->concurrent_marking()->WaitForTasks();
+ heap->concurrent_marking()->EnsureCompleted();
+ CHECK_GE(heap->concurrent_marking()->TotalMarkedBytes(), root->Size());
+}
+
} // namespace heap
} // namespace internal
} // namespace v8
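
Consolidated, the concurrent-marking tests above now follow one shape: quiesce the heap first, construct the marker with the new on_hold worklist, and explicitly wait for the background tasks before tearing down. A sketch using only names from the hunks; the purpose of on_hold is not exercised by these tests:

CcTest::CollectAllGarbage();
if (!heap->incremental_marking()->IsStopped()) return;  // already marking; skip
MarkCompactCollector* collector = heap->mark_compact_collector();
if (collector->sweeping_in_progress()) collector->EnsureSweepingCompleted();

// The constructor now takes a third worklist, on_hold.
ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
WeakObjects weak_objects;
ConcurrentMarking* concurrent_marking =
    new ConcurrentMarking(heap, &shared, &bailout, &on_hold, &weak_objects);
PublishSegment(&shared, heap->undefined_value());
concurrent_marking->ScheduleTasks();
concurrent_marking->WaitForTasks();  // new: block until background tasks drain
concurrent_marking->EnsureCompleted();
delete concurrent_marking;
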
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index 89c074b3c3..ab2ba1a53b 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -197,8 +197,8 @@ HEAP_TEST(TestNewSpaceRefsInCopiedCode) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
Code* tmp = nullptr;
heap->CopyCode(*code).To(&tmp);
@@ -219,8 +219,8 @@ static void CheckFindCodeObject(Isolate* isolate) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
CHECK(code->IsCode());
HeapObject* obj = HeapObject::cast(*code);
@@ -231,8 +231,8 @@ static void CheckFindCodeObject(Isolate* isolate) {
CHECK_EQ(*code, found);
}
- Handle<Code> copy = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> copy =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
HeapObject* obj_copy = HeapObject::cast(*copy);
Object* not_right = isolate->FindCodeObject(obj_copy->address() +
obj_copy->Size() / 2);
@@ -1367,22 +1367,6 @@ int CountNativeContexts() {
return count;
}
-
-// Count the number of user functions in the weak list of optimized
-// functions attached to a native context.
-static int CountOptimizedUserFunctions(v8::Local<v8::Context> context) {
- int count = 0;
- Handle<Context> icontext = v8::Utils::OpenHandle(*context);
- Object* object = icontext->get(Context::OPTIMIZED_FUNCTIONS_LIST);
- while (object->IsJSFunction() &&
- JSFunction::cast(object)->shared()->IsUserJavaScript()) {
- count++;
- object = JSFunction::cast(object)->next_function_link();
- }
- return count;
-}
-
-
TEST(TestInternalWeakLists) {
FLAG_always_opt = false;
FLAG_allow_natives_syntax = true;
@@ -1420,17 +1404,11 @@ TEST(TestInternalWeakLists) {
// Create a handle scope so no function objects get stuck in the outer
// handle scope.
HandleScope scope(isolate);
- CHECK_EQ(0, CountOptimizedUserFunctions(ctx[i]));
OptimizeEmptyFunction("f1");
- CHECK_EQ(1, CountOptimizedUserFunctions(ctx[i]));
OptimizeEmptyFunction("f2");
- CHECK_EQ(2, CountOptimizedUserFunctions(ctx[i]));
OptimizeEmptyFunction("f3");
- CHECK_EQ(3, CountOptimizedUserFunctions(ctx[i]));
OptimizeEmptyFunction("f4");
- CHECK_EQ(4, CountOptimizedUserFunctions(ctx[i]));
OptimizeEmptyFunction("f5");
- CHECK_EQ(5, CountOptimizedUserFunctions(ctx[i]));
// Remove function f1, and
CompileRun("f1=null");
@@ -1438,29 +1416,23 @@ TEST(TestInternalWeakLists) {
// Scavenge treats these references as strong.
for (int j = 0; j < 10; j++) {
CcTest::CollectGarbage(NEW_SPACE);
- CHECK_EQ(5, CountOptimizedUserFunctions(ctx[i]));
}
// Mark compact handles the weak references.
isolate->compilation_cache()->Clear();
CcTest::CollectAllGarbage();
- CHECK_EQ(4, CountOptimizedUserFunctions(ctx[i]));
// Get rid of f3 and f5 in the same way.
CompileRun("f3=null");
for (int j = 0; j < 10; j++) {
CcTest::CollectGarbage(NEW_SPACE);
- CHECK_EQ(4, CountOptimizedUserFunctions(ctx[i]));
}
CcTest::CollectAllGarbage();
- CHECK_EQ(3, CountOptimizedUserFunctions(ctx[i]));
CompileRun("f5=null");
for (int j = 0; j < 10; j++) {
CcTest::CollectGarbage(NEW_SPACE);
- CHECK_EQ(3, CountOptimizedUserFunctions(ctx[i]));
}
CcTest::CollectAllGarbage();
- CHECK_EQ(2, CountOptimizedUserFunctions(ctx[i]));
ctx[i]->Exit();
}
@@ -1491,94 +1463,6 @@ TEST(TestInternalWeakLists) {
}
-// Count the number of native contexts in the weak list of native contexts
-// causing a GC after the specified number of elements.
-static int CountNativeContextsWithGC(Isolate* isolate, int n) {
- Heap* heap = isolate->heap();
- int count = 0;
- Handle<Object> object(heap->native_contexts_list(), isolate);
- while (!object->IsUndefined(isolate)) {
- count++;
- if (count == n) CcTest::CollectAllGarbage();
- object =
- Handle<Object>(Context::cast(*object)->next_context_link(), isolate);
- }
- return count;
-}
-
-
-// Count the number of user functions in the weak list of optimized
-// functions attached to a native context causing a GC after the
-// specified number of elements.
-static int CountOptimizedUserFunctionsWithGC(v8::Local<v8::Context> context,
- int n) {
- int count = 0;
- Handle<Context> icontext = v8::Utils::OpenHandle(*context);
- Isolate* isolate = icontext->GetIsolate();
- Handle<Object> object(icontext->get(Context::OPTIMIZED_FUNCTIONS_LIST),
- isolate);
- while (object->IsJSFunction() &&
- Handle<JSFunction>::cast(object)->shared()->IsUserJavaScript()) {
- count++;
- if (count == n)
- isolate->heap()->CollectAllGarbage(
- i::Heap::kFinalizeIncrementalMarkingMask,
- i::GarbageCollectionReason::kTesting);
- object = Handle<Object>(
- Object::cast(JSFunction::cast(*object)->next_function_link()),
- isolate);
- }
- return count;
-}
-
-
-TEST(TestInternalWeakListsTraverseWithGC) {
- FLAG_always_opt = false;
- FLAG_allow_natives_syntax = true;
- v8::V8::Initialize();
-
- static const int kNumTestContexts = 10;
-
- Isolate* isolate = CcTest::i_isolate();
- HandleScope scope(isolate);
- v8::Local<v8::Context> ctx[kNumTestContexts];
- if (!isolate->use_optimizer()) return;
-
- CHECK_EQ(0, CountNativeContexts());
-
- // Create a number of contexts and check the length of the weak list both
- // with and without GCs while iterating the list.
- for (int i = 0; i < kNumTestContexts; i++) {
- ctx[i] = v8::Context::New(CcTest::isolate());
- CHECK_EQ(i + 1, CountNativeContexts());
- CHECK_EQ(i + 1, CountNativeContextsWithGC(isolate, i / 2 + 1));
- }
-
- ctx[0]->Enter();
-
- // Compile a number of functions and check the length of the weak list of
- // optimized functions both with and without GCs while iterating the list.
- CHECK_EQ(0, CountOptimizedUserFunctions(ctx[0]));
- OptimizeEmptyFunction("f1");
- CHECK_EQ(1, CountOptimizedUserFunctions(ctx[0]));
- CHECK_EQ(1, CountOptimizedUserFunctionsWithGC(ctx[0], 1));
- OptimizeEmptyFunction("f2");
- CHECK_EQ(2, CountOptimizedUserFunctions(ctx[0]));
- CHECK_EQ(2, CountOptimizedUserFunctionsWithGC(ctx[0], 1));
- OptimizeEmptyFunction("f3");
- CHECK_EQ(3, CountOptimizedUserFunctions(ctx[0]));
- CHECK_EQ(3, CountOptimizedUserFunctionsWithGC(ctx[0], 1));
- OptimizeEmptyFunction("f4");
- CHECK_EQ(4, CountOptimizedUserFunctions(ctx[0]));
- CHECK_EQ(4, CountOptimizedUserFunctionsWithGC(ctx[0], 2));
- OptimizeEmptyFunction("f5");
- CHECK_EQ(5, CountOptimizedUserFunctions(ctx[0]));
- CHECK_EQ(5, CountOptimizedUserFunctionsWithGC(ctx[0], 4));
-
- ctx[0]->Exit();
-}
-
-
TEST(TestSizeOfRegExpCode) {
if (!FLAG_regexp_optimization) return;
@@ -1825,11 +1709,8 @@ TEST(TestAlignedOverAllocation) {
if (double_misalignment) {
start = AlignOldSpace(kDoubleAligned, 0);
obj = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned);
- // The object is aligned, and a filler object is created after.
+ // The object is aligned.
CHECK(IsAddressAligned(obj->address(), kDoubleAlignment));
- filler = HeapObject::FromAddress(start + kPointerSize);
- CHECK(obj != filler && filler->IsFiller() &&
- filler->Size() == kPointerSize);
// Try the opposite alignment case.
start = AlignOldSpace(kDoubleAligned, kPointerSize);
obj = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned);
@@ -1837,18 +1718,15 @@ TEST(TestAlignedOverAllocation) {
filler = HeapObject::FromAddress(start);
CHECK(obj != filler);
CHECK(filler->IsFiller());
- CHECK(filler->Size() == kPointerSize);
+ CHECK_EQ(kPointerSize, filler->Size());
CHECK(obj != filler && filler->IsFiller() &&
filler->Size() == kPointerSize);
// Similarly for kDoubleUnaligned.
start = AlignOldSpace(kDoubleUnaligned, 0);
obj = OldSpaceAllocateAligned(kPointerSize, kDoubleUnaligned);
- // The object is aligned, and a filler object is created after.
+ // The object is aligned.
CHECK(IsAddressAligned(obj->address(), kDoubleAlignment, kPointerSize));
- filler = HeapObject::FromAddress(start + kPointerSize);
- CHECK(obj != filler && filler->IsFiller() &&
- filler->Size() == kPointerSize);
// Try the opposite alignment case.
start = AlignOldSpace(kDoubleUnaligned, kPointerSize);
obj = OldSpaceAllocateAligned(kPointerSize, kDoubleUnaligned);
@@ -3493,7 +3371,7 @@ TEST(LargeObjectSlotRecording) {
// Allocate a large object.
int size = Max(1000000, kMaxRegularHeapObjectSize + KB);
- CHECK(size > kMaxRegularHeapObjectSize);
+ CHECK_LT(kMaxRegularHeapObjectSize, size);
Handle<FixedArray> lo = isolate->factory()->NewFixedArray(size, TENURED);
CHECK(heap->lo_space()->Contains(*lo));
@@ -3663,10 +3541,8 @@ TEST(EnsureAllocationSiteDependentCodesProcessed) {
dependency = dependency->next_link();
dependency_group_count++;
}
-
- // TurboFan respects pretenuring feedback from allocation sites, Crankshaft
- // does not. Either is fine for the purposes of this test.
- CHECK(dependency_group_count == 1 || dependency_group_count == 2);
+ // Expect a dependent code object for transitioning and pretenuring.
+ CHECK_EQ(2, dependency_group_count);
}
// Now make sure that a gc should get rid of the function, even though we
@@ -4039,8 +3915,8 @@ static Handle<Code> DummyOptimizedCode(Isolate* isolate) {
masm.Drop(1);
masm.GetCode(isolate, &desc);
Handle<Object> undefined(isolate->heap()->undefined_value(), isolate);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::OPTIMIZED_FUNCTION), undefined);
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::OPTIMIZED_FUNCTION, undefined);
CHECK(code->IsCode());
return code;
}
@@ -4326,21 +4202,14 @@ Handle<JSFunction> GetFunctionByName(Isolate* isolate, const char* name) {
return Handle<JSFunction>::cast(obj);
}
-void CheckIC(Handle<JSFunction> function, Code::Kind kind, int slot_index,
+void CheckIC(Handle<JSFunction> function, int slot_index,
InlineCacheState state) {
- CHECK(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
FeedbackVector* vector = function->feedback_vector();
FeedbackSlot slot(slot_index);
- if (kind == Code::LOAD_IC) {
- LoadICNexus nexus(vector, slot);
- CHECK_EQ(nexus.StateFromFeedback(), state);
- } else if (kind == Code::KEYED_LOAD_IC) {
- KeyedLoadICNexus nexus(vector, slot);
- CHECK_EQ(nexus.StateFromFeedback(), state);
- }
+ LoadICNexus nexus(vector, slot);
+ CHECK_EQ(nexus.StateFromFeedback(), state);
}
-
TEST(MonomorphicStaysMonomorphicAfterGC) {
if (FLAG_always_opt) return;
ManualGCScope manual_gc_scope;
@@ -4365,12 +4234,12 @@ TEST(MonomorphicStaysMonomorphicAfterGC) {
CompileRun("(testIC())");
}
CcTest::CollectAllGarbage();
- CheckIC(loadIC, Code::LOAD_IC, 0, MONOMORPHIC);
+ CheckIC(loadIC, 0, MONOMORPHIC);
{
v8::HandleScope scope(CcTest::isolate());
CompileRun("(testIC())");
}
- CheckIC(loadIC, Code::LOAD_IC, 0, MONOMORPHIC);
+ CheckIC(loadIC, 0, MONOMORPHIC);
}
@@ -4401,12 +4270,12 @@ TEST(PolymorphicStaysPolymorphicAfterGC) {
CompileRun("(testIC())");
}
CcTest::CollectAllGarbage();
- CheckIC(loadIC, Code::LOAD_IC, 0, POLYMORPHIC);
+ CheckIC(loadIC, 0, POLYMORPHIC);
{
v8::HandleScope scope(CcTest::isolate());
CompileRun("(testIC())");
}
- CheckIC(loadIC, Code::LOAD_IC, 0, POLYMORPHIC);
+ CheckIC(loadIC, 0, POLYMORPHIC);
}
@@ -4637,7 +4506,7 @@ TEST(Regress507979) {
for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
// Let's not optimize the loop away.
- CHECK(obj->address() != nullptr);
+ CHECK_NOT_NULL(obj->address());
}
}
@@ -5039,8 +4908,8 @@ TEST(Regress1878) {
void AllocateInSpace(Isolate* isolate, size_t bytes, AllocationSpace space) {
- CHECK(bytes >= FixedArray::kHeaderSize);
- CHECK(bytes % kPointerSize == 0);
+ CHECK_LE(FixedArray::kHeaderSize, bytes);
+ CHECK_EQ(0, bytes % kPointerSize);
Factory* factory = isolate->factory();
HandleScope scope(isolate);
AlwaysAllocateScope always_allocate(isolate);
@@ -5172,8 +5041,8 @@ static void RemoveCodeAndGC(const v8::FunctionCallbackInfo<v8::Value>& args) {
Handle<Object> obj = v8::Utils::OpenHandle(*args[0]);
Handle<JSFunction> fun = Handle<JSFunction>::cast(obj);
fun->shared()->ClearBytecodeArray(); // Bytecode is code too.
- fun->ReplaceCode(*BUILTIN_CODE(isolate, CompileLazy));
- fun->shared()->ReplaceCode(*BUILTIN_CODE(isolate, CompileLazy));
+ fun->set_code(*BUILTIN_CODE(isolate, CompileLazy));
+ fun->shared()->set_code(*BUILTIN_CODE(isolate, CompileLazy));
CcTest::CollectAllAvailableGarbage();
}
@@ -5954,8 +5823,8 @@ Handle<Code> GenerateDummyImmovableCode(Isolate* isolate) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
const bool kImmovable = true;
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>(), kImmovable);
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>(), kImmovable);
CHECK(code->IsCode());
return code;
@@ -5994,7 +5863,7 @@ HEAP_TEST(Regress5831) {
// Generate the code.
Handle<Code> code = GenerateDummyImmovableCode(isolate);
- CHECK(code->Size() <= i::kMaxRegularHeapObjectSize);
+ CHECK_GE(i::kMaxRegularHeapObjectSize, code->Size());
CHECK(!heap->code_space()->FirstPage()->Contains(code->address()));
// Ensure it's not in large object space.
diff --git a/deps/v8/test/cctest/heap/test-invalidated-slots.cc b/deps/v8/test/cctest/heap/test-invalidated-slots.cc
index 4353fb0bff..f7900cb116 100644
--- a/deps/v8/test/cctest/heap/test-invalidated-slots.cc
+++ b/deps/v8/test/cctest/heap/test-invalidated-slots.cc
@@ -20,6 +20,7 @@ namespace heap {
Page* HeapTester::AllocateByteArraysOnPage(
Heap* heap, std::vector<ByteArray*>* byte_arrays) {
+ PauseAllocationObserversScope pause_observers(heap);
const int kLength = 256 - ByteArray::kHeaderSize;
const int kSize = ByteArray::SizeFor(kLength);
CHECK_EQ(kSize, 256);
@@ -134,6 +135,7 @@ HEAP_TEST(InvalidatedSlotsAfterTrimming) {
}
HEAP_TEST(InvalidatedSlotsEvacuationCandidate) {
+ ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
std::vector<ByteArray*> byte_arrays;
diff --git a/deps/v8/test/cctest/heap/test-lab.cc b/deps/v8/test/cctest/heap/test-lab.cc
index d2f5ac2d54..f8ef6f5250 100644
--- a/deps/v8/test/cctest/heap/test-lab.cc
+++ b/deps/v8/test/cctest/heap/test-lab.cc
@@ -5,12 +5,9 @@
#include <vector>
#include "src/globals.h"
-#include "src/heap/heap.h"
-#include "src/heap/spaces.h"
+#include "src/heap/heap-inl.h"
#include "src/heap/spaces-inl.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/heap/incremental-marking.h -> src/objects-inl.h
-#include "src/objects-inl.h"
+#include "src/objects.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/heap/test-mark-compact.cc b/deps/v8/test/cctest/heap/test-mark-compact.cc
index 6cf6fd1902..8f704b1a97 100644
--- a/deps/v8/test/cctest/heap/test-mark-compact.cc
+++ b/deps/v8/test/cctest/heap/test-mark-compact.cc
@@ -42,7 +42,6 @@
#include "src/global-handles.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
-#include "src/heap/sequential-marking-deque.h"
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
@@ -52,30 +51,6 @@ namespace v8 {
namespace internal {
namespace heap {
-TEST(SequentialMarkingDeque) {
- CcTest::InitializeVM();
- SequentialMarkingDeque s(CcTest::i_isolate()->heap());
- s.SetUp();
- s.StartUsing();
- Address original_address = reinterpret_cast<Address>(&s);
- Address current_address = original_address;
- while (!s.IsFull()) {
- s.Push(HeapObject::FromAddress(current_address));
- current_address += kPointerSize;
- }
-
- while (!s.IsEmpty()) {
- Address value = s.Pop()->address();
- current_address -= kPointerSize;
- CHECK_EQ(current_address, value);
- }
-
- CHECK_EQ(original_address, current_address);
- s.StopUsing();
- CcTest::i_isolate()->cancelable_task_manager()->CancelAndWait();
- s.TearDown();
-}
-
TEST(Promotion) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
diff --git a/deps/v8/test/cctest/heap/test-page-promotion.cc b/deps/v8/test/cctest/heap/test-page-promotion.cc
index 5eb2aec005..c9ad761b35 100644
--- a/deps/v8/test/cctest/heap/test-page-promotion.cc
+++ b/deps/v8/test/cctest/heap/test-page-promotion.cc
@@ -6,8 +6,6 @@
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/spaces-inl.h"
#include "src/isolate.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/factory.h -> src/objects-inl.h
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
diff --git a/deps/v8/test/cctest/heap/test-spaces.cc b/deps/v8/test/cctest/heap/test-spaces.cc
index 73ecb28efe..f5a0083771 100644
--- a/deps/v8/test/cctest/heap/test-spaces.cc
+++ b/deps/v8/test/cctest/heap/test-spaces.cc
@@ -30,11 +30,8 @@
#include "src/base/platform/platform.h"
#include "src/factory.h"
#include "src/heap/spaces-inl.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/heap/incremental-marking.h -> src/objects-inl.h
#include "src/objects-inl.h"
#include "src/snapshot/snapshot.h"
-#include "src/v8.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"
@@ -83,6 +80,7 @@ class TestCodeRangeScope {
DISALLOW_COPY_AND_ASSIGN(TestCodeRangeScope);
};
+namespace test_spaces {
static void VerifyMemoryChunk(Isolate* isolate,
Heap* heap,
@@ -242,7 +240,7 @@ TEST(MemoryAllocator) {
Heap* heap = isolate->heap();
MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
- CHECK(memory_allocator != nullptr);
+ CHECK_NOT_NULL(memory_allocator);
CHECK(memory_allocator->SetUp(heap->MaxReserved(), 0));
TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
@@ -706,6 +704,7 @@ TEST(ShrinkPageToHighWaterMarkTwoWordFiller) {
CHECK_EQ(0u, shrunk);
}
+} // namespace test_spaces
} // namespace heap
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
index de6ad3a717..d6a93fdda2 100644
--- a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
@@ -181,12 +181,7 @@ void BytecodeExpectationsPrinter::PrintBytecodeOperand(
break;
case OperandType::kIdx: {
stream << 'U' << size_tag << '(';
- uint32_t idx = bytecode_iterator.GetIndexOperand(op_index);
- if (bytecode == Bytecode::kCallJSRuntime && op_index == 0) {
- stream << "%" << NameForNativeContextIntrinsicIndex(idx);
- } else {
- stream << idx;
- }
+ stream << bytecode_iterator.GetIndexOperand(op_index);
break;
}
case OperandType::kUImm:
@@ -215,6 +210,12 @@ void BytecodeExpectationsPrinter::PrintBytecodeOperand(
stream << "Runtime::k" << i::Runtime::FunctionForId(id)->name;
break;
}
+ case OperandType::kNativeContextIndex: {
+ stream << 'U' << size_tag << '(';
+ uint32_t idx = bytecode_iterator.GetNativeContextIndexOperand(op_index);
+ stream << "%" << NameForNativeContextIntrinsicIndex(idx);
+ break;
+ }
default:
UNREACHABLE();
}
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
index a5af50d6ca..e5de344960 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
@@ -39,12 +39,12 @@ bytecodes: [
B(LdaZero),
B(Star), R(1),
B(Ldar), R(0),
- /* 54 E> */ B(StaKeyedPropertySloppy), R(2), R(1), U8(2),
+ /* 54 E> */ B(StaKeyedProperty), R(2), R(1), U8(2),
B(LdaSmi), I8(1),
B(Star), R(1),
B(Ldar), R(0),
/* 59 E> */ B(AddSmi), I8(1), U8(0),
- B(StaKeyedPropertySloppy), R(2), R(1), U8(2),
+ B(StaKeyedProperty), R(2), R(1), U8(2),
B(Ldar), R(2),
/* 65 S> */ B(Return),
]
@@ -92,9 +92,9 @@ bytecodes: [
B(LdaZero),
B(Star), R(3),
B(Ldar), R(0),
- /* 56 E> */ B(StaKeyedPropertySloppy), R(4), R(3), U8(1),
+ /* 56 E> */ B(StaKeyedProperty), R(4), R(3), U8(1),
B(Ldar), R(4),
- B(StaKeyedPropertySloppy), R(2), R(1), U8(8),
+ B(StaKeyedProperty), R(2), R(1), U8(8),
B(LdaSmi), I8(1),
B(Star), R(1),
B(CreateArrayLiteral), U8(2), U8(4), U8(37),
@@ -103,9 +103,9 @@ bytecodes: [
B(Star), R(3),
B(Ldar), R(0),
/* 68 E> */ B(AddSmi), I8(2), U8(3),
- B(StaKeyedPropertySloppy), R(4), R(3), U8(5),
+ B(StaKeyedProperty), R(4), R(3), U8(5),
B(Ldar), R(4),
- B(StaKeyedPropertySloppy), R(2), R(1), U8(8),
+ B(StaKeyedProperty), R(2), R(1), U8(8),
B(Ldar), R(2),
/* 76 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden
index dd55ae9856..925795a673 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden
@@ -238,7 +238,7 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 44
+bytecode array length: 43
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(17),
@@ -249,8 +249,8 @@ bytecodes: [
/* 55 E> */ B(Add), R(1), U8(0),
B(Star), R(1),
B(Ldar), R(0),
- B(ToNumber), R(2), U8(1),
- B(Ldar), R(2),
+ B(ToNumber), U8(1),
+ B(Star), R(2),
B(Inc), U8(1),
B(Star), R(0),
B(Ldar), R(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
index 5ec4d045ec..84fdd0f715 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
@@ -14,18 +14,16 @@ snippet: "
"
frame size: 9
parameter count: 1
-bytecode array length: 237
+bytecode array length: 230
bytecodes: [
B(Ldar), R(0),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(0), U8(1),
B(PushContext), R(2),
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(1),
B(Mov), R(closure), R(2),
@@ -121,8 +119,8 @@ bytecodes: [
/* 22 S> */ B(Return),
]
constant pool: [
- Smi [44],
- Smi [93],
+ Smi [37],
+ Smi [86],
Smi [15],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
@@ -132,8 +130,8 @@ constant pool: [
Smi [23],
]
handlers: [
- [47, 182, 190],
- [50, 143, 145],
+ [40, 175, 183],
+ [43, 136, 138],
]
---
@@ -143,18 +141,16 @@ snippet: "
"
frame size: 9
parameter count: 1
-bytecode array length: 290
+bytecode array length: 283
bytecodes: [
B(Ldar), R(0),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(0), U8(1),
B(PushContext), R(2),
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(1),
B(Mov), R(closure), R(2),
@@ -271,9 +267,9 @@ bytecodes: [
/* 31 S> */ B(Return),
]
constant pool: [
- Smi [44],
- Smi [97],
- Smi [146],
+ Smi [37],
+ Smi [90],
+ Smi [139],
Smi [15],
Smi [7],
Smi [15],
@@ -285,8 +281,8 @@ constant pool: [
Smi [23],
]
handlers: [
- [47, 235, 243],
- [50, 196, 198],
+ [40, 228, 236],
+ [43, 189, 191],
]
---
@@ -296,18 +292,16 @@ snippet: "
"
frame size: 22
parameter count: 1
-bytecode array length: 585
+bytecode array length: 571
bytecodes: [
B(Ldar), R(2),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(2), U8(1),
B(PushContext), R(11),
B(RestoreGeneratorState), R(2),
B(Star), R(10),
B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(11),
- B(CallRuntime), U16(Runtime::kAbort), R(11), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(10),
B(Mov), R(closure), R(11),
@@ -349,10 +343,8 @@ bytecodes: [
B(SwitchOnSmiNoFeedback), U8(7), U8(1), I8(1),
B(LdaSmi), I8(-2),
/* 36 E> */ B(TestEqualStrictNoFeedback), R(10),
- B(JumpIfTrue), U8(11),
- B(LdaSmi), I8(45),
- B(Star), R(19),
- B(CallRuntime), U16(Runtime::kAbort), R(19), U8(1),
+ B(JumpIfTrue), U8(4),
+ B(Abort), U8(43),
/* 31 S> */ B(LdaNamedProperty), R(4), U8(8), U8(7),
B(Star), R(19),
B(CallProperty0), R(19), R(4), U8(5),
@@ -392,7 +384,7 @@ bytecodes: [
B(Jump), U8(62),
B(LdaZero),
B(Star), R(6),
- B(JumpLoop), U8(123), I8(0),
+ B(JumpLoop), U8(116), I8(0),
B(Jump), U8(40),
B(Star), R(19),
B(Ldar), R(closure),
@@ -436,7 +428,7 @@ bytecodes: [
B(TestTypeOf), U8(5),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(134),
+ B(Wide), B(LdaSmi), I16(137),
B(Star), R(18),
B(LdaConstant), U8(16),
B(Star), R(19),
@@ -546,14 +538,14 @@ bytecodes: [
/* 50 S> */ B(Return),
]
constant pool: [
- Smi [44],
- Smi [111],
- Smi [441],
+ Smi [37],
+ Smi [104],
+ Smi [427],
Smi [15],
Smi [7],
TUPLE2_TYPE,
SYMBOL_TYPE,
- Smi [85],
+ Smi [78],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -567,17 +559,17 @@ constant pool: [
Smi [6],
Smi [14],
FIXED_ARRAY_TYPE,
- Smi [455],
+ Smi [448],
Smi [6],
Smi [20],
Smi [23],
]
handlers: [
- [47, 530, 538],
- [50, 491, 493],
- [97, 291, 299],
- [100, 251, 253],
- [360, 370, 372],
+ [40, 516, 524],
+ [43, 477, 479],
+ [90, 277, 285],
+ [93, 237, 239],
+ [346, 356, 358],
]
---
@@ -588,18 +580,16 @@ snippet: "
"
frame size: 17
parameter count: 1
-bytecode array length: 577
+bytecode array length: 560
bytecodes: [
B(Ldar), R(0),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(0), U8(1),
B(PushContext), R(2),
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(5), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(1),
B(Mov), R(closure), R(2),
@@ -650,10 +640,8 @@ bytecodes: [
B(SwitchOnSmiNoFeedback), U8(10), U8(3), I8(1),
B(LdaSmi), I8(-2),
B(TestEqualStrictNoFeedback), R(1),
- B(JumpIfTrue), U8(11),
- B(LdaSmi), I8(45),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kAbort), R(12), U8(1),
+ B(JumpIfTrue), U8(4),
+ B(Abort), U8(43),
B(Ldar), R(7),
B(SwitchOnSmiNoFeedback), U8(13), U8(2), I8(1),
B(LdaNamedProperty), R(8), U8(15), U8(8),
@@ -725,7 +713,7 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
B(LdaNamedProperty), R(6), U8(18), U8(12),
- B(JumpIfToBooleanTrue), U8(50),
+ B(JumpIfToBooleanTrue), U8(47),
B(LdaNamedProperty), R(6), U8(19), U8(14),
B(Star), R(15),
B(LdaFalse),
@@ -741,7 +729,7 @@ bytecodes: [
B(Star), R(9),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(Star), R(7),
- B(Wide), B(JumpLoop), U16(260), I16(0),
+ B(JumpLoop), U8(252), I8(0),
B(LdaNamedProperty), R(6), U8(19), U8(14),
B(Star), R(8),
B(LdaSmi), I8(1),
@@ -821,19 +809,19 @@ bytecodes: [
/* 60 S> */ B(Return),
]
constant pool: [
- Smi [44],
- Smi [132],
- Smi [132],
- Smi [132],
- Smi [433],
+ Smi [37],
+ Smi [125],
+ Smi [125],
+ Smi [125],
+ Smi [416],
Smi [15],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["g"],
SYMBOL_TYPE,
SYMBOL_TYPE,
- Smi [237],
- Smi [109],
- Smi [169],
+ Smi [230],
+ Smi [102],
+ Smi [162],
Smi [17],
Smi [42],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
@@ -843,14 +831,14 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
FIXED_ARRAY_TYPE,
- Smi [447],
- Smi [327],
+ Smi [437],
+ Smi [324],
Smi [6],
Smi [20],
Smi [23],
]
handlers: [
- [47, 522, 530],
- [50, 483, 485],
+ [40, 505, 513],
+ [43, 466, 468],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
index 751c4f27a4..1315be378b 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
@@ -65,33 +65,27 @@ handlers: [
snippet: "
Math.max(0, ...[1, 2, 3], 4);
"
-frame size: 8
+frame size: 6
parameter count: 1
-bytecode array length: 60
+bytecode array length: 51
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(LdaUndefined),
- B(Star), R(1),
- /* 34 E> */ B(LdaGlobal), U8(0), U8(0),
+ /* 34 S> */ B(LdaGlobal), U8(0), U8(0),
B(Star), R(0),
B(LdaNamedProperty), R(0), U8(1), U8(2),
- B(Star), R(2),
- B(LdaUndefined),
- B(Star), R(4),
+ B(Star), R(1),
B(CreateArrayLiteral), U8(2), U8(4), U8(37),
- B(Star), R(5),
- B(LdaUndefined),
- B(Star), R(6),
+ B(Star), R(3),
B(CreateArrayLiteral), U8(3), U8(5), U8(37),
- B(Star), R(7),
- B(CallJSRuntime), U8(%spread_iterable), R(6), U8(2),
- B(Star), R(6),
- B(CreateArrayLiteral), U8(4), U8(6), U8(37),
- B(Star), R(7),
- B(CallJSRuntime), U8(%spread_arguments), R(4), U8(4),
B(Star), R(4),
- B(Mov), R(0), R(3),
- B(CallJSRuntime), U8(%reflect_apply), R(1), U8(4),
+ B(CallJSRuntime), U8(%spread_iterable), R(4), U8(1),
+ B(Star), R(4),
+ B(CreateArrayLiteral), U8(4), U8(6), U8(37),
+ B(Star), R(5),
+ B(CallJSRuntime), U8(%spread_arguments), R(3), U8(3),
+ B(Star), R(3),
+ B(Mov), R(0), R(2),
+ B(CallJSRuntime), U8(%reflect_apply), R(1), U8(3),
B(LdaUndefined),
/* 64 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden
index 83911ce4e2..cb201ed8e6 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden
@@ -70,16 +70,14 @@ snippet: "
function f() { return %spread_iterable([1]) }
f();
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 15
+bytecode array length: 12
bytecodes: [
/* 10 E> */ B(StackCheck),
- /* 15 S> */ B(LdaUndefined),
+ /* 15 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star), R(0),
- B(CreateArrayLiteral), U8(0), U8(0), U8(37),
- B(Star), R(1),
- B(CallJSRuntime), U8(%spread_iterable), R(0), U8(2),
+ B(CallJSRuntime), U8(%spread_iterable), R(0), U8(1),
/* 43 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
index db0f4772c3..ea44a8a040 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
@@ -123,7 +123,7 @@ bytecodes: [
/* 128 S> */ B(Ldar), R(this),
B(ThrowSuperNotCalledIfHole),
B(LdaSmi), I8(2),
- /* 136 E> */ B(StaNamedPropertyStrict), R(2), U8(0), U8(2),
+ /* 136 E> */ B(StaNamedProperty), R(2), U8(0), U8(2),
B(Ldar), R(this),
B(ThrowSuperNotCalledIfHole),
/* 141 S> */ B(Return),
@@ -164,7 +164,7 @@ bytecodes: [
/* 126 S> */ B(Ldar), R(this),
B(ThrowSuperNotCalledIfHole),
B(LdaSmi), I8(2),
- /* 134 E> */ B(StaNamedPropertyStrict), R(2), U8(0), U8(2),
+ /* 134 E> */ B(StaNamedProperty), R(2), U8(0), U8(2),
B(Ldar), R(this),
B(ThrowSuperNotCalledIfHole),
/* 139 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
index ea761f5dfa..f2d00b28e1 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
@@ -60,7 +60,7 @@ bytecodes: [
B(Mov), R(1), R(0),
/* 54 S> */ B(LdaNamedProperty), R(1), U8(1), U8(1),
B(MulSmi), I8(2), U8(3),
- /* 61 E> */ B(StaNamedPropertySloppy), R(1), U8(1), U8(4),
+ /* 61 E> */ B(StaNamedProperty), R(1), U8(1), U8(4),
B(LdaUndefined),
/* 67 S> */ B(Return),
]
@@ -86,7 +86,7 @@ bytecodes: [
B(Star), R(2),
B(LdaKeyedProperty), R(1), U8(1),
B(BitwiseXorSmi), I8(2), U8(3),
- /* 57 E> */ B(StaKeyedPropertySloppy), R(1), R(2), U8(4),
+ /* 57 E> */ B(StaKeyedProperty), R(1), R(2), U8(4),
B(LdaUndefined),
/* 63 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
index c259f53f64..2aefc7a142 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
@@ -31,13 +31,13 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 17
+bytecode array length: 16
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- /* 45 S> */ B(ToNumber), R(1), U8(0),
- B(Ldar), R(1),
+ /* 45 S> */ B(ToNumber), U8(0),
+ B(Star), R(1),
B(Inc), U8(0),
B(Star), R(0),
B(Ldar), R(1),
@@ -74,13 +74,13 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 17
+bytecode array length: 16
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- /* 45 S> */ B(ToNumber), R(1), U8(0),
- B(Ldar), R(1),
+ /* 45 S> */ B(ToNumber), U8(0),
+ B(Star), R(1),
B(Dec), U8(0),
B(Star), R(0),
B(Ldar), R(1),
@@ -97,16 +97,16 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 27
+bytecode array length: 26
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(1),
B(Mov), R(1), R(0),
/* 54 S> */ B(LdaNamedProperty), R(1), U8(1), U8(1),
- B(ToNumber), R(2), U8(5),
- B(Ldar), R(2),
+ B(ToNumber), U8(5),
+ B(Star), R(2),
B(Inc), U8(5),
- /* 66 E> */ B(StaNamedPropertySloppy), R(1), U8(1), U8(3),
+ /* 66 E> */ B(StaNamedProperty), R(1), U8(1), U8(3),
B(Ldar), R(2),
/* 69 S> */ B(Return),
]
@@ -130,7 +130,7 @@ bytecodes: [
B(Mov), R(1), R(0),
/* 54 S> */ B(LdaNamedProperty), R(1), U8(1), U8(1),
B(Dec), U8(5),
- /* 65 E> */ B(StaNamedPropertySloppy), R(1), U8(1), U8(3),
+ /* 65 E> */ B(StaNamedProperty), R(1), U8(1), U8(3),
/* 69 S> */ B(Return),
]
constant pool: [
@@ -146,7 +146,7 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 32
+bytecode array length: 31
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 45 S> */ B(LdaConstant), U8(0),
@@ -155,10 +155,10 @@ bytecodes: [
B(Mov), R(2), R(1),
/* 72 S> */ B(Ldar), R(0),
/* 81 E> */ B(LdaKeyedProperty), R(2), U8(1),
- B(ToNumber), R(4), U8(5),
- B(Ldar), R(4),
+ B(ToNumber), U8(5),
+ B(Star), R(4),
B(Dec), U8(5),
- /* 86 E> */ B(StaKeyedPropertySloppy), R(2), R(0), U8(3),
+ /* 86 E> */ B(StaKeyedProperty), R(2), R(0), U8(3),
B(Ldar), R(4),
/* 89 S> */ B(Return),
]
@@ -185,7 +185,7 @@ bytecodes: [
/* 72 S> */ B(Ldar), R(0),
/* 83 E> */ B(LdaKeyedProperty), R(2), U8(1),
B(Inc), U8(5),
- /* 87 E> */ B(StaKeyedPropertySloppy), R(2), R(0), U8(3),
+ /* 87 E> */ B(StaKeyedProperty), R(2), R(0), U8(3),
/* 89 S> */ B(Return),
]
constant pool: [
@@ -227,7 +227,7 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 29
+bytecode array length: 28
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(1),
@@ -237,8 +237,8 @@ bytecodes: [
/* 53 S> */ B(CreateClosure), U8(0), U8(0), U8(2),
B(Star), R(0),
/* 78 S> */ B(LdaCurrentContextSlot), U8(4),
- B(ToNumber), R(2), U8(1),
- B(Ldar), R(2),
+ B(ToNumber), U8(1),
+ B(Star), R(2),
B(Dec), U8(1),
/* 86 E> */ B(StaCurrentContextSlot), U8(4),
B(Ldar), R(2),
@@ -256,7 +256,7 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 29
+bytecode array length: 28
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), I8(1),
@@ -264,12 +264,12 @@ bytecodes: [
/* 55 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star), R(1),
/* 63 S> */ B(Ldar), R(0),
- B(ToNumber), R(3), U8(1),
- B(Ldar), R(3),
+ B(ToNumber), U8(1),
+ B(Star), R(3),
B(Inc), U8(1),
B(Star), R(0),
B(LdaSmi), I8(2),
- /* 79 E> */ B(StaKeyedPropertySloppy), R(1), R(3), U8(2),
+ /* 79 E> */ B(StaKeyedProperty), R(1), R(3), U8(2),
/* 83 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
index f363e3d168..f02cb544d0 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
@@ -16,18 +16,16 @@ snippet: "
"
frame size: 23
parameter count: 1
-bytecode array length: 618
+bytecode array length: 589
bytecodes: [
B(Ldar), R(2),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(2), U8(1),
B(PushContext), R(12),
B(RestoreGeneratorState), R(2),
B(Star), R(11),
B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kAbort), R(12), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(11),
B(Mov), R(closure), R(12),
@@ -35,9 +33,7 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(12), U8(2),
B(Star), R(2),
/* 16 E> */ B(StackCheck),
- B(LdaUndefined),
- B(Star), R(12),
- B(CallJSRuntime), U8(%async_function_promise_create), R(12), U8(1),
+ B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
B(Star), R(10),
B(Mov), R(context), R(14),
B(Mov), R(context), R(15),
@@ -64,10 +60,8 @@ bytecodes: [
B(SwitchOnSmiNoFeedback), U8(6), U8(1), I8(0),
B(LdaSmi), I8(-2),
/* 43 E> */ B(TestEqualStrictNoFeedback), R(11),
- B(JumpIfTrue), U8(11),
- B(LdaSmi), I8(45),
- B(Star), R(20),
- B(CallRuntime), U16(Runtime::kAbort), R(20), U8(1),
+ B(JumpIfTrue), U8(4),
+ B(Abort), U8(43),
/* 40 S> */ B(LdaNamedProperty), R(4), U8(7), U8(11),
B(Star), R(20),
B(CallProperty0), R(20), R(4), U8(9),
@@ -105,7 +99,7 @@ bytecodes: [
B(Mov), R(3), R(0),
B(LdaZero),
B(Star), R(6),
- B(JumpLoop), U8(118), I8(0),
+ B(JumpLoop), U8(111), I8(0),
B(Jump), U8(40),
B(Star), R(20),
B(Ldar), R(closure),
@@ -149,7 +143,7 @@ bytecodes: [
B(TestTypeOf), U8(5),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(134),
+ B(Wide), B(LdaSmi), I16(137),
B(Star), R(19),
B(LdaConstant), U8(13),
B(Star), R(20),
@@ -223,16 +217,14 @@ bytecodes: [
B(Ldar), R(17),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(16),
- B(LdaUndefined),
- B(Star), R(18),
- B(Mov), R(10), R(17),
- B(CallJSRuntime), U8(%promise_resolve), R(16), U8(3),
+ B(Star), R(17),
+ B(Mov), R(10), R(16),
+ B(CallJSRuntime), U8(%promise_resolve), R(16), U8(2),
B(LdaZero),
B(Star), R(12),
B(Mov), R(10), R(13),
- B(Jump), U8(61),
- B(Jump), U8(45),
+ B(Jump), U8(58),
+ B(Jump), U8(42),
B(Star), R(16),
B(Ldar), R(closure),
B(CreateCatchContext), R(16), U8(10), U8(15),
@@ -241,14 +233,12 @@ bytecodes: [
B(SetPendingMessage),
B(Ldar), R(15),
B(PushContext), R(16),
- B(LdaUndefined),
- B(Star), R(17),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(19),
+ B(Star), R(18),
B(LdaFalse),
- B(Star), R(20),
- B(Mov), R(10), R(18),
- B(CallJSRuntime), U8(%promise_internal_reject), R(17), U8(4),
+ B(Star), R(19),
+ B(Mov), R(10), R(17),
+ B(CallJSRuntime), U8(%promise_internal_reject), R(17), U8(3),
B(PopContext), R(16),
B(LdaZero),
B(Star), R(12),
@@ -264,10 +254,7 @@ bytecodes: [
B(LdaTheHole),
B(SetPendingMessage),
B(Star), R(14),
- B(LdaUndefined),
- B(Star), R(15),
- B(Mov), R(10), R(16),
- B(CallJSRuntime), U8(%async_function_promise_release), R(15), U8(2),
+ B(CallJSRuntime), U8(%async_function_promise_release), R(10), U8(1),
B(Ldar), R(14),
B(SetPendingMessage),
B(Ldar), R(12),
@@ -281,13 +268,13 @@ bytecodes: [
/* 57 S> */ B(Return),
]
constant pool: [
- Smi [99],
- Smi [356],
- Smi [436],
+ Smi [89],
+ Smi [339],
+ Smi [419],
TUPLE2_TYPE,
SYMBOL_TYPE,
SYMBOL_TYPE,
- Smi [47],
+ Smi [40],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -301,11 +288,11 @@ constant pool: [
Smi [9],
]
handlers: [
- [56, 571, 579],
- [59, 526, 528],
- [65, 274, 282],
- [68, 234, 236],
- [342, 400, 402],
+ [46, 548, 556],
+ [49, 506, 508],
+ [55, 257, 265],
+ [58, 217, 219],
+ [325, 383, 385],
]
---
@@ -317,18 +304,16 @@ snippet: "
"
frame size: 23
parameter count: 1
-bytecode array length: 650
+bytecode array length: 618
bytecodes: [
B(Ldar), R(2),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(2), U8(1),
B(PushContext), R(12),
B(RestoreGeneratorState), R(2),
B(Star), R(11),
B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kAbort), R(12), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(11),
B(Mov), R(closure), R(12),
@@ -336,9 +321,7 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(12), U8(2),
B(Star), R(2),
/* 16 E> */ B(StackCheck),
- B(LdaUndefined),
- B(Star), R(12),
- B(CallJSRuntime), U8(%async_function_promise_create), R(12), U8(1),
+ B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
B(Star), R(10),
B(Mov), R(context), R(14),
B(Mov), R(context), R(15),
@@ -365,10 +348,8 @@ bytecodes: [
B(SwitchOnSmiNoFeedback), U8(6), U8(1), I8(0),
B(LdaSmi), I8(-2),
/* 43 E> */ B(TestEqualStrictNoFeedback), R(11),
- B(JumpIfTrue), U8(11),
- B(LdaSmi), I8(45),
- B(Star), R(20),
- B(CallRuntime), U16(Runtime::kAbort), R(20), U8(1),
+ B(JumpIfTrue), U8(4),
+ B(Abort), U8(43),
/* 40 S> */ B(LdaNamedProperty), R(4), U8(7), U8(11),
B(Star), R(20),
B(CallProperty0), R(20), R(4), U8(9),
@@ -451,7 +432,7 @@ bytecodes: [
B(TestTypeOf), U8(5),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(134),
+ B(Wide), B(LdaSmi), I16(137),
B(Star), R(19),
B(LdaConstant), U8(13),
B(Star), R(20),
@@ -525,20 +506,18 @@ bytecodes: [
B(LdaZero),
B(Star), R(12),
B(Mov), R(17), R(13),
- B(Jump), U8(87),
+ B(Jump), U8(81),
B(Ldar), R(17),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(16),
- B(LdaUndefined),
- B(Star), R(18),
- B(Mov), R(10), R(17),
- B(CallJSRuntime), U8(%promise_resolve), R(16), U8(3),
+ B(Star), R(17),
+ B(Mov), R(10), R(16),
+ B(CallJSRuntime), U8(%promise_resolve), R(16), U8(2),
B(LdaSmi), I8(1),
B(Star), R(12),
B(Mov), R(10), R(13),
- B(Jump), U8(62),
- B(Jump), U8(46),
+ B(Jump), U8(59),
+ B(Jump), U8(43),
B(Star), R(16),
B(Ldar), R(closure),
B(CreateCatchContext), R(16), U8(10), U8(17),
@@ -547,14 +526,12 @@ bytecodes: [
B(SetPendingMessage),
B(Ldar), R(15),
B(PushContext), R(16),
- B(LdaUndefined),
- B(Star), R(17),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(19),
+ B(Star), R(18),
B(LdaFalse),
- B(Star), R(20),
- B(Mov), R(10), R(18),
- B(CallJSRuntime), U8(%promise_internal_reject), R(17), U8(4),
+ B(Star), R(19),
+ B(Mov), R(10), R(17),
+ B(CallJSRuntime), U8(%promise_internal_reject), R(17), U8(3),
B(PopContext), R(16),
B(LdaSmi), I8(1),
B(Star), R(12),
@@ -570,20 +547,15 @@ bytecodes: [
B(LdaTheHole),
B(SetPendingMessage),
B(Star), R(14),
- B(LdaUndefined),
- B(Star), R(15),
- B(Mov), R(10), R(16),
- B(CallJSRuntime), U8(%async_function_promise_release), R(15), U8(2),
+ B(CallJSRuntime), U8(%async_function_promise_release), R(10), U8(1),
B(Ldar), R(14),
B(SetPendingMessage),
B(Ldar), R(12),
B(SwitchOnSmiNoFeedback), U8(18), U8(3), I8(0),
- B(Jump), U8(24),
- B(LdaUndefined),
- B(Star), R(15),
- B(Mov), R(10), R(16),
- B(Mov), R(13), R(17),
- B(CallJSRuntime), U8(%promise_resolve), R(15), U8(3),
+ B(Jump), U8(21),
+ B(Mov), R(10), R(15),
+ B(Mov), R(13), R(16),
+ B(CallJSRuntime), U8(%promise_resolve), R(15), U8(2),
B(Ldar), R(10),
/* 68 S> */ B(Return),
B(Ldar), R(13),
@@ -594,13 +566,13 @@ bytecodes: [
/* 68 S> */ B(Return),
]
constant pool: [
- Smi [99],
- Smi [359],
- Smi [439],
+ Smi [89],
+ Smi [342],
+ Smi [422],
TUPLE2_TYPE,
SYMBOL_TYPE,
SYMBOL_TYPE,
- Smi [47],
+ Smi [40],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -613,15 +585,15 @@ constant pool: [
Smi [14],
FIXED_ARRAY_TYPE,
Smi [6],
+ Smi [19],
Smi [22],
- Smi [25],
]
handlers: [
- [56, 587, 595],
- [59, 541, 543],
- [65, 276, 284],
- [68, 236, 238],
- [345, 403, 405],
+ [46, 564, 572],
+ [49, 521, 523],
+ [55, 259, 267],
+ [58, 219, 221],
+ [328, 386, 388],
]
---
@@ -636,18 +608,16 @@ snippet: "
"
frame size: 23
parameter count: 1
-bytecode array length: 636
+bytecode array length: 607
bytecodes: [
B(Ldar), R(2),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(2), U8(1),
B(PushContext), R(12),
B(RestoreGeneratorState), R(2),
B(Star), R(11),
B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kAbort), R(12), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(11),
B(Mov), R(closure), R(12),
@@ -655,9 +625,7 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(12), U8(2),
B(Star), R(2),
/* 16 E> */ B(StackCheck),
- B(LdaUndefined),
- B(Star), R(12),
- B(CallJSRuntime), U8(%async_function_promise_create), R(12), U8(1),
+ B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
B(Star), R(10),
B(Mov), R(context), R(14),
B(Mov), R(context), R(15),
@@ -684,10 +652,8 @@ bytecodes: [
B(SwitchOnSmiNoFeedback), U8(6), U8(1), I8(0),
B(LdaSmi), I8(-2),
/* 43 E> */ B(TestEqualStrictNoFeedback), R(11),
- B(JumpIfTrue), U8(11),
- B(LdaSmi), I8(45),
- B(Star), R(20),
- B(CallRuntime), U16(Runtime::kAbort), R(20), U8(1),
+ B(JumpIfTrue), U8(4),
+ B(Abort), U8(43),
/* 40 S> */ B(LdaNamedProperty), R(4), U8(7), U8(11),
B(Star), R(20),
B(CallProperty0), R(20), R(4), U8(9),
@@ -733,7 +699,7 @@ bytecodes: [
/* 103 S> */ B(Jump), U8(8),
B(LdaZero),
B(Star), R(6),
- B(JumpLoop), U8(136), I8(0),
+ B(JumpLoop), U8(129), I8(0),
B(Jump), U8(40),
B(Star), R(20),
B(Ldar), R(closure),
@@ -777,7 +743,7 @@ bytecodes: [
B(TestTypeOf), U8(5),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(134),
+ B(Wide), B(LdaSmi), I16(137),
B(Star), R(19),
B(LdaConstant), U8(13),
B(Star), R(20),
@@ -851,16 +817,14 @@ bytecodes: [
B(Ldar), R(17),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(16),
- B(LdaUndefined),
- B(Star), R(18),
- B(Mov), R(10), R(17),
- B(CallJSRuntime), U8(%promise_resolve), R(16), U8(3),
+ B(Star), R(17),
+ B(Mov), R(10), R(16),
+ B(CallJSRuntime), U8(%promise_resolve), R(16), U8(2),
B(LdaZero),
B(Star), R(12),
B(Mov), R(10), R(13),
- B(Jump), U8(61),
- B(Jump), U8(45),
+ B(Jump), U8(58),
+ B(Jump), U8(42),
B(Star), R(16),
B(Ldar), R(closure),
B(CreateCatchContext), R(16), U8(10), U8(15),
@@ -869,14 +833,12 @@ bytecodes: [
B(SetPendingMessage),
B(Ldar), R(15),
B(PushContext), R(16),
- B(LdaUndefined),
- B(Star), R(17),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(19),
+ B(Star), R(18),
B(LdaFalse),
- B(Star), R(20),
- B(Mov), R(10), R(18),
- B(CallJSRuntime), U8(%promise_internal_reject), R(17), U8(4),
+ B(Star), R(19),
+ B(Mov), R(10), R(17),
+ B(CallJSRuntime), U8(%promise_internal_reject), R(17), U8(3),
B(PopContext), R(16),
B(LdaZero),
B(Star), R(12),
@@ -892,10 +854,7 @@ bytecodes: [
B(LdaTheHole),
B(SetPendingMessage),
B(Star), R(14),
- B(LdaUndefined),
- B(Star), R(15),
- B(Mov), R(10), R(16),
- B(CallJSRuntime), U8(%async_function_promise_release), R(15), U8(2),
+ B(CallJSRuntime), U8(%async_function_promise_release), R(10), U8(1),
B(Ldar), R(14),
B(SetPendingMessage),
B(Ldar), R(12),
@@ -909,13 +868,13 @@ bytecodes: [
/* 114 S> */ B(Return),
]
constant pool: [
- Smi [99],
- Smi [374],
- Smi [454],
+ Smi [89],
+ Smi [357],
+ Smi [437],
TUPLE2_TYPE,
SYMBOL_TYPE,
SYMBOL_TYPE,
- Smi [47],
+ Smi [40],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -929,11 +888,11 @@ constant pool: [
Smi [9],
]
handlers: [
- [56, 589, 597],
- [59, 544, 546],
- [65, 292, 300],
- [68, 252, 254],
- [360, 418, 420],
+ [46, 566, 574],
+ [49, 524, 526],
+ [55, 275, 283],
+ [58, 235, 237],
+ [343, 401, 403],
]
---
@@ -946,12 +905,10 @@ snippet: "
"
frame size: 19
parameter count: 1
-bytecode array length: 435
+bytecode array length: 417
bytecodes: [
/* 16 E> */ B(StackCheck),
- B(LdaUndefined),
- B(Star), R(9),
- B(CallJSRuntime), U8(%async_function_promise_create), R(9), U8(1),
+ B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
B(Star), R(8),
B(Mov), R(context), R(11),
B(Mov), R(context), R(12),
@@ -984,7 +941,7 @@ bytecodes: [
B(LdaSmi), I8(2),
B(Star), R(4),
B(Ldar), R(5),
- B(StaNamedPropertySloppy), R(1), U8(6), U8(14),
+ B(StaNamedProperty), R(1), U8(6), U8(14),
/* 53 E> */ B(StackCheck),
/* 87 S> */ B(LdaNamedProperty), R(1), U8(6), U8(16),
B(Star), R(14),
@@ -1034,7 +991,7 @@ bytecodes: [
B(TestTypeOf), U8(5),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(134),
+ B(Wide), B(LdaSmi), I16(137),
B(Star), R(16),
B(LdaConstant), U8(10),
B(Star), R(17),
@@ -1071,20 +1028,18 @@ bytecodes: [
B(LdaZero),
B(Star), R(9),
B(Mov), R(14), R(10),
- B(Jump), U8(87),
+ B(Jump), U8(81),
B(Ldar), R(14),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(13),
- B(LdaUndefined),
- B(Star), R(15),
- B(Mov), R(8), R(14),
- B(CallJSRuntime), U8(%promise_resolve), R(13), U8(3),
+ B(Star), R(14),
+ B(Mov), R(8), R(13),
+ B(CallJSRuntime), U8(%promise_resolve), R(13), U8(2),
B(LdaSmi), I8(1),
B(Star), R(9),
B(Mov), R(8), R(10),
- B(Jump), U8(62),
- B(Jump), U8(46),
+ B(Jump), U8(59),
+ B(Jump), U8(43),
B(Star), R(13),
B(Ldar), R(closure),
B(CreateCatchContext), R(13), U8(7), U8(14),
@@ -1093,14 +1048,12 @@ bytecodes: [
B(SetPendingMessage),
B(Ldar), R(12),
B(PushContext), R(13),
- B(LdaUndefined),
- B(Star), R(14),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(16),
+ B(Star), R(15),
B(LdaFalse),
- B(Star), R(17),
- B(Mov), R(8), R(15),
- B(CallJSRuntime), U8(%promise_internal_reject), R(14), U8(4),
+ B(Star), R(16),
+ B(Mov), R(8), R(14),
+ B(CallJSRuntime), U8(%promise_internal_reject), R(14), U8(3),
B(PopContext), R(13),
B(LdaSmi), I8(1),
B(Star), R(9),
@@ -1116,20 +1069,15 @@ bytecodes: [
B(LdaTheHole),
B(SetPendingMessage),
B(Star), R(11),
- B(LdaUndefined),
- B(Star), R(12),
- B(Mov), R(8), R(13),
- B(CallJSRuntime), U8(%async_function_promise_release), R(12), U8(2),
+ B(CallJSRuntime), U8(%async_function_promise_release), R(8), U8(1),
B(Ldar), R(11),
B(SetPendingMessage),
B(Ldar), R(9),
B(SwitchOnSmiNoFeedback), U8(15), U8(3), I8(0),
- B(Jump), U8(24),
- B(LdaUndefined),
- B(Star), R(12),
- B(Mov), R(8), R(13),
- B(Mov), R(10), R(14),
- B(CallJSRuntime), U8(%promise_resolve), R(12), U8(3),
+ B(Jump), U8(21),
+ B(Mov), R(8), R(12),
+ B(Mov), R(10), R(13),
+ B(CallJSRuntime), U8(%promise_resolve), R(12), U8(2),
B(Ldar), R(8),
/* 96 S> */ B(Return),
B(Ldar), R(10),
@@ -1156,14 +1104,14 @@ constant pool: [
Smi [14],
FIXED_ARRAY_TYPE,
Smi [6],
+ Smi [19],
Smi [22],
- Smi [25],
]
handlers: [
- [13, 372, 380],
- [16, 326, 328],
- [30, 156, 164],
- [33, 116, 118],
- [225, 235, 237],
+ [10, 363, 371],
+ [13, 320, 322],
+ [27, 153, 161],
+ [30, 113, 115],
+ [222, 232, 234],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
index 4bca1ed747..55f94ab321 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
@@ -63,15 +63,16 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 44
+bytecode array length: 46
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
- /* 68 S> */ B(JumpIfUndefined), U8(37),
- B(JumpIfNull), U8(35),
+ /* 68 S> */ B(JumpIfUndefined), U8(39),
+ B(JumpIfNull), U8(37),
B(ToObject), R(3),
- B(ForInPrepare), R(3), R(4),
+ B(ForInEnumerate), R(3),
+ B(ForInPrepare), R(4), U8(0),
B(LdaZero),
B(Star), R(7),
/* 63 S> */ B(ForInContinue), R(7), R(6),
@@ -101,16 +102,17 @@ snippet: "
"
frame size: 9
parameter count: 1
-bytecode array length: 56
+bytecode array length: 58
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 59 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
- B(JumpIfUndefined), U8(46),
- B(JumpIfNull), U8(44),
+ B(JumpIfUndefined), U8(48),
+ B(JumpIfNull), U8(46),
B(ToObject), R(3),
- B(ForInPrepare), R(3), R(4),
+ B(ForInEnumerate), R(3),
+ B(ForInPrepare), R(4), U8(2),
B(LdaZero),
B(Star), R(7),
/* 54 S> */ B(ForInContinue), R(7), R(6),
@@ -146,16 +148,17 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 85
+bytecode array length: 87
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(1),
B(Mov), R(1), R(0),
/* 77 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
- B(JumpIfUndefined), U8(70),
- B(JumpIfNull), U8(68),
+ B(JumpIfUndefined), U8(72),
+ B(JumpIfNull), U8(70),
B(ToObject), R(1),
- B(ForInPrepare), R(1), R(2),
+ B(ForInEnumerate), R(1),
+ B(ForInPrepare), R(2), U8(12),
B(LdaZero),
B(Star), R(5),
/* 68 S> */ B(ForInContinue), R(5), R(4),
@@ -164,7 +167,7 @@ bytecodes: [
B(JumpIfUndefined), U8(41),
B(Star), R(6),
B(Ldar), R(6),
- /* 67 E> */ B(StaNamedPropertySloppy), R(0), U8(2), U8(10),
+ /* 67 E> */ B(StaNamedProperty), R(0), U8(2), U8(10),
/* 62 E> */ B(StackCheck),
/* 100 S> */ B(LdaNamedProperty), R(0), U8(2), U8(4),
B(Star), R(6),
@@ -199,16 +202,17 @@ snippet: "
"
frame size: 9
parameter count: 1
-bytecode array length: 62
+bytecode array length: 64
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star), R(0),
/* 72 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
- B(JumpIfUndefined), U8(49),
- B(JumpIfNull), U8(47),
+ B(JumpIfUndefined), U8(51),
+ B(JumpIfNull), U8(49),
B(ToObject), R(1),
- B(ForInPrepare), R(1), R(2),
+ B(ForInEnumerate), R(1),
+ B(ForInPrepare), R(2), U8(8),
B(LdaZero),
B(Star), R(5),
/* 65 S> */ B(ForInContinue), R(5), R(4),
@@ -219,7 +223,7 @@ bytecodes: [
B(LdaZero),
B(Star), R(8),
B(Ldar), R(6),
- /* 64 E> */ B(StaKeyedPropertySloppy), R(0), R(8), U8(6),
+ /* 64 E> */ B(StaKeyedProperty), R(0), R(8), U8(6),
/* 59 E> */ B(StackCheck),
/* 83 S> */ B(LdaSmi), I8(3),
/* 91 E> */ B(LdaKeyedProperty), R(0), U8(4),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
index 91aace0208..9ef001a264 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
@@ -86,7 +86,7 @@ bytecodes: [
B(TestTypeOf), U8(5),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(134),
+ B(Wide), B(LdaSmi), I16(137),
B(Star), R(11),
B(LdaConstant), U8(8),
B(Star), R(12),
@@ -227,7 +227,7 @@ bytecodes: [
B(TestTypeOf), U8(5),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(134),
+ B(Wide), B(LdaSmi), I16(137),
B(Star), R(12),
B(LdaConstant), U8(8),
B(Star), R(13),
@@ -380,7 +380,7 @@ bytecodes: [
B(TestTypeOf), U8(5),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(134),
+ B(Wide), B(LdaSmi), I16(137),
B(Star), R(11),
B(LdaConstant), U8(8),
B(Star), R(12),
@@ -476,7 +476,7 @@ bytecodes: [
B(LdaSmi), I8(2),
B(Star), R(3),
B(Ldar), R(4),
- B(StaNamedPropertySloppy), R(0), U8(6), U8(14),
+ B(StaNamedProperty), R(0), U8(6), U8(14),
/* 62 E> */ B(StackCheck),
/* 96 S> */ B(LdaNamedProperty), R(0), U8(6), U8(16),
B(Star), R(8),
@@ -523,7 +523,7 @@ bytecodes: [
B(TestTypeOf), U8(5),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(134),
+ B(Wide), B(LdaSmi), I16(137),
B(Star), R(10),
B(LdaConstant), U8(10),
B(Star), R(11),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
index 3363e94ee9..af992d39ec 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
@@ -90,7 +90,7 @@ bytecodes: [
B(TestTypeOf), U8(5),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(134),
+ B(Wide), B(LdaSmi), I16(137),
B(Star), R(13),
B(LdaConstant), U8(7),
B(Star), R(14),
@@ -268,7 +268,7 @@ bytecodes: [
B(TestTypeOf), U8(5),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(134),
+ B(Wide), B(LdaSmi), I16(137),
B(Star), R(13),
B(LdaConstant), U8(11),
B(Star), R(14),
@@ -422,7 +422,7 @@ bytecodes: [
B(TestTypeOf), U8(5),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(134),
+ B(Wide), B(LdaSmi), I16(137),
B(Star), R(11),
B(LdaConstant), U8(9),
B(Star), R(12),
@@ -524,7 +524,7 @@ bytecodes: [
B(JumpIfUndefined), U8(6),
B(Ldar), R(6),
B(JumpIfNotNull), U8(16),
- B(LdaSmi), I8(63),
+ B(LdaSmi), I8(67),
B(Star), R(17),
B(LdaConstant), U8(4),
B(Star), R(18),
@@ -580,7 +580,7 @@ bytecodes: [
B(TestTypeOf), U8(5),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(134),
+ B(Wide), B(LdaSmi), I16(137),
B(Star), R(16),
B(LdaConstant), U8(9),
B(Star), R(17),
@@ -647,18 +647,16 @@ snippet: "
"
frame size: 19
parameter count: 2
-bytecode array length: 348
+bytecode array length: 341
bytecodes: [
B(Ldar), R(3),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(3), U8(1),
B(PushContext), R(12),
B(RestoreGeneratorState), R(3),
B(Star), R(11),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kAbort), R(12), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(11),
B(CreateFunctionContext), U8(1),
@@ -756,7 +754,7 @@ bytecodes: [
B(TestTypeOf), U8(5),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(134),
+ B(Wide), B(LdaSmi), I16(137),
B(Star), R(16),
B(LdaConstant), U8(10),
B(Star), R(17),
@@ -796,7 +794,7 @@ bytecodes: [
/* 55 S> */ B(Return),
]
constant pool: [
- Smi [44],
+ Smi [37],
Smi [10],
Smi [7],
SYMBOL_TYPE,
@@ -810,9 +808,9 @@ constant pool: [
FIXED_ARRAY_TYPE,
]
handlers: [
- [92, 210, 218],
- [95, 174, 176],
- [278, 288, 290],
+ [85, 203, 211],
+ [88, 167, 169],
+ [271, 281, 283],
]
---
@@ -824,18 +822,16 @@ snippet: "
"
frame size: 18
parameter count: 2
-bytecode array length: 422
+bytecode array length: 408
bytecodes: [
B(Ldar), R(2),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(2), U8(1),
B(PushContext), R(11),
B(RestoreGeneratorState), R(2),
B(Star), R(10),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(11),
- B(CallRuntime), U16(Runtime::kAbort), R(11), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(10),
B(CreateFunctionContext), U8(1),
@@ -876,10 +872,8 @@ bytecodes: [
B(SwitchOnSmiNoFeedback), U8(5), U8(1), I8(1),
B(LdaSmi), I8(-2),
/* 35 E> */ B(TestEqualStrictNoFeedback), R(10),
- B(JumpIfTrue), U8(11),
- B(LdaSmi), I8(45),
- B(Star), R(16),
- B(CallRuntime), U16(Runtime::kAbort), R(16), U8(1),
+ B(JumpIfTrue), U8(4),
+ B(Abort), U8(43),
/* 30 S> */ B(LdaNamedProperty), R(4), U8(6), U8(6),
B(Star), R(16),
B(CallProperty0), R(16), R(4), U8(4),
@@ -918,7 +912,7 @@ bytecodes: [
B(Jump), U8(58),
B(LdaZero),
B(Star), R(6),
- B(JumpLoop), U8(120), I8(0),
+ B(JumpLoop), U8(113), I8(0),
B(Jump), U8(36),
B(Star), R(16),
B(Ldar), R(closure),
@@ -959,7 +953,7 @@ bytecodes: [
B(TestTypeOf), U8(5),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(134),
+ B(Wide), B(LdaSmi), I16(137),
B(Star), R(15),
B(LdaConstant), U8(14),
B(Star), R(16),
@@ -1001,12 +995,12 @@ bytecodes: [
/* 49 S> */ B(Return),
]
constant pool: [
- Smi [44],
- Smi [104],
+ Smi [37],
+ Smi [97],
Smi [10],
Smi [7],
SYMBOL_TYPE,
- Smi [82],
+ Smi [75],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -1021,9 +1015,9 @@ constant pool: [
Smi [9],
]
handlers: [
- [92, 277, 285],
- [95, 241, 243],
- [346, 356, 358],
+ [85, 263, 271],
+ [88, 227, 229],
+ [332, 342, 344],
]
---
@@ -1035,16 +1029,14 @@ snippet: "
"
frame size: 23
parameter count: 2
-bytecode array length: 401
+bytecode array length: 386
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(12),
B(Ldar), R(arg0),
B(StaCurrentContextSlot), U8(4),
/* 16 E> */ B(StackCheck),
- B(LdaUndefined),
- B(Star), R(13),
- B(CallJSRuntime), U8(%async_function_promise_create), R(13), U8(1),
+ B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
B(Star), R(11),
B(Mov), R(context), R(15),
B(Mov), R(context), R(16),
@@ -1124,7 +1116,7 @@ bytecodes: [
B(TestTypeOf), U8(5),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(134),
+ B(Wide), B(LdaSmi), I16(137),
B(Star), R(20),
B(LdaConstant), U8(7),
B(Star), R(21),
@@ -1161,16 +1153,14 @@ bytecodes: [
B(Ldar), R(18),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(17),
- B(LdaUndefined),
- B(Star), R(19),
- B(Mov), R(11), R(18),
- B(CallJSRuntime), U8(%promise_resolve), R(17), U8(3),
+ B(Star), R(18),
+ B(Mov), R(11), R(17),
+ B(CallJSRuntime), U8(%promise_resolve), R(17), U8(2),
B(LdaZero),
B(Star), R(13),
B(Mov), R(11), R(14),
- B(Jump), U8(61),
- B(Jump), U8(45),
+ B(Jump), U8(58),
+ B(Jump), U8(42),
B(Star), R(17),
B(Ldar), R(closure),
B(CreateCatchContext), R(17), U8(4), U8(9),
@@ -1179,14 +1169,12 @@ bytecodes: [
B(SetPendingMessage),
B(Ldar), R(16),
B(PushContext), R(17),
- B(LdaUndefined),
- B(Star), R(18),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(20),
+ B(Star), R(19),
B(LdaFalse),
- B(Star), R(21),
- B(Mov), R(11), R(19),
- B(CallJSRuntime), U8(%promise_internal_reject), R(18), U8(4),
+ B(Star), R(20),
+ B(Mov), R(11), R(18),
+ B(CallJSRuntime), U8(%promise_internal_reject), R(18), U8(3),
B(PopContext), R(17),
B(LdaZero),
B(Star), R(13),
@@ -1202,10 +1190,7 @@ bytecodes: [
B(LdaTheHole),
B(SetPendingMessage),
B(Star), R(15),
- B(LdaUndefined),
- B(Star), R(16),
- B(Mov), R(11), R(17),
- B(CallJSRuntime), U8(%async_function_promise_release), R(16), U8(2),
+ B(CallJSRuntime), U8(%async_function_promise_release), R(11), U8(1),
B(Ldar), R(15),
B(SetPendingMessage),
B(Ldar), R(13),
@@ -1233,11 +1218,11 @@ constant pool: [
Smi [9],
]
handlers: [
- [21, 354, 362],
- [24, 309, 311],
- [30, 152, 160],
- [33, 112, 114],
- [220, 230, 232],
+ [18, 345, 353],
+ [21, 303, 305],
+ [27, 149, 157],
+ [30, 109, 111],
+ [217, 227, 229],
]
---
@@ -1249,18 +1234,16 @@ snippet: "
"
frame size: 24
parameter count: 2
-bytecode array length: 509
+bytecode array length: 480
bytecodes: [
B(Ldar), R(2),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(2), U8(1),
B(PushContext), R(12),
B(RestoreGeneratorState), R(2),
B(Star), R(11),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kAbort), R(12), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(11),
B(CreateFunctionContext), U8(1),
@@ -1272,9 +1255,7 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(13), U8(2),
B(Star), R(2),
/* 16 E> */ B(StackCheck),
- B(LdaUndefined),
- B(Star), R(13),
- B(CallJSRuntime), U8(%async_function_promise_create), R(13), U8(1),
+ B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
B(Star), R(10),
B(Mov), R(context), R(15),
B(Mov), R(context), R(16),
@@ -1294,10 +1275,8 @@ bytecodes: [
B(SwitchOnSmiNoFeedback), U8(2), U8(1), I8(0),
B(LdaSmi), I8(-2),
/* 40 E> */ B(TestEqualStrictNoFeedback), R(11),
- B(JumpIfTrue), U8(11),
- B(LdaSmi), I8(45),
- B(Star), R(21),
- B(CallRuntime), U16(Runtime::kAbort), R(21), U8(1),
+ B(JumpIfTrue), U8(4),
+ B(Abort), U8(43),
/* 35 S> */ B(LdaNamedProperty), R(4), U8(3), U8(6),
B(Star), R(21),
B(CallProperty0), R(21), R(4), U8(4),
@@ -1335,7 +1314,7 @@ bytecodes: [
B(ReThrow),
B(LdaZero),
B(Star), R(6),
- B(JumpLoop), U8(118), I8(0),
+ B(JumpLoop), U8(111), I8(0),
B(Jump), U8(40),
B(Star), R(21),
B(Ldar), R(closure),
@@ -1379,7 +1358,7 @@ bytecodes: [
B(TestTypeOf), U8(5),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(134),
+ B(Wide), B(LdaSmi), I16(137),
B(Star), R(20),
B(LdaConstant), U8(9),
B(Star), R(21),
@@ -1416,16 +1395,14 @@ bytecodes: [
B(Ldar), R(18),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(17),
- B(LdaUndefined),
- B(Star), R(19),
- B(Mov), R(10), R(18),
- B(CallJSRuntime), U8(%promise_resolve), R(17), U8(3),
+ B(Star), R(18),
+ B(Mov), R(10), R(17),
+ B(CallJSRuntime), U8(%promise_resolve), R(17), U8(2),
B(LdaZero),
B(Star), R(13),
B(Mov), R(10), R(14),
- B(Jump), U8(61),
- B(Jump), U8(45),
+ B(Jump), U8(58),
+ B(Jump), U8(42),
B(Star), R(17),
B(Ldar), R(closure),
B(CreateCatchContext), R(17), U8(6), U8(11),
@@ -1434,14 +1411,12 @@ bytecodes: [
B(SetPendingMessage),
B(Ldar), R(16),
B(PushContext), R(17),
- B(LdaUndefined),
- B(Star), R(18),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(20),
+ B(Star), R(19),
B(LdaFalse),
- B(Star), R(21),
- B(Mov), R(10), R(19),
- B(CallJSRuntime), U8(%promise_internal_reject), R(18), U8(4),
+ B(Star), R(20),
+ B(Mov), R(10), R(18),
+ B(CallJSRuntime), U8(%promise_internal_reject), R(18), U8(3),
B(PopContext), R(17),
B(LdaZero),
B(Star), R(13),
@@ -1457,10 +1432,7 @@ bytecodes: [
B(LdaTheHole),
B(SetPendingMessage),
B(Star), R(15),
- B(LdaUndefined),
- B(Star), R(16),
- B(Mov), R(10), R(17),
- B(CallJSRuntime), U8(%async_function_promise_release), R(16), U8(2),
+ B(CallJSRuntime), U8(%async_function_promise_release), R(10), U8(1),
B(Ldar), R(15),
B(SetPendingMessage),
B(Ldar), R(13),
@@ -1474,9 +1446,9 @@ bytecodes: [
/* 54 S> */ B(Return),
]
constant pool: [
- Smi [85],
+ Smi [75],
SYMBOL_TYPE,
- Smi [85],
+ Smi [78],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -1490,10 +1462,10 @@ constant pool: [
Smi [9],
]
handlers: [
- [64, 462, 470],
- [67, 417, 419],
- [73, 260, 268],
- [76, 220, 222],
- [328, 338, 340],
+ [54, 439, 447],
+ [57, 397, 399],
+ [63, 243, 251],
+ [66, 203, 205],
+ [311, 321, 323],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
index afb949fe0a..bf2eb53a4f 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
@@ -13,18 +13,16 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 80
+bytecode array length: 73
bytecodes: [
B(Ldar), R(0),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(0), U8(1),
B(PushContext), R(2),
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(1),
B(Mov), R(closure), R(2),
@@ -49,7 +47,7 @@ bytecodes: [
/* 16 S> */ B(Return),
]
constant pool: [
- Smi [36],
+ Smi [29],
Smi [10],
Smi [7],
]
@@ -63,18 +61,16 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 125
+bytecode array length: 118
bytecodes: [
B(Ldar), R(0),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(0), U8(1),
B(PushContext), R(2),
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(1),
B(Mov), R(closure), R(2),
@@ -117,8 +113,8 @@ bytecodes: [
/* 25 S> */ B(Return),
]
constant pool: [
- Smi [36],
- Smi [81],
+ Smi [29],
+ Smi [74],
Smi [10],
Smi [7],
Smi [10],
@@ -134,18 +130,16 @@ snippet: "
"
frame size: 17
parameter count: 1
-bytecode array length: 416
+bytecode array length: 402
bytecodes: [
B(Ldar), R(2),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(2), U8(1),
B(PushContext), R(11),
B(RestoreGeneratorState), R(2),
B(Star), R(10),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(11),
- B(CallRuntime), U16(Runtime::kAbort), R(11), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(10),
B(Mov), R(closure), R(11),
@@ -182,10 +176,8 @@ bytecodes: [
B(SwitchOnSmiNoFeedback), U8(6), U8(1), I8(1),
B(LdaSmi), I8(-2),
/* 30 E> */ B(TestEqualStrictNoFeedback), R(10),
- B(JumpIfTrue), U8(11),
- B(LdaSmi), I8(45),
- B(Star), R(15),
- B(CallRuntime), U16(Runtime::kAbort), R(15), U8(1),
+ B(JumpIfTrue), U8(4),
+ B(Abort), U8(43),
/* 25 S> */ B(LdaNamedProperty), R(4), U8(7), U8(7),
B(Star), R(15),
B(CallProperty0), R(15), R(4), U8(5),
@@ -224,7 +216,7 @@ bytecodes: [
B(Jump), U8(58),
B(LdaZero),
B(Star), R(6),
- B(JumpLoop), U8(120), I8(0),
+ B(JumpLoop), U8(113), I8(0),
B(Jump), U8(36),
B(Star), R(15),
B(Ldar), R(closure),
@@ -265,7 +257,7 @@ bytecodes: [
B(TestTypeOf), U8(5),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(134),
+ B(Wide), B(LdaSmi), I16(137),
B(Star), R(14),
B(LdaConstant), U8(15),
B(Star), R(15),
@@ -307,13 +299,13 @@ bytecodes: [
/* 44 S> */ B(Return),
]
constant pool: [
- Smi [36],
- Smi [98],
+ Smi [29],
+ Smi [91],
Smi [10],
Smi [7],
TUPLE2_TYPE,
SYMBOL_TYPE,
- Smi [82],
+ Smi [75],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -328,9 +320,9 @@ constant pool: [
Smi [9],
]
handlers: [
- [84, 271, 279],
- [87, 235, 237],
- [340, 350, 352],
+ [77, 257, 265],
+ [80, 221, 223],
+ [326, 336, 338],
]
---
@@ -341,18 +333,16 @@ snippet: "
"
frame size: 9
parameter count: 1
-bytecode array length: 279
+bytecode array length: 265
bytecodes: [
B(Ldar), R(0),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(0), U8(1),
B(PushContext), R(2),
B(RestoreGeneratorState), R(0),
B(Star), R(1),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(1),
B(Mov), R(closure), R(2),
@@ -391,10 +381,8 @@ bytecodes: [
B(SwitchOnSmiNoFeedback), U8(6), U8(1), I8(1),
B(LdaSmi), I8(-2),
B(TestEqualStrictNoFeedback), R(1),
- B(JumpIfTrue), U8(11),
- B(LdaSmi), I8(45),
- B(Star), R(8),
- B(CallRuntime), U16(Runtime::kAbort), R(8), U8(1),
+ B(JumpIfTrue), U8(4),
+ B(Abort), U8(43),
B(Ldar), R(3),
B(SwitchOnSmiNoFeedback), U8(7), U8(2), I8(1),
B(LdaNamedProperty), R(4), U8(9), U8(8),
@@ -438,7 +426,7 @@ bytecodes: [
B(Star), R(5),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(Star), R(3),
- B(JumpLoop), U8(146), I8(0),
+ B(JumpLoop), U8(139), I8(0),
B(LdaNamedProperty), R(2), U8(13), U8(14),
B(Star), R(4),
B(LdaSmi), I8(1),
@@ -450,13 +438,13 @@ bytecodes: [
/* 54 S> */ B(Return),
]
constant pool: [
- Smi [36],
- Smi [99],
+ Smi [29],
+ Smi [92],
Smi [10],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["g"],
SYMBOL_TYPE,
- Smi [124],
+ Smi [117],
Smi [17],
Smi [37],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
index 0068d80e71..2e0b987b22 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
@@ -36,12 +36,12 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 17
+bytecode array length: 16
bytecodes: [
/* 26 E> */ B(StackCheck),
/* 31 S> */ B(LdaGlobal), U8(0), U8(0),
- B(ToNumber), R(0), U8(4),
- B(Ldar), R(0),
+ B(ToNumber), U8(4),
+ B(Star), R(0),
B(Dec), U8(4),
/* 44 E> */ B(StaGlobalSloppy), U8(0), U8(2),
B(Ldar), R(0),
@@ -83,12 +83,12 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 17
+bytecode array length: 16
bytecodes: [
/* 27 E> */ B(StackCheck),
/* 32 S> */ B(LdaGlobal), U8(0), U8(0),
- B(ToNumber), R(0), U8(4),
- B(Ldar), R(0),
+ B(ToNumber), U8(4),
+ B(Star), R(0),
B(Inc), U8(4),
/* 50 E> */ B(StaGlobalSloppy), U8(0), U8(2),
B(Ldar), R(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
index b38d352ec6..7ee726bb85 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
@@ -13,18 +13,16 @@ snippet: "
"
frame size: 5
parameter count: 2
-bytecode array length: 103
+bytecode array length: 96
bytecodes: [
B(Ldar), R(1),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
B(PushContext), R(2),
B(RestoreGeneratorState), R(1),
B(Star), R(0),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(0),
B(LdaConstant), U8(1),
@@ -58,7 +56,7 @@ bytecodes: [
/* 13 S> */ B(Return),
]
constant pool: [
- Smi [54],
+ Smi [47],
FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
@@ -72,18 +70,16 @@ snippet: "
"
frame size: 5
parameter count: 2
-bytecode array length: 103
+bytecode array length: 96
bytecodes: [
B(Ldar), R(1),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
B(PushContext), R(2),
B(RestoreGeneratorState), R(1),
B(Star), R(0),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(0),
B(LdaConstant), U8(1),
@@ -117,7 +113,7 @@ bytecodes: [
/* 24 S> */ B(Return),
]
constant pool: [
- Smi [54],
+ Smi [47],
FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
@@ -133,18 +129,16 @@ snippet: "
"
frame size: 6
parameter count: 2
-bytecode array length: 147
+bytecode array length: 140
bytecodes: [
B(Ldar), R(1),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
B(PushContext), R(2),
B(RestoreGeneratorState), R(1),
B(Star), R(0),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(0),
B(LdaConstant), U8(1),
@@ -197,7 +191,7 @@ bytecodes: [
/* 64 S> */ B(Return),
]
constant pool: [
- Smi [54],
+ Smi [47],
FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
@@ -215,18 +209,16 @@ snippet: "
"
frame size: 5
parameter count: 2
-bytecode array length: 145
+bytecode array length: 137
bytecodes: [
B(Ldar), R(1),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
B(PushContext), R(2),
B(RestoreGeneratorState), R(1),
B(Star), R(0),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(0),
B(LdaConstant), U8(1),
@@ -267,8 +259,8 @@ bytecodes: [
/* 34 S> */ B(LdaUndefined),
/* 34 E> */ B(StaCurrentContextSlot), U8(4),
/* 39 S> */ B(LdaModuleVariable), I8(1), U8(1),
- B(ToNumber), R(4), U8(1),
- B(Ldar), R(4),
+ B(ToNumber), U8(1),
+ B(Star), R(4),
B(Inc), U8(1),
/* 42 E> */ B(StaModuleVariable), I8(1), U8(1),
B(Ldar), R(4),
@@ -278,7 +270,7 @@ bytecodes: [
/* 49 S> */ B(Return),
]
constant pool: [
- Smi [54],
+ Smi [47],
FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
@@ -295,18 +287,16 @@ snippet: "
"
frame size: 5
parameter count: 2
-bytecode array length: 149
+bytecode array length: 141
bytecodes: [
B(Ldar), R(1),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
B(PushContext), R(2),
B(RestoreGeneratorState), R(1),
B(Star), R(0),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(0),
B(LdaConstant), U8(1),
@@ -349,8 +339,8 @@ bytecodes: [
/* 34 S> */ B(LdaUndefined),
/* 34 E> */ B(StaCurrentContextSlot), U8(4),
/* 39 S> */ B(LdaModuleVariable), I8(1), U8(1),
- B(ToNumber), R(4), U8(1),
- B(Ldar), R(4),
+ B(ToNumber), U8(1),
+ B(Star), R(4),
B(Inc), U8(1),
/* 42 E> */ B(StaModuleVariable), I8(1), U8(1),
B(Ldar), R(4),
@@ -360,7 +350,7 @@ bytecodes: [
/* 49 S> */ B(Return),
]
constant pool: [
- Smi [58],
+ Smi [51],
FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
@@ -377,18 +367,16 @@ snippet: "
"
frame size: 5
parameter count: 2
-bytecode array length: 153
+bytecode array length: 145
bytecodes: [
B(Ldar), R(1),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
B(PushContext), R(2),
B(RestoreGeneratorState), R(1),
B(Star), R(0),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(0),
B(LdaConstant), U8(1),
@@ -431,8 +419,8 @@ bytecodes: [
/* 36 S> */ B(LdaUndefined),
/* 36 E> */ B(StaCurrentContextSlot), U8(4),
/* 41 S> */ B(LdaModuleVariable), I8(1), U8(1),
- B(ToNumber), R(4), U8(1),
- B(Ldar), R(4),
+ B(ToNumber), U8(1),
+ B(Star), R(4),
B(Inc), U8(1),
/* 44 E> */ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
B(Ldar), R(4),
@@ -442,7 +430,7 @@ bytecodes: [
/* 51 S> */ B(Return),
]
constant pool: [
- Smi [58],
+ Smi [51],
FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
@@ -457,18 +445,16 @@ snippet: "
"
frame size: 5
parameter count: 2
-bytecode array length: 114
+bytecode array length: 107
bytecodes: [
B(Ldar), R(1),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
B(PushContext), R(2),
B(RestoreGeneratorState), R(1),
B(Star), R(0),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(0),
B(LdaConstant), U8(1),
@@ -506,7 +492,7 @@ bytecodes: [
/* 32 S> */ B(Return),
]
constant pool: [
- Smi [58],
+ Smi [51],
FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
@@ -521,18 +507,16 @@ snippet: "
"
frame size: 8
parameter count: 2
-bytecode array length: 147
+bytecode array length: 140
bytecodes: [
B(Ldar), R(1),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
B(PushContext), R(2),
B(RestoreGeneratorState), R(1),
B(Star), R(0),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(0),
B(LdaConstant), U8(1),
@@ -582,7 +566,7 @@ bytecodes: [
/* 26 S> */ B(Return),
]
constant pool: [
- Smi [58],
+ Smi [51],
FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
@@ -597,18 +581,16 @@ snippet: "
"
frame size: 5
parameter count: 2
-bytecode array length: 103
+bytecode array length: 96
bytecodes: [
B(Ldar), R(1),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
B(PushContext), R(2),
B(RestoreGeneratorState), R(1),
B(Star), R(0),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(0),
B(LdaConstant), U8(1),
@@ -642,7 +624,7 @@ bytecodes: [
/* 30 S> */ B(Return),
]
constant pool: [
- Smi [54],
+ Smi [47],
FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
@@ -656,18 +638,16 @@ snippet: "
"
frame size: 5
parameter count: 2
-bytecode array length: 103
+bytecode array length: 96
bytecodes: [
B(Ldar), R(1),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
B(PushContext), R(2),
B(RestoreGeneratorState), R(1),
B(Star), R(0),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(0),
B(LdaConstant), U8(1),
@@ -701,7 +681,7 @@ bytecodes: [
/* 19 S> */ B(Return),
]
constant pool: [
- Smi [54],
+ Smi [47],
FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
@@ -716,18 +696,16 @@ snippet: "
"
frame size: 7
parameter count: 2
-bytecode array length: 141
+bytecode array length: 134
bytecodes: [
B(Ldar), R(1),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
B(PushContext), R(2),
B(RestoreGeneratorState), R(1),
B(Star), R(0),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(0),
B(LdaConstant), U8(1),
@@ -775,7 +753,7 @@ bytecodes: [
/* 45 S> */ B(Return),
]
constant pool: [
- Smi [64],
+ Smi [57],
FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
index 683b091356..9f701feb05 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
@@ -90,9 +90,9 @@ snippet: "
class A { constructor(...args) { this.args = args; } }
new A(0, ...[1, 2, 3], 4);
"
-frame size: 8
+frame size: 7
parameter count: 1
-bytecode array length: 90
+bytecode array length: 81
bytecodes: [
/* 30 E> */ B(StackCheck),
B(CreateClosure), U8(0), U8(0), U8(2),
@@ -110,24 +110,18 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kToFastProperties), R(2), U8(1),
B(Star), R(0),
B(Star), R(1),
- /* 89 S> */ B(LdaUndefined),
- B(Star), R(2),
- B(LdaUndefined),
- B(Star), R(4),
- /* 93 E> */ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
- B(Star), R(5),
- B(LdaUndefined),
- B(Star), R(6),
+ /* 89 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
+ B(Star), R(3),
B(CreateArrayLiteral), U8(2), U8(2), U8(37),
- B(Star), R(7),
- B(CallJSRuntime), U8(%spread_iterable), R(6), U8(2),
- B(Star), R(6),
- B(CreateArrayLiteral), U8(3), U8(3), U8(37),
- B(Star), R(7),
- B(CallJSRuntime), U8(%spread_arguments), R(4), U8(4),
B(Star), R(4),
- B(Mov), R(0), R(3),
- B(CallJSRuntime), U8(%reflect_construct), R(2), U8(3),
+ B(CallJSRuntime), U8(%spread_iterable), R(4), U8(1),
+ B(Star), R(4),
+ B(CreateArrayLiteral), U8(3), U8(3), U8(37),
+ B(Star), R(5),
+ B(CallJSRuntime), U8(%spread_arguments), R(3), U8(3),
+ B(Star), R(3),
+ B(Mov), R(1), R(2),
+ B(CallJSRuntime), U8(%reflect_construct), R(2), U8(2),
B(LdaUndefined),
/* 116 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
index 5aab58c78f..242d988f63 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
@@ -338,10 +338,10 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
- /* 50 S> */ B(CreateObjectLiteral), U8(1), U8(1), U8(41), R(1),
+ /* 50 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41), R(1),
/* 60 E> */ B(ToName), R(2),
B(LdaSmi), I8(1),
- B(StaDataPropertyInLiteral), R(1), R(2), U8(0), U8(2),
+ B(StaDataPropertyInLiteral), R(1), R(2), U8(0), U8(1),
B(CreateEmptyObjectLiteral),
B(Star), R(3),
B(Mov), R(1), R(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden
index 31036735ae..41f2290805 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden
@@ -17,7 +17,7 @@ bytecode array length: 9
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 16 S> */ B(LdaConstant), U8(0),
- /* 23 E> */ B(StaNamedPropertySloppy), R(arg0), U8(1), U8(0),
+ /* 23 E> */ B(StaNamedProperty), R(arg0), U8(1), U8(0),
B(LdaUndefined),
/* 32 S> */ B(Return),
]
@@ -39,7 +39,7 @@ bytecode array length: 9
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 16 S> */ B(LdaConstant), U8(0),
- /* 25 E> */ B(StaNamedPropertySloppy), R(arg0), U8(1), U8(0),
+ /* 25 E> */ B(StaNamedProperty), R(arg0), U8(1), U8(0),
B(LdaUndefined),
/* 34 S> */ B(Return),
]
@@ -63,7 +63,7 @@ bytecodes: [
/* 16 S> */ B(LdaSmi), I8(100),
B(Star), R(1),
B(LdaConstant), U8(0),
- /* 23 E> */ B(StaKeyedPropertySloppy), R(arg0), R(1), U8(0),
+ /* 23 E> */ B(StaKeyedProperty), R(arg0), R(1), U8(0),
B(LdaUndefined),
/* 32 S> */ B(Return),
]
@@ -84,7 +84,7 @@ bytecode array length: 9
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 19 S> */ B(LdaConstant), U8(0),
- /* 24 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(0),
+ /* 24 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(0),
B(LdaUndefined),
/* 33 S> */ B(Return),
]
@@ -106,7 +106,7 @@ bytecodes: [
/* 10 E> */ B(StackCheck),
/* 16 S> */ B(LdaSmi), I8(-124),
/* 26 E> */ B(LdaKeyedProperty), R(arg0), U8(0),
- /* 23 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(2),
+ /* 23 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(2),
B(LdaUndefined),
/* 34 S> */ B(Return),
]
@@ -127,7 +127,7 @@ bytecode array length: 9
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 30 S> */ B(LdaConstant), U8(0),
- /* 37 E> */ B(StaNamedPropertyStrict), R(arg0), U8(1), U8(0),
+ /* 37 E> */ B(StaNamedProperty), R(arg0), U8(1), U8(0),
B(LdaUndefined),
/* 46 S> */ B(Return),
]
@@ -149,7 +149,7 @@ bytecode array length: 9
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 33 S> */ B(LdaConstant), U8(0),
- /* 38 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(0),
+ /* 38 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(0),
B(LdaUndefined),
/* 47 S> */ B(Return),
]
@@ -300,263 +300,263 @@ bytecode array length: 781
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 18 S> */ B(LdaSmi), I8(1),
- /* 25 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(0),
+ /* 25 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(0),
/* 32 S> */ B(LdaSmi), I8(1),
- /* 39 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(2),
+ /* 39 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(2),
/* 46 S> */ B(LdaSmi), I8(1),
- /* 53 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(4),
+ /* 53 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(4),
/* 60 S> */ B(LdaSmi), I8(1),
- /* 67 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(6),
+ /* 67 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(6),
/* 74 S> */ B(LdaSmi), I8(1),
- /* 81 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(8),
+ /* 81 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(8),
/* 88 S> */ B(LdaSmi), I8(1),
- /* 95 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(10),
+ /* 95 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(10),
/* 102 S> */ B(LdaSmi), I8(1),
- /* 109 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(12),
+ /* 109 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(12),
/* 116 S> */ B(LdaSmi), I8(1),
- /* 123 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(14),
+ /* 123 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(14),
/* 130 S> */ B(LdaSmi), I8(1),
- /* 137 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(16),
+ /* 137 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(16),
/* 144 S> */ B(LdaSmi), I8(1),
- /* 151 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(18),
+ /* 151 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(18),
/* 158 S> */ B(LdaSmi), I8(1),
- /* 165 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(20),
+ /* 165 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(20),
/* 172 S> */ B(LdaSmi), I8(1),
- /* 179 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(22),
+ /* 179 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(22),
/* 186 S> */ B(LdaSmi), I8(1),
- /* 193 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(24),
+ /* 193 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(24),
/* 200 S> */ B(LdaSmi), I8(1),
- /* 207 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(26),
+ /* 207 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(26),
/* 214 S> */ B(LdaSmi), I8(1),
- /* 221 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(28),
+ /* 221 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(28),
/* 228 S> */ B(LdaSmi), I8(1),
- /* 235 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(30),
+ /* 235 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(30),
/* 242 S> */ B(LdaSmi), I8(1),
- /* 249 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(32),
+ /* 249 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(32),
/* 256 S> */ B(LdaSmi), I8(1),
- /* 263 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(34),
+ /* 263 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(34),
/* 270 S> */ B(LdaSmi), I8(1),
- /* 277 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(36),
+ /* 277 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(36),
/* 284 S> */ B(LdaSmi), I8(1),
- /* 291 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(38),
+ /* 291 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(38),
/* 298 S> */ B(LdaSmi), I8(1),
- /* 305 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(40),
+ /* 305 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(40),
/* 312 S> */ B(LdaSmi), I8(1),
- /* 319 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(42),
+ /* 319 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(42),
/* 326 S> */ B(LdaSmi), I8(1),
- /* 333 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(44),
+ /* 333 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(44),
/* 340 S> */ B(LdaSmi), I8(1),
- /* 347 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(46),
+ /* 347 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(46),
/* 354 S> */ B(LdaSmi), I8(1),
- /* 361 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(48),
+ /* 361 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(48),
/* 368 S> */ B(LdaSmi), I8(1),
- /* 375 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(50),
+ /* 375 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(50),
/* 382 S> */ B(LdaSmi), I8(1),
- /* 389 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(52),
+ /* 389 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(52),
/* 396 S> */ B(LdaSmi), I8(1),
- /* 403 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(54),
+ /* 403 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(54),
/* 410 S> */ B(LdaSmi), I8(1),
- /* 417 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(56),
+ /* 417 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(56),
/* 424 S> */ B(LdaSmi), I8(1),
- /* 431 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(58),
+ /* 431 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(58),
/* 438 S> */ B(LdaSmi), I8(1),
- /* 445 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(60),
+ /* 445 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(60),
/* 452 S> */ B(LdaSmi), I8(1),
- /* 459 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(62),
+ /* 459 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(62),
/* 466 S> */ B(LdaSmi), I8(1),
- /* 473 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(64),
+ /* 473 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(64),
/* 480 S> */ B(LdaSmi), I8(1),
- /* 487 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(66),
+ /* 487 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(66),
/* 494 S> */ B(LdaSmi), I8(1),
- /* 501 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(68),
+ /* 501 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(68),
/* 508 S> */ B(LdaSmi), I8(1),
- /* 515 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(70),
+ /* 515 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(70),
/* 522 S> */ B(LdaSmi), I8(1),
- /* 529 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(72),
+ /* 529 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(72),
/* 536 S> */ B(LdaSmi), I8(1),
- /* 543 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(74),
+ /* 543 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(74),
/* 550 S> */ B(LdaSmi), I8(1),
- /* 557 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(76),
+ /* 557 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(76),
/* 564 S> */ B(LdaSmi), I8(1),
- /* 571 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(78),
+ /* 571 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(78),
/* 578 S> */ B(LdaSmi), I8(1),
- /* 585 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(80),
+ /* 585 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(80),
/* 592 S> */ B(LdaSmi), I8(1),
- /* 599 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(82),
+ /* 599 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(82),
/* 606 S> */ B(LdaSmi), I8(1),
- /* 613 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(84),
+ /* 613 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(84),
/* 620 S> */ B(LdaSmi), I8(1),
- /* 627 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(86),
+ /* 627 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(86),
/* 634 S> */ B(LdaSmi), I8(1),
- /* 641 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(88),
+ /* 641 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(88),
/* 648 S> */ B(LdaSmi), I8(1),
- /* 655 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(90),
+ /* 655 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(90),
/* 662 S> */ B(LdaSmi), I8(1),
- /* 669 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(92),
+ /* 669 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(92),
/* 676 S> */ B(LdaSmi), I8(1),
- /* 683 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(94),
+ /* 683 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(94),
/* 690 S> */ B(LdaSmi), I8(1),
- /* 697 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(96),
+ /* 697 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(96),
/* 704 S> */ B(LdaSmi), I8(1),
- /* 711 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(98),
+ /* 711 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(98),
/* 718 S> */ B(LdaSmi), I8(1),
- /* 725 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(100),
+ /* 725 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(100),
/* 732 S> */ B(LdaSmi), I8(1),
- /* 739 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(102),
+ /* 739 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(102),
/* 746 S> */ B(LdaSmi), I8(1),
- /* 753 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(104),
+ /* 753 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(104),
/* 760 S> */ B(LdaSmi), I8(1),
- /* 767 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(106),
+ /* 767 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(106),
/* 774 S> */ B(LdaSmi), I8(1),
- /* 781 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(108),
+ /* 781 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(108),
/* 788 S> */ B(LdaSmi), I8(1),
- /* 795 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(110),
+ /* 795 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(110),
/* 802 S> */ B(LdaSmi), I8(1),
- /* 809 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(112),
+ /* 809 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(112),
/* 816 S> */ B(LdaSmi), I8(1),
- /* 823 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(114),
+ /* 823 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(114),
/* 830 S> */ B(LdaSmi), I8(1),
- /* 837 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(116),
+ /* 837 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(116),
/* 844 S> */ B(LdaSmi), I8(1),
- /* 851 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(118),
+ /* 851 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(118),
/* 858 S> */ B(LdaSmi), I8(1),
- /* 865 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(120),
+ /* 865 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(120),
/* 872 S> */ B(LdaSmi), I8(1),
- /* 879 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(122),
+ /* 879 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(122),
/* 886 S> */ B(LdaSmi), I8(1),
- /* 893 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(124),
+ /* 893 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(124),
/* 900 S> */ B(LdaSmi), I8(1),
- /* 907 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(126),
+ /* 907 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(126),
/* 914 S> */ B(LdaSmi), I8(1),
- /* 921 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(128),
+ /* 921 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(128),
/* 928 S> */ B(LdaSmi), I8(1),
- /* 935 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(130),
+ /* 935 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(130),
/* 942 S> */ B(LdaSmi), I8(1),
- /* 949 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(132),
+ /* 949 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(132),
/* 956 S> */ B(LdaSmi), I8(1),
- /* 963 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(134),
+ /* 963 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(134),
/* 970 S> */ B(LdaSmi), I8(1),
- /* 977 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(136),
+ /* 977 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(136),
/* 984 S> */ B(LdaSmi), I8(1),
- /* 991 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(138),
+ /* 991 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(138),
/* 998 S> */ B(LdaSmi), I8(1),
- /* 1005 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(140),
+ /* 1005 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(140),
/* 1012 S> */ B(LdaSmi), I8(1),
- /* 1019 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(142),
+ /* 1019 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(142),
/* 1026 S> */ B(LdaSmi), I8(1),
- /* 1033 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(144),
+ /* 1033 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(144),
/* 1040 S> */ B(LdaSmi), I8(1),
- /* 1047 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(146),
+ /* 1047 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(146),
/* 1054 S> */ B(LdaSmi), I8(1),
- /* 1061 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(148),
+ /* 1061 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(148),
/* 1068 S> */ B(LdaSmi), I8(1),
- /* 1075 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(150),
+ /* 1075 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(150),
/* 1082 S> */ B(LdaSmi), I8(1),
- /* 1089 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(152),
+ /* 1089 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(152),
/* 1096 S> */ B(LdaSmi), I8(1),
- /* 1103 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(154),
+ /* 1103 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(154),
/* 1110 S> */ B(LdaSmi), I8(1),
- /* 1117 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(156),
+ /* 1117 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(156),
/* 1124 S> */ B(LdaSmi), I8(1),
- /* 1131 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(158),
+ /* 1131 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(158),
/* 1138 S> */ B(LdaSmi), I8(1),
- /* 1145 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(160),
+ /* 1145 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(160),
/* 1152 S> */ B(LdaSmi), I8(1),
- /* 1159 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(162),
+ /* 1159 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(162),
/* 1166 S> */ B(LdaSmi), I8(1),
- /* 1173 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(164),
+ /* 1173 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(164),
/* 1180 S> */ B(LdaSmi), I8(1),
- /* 1187 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(166),
+ /* 1187 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(166),
/* 1194 S> */ B(LdaSmi), I8(1),
- /* 1201 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(168),
+ /* 1201 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(168),
/* 1208 S> */ B(LdaSmi), I8(1),
- /* 1215 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(170),
+ /* 1215 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(170),
/* 1222 S> */ B(LdaSmi), I8(1),
- /* 1229 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(172),
+ /* 1229 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(172),
/* 1236 S> */ B(LdaSmi), I8(1),
- /* 1243 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(174),
+ /* 1243 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(174),
/* 1250 S> */ B(LdaSmi), I8(1),
- /* 1257 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(176),
+ /* 1257 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(176),
/* 1264 S> */ B(LdaSmi), I8(1),
- /* 1271 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(178),
+ /* 1271 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(178),
/* 1278 S> */ B(LdaSmi), I8(1),
- /* 1285 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(180),
+ /* 1285 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(180),
/* 1292 S> */ B(LdaSmi), I8(1),
- /* 1299 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(182),
+ /* 1299 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(182),
/* 1306 S> */ B(LdaSmi), I8(1),
- /* 1313 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(184),
+ /* 1313 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(184),
/* 1320 S> */ B(LdaSmi), I8(1),
- /* 1327 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(186),
+ /* 1327 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(186),
/* 1334 S> */ B(LdaSmi), I8(1),
- /* 1341 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(188),
+ /* 1341 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(188),
/* 1348 S> */ B(LdaSmi), I8(1),
- /* 1355 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(190),
+ /* 1355 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(190),
/* 1362 S> */ B(LdaSmi), I8(1),
- /* 1369 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(192),
+ /* 1369 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(192),
/* 1376 S> */ B(LdaSmi), I8(1),
- /* 1383 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(194),
+ /* 1383 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(194),
/* 1390 S> */ B(LdaSmi), I8(1),
- /* 1397 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(196),
+ /* 1397 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(196),
/* 1404 S> */ B(LdaSmi), I8(1),
- /* 1411 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(198),
+ /* 1411 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(198),
/* 1418 S> */ B(LdaSmi), I8(1),
- /* 1425 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(200),
+ /* 1425 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(200),
/* 1432 S> */ B(LdaSmi), I8(1),
- /* 1439 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(202),
+ /* 1439 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(202),
/* 1446 S> */ B(LdaSmi), I8(1),
- /* 1453 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(204),
+ /* 1453 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(204),
/* 1460 S> */ B(LdaSmi), I8(1),
- /* 1467 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(206),
+ /* 1467 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(206),
/* 1474 S> */ B(LdaSmi), I8(1),
- /* 1481 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(208),
+ /* 1481 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(208),
/* 1488 S> */ B(LdaSmi), I8(1),
- /* 1495 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(210),
+ /* 1495 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(210),
/* 1502 S> */ B(LdaSmi), I8(1),
- /* 1509 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(212),
+ /* 1509 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(212),
/* 1516 S> */ B(LdaSmi), I8(1),
- /* 1523 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(214),
+ /* 1523 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(214),
/* 1530 S> */ B(LdaSmi), I8(1),
- /* 1537 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(216),
+ /* 1537 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(216),
/* 1544 S> */ B(LdaSmi), I8(1),
- /* 1551 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(218),
+ /* 1551 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(218),
/* 1558 S> */ B(LdaSmi), I8(1),
- /* 1565 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(220),
+ /* 1565 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(220),
/* 1572 S> */ B(LdaSmi), I8(1),
- /* 1579 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(222),
+ /* 1579 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(222),
/* 1586 S> */ B(LdaSmi), I8(1),
- /* 1593 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(224),
+ /* 1593 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(224),
/* 1600 S> */ B(LdaSmi), I8(1),
- /* 1607 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(226),
+ /* 1607 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(226),
/* 1614 S> */ B(LdaSmi), I8(1),
- /* 1621 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(228),
+ /* 1621 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(228),
/* 1628 S> */ B(LdaSmi), I8(1),
- /* 1635 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(230),
+ /* 1635 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(230),
/* 1642 S> */ B(LdaSmi), I8(1),
- /* 1649 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(232),
+ /* 1649 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(232),
/* 1656 S> */ B(LdaSmi), I8(1),
- /* 1663 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(234),
+ /* 1663 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(234),
/* 1670 S> */ B(LdaSmi), I8(1),
- /* 1677 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(236),
+ /* 1677 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(236),
/* 1684 S> */ B(LdaSmi), I8(1),
- /* 1691 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(238),
+ /* 1691 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(238),
/* 1698 S> */ B(LdaSmi), I8(1),
- /* 1705 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(240),
+ /* 1705 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(240),
/* 1712 S> */ B(LdaSmi), I8(1),
- /* 1719 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(242),
+ /* 1719 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(242),
/* 1726 S> */ B(LdaSmi), I8(1),
- /* 1733 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(244),
+ /* 1733 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(244),
/* 1740 S> */ B(LdaSmi), I8(1),
- /* 1747 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(246),
+ /* 1747 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(246),
/* 1754 S> */ B(LdaSmi), I8(1),
- /* 1761 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(248),
+ /* 1761 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(248),
/* 1768 S> */ B(LdaSmi), I8(1),
- /* 1775 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(250),
+ /* 1775 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(250),
/* 1782 S> */ B(LdaSmi), I8(1),
- /* 1789 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(252),
+ /* 1789 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(252),
/* 1796 S> */ B(LdaSmi), I8(1),
- /* 1803 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(254),
+ /* 1803 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(254),
/* 1810 S> */ B(LdaSmi), I8(2),
- /* 1817 E> */ B(Wide), B(StaNamedPropertySloppy), R16(arg0), U16(0), U16(256),
+ /* 1817 E> */ B(Wide), B(StaNamedProperty), R16(arg0), U16(0), U16(256),
B(LdaUndefined),
/* 1822 S> */ B(Return),
]
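 For context: each golden array in this hunk is produced from a generated snippet of
 roughly the following shape; a minimal sketch, with the property name and exact
 repetition count assumed for illustration rather than taken from this diff:

     function f(a) {
       a.name = 1;  // stores 1..128 consume the 8-bit feedback slots U8(0)..U8(254)
       // ... the same store repeated until 128 stores have been emitted ...
       a.name = 2;  // store 129 needs slot 256, which no longer fits a U8 operand,
     }              // hence the B(Wide) prefix and U16(256) on the final store above

 The substantive change in the diff is only the opcode rename: the separate
 StaNamedPropertySloppy and StaNamedPropertyStrict bytecodes collapse into a single
 StaNamedProperty with identical operands, so the language mode is evidently carried
 by the store's feedback metadata rather than baked into the opcode.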
@@ -708,263 +708,263 @@ bytecode array length: 781
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 33 S> */ B(LdaSmi), I8(1),
- /* 40 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(0),
+ /* 40 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(0),
/* 47 S> */ B(LdaSmi), I8(1),
- /* 54 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(2),
+ /* 54 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(2),
/* 61 S> */ B(LdaSmi), I8(1),
- /* 68 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(4),
+ /* 68 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(4),
/* 75 S> */ B(LdaSmi), I8(1),
- /* 82 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(6),
+ /* 82 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(6),
/* 89 S> */ B(LdaSmi), I8(1),
- /* 96 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(8),
+ /* 96 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(8),
/* 103 S> */ B(LdaSmi), I8(1),
- /* 110 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(10),
+ /* 110 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(10),
/* 117 S> */ B(LdaSmi), I8(1),
- /* 124 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(12),
+ /* 124 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(12),
/* 131 S> */ B(LdaSmi), I8(1),
- /* 138 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(14),
+ /* 138 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(14),
/* 145 S> */ B(LdaSmi), I8(1),
- /* 152 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(16),
+ /* 152 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(16),
/* 159 S> */ B(LdaSmi), I8(1),
- /* 166 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(18),
+ /* 166 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(18),
/* 173 S> */ B(LdaSmi), I8(1),
- /* 180 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(20),
+ /* 180 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(20),
/* 187 S> */ B(LdaSmi), I8(1),
- /* 194 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(22),
+ /* 194 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(22),
/* 201 S> */ B(LdaSmi), I8(1),
- /* 208 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(24),
+ /* 208 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(24),
/* 215 S> */ B(LdaSmi), I8(1),
- /* 222 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(26),
+ /* 222 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(26),
/* 229 S> */ B(LdaSmi), I8(1),
- /* 236 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(28),
+ /* 236 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(28),
/* 243 S> */ B(LdaSmi), I8(1),
- /* 250 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(30),
+ /* 250 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(30),
/* 257 S> */ B(LdaSmi), I8(1),
- /* 264 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(32),
+ /* 264 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(32),
/* 271 S> */ B(LdaSmi), I8(1),
- /* 278 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(34),
+ /* 278 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(34),
/* 285 S> */ B(LdaSmi), I8(1),
- /* 292 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(36),
+ /* 292 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(36),
/* 299 S> */ B(LdaSmi), I8(1),
- /* 306 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(38),
+ /* 306 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(38),
/* 313 S> */ B(LdaSmi), I8(1),
- /* 320 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(40),
+ /* 320 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(40),
/* 327 S> */ B(LdaSmi), I8(1),
- /* 334 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(42),
+ /* 334 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(42),
/* 341 S> */ B(LdaSmi), I8(1),
- /* 348 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(44),
+ /* 348 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(44),
/* 355 S> */ B(LdaSmi), I8(1),
- /* 362 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(46),
+ /* 362 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(46),
/* 369 S> */ B(LdaSmi), I8(1),
- /* 376 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(48),
+ /* 376 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(48),
/* 383 S> */ B(LdaSmi), I8(1),
- /* 390 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(50),
+ /* 390 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(50),
/* 397 S> */ B(LdaSmi), I8(1),
- /* 404 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(52),
+ /* 404 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(52),
/* 411 S> */ B(LdaSmi), I8(1),
- /* 418 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(54),
+ /* 418 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(54),
/* 425 S> */ B(LdaSmi), I8(1),
- /* 432 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(56),
+ /* 432 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(56),
/* 439 S> */ B(LdaSmi), I8(1),
- /* 446 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(58),
+ /* 446 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(58),
/* 453 S> */ B(LdaSmi), I8(1),
- /* 460 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(60),
+ /* 460 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(60),
/* 467 S> */ B(LdaSmi), I8(1),
- /* 474 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(62),
+ /* 474 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(62),
/* 481 S> */ B(LdaSmi), I8(1),
- /* 488 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(64),
+ /* 488 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(64),
/* 495 S> */ B(LdaSmi), I8(1),
- /* 502 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(66),
+ /* 502 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(66),
/* 509 S> */ B(LdaSmi), I8(1),
- /* 516 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(68),
+ /* 516 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(68),
/* 523 S> */ B(LdaSmi), I8(1),
- /* 530 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(70),
+ /* 530 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(70),
/* 537 S> */ B(LdaSmi), I8(1),
- /* 544 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(72),
+ /* 544 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(72),
/* 551 S> */ B(LdaSmi), I8(1),
- /* 558 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(74),
+ /* 558 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(74),
/* 565 S> */ B(LdaSmi), I8(1),
- /* 572 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(76),
+ /* 572 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(76),
/* 579 S> */ B(LdaSmi), I8(1),
- /* 586 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(78),
+ /* 586 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(78),
/* 593 S> */ B(LdaSmi), I8(1),
- /* 600 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(80),
+ /* 600 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(80),
/* 607 S> */ B(LdaSmi), I8(1),
- /* 614 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(82),
+ /* 614 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(82),
/* 621 S> */ B(LdaSmi), I8(1),
- /* 628 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(84),
+ /* 628 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(84),
/* 635 S> */ B(LdaSmi), I8(1),
- /* 642 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(86),
+ /* 642 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(86),
/* 649 S> */ B(LdaSmi), I8(1),
- /* 656 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(88),
+ /* 656 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(88),
/* 663 S> */ B(LdaSmi), I8(1),
- /* 670 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(90),
+ /* 670 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(90),
/* 677 S> */ B(LdaSmi), I8(1),
- /* 684 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(92),
+ /* 684 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(92),
/* 691 S> */ B(LdaSmi), I8(1),
- /* 698 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(94),
+ /* 698 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(94),
/* 705 S> */ B(LdaSmi), I8(1),
- /* 712 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(96),
+ /* 712 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(96),
/* 719 S> */ B(LdaSmi), I8(1),
- /* 726 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(98),
+ /* 726 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(98),
/* 733 S> */ B(LdaSmi), I8(1),
- /* 740 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(100),
+ /* 740 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(100),
/* 747 S> */ B(LdaSmi), I8(1),
- /* 754 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(102),
+ /* 754 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(102),
/* 761 S> */ B(LdaSmi), I8(1),
- /* 768 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(104),
+ /* 768 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(104),
/* 775 S> */ B(LdaSmi), I8(1),
- /* 782 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(106),
+ /* 782 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(106),
/* 789 S> */ B(LdaSmi), I8(1),
- /* 796 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(108),
+ /* 796 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(108),
/* 803 S> */ B(LdaSmi), I8(1),
- /* 810 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(110),
+ /* 810 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(110),
/* 817 S> */ B(LdaSmi), I8(1),
- /* 824 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(112),
+ /* 824 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(112),
/* 831 S> */ B(LdaSmi), I8(1),
- /* 838 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(114),
+ /* 838 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(114),
/* 845 S> */ B(LdaSmi), I8(1),
- /* 852 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(116),
+ /* 852 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(116),
/* 859 S> */ B(LdaSmi), I8(1),
- /* 866 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(118),
+ /* 866 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(118),
/* 873 S> */ B(LdaSmi), I8(1),
- /* 880 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(120),
+ /* 880 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(120),
/* 887 S> */ B(LdaSmi), I8(1),
- /* 894 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(122),
+ /* 894 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(122),
/* 901 S> */ B(LdaSmi), I8(1),
- /* 908 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(124),
+ /* 908 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(124),
/* 915 S> */ B(LdaSmi), I8(1),
- /* 922 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(126),
+ /* 922 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(126),
/* 929 S> */ B(LdaSmi), I8(1),
- /* 936 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(128),
+ /* 936 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(128),
/* 943 S> */ B(LdaSmi), I8(1),
- /* 950 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(130),
+ /* 950 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(130),
/* 957 S> */ B(LdaSmi), I8(1),
- /* 964 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(132),
+ /* 964 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(132),
/* 971 S> */ B(LdaSmi), I8(1),
- /* 978 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(134),
+ /* 978 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(134),
/* 985 S> */ B(LdaSmi), I8(1),
- /* 992 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(136),
+ /* 992 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(136),
/* 999 S> */ B(LdaSmi), I8(1),
- /* 1006 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(138),
+ /* 1006 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(138),
/* 1013 S> */ B(LdaSmi), I8(1),
- /* 1020 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(140),
+ /* 1020 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(140),
/* 1027 S> */ B(LdaSmi), I8(1),
- /* 1034 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(142),
+ /* 1034 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(142),
/* 1041 S> */ B(LdaSmi), I8(1),
- /* 1048 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(144),
+ /* 1048 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(144),
/* 1055 S> */ B(LdaSmi), I8(1),
- /* 1062 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(146),
+ /* 1062 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(146),
/* 1069 S> */ B(LdaSmi), I8(1),
- /* 1076 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(148),
+ /* 1076 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(148),
/* 1083 S> */ B(LdaSmi), I8(1),
- /* 1090 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(150),
+ /* 1090 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(150),
/* 1097 S> */ B(LdaSmi), I8(1),
- /* 1104 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(152),
+ /* 1104 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(152),
/* 1111 S> */ B(LdaSmi), I8(1),
- /* 1118 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(154),
+ /* 1118 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(154),
/* 1125 S> */ B(LdaSmi), I8(1),
- /* 1132 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(156),
+ /* 1132 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(156),
/* 1139 S> */ B(LdaSmi), I8(1),
- /* 1146 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(158),
+ /* 1146 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(158),
/* 1153 S> */ B(LdaSmi), I8(1),
- /* 1160 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(160),
+ /* 1160 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(160),
/* 1167 S> */ B(LdaSmi), I8(1),
- /* 1174 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(162),
+ /* 1174 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(162),
/* 1181 S> */ B(LdaSmi), I8(1),
- /* 1188 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(164),
+ /* 1188 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(164),
/* 1195 S> */ B(LdaSmi), I8(1),
- /* 1202 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(166),
+ /* 1202 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(166),
/* 1209 S> */ B(LdaSmi), I8(1),
- /* 1216 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(168),
+ /* 1216 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(168),
/* 1223 S> */ B(LdaSmi), I8(1),
- /* 1230 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(170),
+ /* 1230 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(170),
/* 1237 S> */ B(LdaSmi), I8(1),
- /* 1244 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(172),
+ /* 1244 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(172),
/* 1251 S> */ B(LdaSmi), I8(1),
- /* 1258 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(174),
+ /* 1258 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(174),
/* 1265 S> */ B(LdaSmi), I8(1),
- /* 1272 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(176),
+ /* 1272 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(176),
/* 1279 S> */ B(LdaSmi), I8(1),
- /* 1286 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(178),
+ /* 1286 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(178),
/* 1293 S> */ B(LdaSmi), I8(1),
- /* 1300 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(180),
+ /* 1300 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(180),
/* 1307 S> */ B(LdaSmi), I8(1),
- /* 1314 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(182),
+ /* 1314 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(182),
/* 1321 S> */ B(LdaSmi), I8(1),
- /* 1328 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(184),
+ /* 1328 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(184),
/* 1335 S> */ B(LdaSmi), I8(1),
- /* 1342 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(186),
+ /* 1342 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(186),
/* 1349 S> */ B(LdaSmi), I8(1),
- /* 1356 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(188),
+ /* 1356 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(188),
/* 1363 S> */ B(LdaSmi), I8(1),
- /* 1370 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(190),
+ /* 1370 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(190),
/* 1377 S> */ B(LdaSmi), I8(1),
- /* 1384 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(192),
+ /* 1384 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(192),
/* 1391 S> */ B(LdaSmi), I8(1),
- /* 1398 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(194),
+ /* 1398 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(194),
/* 1405 S> */ B(LdaSmi), I8(1),
- /* 1412 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(196),
+ /* 1412 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(196),
/* 1419 S> */ B(LdaSmi), I8(1),
- /* 1426 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(198),
+ /* 1426 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(198),
/* 1433 S> */ B(LdaSmi), I8(1),
- /* 1440 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(200),
+ /* 1440 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(200),
/* 1447 S> */ B(LdaSmi), I8(1),
- /* 1454 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(202),
+ /* 1454 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(202),
/* 1461 S> */ B(LdaSmi), I8(1),
- /* 1468 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(204),
+ /* 1468 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(204),
/* 1475 S> */ B(LdaSmi), I8(1),
- /* 1482 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(206),
+ /* 1482 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(206),
/* 1489 S> */ B(LdaSmi), I8(1),
- /* 1496 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(208),
+ /* 1496 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(208),
/* 1503 S> */ B(LdaSmi), I8(1),
- /* 1510 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(210),
+ /* 1510 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(210),
/* 1517 S> */ B(LdaSmi), I8(1),
- /* 1524 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(212),
+ /* 1524 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(212),
/* 1531 S> */ B(LdaSmi), I8(1),
- /* 1538 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(214),
+ /* 1538 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(214),
/* 1545 S> */ B(LdaSmi), I8(1),
- /* 1552 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(216),
+ /* 1552 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(216),
/* 1559 S> */ B(LdaSmi), I8(1),
- /* 1566 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(218),
+ /* 1566 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(218),
/* 1573 S> */ B(LdaSmi), I8(1),
- /* 1580 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(220),
+ /* 1580 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(220),
/* 1587 S> */ B(LdaSmi), I8(1),
- /* 1594 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(222),
+ /* 1594 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(222),
/* 1601 S> */ B(LdaSmi), I8(1),
- /* 1608 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(224),
+ /* 1608 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(224),
/* 1615 S> */ B(LdaSmi), I8(1),
- /* 1622 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(226),
+ /* 1622 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(226),
/* 1629 S> */ B(LdaSmi), I8(1),
- /* 1636 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(228),
+ /* 1636 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(228),
/* 1643 S> */ B(LdaSmi), I8(1),
- /* 1650 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(230),
+ /* 1650 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(230),
/* 1657 S> */ B(LdaSmi), I8(1),
- /* 1664 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(232),
+ /* 1664 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(232),
/* 1671 S> */ B(LdaSmi), I8(1),
- /* 1678 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(234),
+ /* 1678 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(234),
/* 1685 S> */ B(LdaSmi), I8(1),
- /* 1692 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(236),
+ /* 1692 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(236),
/* 1699 S> */ B(LdaSmi), I8(1),
- /* 1706 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(238),
+ /* 1706 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(238),
/* 1713 S> */ B(LdaSmi), I8(1),
- /* 1720 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(240),
+ /* 1720 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(240),
/* 1727 S> */ B(LdaSmi), I8(1),
- /* 1734 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(242),
+ /* 1734 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(242),
/* 1741 S> */ B(LdaSmi), I8(1),
- /* 1748 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(244),
+ /* 1748 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(244),
/* 1755 S> */ B(LdaSmi), I8(1),
- /* 1762 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(246),
+ /* 1762 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(246),
/* 1769 S> */ B(LdaSmi), I8(1),
- /* 1776 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(248),
+ /* 1776 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(248),
/* 1783 S> */ B(LdaSmi), I8(1),
- /* 1790 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(250),
+ /* 1790 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(250),
/* 1797 S> */ B(LdaSmi), I8(1),
- /* 1804 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(252),
+ /* 1804 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(252),
/* 1811 S> */ B(LdaSmi), I8(1),
- /* 1818 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(254),
+ /* 1818 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(254),
/* 1825 S> */ B(LdaSmi), I8(2),
- /* 1832 E> */ B(Wide), B(StaNamedPropertyStrict), R16(arg0), U16(0), U16(256),
+ /* 1832 E> */ B(Wide), B(StaNamedProperty), R16(arg0), U16(0), U16(256),
B(LdaUndefined),
/* 1837 S> */ B(Return),
]
@@ -1115,263 +1115,263 @@ bytecode array length: 781
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 21 S> */ B(LdaSmi), I8(1),
- /* 26 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(0),
+ /* 26 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(0),
/* 33 S> */ B(LdaSmi), I8(1),
- /* 38 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(2),
+ /* 38 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(2),
/* 45 S> */ B(LdaSmi), I8(1),
- /* 50 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(4),
+ /* 50 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(4),
/* 57 S> */ B(LdaSmi), I8(1),
- /* 62 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(6),
+ /* 62 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(6),
/* 69 S> */ B(LdaSmi), I8(1),
- /* 74 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(8),
+ /* 74 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(8),
/* 81 S> */ B(LdaSmi), I8(1),
- /* 86 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(10),
+ /* 86 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(10),
/* 93 S> */ B(LdaSmi), I8(1),
- /* 98 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(12),
+ /* 98 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(12),
/* 105 S> */ B(LdaSmi), I8(1),
- /* 110 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(14),
+ /* 110 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(14),
/* 117 S> */ B(LdaSmi), I8(1),
- /* 122 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(16),
+ /* 122 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(16),
/* 129 S> */ B(LdaSmi), I8(1),
- /* 134 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(18),
+ /* 134 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(18),
/* 141 S> */ B(LdaSmi), I8(1),
- /* 146 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(20),
+ /* 146 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(20),
/* 153 S> */ B(LdaSmi), I8(1),
- /* 158 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(22),
+ /* 158 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(22),
/* 165 S> */ B(LdaSmi), I8(1),
- /* 170 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(24),
+ /* 170 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(24),
/* 177 S> */ B(LdaSmi), I8(1),
- /* 182 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(26),
+ /* 182 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(26),
/* 189 S> */ B(LdaSmi), I8(1),
- /* 194 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(28),
+ /* 194 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(28),
/* 201 S> */ B(LdaSmi), I8(1),
- /* 206 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(30),
+ /* 206 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(30),
/* 213 S> */ B(LdaSmi), I8(1),
- /* 218 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(32),
+ /* 218 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(32),
/* 225 S> */ B(LdaSmi), I8(1),
- /* 230 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(34),
+ /* 230 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(34),
/* 237 S> */ B(LdaSmi), I8(1),
- /* 242 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(36),
+ /* 242 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(36),
/* 249 S> */ B(LdaSmi), I8(1),
- /* 254 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(38),
+ /* 254 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(38),
/* 261 S> */ B(LdaSmi), I8(1),
- /* 266 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(40),
+ /* 266 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(40),
/* 273 S> */ B(LdaSmi), I8(1),
- /* 278 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(42),
+ /* 278 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(42),
/* 285 S> */ B(LdaSmi), I8(1),
- /* 290 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(44),
+ /* 290 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(44),
/* 297 S> */ B(LdaSmi), I8(1),
- /* 302 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(46),
+ /* 302 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(46),
/* 309 S> */ B(LdaSmi), I8(1),
- /* 314 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(48),
+ /* 314 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(48),
/* 321 S> */ B(LdaSmi), I8(1),
- /* 326 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(50),
+ /* 326 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(50),
/* 333 S> */ B(LdaSmi), I8(1),
- /* 338 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(52),
+ /* 338 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(52),
/* 345 S> */ B(LdaSmi), I8(1),
- /* 350 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(54),
+ /* 350 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(54),
/* 357 S> */ B(LdaSmi), I8(1),
- /* 362 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(56),
+ /* 362 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(56),
/* 369 S> */ B(LdaSmi), I8(1),
- /* 374 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(58),
+ /* 374 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(58),
/* 381 S> */ B(LdaSmi), I8(1),
- /* 386 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(60),
+ /* 386 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(60),
/* 393 S> */ B(LdaSmi), I8(1),
- /* 398 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(62),
+ /* 398 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(62),
/* 405 S> */ B(LdaSmi), I8(1),
- /* 410 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(64),
+ /* 410 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(64),
/* 417 S> */ B(LdaSmi), I8(1),
- /* 422 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(66),
+ /* 422 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(66),
/* 429 S> */ B(LdaSmi), I8(1),
- /* 434 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(68),
+ /* 434 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(68),
/* 441 S> */ B(LdaSmi), I8(1),
- /* 446 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(70),
+ /* 446 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(70),
/* 453 S> */ B(LdaSmi), I8(1),
- /* 458 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(72),
+ /* 458 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(72),
/* 465 S> */ B(LdaSmi), I8(1),
- /* 470 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(74),
+ /* 470 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(74),
/* 477 S> */ B(LdaSmi), I8(1),
- /* 482 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(76),
+ /* 482 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(76),
/* 489 S> */ B(LdaSmi), I8(1),
- /* 494 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(78),
+ /* 494 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(78),
/* 501 S> */ B(LdaSmi), I8(1),
- /* 506 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(80),
+ /* 506 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(80),
/* 513 S> */ B(LdaSmi), I8(1),
- /* 518 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(82),
+ /* 518 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(82),
/* 525 S> */ B(LdaSmi), I8(1),
- /* 530 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(84),
+ /* 530 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(84),
/* 537 S> */ B(LdaSmi), I8(1),
- /* 542 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(86),
+ /* 542 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(86),
/* 549 S> */ B(LdaSmi), I8(1),
- /* 554 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(88),
+ /* 554 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(88),
/* 561 S> */ B(LdaSmi), I8(1),
- /* 566 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(90),
+ /* 566 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(90),
/* 573 S> */ B(LdaSmi), I8(1),
- /* 578 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(92),
+ /* 578 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(92),
/* 585 S> */ B(LdaSmi), I8(1),
- /* 590 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(94),
+ /* 590 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(94),
/* 597 S> */ B(LdaSmi), I8(1),
- /* 602 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(96),
+ /* 602 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(96),
/* 609 S> */ B(LdaSmi), I8(1),
- /* 614 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(98),
+ /* 614 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(98),
/* 621 S> */ B(LdaSmi), I8(1),
- /* 626 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(100),
+ /* 626 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(100),
/* 633 S> */ B(LdaSmi), I8(1),
- /* 638 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(102),
+ /* 638 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(102),
/* 645 S> */ B(LdaSmi), I8(1),
- /* 650 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(104),
+ /* 650 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(104),
/* 657 S> */ B(LdaSmi), I8(1),
- /* 662 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(106),
+ /* 662 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(106),
/* 669 S> */ B(LdaSmi), I8(1),
- /* 674 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(108),
+ /* 674 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(108),
/* 681 S> */ B(LdaSmi), I8(1),
- /* 686 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(110),
+ /* 686 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(110),
/* 693 S> */ B(LdaSmi), I8(1),
- /* 698 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(112),
+ /* 698 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(112),
/* 705 S> */ B(LdaSmi), I8(1),
- /* 710 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(114),
+ /* 710 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(114),
/* 717 S> */ B(LdaSmi), I8(1),
- /* 722 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(116),
+ /* 722 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(116),
/* 729 S> */ B(LdaSmi), I8(1),
- /* 734 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(118),
+ /* 734 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(118),
/* 741 S> */ B(LdaSmi), I8(1),
- /* 746 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(120),
+ /* 746 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(120),
/* 753 S> */ B(LdaSmi), I8(1),
- /* 758 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(122),
+ /* 758 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(122),
/* 765 S> */ B(LdaSmi), I8(1),
- /* 770 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(124),
+ /* 770 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(124),
/* 777 S> */ B(LdaSmi), I8(1),
- /* 782 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(126),
+ /* 782 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(126),
/* 789 S> */ B(LdaSmi), I8(1),
- /* 794 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(128),
+ /* 794 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(128),
/* 801 S> */ B(LdaSmi), I8(1),
- /* 806 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(130),
+ /* 806 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(130),
/* 813 S> */ B(LdaSmi), I8(1),
- /* 818 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(132),
+ /* 818 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(132),
/* 825 S> */ B(LdaSmi), I8(1),
- /* 830 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(134),
+ /* 830 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(134),
/* 837 S> */ B(LdaSmi), I8(1),
- /* 842 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(136),
+ /* 842 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(136),
/* 849 S> */ B(LdaSmi), I8(1),
- /* 854 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(138),
+ /* 854 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(138),
/* 861 S> */ B(LdaSmi), I8(1),
- /* 866 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(140),
+ /* 866 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(140),
/* 873 S> */ B(LdaSmi), I8(1),
- /* 878 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(142),
+ /* 878 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(142),
/* 885 S> */ B(LdaSmi), I8(1),
- /* 890 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(144),
+ /* 890 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(144),
/* 897 S> */ B(LdaSmi), I8(1),
- /* 902 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(146),
+ /* 902 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(146),
/* 909 S> */ B(LdaSmi), I8(1),
- /* 914 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(148),
+ /* 914 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(148),
/* 921 S> */ B(LdaSmi), I8(1),
- /* 926 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(150),
+ /* 926 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(150),
/* 933 S> */ B(LdaSmi), I8(1),
- /* 938 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(152),
+ /* 938 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(152),
/* 945 S> */ B(LdaSmi), I8(1),
- /* 950 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(154),
+ /* 950 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(154),
/* 957 S> */ B(LdaSmi), I8(1),
- /* 962 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(156),
+ /* 962 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(156),
/* 969 S> */ B(LdaSmi), I8(1),
- /* 974 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(158),
+ /* 974 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(158),
/* 981 S> */ B(LdaSmi), I8(1),
- /* 986 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(160),
+ /* 986 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(160),
/* 993 S> */ B(LdaSmi), I8(1),
- /* 998 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(162),
+ /* 998 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(162),
/* 1005 S> */ B(LdaSmi), I8(1),
- /* 1010 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(164),
+ /* 1010 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(164),
/* 1017 S> */ B(LdaSmi), I8(1),
- /* 1022 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(166),
+ /* 1022 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(166),
/* 1029 S> */ B(LdaSmi), I8(1),
- /* 1034 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(168),
+ /* 1034 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(168),
/* 1041 S> */ B(LdaSmi), I8(1),
- /* 1046 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(170),
+ /* 1046 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(170),
/* 1053 S> */ B(LdaSmi), I8(1),
- /* 1058 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(172),
+ /* 1058 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(172),
/* 1065 S> */ B(LdaSmi), I8(1),
- /* 1070 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(174),
+ /* 1070 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(174),
/* 1077 S> */ B(LdaSmi), I8(1),
- /* 1082 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(176),
+ /* 1082 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(176),
/* 1089 S> */ B(LdaSmi), I8(1),
- /* 1094 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(178),
+ /* 1094 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(178),
/* 1101 S> */ B(LdaSmi), I8(1),
- /* 1106 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(180),
+ /* 1106 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(180),
/* 1113 S> */ B(LdaSmi), I8(1),
- /* 1118 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(182),
+ /* 1118 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(182),
/* 1125 S> */ B(LdaSmi), I8(1),
- /* 1130 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(184),
+ /* 1130 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(184),
/* 1137 S> */ B(LdaSmi), I8(1),
- /* 1142 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(186),
+ /* 1142 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(186),
/* 1149 S> */ B(LdaSmi), I8(1),
- /* 1154 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(188),
+ /* 1154 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(188),
/* 1161 S> */ B(LdaSmi), I8(1),
- /* 1166 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(190),
+ /* 1166 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(190),
/* 1173 S> */ B(LdaSmi), I8(1),
- /* 1178 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(192),
+ /* 1178 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(192),
/* 1185 S> */ B(LdaSmi), I8(1),
- /* 1190 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(194),
+ /* 1190 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(194),
/* 1197 S> */ B(LdaSmi), I8(1),
- /* 1202 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(196),
+ /* 1202 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(196),
/* 1209 S> */ B(LdaSmi), I8(1),
- /* 1214 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(198),
+ /* 1214 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(198),
/* 1221 S> */ B(LdaSmi), I8(1),
- /* 1226 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(200),
+ /* 1226 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(200),
/* 1233 S> */ B(LdaSmi), I8(1),
- /* 1238 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(202),
+ /* 1238 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(202),
/* 1245 S> */ B(LdaSmi), I8(1),
- /* 1250 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(204),
+ /* 1250 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(204),
/* 1257 S> */ B(LdaSmi), I8(1),
- /* 1262 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(206),
+ /* 1262 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(206),
/* 1269 S> */ B(LdaSmi), I8(1),
- /* 1274 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(208),
+ /* 1274 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(208),
/* 1281 S> */ B(LdaSmi), I8(1),
- /* 1286 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(210),
+ /* 1286 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(210),
/* 1293 S> */ B(LdaSmi), I8(1),
- /* 1298 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(212),
+ /* 1298 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(212),
/* 1305 S> */ B(LdaSmi), I8(1),
- /* 1310 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(214),
+ /* 1310 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(214),
/* 1317 S> */ B(LdaSmi), I8(1),
- /* 1322 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(216),
+ /* 1322 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(216),
/* 1329 S> */ B(LdaSmi), I8(1),
- /* 1334 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(218),
+ /* 1334 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(218),
/* 1341 S> */ B(LdaSmi), I8(1),
- /* 1346 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(220),
+ /* 1346 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(220),
/* 1353 S> */ B(LdaSmi), I8(1),
- /* 1358 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(222),
+ /* 1358 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(222),
/* 1365 S> */ B(LdaSmi), I8(1),
- /* 1370 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(224),
+ /* 1370 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(224),
/* 1377 S> */ B(LdaSmi), I8(1),
- /* 1382 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(226),
+ /* 1382 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(226),
/* 1389 S> */ B(LdaSmi), I8(1),
- /* 1394 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(228),
+ /* 1394 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(228),
/* 1401 S> */ B(LdaSmi), I8(1),
- /* 1406 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(230),
+ /* 1406 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(230),
/* 1413 S> */ B(LdaSmi), I8(1),
- /* 1418 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(232),
+ /* 1418 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(232),
/* 1425 S> */ B(LdaSmi), I8(1),
- /* 1430 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(234),
+ /* 1430 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(234),
/* 1437 S> */ B(LdaSmi), I8(1),
- /* 1442 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(236),
+ /* 1442 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(236),
/* 1449 S> */ B(LdaSmi), I8(1),
- /* 1454 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(238),
+ /* 1454 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(238),
/* 1461 S> */ B(LdaSmi), I8(1),
- /* 1466 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(240),
+ /* 1466 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(240),
/* 1473 S> */ B(LdaSmi), I8(1),
- /* 1478 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(242),
+ /* 1478 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(242),
/* 1485 S> */ B(LdaSmi), I8(1),
- /* 1490 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(244),
+ /* 1490 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(244),
/* 1497 S> */ B(LdaSmi), I8(1),
- /* 1502 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(246),
+ /* 1502 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(246),
/* 1509 S> */ B(LdaSmi), I8(1),
- /* 1514 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(248),
+ /* 1514 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(248),
/* 1521 S> */ B(LdaSmi), I8(1),
- /* 1526 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(250),
+ /* 1526 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(250),
/* 1533 S> */ B(LdaSmi), I8(1),
- /* 1538 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(252),
+ /* 1538 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(252),
/* 1545 S> */ B(LdaSmi), I8(1),
- /* 1550 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(254),
+ /* 1550 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(254),
/* 1557 S> */ B(LdaSmi), I8(2),
- /* 1562 E> */ B(Wide), B(StaKeyedPropertySloppy), R16(arg0), R16(arg1), U16(256),
+ /* 1562 E> */ B(Wide), B(StaKeyedProperty), R16(arg0), R16(arg1), U16(256),
B(LdaUndefined),
/* 1567 S> */ B(Return),
]
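 The keyed arrays follow the same pattern, except the key arrives in a register
 (R(arg1)) instead of a constant-pool index; a rough sketch of the generated
 snippet, with names assumed for illustration:

     function f(a, key) {
       a[key] = 1;  // receiver and key are both register operands: R(arg0), R(arg1)
       // ... repeated until the 129th store's feedback slot requires U16(256) ...
       a[key] = 2;
     }

 As with the named stores, StaKeyedPropertySloppy and StaKeyedPropertyStrict merge
 into a single StaKeyedProperty with unchanged operands.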
@@ -1522,263 +1522,263 @@ bytecode array length: 781
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 37 S> */ B(LdaSmi), I8(1),
- /* 42 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(0),
+ /* 42 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(0),
/* 49 S> */ B(LdaSmi), I8(1),
- /* 54 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(2),
+ /* 54 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(2),
/* 61 S> */ B(LdaSmi), I8(1),
- /* 66 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(4),
+ /* 66 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(4),
/* 73 S> */ B(LdaSmi), I8(1),
- /* 78 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(6),
+ /* 78 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(6),
/* 85 S> */ B(LdaSmi), I8(1),
- /* 90 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(8),
+ /* 90 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(8),
/* 97 S> */ B(LdaSmi), I8(1),
- /* 102 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(10),
+ /* 102 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(10),
/* 109 S> */ B(LdaSmi), I8(1),
- /* 114 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(12),
+ /* 114 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(12),
/* 121 S> */ B(LdaSmi), I8(1),
- /* 126 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(14),
+ /* 126 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(14),
/* 133 S> */ B(LdaSmi), I8(1),
- /* 138 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(16),
+ /* 138 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(16),
/* 145 S> */ B(LdaSmi), I8(1),
- /* 150 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(18),
+ /* 150 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(18),
/* 157 S> */ B(LdaSmi), I8(1),
- /* 162 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(20),
+ /* 162 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(20),
/* 169 S> */ B(LdaSmi), I8(1),
- /* 174 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(22),
+ /* 174 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(22),
/* 181 S> */ B(LdaSmi), I8(1),
- /* 186 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(24),
+ /* 186 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(24),
/* 193 S> */ B(LdaSmi), I8(1),
- /* 198 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(26),
+ /* 198 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(26),
/* 205 S> */ B(LdaSmi), I8(1),
- /* 210 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(28),
+ /* 210 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(28),
/* 217 S> */ B(LdaSmi), I8(1),
- /* 222 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(30),
+ /* 222 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(30),
/* 229 S> */ B(LdaSmi), I8(1),
- /* 234 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(32),
+ /* 234 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(32),
/* 241 S> */ B(LdaSmi), I8(1),
- /* 246 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(34),
+ /* 246 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(34),
/* 253 S> */ B(LdaSmi), I8(1),
- /* 258 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(36),
+ /* 258 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(36),
/* 265 S> */ B(LdaSmi), I8(1),
- /* 270 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(38),
+ /* 270 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(38),
/* 277 S> */ B(LdaSmi), I8(1),
- /* 282 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(40),
+ /* 282 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(40),
/* 289 S> */ B(LdaSmi), I8(1),
- /* 294 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(42),
+ /* 294 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(42),
/* 301 S> */ B(LdaSmi), I8(1),
- /* 306 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(44),
+ /* 306 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(44),
/* 313 S> */ B(LdaSmi), I8(1),
- /* 318 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(46),
+ /* 318 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(46),
/* 325 S> */ B(LdaSmi), I8(1),
- /* 330 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(48),
+ /* 330 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(48),
/* 337 S> */ B(LdaSmi), I8(1),
- /* 342 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(50),
+ /* 342 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(50),
/* 349 S> */ B(LdaSmi), I8(1),
- /* 354 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(52),
+ /* 354 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(52),
/* 361 S> */ B(LdaSmi), I8(1),
- /* 366 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(54),
+ /* 366 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(54),
/* 373 S> */ B(LdaSmi), I8(1),
- /* 378 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(56),
+ /* 378 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(56),
/* 385 S> */ B(LdaSmi), I8(1),
- /* 390 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(58),
+ /* 390 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(58),
/* 397 S> */ B(LdaSmi), I8(1),
- /* 402 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(60),
+ /* 402 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(60),
/* 409 S> */ B(LdaSmi), I8(1),
- /* 414 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(62),
+ /* 414 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(62),
/* 421 S> */ B(LdaSmi), I8(1),
- /* 426 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(64),
+ /* 426 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(64),
/* 433 S> */ B(LdaSmi), I8(1),
- /* 438 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(66),
+ /* 438 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(66),
/* 445 S> */ B(LdaSmi), I8(1),
- /* 450 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(68),
+ /* 450 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(68),
/* 457 S> */ B(LdaSmi), I8(1),
- /* 462 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(70),
+ /* 462 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(70),
/* 469 S> */ B(LdaSmi), I8(1),
- /* 474 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(72),
+ /* 474 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(72),
/* 481 S> */ B(LdaSmi), I8(1),
- /* 486 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(74),
+ /* 486 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(74),
/* 493 S> */ B(LdaSmi), I8(1),
- /* 498 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(76),
+ /* 498 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(76),
/* 505 S> */ B(LdaSmi), I8(1),
- /* 510 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(78),
+ /* 510 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(78),
/* 517 S> */ B(LdaSmi), I8(1),
- /* 522 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(80),
+ /* 522 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(80),
/* 529 S> */ B(LdaSmi), I8(1),
- /* 534 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(82),
+ /* 534 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(82),
/* 541 S> */ B(LdaSmi), I8(1),
- /* 546 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(84),
+ /* 546 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(84),
/* 553 S> */ B(LdaSmi), I8(1),
- /* 558 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(86),
+ /* 558 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(86),
/* 565 S> */ B(LdaSmi), I8(1),
- /* 570 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(88),
+ /* 570 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(88),
/* 577 S> */ B(LdaSmi), I8(1),
- /* 582 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(90),
+ /* 582 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(90),
/* 589 S> */ B(LdaSmi), I8(1),
- /* 594 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(92),
+ /* 594 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(92),
/* 601 S> */ B(LdaSmi), I8(1),
- /* 606 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(94),
+ /* 606 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(94),
/* 613 S> */ B(LdaSmi), I8(1),
- /* 618 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(96),
+ /* 618 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(96),
/* 625 S> */ B(LdaSmi), I8(1),
- /* 630 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(98),
+ /* 630 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(98),
/* 637 S> */ B(LdaSmi), I8(1),
- /* 642 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(100),
+ /* 642 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(100),
/* 649 S> */ B(LdaSmi), I8(1),
- /* 654 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(102),
+ /* 654 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(102),
/* 661 S> */ B(LdaSmi), I8(1),
- /* 666 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(104),
+ /* 666 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(104),
/* 673 S> */ B(LdaSmi), I8(1),
- /* 678 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(106),
+ /* 678 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(106),
/* 685 S> */ B(LdaSmi), I8(1),
- /* 690 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(108),
+ /* 690 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(108),
/* 697 S> */ B(LdaSmi), I8(1),
- /* 702 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(110),
+ /* 702 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(110),
/* 709 S> */ B(LdaSmi), I8(1),
- /* 714 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(112),
+ /* 714 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(112),
/* 721 S> */ B(LdaSmi), I8(1),
- /* 726 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(114),
+ /* 726 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(114),
/* 733 S> */ B(LdaSmi), I8(1),
- /* 738 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(116),
+ /* 738 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(116),
/* 745 S> */ B(LdaSmi), I8(1),
- /* 750 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(118),
+ /* 750 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(118),
/* 757 S> */ B(LdaSmi), I8(1),
- /* 762 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(120),
+ /* 762 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(120),
/* 769 S> */ B(LdaSmi), I8(1),
- /* 774 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(122),
+ /* 774 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(122),
/* 781 S> */ B(LdaSmi), I8(1),
- /* 786 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(124),
+ /* 786 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(124),
/* 793 S> */ B(LdaSmi), I8(1),
- /* 798 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(126),
+ /* 798 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(126),
/* 805 S> */ B(LdaSmi), I8(1),
- /* 810 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(128),
+ /* 810 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(128),
/* 817 S> */ B(LdaSmi), I8(1),
- /* 822 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(130),
+ /* 822 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(130),
/* 829 S> */ B(LdaSmi), I8(1),
- /* 834 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(132),
+ /* 834 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(132),
/* 841 S> */ B(LdaSmi), I8(1),
- /* 846 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(134),
+ /* 846 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(134),
/* 853 S> */ B(LdaSmi), I8(1),
- /* 858 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(136),
+ /* 858 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(136),
/* 865 S> */ B(LdaSmi), I8(1),
- /* 870 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(138),
+ /* 870 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(138),
/* 877 S> */ B(LdaSmi), I8(1),
- /* 882 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(140),
+ /* 882 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(140),
/* 889 S> */ B(LdaSmi), I8(1),
- /* 894 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(142),
+ /* 894 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(142),
/* 901 S> */ B(LdaSmi), I8(1),
- /* 906 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(144),
+ /* 906 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(144),
/* 913 S> */ B(LdaSmi), I8(1),
- /* 918 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(146),
+ /* 918 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(146),
/* 925 S> */ B(LdaSmi), I8(1),
- /* 930 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(148),
+ /* 930 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(148),
/* 937 S> */ B(LdaSmi), I8(1),
- /* 942 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(150),
+ /* 942 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(150),
/* 949 S> */ B(LdaSmi), I8(1),
- /* 954 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(152),
+ /* 954 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(152),
/* 961 S> */ B(LdaSmi), I8(1),
- /* 966 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(154),
+ /* 966 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(154),
/* 973 S> */ B(LdaSmi), I8(1),
- /* 978 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(156),
+ /* 978 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(156),
/* 985 S> */ B(LdaSmi), I8(1),
- /* 990 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(158),
+ /* 990 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(158),
/* 997 S> */ B(LdaSmi), I8(1),
- /* 1002 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(160),
+ /* 1002 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(160),
/* 1009 S> */ B(LdaSmi), I8(1),
- /* 1014 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(162),
+ /* 1014 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(162),
/* 1021 S> */ B(LdaSmi), I8(1),
- /* 1026 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(164),
+ /* 1026 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(164),
/* 1033 S> */ B(LdaSmi), I8(1),
- /* 1038 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(166),
+ /* 1038 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(166),
/* 1045 S> */ B(LdaSmi), I8(1),
- /* 1050 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(168),
+ /* 1050 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(168),
/* 1057 S> */ B(LdaSmi), I8(1),
- /* 1062 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(170),
+ /* 1062 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(170),
/* 1069 S> */ B(LdaSmi), I8(1),
- /* 1074 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(172),
+ /* 1074 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(172),
/* 1081 S> */ B(LdaSmi), I8(1),
- /* 1086 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(174),
+ /* 1086 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(174),
/* 1093 S> */ B(LdaSmi), I8(1),
- /* 1098 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(176),
+ /* 1098 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(176),
/* 1105 S> */ B(LdaSmi), I8(1),
- /* 1110 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(178),
+ /* 1110 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(178),
/* 1117 S> */ B(LdaSmi), I8(1),
- /* 1122 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(180),
+ /* 1122 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(180),
/* 1129 S> */ B(LdaSmi), I8(1),
- /* 1134 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(182),
+ /* 1134 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(182),
/* 1141 S> */ B(LdaSmi), I8(1),
- /* 1146 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(184),
+ /* 1146 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(184),
/* 1153 S> */ B(LdaSmi), I8(1),
- /* 1158 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(186),
+ /* 1158 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(186),
/* 1165 S> */ B(LdaSmi), I8(1),
- /* 1170 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(188),
+ /* 1170 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(188),
/* 1177 S> */ B(LdaSmi), I8(1),
- /* 1182 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(190),
+ /* 1182 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(190),
/* 1189 S> */ B(LdaSmi), I8(1),
- /* 1194 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(192),
+ /* 1194 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(192),
/* 1201 S> */ B(LdaSmi), I8(1),
- /* 1206 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(194),
+ /* 1206 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(194),
/* 1213 S> */ B(LdaSmi), I8(1),
- /* 1218 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(196),
+ /* 1218 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(196),
/* 1225 S> */ B(LdaSmi), I8(1),
- /* 1230 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(198),
+ /* 1230 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(198),
/* 1237 S> */ B(LdaSmi), I8(1),
- /* 1242 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(200),
+ /* 1242 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(200),
/* 1249 S> */ B(LdaSmi), I8(1),
- /* 1254 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(202),
+ /* 1254 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(202),
/* 1261 S> */ B(LdaSmi), I8(1),
- /* 1266 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(204),
+ /* 1266 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(204),
/* 1273 S> */ B(LdaSmi), I8(1),
- /* 1278 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(206),
+ /* 1278 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(206),
/* 1285 S> */ B(LdaSmi), I8(1),
- /* 1290 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(208),
+ /* 1290 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(208),
/* 1297 S> */ B(LdaSmi), I8(1),
- /* 1302 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(210),
+ /* 1302 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(210),
/* 1309 S> */ B(LdaSmi), I8(1),
- /* 1314 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(212),
+ /* 1314 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(212),
/* 1321 S> */ B(LdaSmi), I8(1),
- /* 1326 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(214),
+ /* 1326 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(214),
/* 1333 S> */ B(LdaSmi), I8(1),
- /* 1338 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(216),
+ /* 1338 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(216),
/* 1345 S> */ B(LdaSmi), I8(1),
- /* 1350 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(218),
+ /* 1350 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(218),
/* 1357 S> */ B(LdaSmi), I8(1),
- /* 1362 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(220),
+ /* 1362 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(220),
/* 1369 S> */ B(LdaSmi), I8(1),
- /* 1374 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(222),
+ /* 1374 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(222),
/* 1381 S> */ B(LdaSmi), I8(1),
- /* 1386 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(224),
+ /* 1386 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(224),
/* 1393 S> */ B(LdaSmi), I8(1),
- /* 1398 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(226),
+ /* 1398 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(226),
/* 1405 S> */ B(LdaSmi), I8(1),
- /* 1410 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(228),
+ /* 1410 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(228),
/* 1417 S> */ B(LdaSmi), I8(1),
- /* 1422 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(230),
+ /* 1422 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(230),
/* 1429 S> */ B(LdaSmi), I8(1),
- /* 1434 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(232),
+ /* 1434 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(232),
/* 1441 S> */ B(LdaSmi), I8(1),
- /* 1446 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(234),
+ /* 1446 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(234),
/* 1453 S> */ B(LdaSmi), I8(1),
- /* 1458 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(236),
+ /* 1458 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(236),
/* 1465 S> */ B(LdaSmi), I8(1),
- /* 1470 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(238),
+ /* 1470 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(238),
/* 1477 S> */ B(LdaSmi), I8(1),
- /* 1482 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(240),
+ /* 1482 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(240),
/* 1489 S> */ B(LdaSmi), I8(1),
- /* 1494 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(242),
+ /* 1494 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(242),
/* 1501 S> */ B(LdaSmi), I8(1),
- /* 1506 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(244),
+ /* 1506 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(244),
/* 1513 S> */ B(LdaSmi), I8(1),
- /* 1518 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(246),
+ /* 1518 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(246),
/* 1525 S> */ B(LdaSmi), I8(1),
- /* 1530 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(248),
+ /* 1530 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(248),
/* 1537 S> */ B(LdaSmi), I8(1),
- /* 1542 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(250),
+ /* 1542 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(250),
/* 1549 S> */ B(LdaSmi), I8(1),
- /* 1554 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(252),
+ /* 1554 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(252),
/* 1561 S> */ B(LdaSmi), I8(1),
- /* 1566 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(254),
+ /* 1566 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(254),
/* 1573 S> */ B(LdaSmi), I8(2),
- /* 1578 E> */ B(Wide), B(StaKeyedPropertyStrict), R16(arg0), R16(arg1), U16(256),
+ /* 1578 E> */ B(Wide), B(StaKeyedProperty), R16(arg0), R16(arg1), U16(256),
B(LdaUndefined),
/* 1583 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
index 8e16be3dc6..4194925e41 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
@@ -231,7 +231,7 @@ bytecodes: [
B(JumpIfUndefined), U8(6),
B(Ldar), R(3),
B(JumpIfNotNull), U8(16),
- B(LdaSmi), I8(63),
+ B(LdaSmi), I8(67),
B(Star), R(4),
B(LdaConstant), U8(1),
B(Star), R(5),
@@ -272,18 +272,16 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 103
+bytecode array length: 96
bytecodes: [
B(Ldar), R(2),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(2), U8(1),
B(PushContext), R(4),
B(RestoreGeneratorState), R(2),
B(Star), R(3),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kAbort), R(4), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(3),
B(Mov), R(closure), R(4),
@@ -319,7 +317,7 @@ bytecodes: [
/* 62 S> */ B(Return),
]
constant pool: [
- Smi [36],
+ Smi [29],
Smi [10],
Smi [7],
]
@@ -335,18 +333,16 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 165
+bytecode array length: 151
bytecodes: [
B(Ldar), R(1),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
B(PushContext), R(3),
B(RestoreGeneratorState), R(1),
B(Star), R(2),
B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kAbort), R(3), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(2),
B(Mov), R(closure), R(3),
@@ -373,10 +369,8 @@ bytecodes: [
B(SwitchOnSmiNoFeedback), U8(4), U8(1), I8(1),
B(LdaSmi), I8(-2),
/* 31 E> */ B(TestEqualStrictNoFeedback), R(2),
- B(JumpIfTrue), U8(11),
- B(LdaSmi), I8(45),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kAbort), R(3), U8(1),
+ B(JumpIfTrue), U8(4),
+ B(Abort), U8(43),
/* 36 S> */ B(LdaSmi), I8(10),
/* 36 E> */ B(TestLessThan), R(0), U8(0),
B(JumpIfFalse), U8(56),
@@ -401,16 +395,16 @@ bytecodes: [
/* 44 S> */ B(Ldar), R(0),
B(Inc), U8(1),
B(Star), R(0),
- B(JumpLoop), U8(79), I8(0),
+ B(JumpLoop), U8(72), I8(0),
B(LdaUndefined),
/* 56 S> */ B(Return),
]
constant pool: [
- Smi [36],
- Smi [67],
+ Smi [29],
+ Smi [60],
Smi [10],
Smi [7],
- Smi [43],
+ Smi [36],
Smi [10],
Smi [7],
]
@@ -424,14 +418,12 @@ snippet: "
}
f();
"
-frame size: 13
+frame size: 12
parameter count: 1
-bytecode array length: 152
+bytecode array length: 137
bytecodes: [
/* 16 E> */ B(StackCheck),
- B(LdaUndefined),
- B(Star), R(4),
- B(CallJSRuntime), U8(%async_function_promise_create), R(4), U8(1),
+ B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
B(Star), R(3),
B(Mov), R(context), R(6),
B(Mov), R(context), R(7),
@@ -447,16 +439,14 @@ bytecodes: [
B(Star), R(1),
B(JumpLoop), U8(17), I8(0),
B(LdaUndefined),
- B(Star), R(8),
- B(LdaUndefined),
- B(Star), R(10),
- B(Mov), R(3), R(9),
- /* 49 E> */ B(CallJSRuntime), U8(%promise_resolve), R(8), U8(3),
+ B(Star), R(9),
+ B(Mov), R(3), R(8),
+ /* 49 E> */ B(CallJSRuntime), U8(%promise_resolve), R(8), U8(2),
B(LdaZero),
B(Star), R(4),
B(Mov), R(3), R(5),
- B(Jump), U8(61),
- B(Jump), U8(45),
+ B(Jump), U8(58),
+ B(Jump), U8(42),
B(Star), R(8),
B(Ldar), R(closure),
B(CreateCatchContext), R(8), U8(0), U8(1),
@@ -465,14 +455,12 @@ bytecodes: [
B(SetPendingMessage),
B(Ldar), R(7),
B(PushContext), R(8),
- B(LdaUndefined),
- B(Star), R(9),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(11),
+ B(Star), R(10),
B(LdaFalse),
- B(Star), R(12),
- B(Mov), R(3), R(10),
- B(CallJSRuntime), U8(%promise_internal_reject), R(9), U8(4),
+ B(Star), R(11),
+ B(Mov), R(3), R(9),
+ B(CallJSRuntime), U8(%promise_internal_reject), R(9), U8(3),
B(PopContext), R(8),
B(LdaZero),
B(Star), R(4),
@@ -488,10 +476,7 @@ bytecodes: [
B(LdaTheHole),
B(SetPendingMessage),
B(Star), R(6),
- B(LdaUndefined),
- B(Star), R(7),
- B(Mov), R(3), R(8),
- B(CallJSRuntime), U8(%async_function_promise_release), R(7), U8(2),
+ B(CallJSRuntime), U8(%async_function_promise_release), R(3), U8(1),
B(Ldar), R(6),
B(SetPendingMessage),
B(Ldar), R(4),
@@ -511,8 +496,8 @@ constant pool: [
Smi [9],
]
handlers: [
- [13, 105, 113],
- [16, 60, 62],
+ [10, 96, 104],
+ [13, 54, 56],
]
---
@@ -522,20 +507,18 @@ snippet: "
}
f();
"
-frame size: 13
+frame size: 12
parameter count: 1
-bytecode array length: 260
+bytecode array length: 231
bytecodes: [
B(Ldar), R(1),
- B(JumpIfUndefined), U8(25),
+ B(JumpIfUndefined), U8(18),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
B(PushContext), R(4),
B(RestoreGeneratorState), R(1),
B(Star), R(3),
B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(LdaSmi), I8(45),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kAbort), R(4), U8(1),
+ B(Abort), U8(43),
B(LdaSmi), I8(-2),
B(Star), R(3),
B(Mov), R(closure), R(4),
@@ -543,9 +526,7 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
B(Star), R(1),
/* 16 E> */ B(StackCheck),
- B(LdaUndefined),
- B(Star), R(4),
- B(CallJSRuntime), U8(%async_function_promise_create), R(4), U8(1),
+ B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
B(Star), R(2),
B(Mov), R(context), R(6),
B(Mov), R(context), R(7),
@@ -555,10 +536,8 @@ bytecodes: [
B(SwitchOnSmiNoFeedback), U8(1), U8(1), I8(0),
B(LdaSmi), I8(-2),
/* 36 E> */ B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrue), U8(11),
- B(LdaSmi), I8(45),
- B(Star), R(8),
- B(CallRuntime), U16(Runtime::kAbort), R(8), U8(1),
+ B(JumpIfTrue), U8(4),
+ B(Abort), U8(43),
/* 41 S> */ B(LdaSmi), I8(10),
/* 41 E> */ B(TestLessThan), R(0), U8(0),
B(JumpIfFalse), U8(59),
@@ -584,18 +563,16 @@ bytecodes: [
/* 49 S> */ B(Ldar), R(0),
B(Inc), U8(1),
B(Star), R(0),
- B(JumpLoop), U8(82), I8(0),
- B(LdaUndefined),
- B(Star), R(8),
+ B(JumpLoop), U8(75), I8(0),
B(LdaUndefined),
- B(Star), R(10),
- B(Mov), R(2), R(9),
- /* 49 E> */ B(CallJSRuntime), U8(%promise_resolve), R(8), U8(3),
+ B(Star), R(9),
+ B(Mov), R(2), R(8),
+ /* 49 E> */ B(CallJSRuntime), U8(%promise_resolve), R(8), U8(2),
B(LdaZero),
B(Star), R(4),
B(Mov), R(2), R(5),
- B(Jump), U8(61),
- B(Jump), U8(45),
+ B(Jump), U8(58),
+ B(Jump), U8(42),
B(Star), R(8),
B(Ldar), R(closure),
B(CreateCatchContext), R(8), U8(2), U8(3),
@@ -604,14 +581,12 @@ bytecodes: [
B(SetPendingMessage),
B(Ldar), R(7),
B(PushContext), R(8),
- B(LdaUndefined),
- B(Star), R(9),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(11),
+ B(Star), R(10),
B(LdaFalse),
- B(Star), R(12),
- B(Mov), R(2), R(10),
- B(CallJSRuntime), U8(%promise_internal_reject), R(9), U8(4),
+ B(Star), R(11),
+ B(Mov), R(2), R(9),
+ B(CallJSRuntime), U8(%promise_internal_reject), R(9), U8(3),
B(PopContext), R(8),
B(LdaZero),
B(Star), R(4),
@@ -627,10 +602,7 @@ bytecodes: [
B(LdaTheHole),
B(SetPendingMessage),
B(Star), R(6),
- B(LdaUndefined),
- B(Star), R(7),
- B(Mov), R(2), R(8),
- B(CallJSRuntime), U8(%async_function_promise_release), R(7), U8(2),
+ B(CallJSRuntime), U8(%async_function_promise_release), R(2), U8(1),
B(Ldar), R(6),
B(SetPendingMessage),
B(Ldar), R(4),
@@ -644,15 +616,15 @@ bytecodes: [
/* 61 S> */ B(Return),
]
constant pool: [
- Smi [48],
- Smi [46],
+ Smi [38],
+ Smi [39],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
FIXED_ARRAY_TYPE,
Smi [6],
Smi [9],
]
handlers: [
- [56, 213, 221],
- [59, 168, 170],
+ [46, 190, 198],
+ [49, 148, 150],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
index 7c5e447a81..0d36442d47 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
@@ -91,34 +91,27 @@ snippet: "
test = new B(1, 2, 3).constructor;
})();
"
-frame size: 10
+frame size: 8
parameter count: 1
-bytecode array length: 72
+bytecode array length: 60
bytecodes: [
B(CreateRestParameter),
B(Star), R(2),
B(Mov), R(closure), R(1),
/* 128 E> */ B(StackCheck),
B(Mov), R(2), R(3),
- /* 140 S> */ B(LdaUndefined),
+ /* 140 S> */ B(CallRuntime), U16(Runtime::k_GetSuperConstructor), R(closure), U8(1),
B(Star), R(4),
- /* 140 E> */ B(CallRuntime), U16(Runtime::k_GetSuperConstructor), R(closure), U8(1),
+ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star), R(5),
- B(LdaUndefined),
+ /* 152 E> */ B(CallJSRuntime), U8(%spread_iterable), R(2), U8(1),
B(Star), R(6),
- B(CreateArrayLiteral), U8(0), U8(0), U8(37),
- B(Star), R(7),
- B(LdaUndefined),
- B(Star), R(8),
- B(Mov), R(2), R(9),
- /* 152 E> */ B(CallJSRuntime), U8(%spread_iterable), R(8), U8(2),
- B(Star), R(8),
B(CreateArrayLiteral), U8(1), U8(1), U8(37),
- B(Star), R(9),
- B(CallJSRuntime), U8(%spread_arguments), R(6), U8(4),
- B(Star), R(6),
- B(Mov), R(0), R(7),
- /* 140 E> */ B(CallJSRuntime), U8(%reflect_construct), R(4), U8(4),
+ B(Star), R(7),
+ B(CallJSRuntime), U8(%spread_arguments), R(5), U8(3),
+ B(Star), R(5),
+ B(Mov), R(0), R(6),
+ /* 140 E> */ B(CallJSRuntime), U8(%reflect_construct), R(4), U8(3),
B(Star), R(4),
B(Ldar), R(this),
/* 140 E> */ B(ThrowSuperAlreadyCalledIfNotHole),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden
index 0aa21aba35..a853183351 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden
@@ -13,20 +13,19 @@ snippet: "
case 2: return 3;
}
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 34
+bytecode array length: 32
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- B(Star), R(1),
/* 45 S> */ B(LdaSmi), I8(1),
- B(TestEqualStrict), R(1), U8(0),
- B(Mov), R(0), R(2),
+ B(TestEqualStrict), R(0), U8(0),
+ B(Mov), R(0), R(1),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(2), U8(1),
+ B(TestEqualStrict), R(1), U8(1),
B(JumpIfTrue), U8(7),
B(Jump), U8(8),
/* 66 S> */ B(LdaSmi), I8(2),
@@ -49,20 +48,19 @@ snippet: "
case 2: a = 3; break;
}
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 40
+bytecode array length: 38
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- B(Star), R(1),
/* 45 S> */ B(LdaSmi), I8(1),
- B(TestEqualStrict), R(1), U8(0),
- B(Mov), R(0), R(2),
+ B(TestEqualStrict), R(0), U8(0),
+ B(Mov), R(0), R(1),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(2), U8(1),
+ B(TestEqualStrict), R(1), U8(1),
B(JumpIfTrue), U8(10),
B(Jump), U8(14),
/* 66 S> */ B(LdaSmi), I8(2),
@@ -87,20 +85,19 @@ snippet: "
case 2: a = 3; break;
}
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 38
+bytecode array length: 36
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- B(Star), R(1),
/* 45 S> */ B(LdaSmi), I8(1),
- B(TestEqualStrict), R(1), U8(0),
- B(Mov), R(0), R(2),
+ B(TestEqualStrict), R(0), U8(0),
+ B(Mov), R(0), R(1),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(2), U8(1),
+ B(TestEqualStrict), R(1), U8(1),
B(JumpIfTrue), U8(8),
B(Jump), U8(12),
/* 66 S> */ B(LdaSmi), I8(2),
@@ -125,20 +122,19 @@ snippet: "
default: a = 1; break;
}
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 38
+bytecode array length: 36
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- B(Star), R(1),
/* 45 S> */ B(LdaSmi), I8(2),
- B(TestEqualStrict), R(1), U8(0),
- B(Mov), R(0), R(2),
+ B(TestEqualStrict), R(0), U8(0),
+ B(Mov), R(0), R(1),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(3),
- B(TestEqualStrict), R(2), U8(1),
+ B(TestEqualStrict), R(1), U8(1),
B(JumpIfTrue), U8(6),
B(Jump), U8(6),
/* 66 S> */ B(Jump), U8(10),
@@ -163,21 +159,20 @@ snippet: "
default: a = 3; break;
}
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 47
+bytecode array length: 44
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- /* 42 E> */ B(TypeOf),
+ /* 45 S> */ B(TypeOf),
B(Star), R(1),
- /* 45 S> */ B(LdaSmi), I8(2),
+ B(LdaSmi), I8(2),
B(TestEqualStrict), R(1), U8(0),
- B(Mov), R(1), R(2),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(3),
- B(TestEqualStrict), R(2), U8(1),
+ B(TestEqualStrict), R(1), U8(1),
B(JumpIfTrue), U8(10),
B(Jump), U8(14),
/* 74 S> */ B(LdaSmi), I8(1),
@@ -205,17 +200,16 @@ snippet: "
default: a = 2; break;
}
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 32
+bytecode array length: 30
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- B(Star), R(1),
/* 45 S> */ B(TypeOf),
- B(TestEqualStrict), R(1), U8(0),
- B(Mov), R(0), R(2),
+ B(TestEqualStrict), R(0), U8(0),
+ B(Mov), R(0), R(1),
B(JumpIfTrue), U8(4),
B(Jump), U8(8),
/* 74 S> */ B(LdaSmi), I8(1),
@@ -307,20 +301,19 @@ snippet: "
break;
}
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 292
+bytecode array length: 290
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- B(Star), R(1),
/* 45 S> */ B(LdaSmi), I8(1),
- B(TestEqualStrict), R(1), U8(0),
- B(Mov), R(0), R(2),
+ B(TestEqualStrict), R(0), U8(0),
+ B(Mov), R(0), R(1),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(2), U8(1),
+ B(TestEqualStrict), R(1), U8(1),
B(JumpIfTrueConstant), U8(0),
B(JumpConstant), U8(1),
/* 68 S> */ B(LdaSmi), I8(2),
@@ -477,28 +470,26 @@ snippet: "
case 2: a = 3;
}
"
-frame size: 5
+frame size: 3
parameter count: 1
-bytecode array length: 63
+bytecode array length: 58
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- B(Star), R(2),
/* 45 S> */ B(LdaSmi), I8(1),
- B(TestEqualStrict), R(2), U8(3),
- B(Mov), R(0), R(3),
+ B(TestEqualStrict), R(0), U8(3),
+ B(Mov), R(0), R(1),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(3), U8(4),
- B(JumpIfTrue), U8(35),
- B(Jump), U8(37),
- B(Ldar), R(0),
+ B(TestEqualStrict), R(1), U8(4),
+ B(JumpIfTrue), U8(32),
+ B(Jump), U8(34),
+ /* 70 S> */ B(Ldar), R(0),
/* 79 E> */ B(AddSmi), I8(1), U8(0),
- B(Star), R(1),
- /* 70 S> */ B(LdaSmi), I8(2),
- B(TestEqualStrict), R(1), U8(1),
- B(Mov), R(1), R(4),
+ B(Star), R(2),
+ B(LdaSmi), I8(2),
+ B(TestEqualStrict), R(2), U8(1),
B(JumpIfTrue), U8(4),
B(Jump), U8(8),
/* 101 S> */ B(LdaSmi), I8(1),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden
index 8c6dc7d450..2297a7fdc2 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden
@@ -119,12 +119,12 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(13),
B(Star), R(0),
- /* 53 S> */ B(BitwiseXorSmi), I8(-1), U8(0),
+ /* 53 S> */ B(BitwiseNot), U8(0),
/* 56 S> */ B(Return),
]
constant pool: [
@@ -139,12 +139,12 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(13),
B(Star), R(0),
- /* 53 S> */ B(MulSmi), I8(1), U8(0),
+ /* 53 S> */ B(ToNumber), U8(0),
/* 56 S> */ B(Return),
]
constant pool: [
@@ -159,12 +159,12 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(13),
B(Star), R(0),
- /* 53 S> */ B(MulSmi), I8(-1), U8(0),
+ /* 53 S> */ B(Negate), U8(0),
/* 56 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
index b0a0f72acb..94450515ce 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
@@ -1085,7 +1085,7 @@ snippet: "
"
frame size: 163
parameter count: 1
-bytecode array length: 85
+bytecode array length: 89
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 1503 S> */ B(Wide), B(LdaSmi), I16(1234),
@@ -1093,10 +1093,11 @@ bytecodes: [
/* 1518 S> */ B(LdaZero),
B(Star), R(1),
/* 1534 S> */ B(Ldar), R(0),
- B(JumpIfUndefined), U8(70),
- B(JumpIfNull), U8(68),
+ B(JumpIfUndefined), U8(74),
+ B(JumpIfNull), U8(72),
B(Wide), B(ToObject), R16(157),
- B(Wide), B(ForInPrepare), R16(157), R16(158),
+ B(Wide), B(ForInEnumerate), R16(157),
+ B(Wide), B(ForInPrepare), R16(158), U16(1),
B(LdaZero),
B(Wide), B(Star), R16(161),
/* 1526 S> */ B(Wide), B(ForInContinue), R16(161), R16(160),
diff --git a/deps/v8/test/cctest/interpreter/interpreter-tester.cc b/deps/v8/test/cctest/interpreter/interpreter-tester.cc
index fe50f1f12f..d112511d22 100644
--- a/deps/v8/test/cctest/interpreter/interpreter-tester.cc
+++ b/deps/v8/test/cctest/interpreter/interpreter-tester.cc
@@ -68,6 +68,8 @@ std::string InterpreterTester::function_name() {
return std::string(kFunctionName);
}
+const char InterpreterTester::kFunctionName[] = "f";
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/interpreter/interpreter-tester.h b/deps/v8/test/cctest/interpreter/interpreter-tester.h
index 01831e8918..8bc6e67a32 100644
--- a/deps/v8/test/cctest/interpreter/interpreter-tester.h
+++ b/deps/v8/test/cctest/interpreter/interpreter-tester.h
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#ifndef V8_TEST_CCTEST_INTERPRETER_INTERPRETER_TESTER_H_
+#define V8_TEST_CCTEST_INTERPRETER_INTERPRETER_TESTER_H_
+
#include "src/v8.h"
#include "src/api.h"
@@ -46,10 +49,6 @@ class InterpreterCallable {
Handle<JSFunction> function_;
};
-namespace {
-const char kFunctionName[] = "f";
-} // namespace
-
class InterpreterTester {
public:
InterpreterTester(Isolate* isolate, const char* source,
@@ -82,6 +81,8 @@ class InterpreterTester {
static std::string function_name();
+ static const char kFunctionName[];
+
private:
Isolate* isolate_;
const char* source_;
@@ -109,8 +110,7 @@ class InterpreterTester {
source += "){})";
function = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
*v8::Local<v8::Function>::Cast(CompileRun(source.c_str()))));
- function->ReplaceCode(
- *BUILTIN_CODE(isolate_, InterpreterEntryTrampoline));
+ function->set_code(*BUILTIN_CODE(isolate_, InterpreterEntryTrampoline));
}
if (!bytecode_.is_null()) {
@@ -131,3 +131,5 @@ class InterpreterTester {
} // namespace interpreter
} // namespace internal
} // namespace v8
+
+#endif // V8_TEST_CCTEST_INTERPRETER_INTERPRETER_TESTER_H_
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index 69ae4bb297..50e7034686 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -2675,6 +2675,29 @@ TEST(StringConcat) {
LoadGolden("StringConcat.golden")));
}
+#undef XSTR
+#undef STR
+#undef UNIQUE_VAR
+#undef REPEAT_2
+#undef REPEAT_4
+#undef REPEAT_8
+#undef REPEAT_16
+#undef REPEAT_32
+#undef REPEAT_64
+#undef REPEAT_128
+#undef REPEAT_256
+#undef REPEAT_127
+#undef REPEAT_249
+#undef REPEAT_2_UNIQUE_VARS
+#undef REPEAT_4_UNIQUE_VARS
+#undef REPEAT_8_UNIQUE_VARS
+#undef REPEAT_16_UNIQUE_VARS
+#undef REPEAT_32_UNIQUE_VARS
+#undef REPEAT_64_UNIQUE_VARS
+#undef REPEAT_128_UNIQUE_VARS
+#undef REPEAT_250_UNIQUE_VARS
+#undef FUNC_ARG
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter.cc b/deps/v8/test/cctest/interpreter/test-interpreter.cc
index 69839d4d68..e1134e85b1 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter.cc
@@ -811,8 +811,8 @@ TEST(InterpreterUnaryOpFeedback) {
Handle<Object> any_feedback_value;
};
TestCase const kTestCases[] = {
- {Token::Value::ADD, smi_one, smi_max, number, str},
- {Token::Value::SUB, smi_one, smi_min, number, str}};
+ {Token::Value::INC, smi_one, smi_max, number, str},
+ {Token::Value::DEC, smi_one, smi_min, number, str}};
for (TestCase const& test_case : kTestCases) {
BytecodeArrayBuilder builder(isolate, zone, 4, 0);
@@ -826,13 +826,13 @@ TEST(InterpreterUnaryOpFeedback) {
i::NewFeedbackMetadata(isolate, &feedback_spec);
builder.LoadAccumulatorWithRegister(builder.Receiver())
- .CountOperation(test_case.op, GetIndex(slot0))
+ .UnaryOperation(test_case.op, GetIndex(slot0))
.LoadAccumulatorWithRegister(builder.Parameter(0))
- .CountOperation(test_case.op, GetIndex(slot1))
+ .UnaryOperation(test_case.op, GetIndex(slot1))
.LoadAccumulatorWithRegister(builder.Parameter(1))
- .CountOperation(test_case.op, GetIndex(slot2))
+ .UnaryOperation(test_case.op, GetIndex(slot2))
.LoadAccumulatorWithRegister(builder.Parameter(2))
- .CountOperation(test_case.op, GetIndex(slot3))
+ .UnaryOperation(test_case.op, GetIndex(slot3))
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
diff --git a/deps/v8/test/cctest/interpreter/test-source-positions.cc b/deps/v8/test/cctest/interpreter/test-source-positions.cc
index 4e8e37ec8f..6d9eff8685 100644
--- a/deps/v8/test/cctest/interpreter/test-source-positions.cc
+++ b/deps/v8/test/cctest/interpreter/test-source-positions.cc
@@ -4,6 +4,7 @@
#include "src/v8.h"
+#include "src/api.h"
#include "src/compiler/pipeline.h"
#include "src/handles.h"
#include "src/interpreter/bytecode-generator.h"
diff --git a/deps/v8/test/cctest/parsing/test-preparser.cc b/deps/v8/test/cctest/parsing/test-preparser.cc
index 97e4b28e5e..3db7fb99d6 100644
--- a/deps/v8/test/cctest/parsing/test-preparser.cc
+++ b/deps/v8/test/cctest/parsing/test-preparser.cc
@@ -182,6 +182,14 @@ TEST(PreParserScopeAnalysis) {
{"let var1 = function() { let var2; }"},
{"const var1 = function() { let var2; }"},
+ {"function *f1() { let var2; }"},
+ {"let var1 = function *f1() { let var2; }"},
+ {"let var1 = function*() { let var2; }"},
+
+ {"async function f1() { let var2; }"},
+ {"let var1 = async function f1() { let var2; }"},
+ {"let var1 = async function() { let var2; }"},
+
// Redeclarations.
{"var var1; var var1;"},
{"var var1; var var1; var1 = 5;"},
@@ -451,6 +459,12 @@ TEST(PreParserScopeAnalysis) {
{"if (true) { function f1() {} function f2() { f1(); } }"},
+ {"if (true) { function *f1() {} }"},
+ {"if (true) { async function f1() {} }"},
+
+ // (Potentially sloppy) block function shadowing a catch variable.
+ {"try { } catch(var1) { if (true) { function var1() {} } }"},
+
// Simple parameters.
{"var1", ""},
{"var1", "var1;"},
@@ -788,3 +802,35 @@ TEST(Regress753896) {
// error is not detected inside lazy functions, but it might be in the future.
i::parsing::ParseProgram(&info, isolate);
}
+
+TEST(ProducingAndConsumingByteData) {
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::HandleScope scope(isolate);
+ LocalContext env;
+
+ i::Zone zone(isolate->allocator(), ZONE_NAME);
+ i::ProducedPreParsedScopeData::ByteData bytes(&zone);
+ // Write some data.
+ bytes.WriteUint32(1983); // This will be overwritten.
+ bytes.WriteUint32(2147483647);
+ bytes.WriteUint8(4);
+ bytes.WriteUint8(255);
+ bytes.WriteUint32(0);
+ bytes.WriteUint8(0);
+ bytes.OverwriteFirstUint32(2017);
+ bytes.WriteUint8(100);
+
+ i::Handle<i::PodArray<uint8_t>> data_on_heap = bytes.Serialize(isolate);
+ i::ConsumedPreParsedScopeData::ByteData bytes_for_reading;
+ i::ConsumedPreParsedScopeData::ByteData::ReadingScope reading_scope(
+ &bytes_for_reading, *data_on_heap);
+
+ // Read the data back.
+ CHECK_EQ(bytes_for_reading.ReadUint32(), 2017);
+ CHECK_EQ(bytes_for_reading.ReadUint32(), 2147483647);
+ CHECK_EQ(bytes_for_reading.ReadUint8(), 4);
+ CHECK_EQ(bytes_for_reading.ReadUint8(), 255);
+ CHECK_EQ(bytes_for_reading.ReadUint32(), 0);
+ CHECK_EQ(bytes_for_reading.ReadUint8(), 0);
+ CHECK_EQ(bytes_for_reading.ReadUint8(), 100);
+}
diff --git a/deps/v8/test/cctest/parsing/test-scanner-streams.cc b/deps/v8/test/cctest/parsing/test-scanner-streams.cc
index d892442c35..27fc086487 100644
--- a/deps/v8/test/cctest/parsing/test-scanner-streams.cc
+++ b/deps/v8/test/cctest/parsing/test-scanner-streams.cc
@@ -461,6 +461,25 @@ TEST(Regress651333) {
}
}
+void TestChunkStreamAgainstReference(
+ const char* cases[],
+ const std::vector<std::vector<uint16_t>>& unicode_expected) {
+ for (size_t c = 0; c < unicode_expected.size(); ++c) {
+ ChunkSource chunk_source(cases[c]);
+ std::unique_ptr<i::Utf16CharacterStream> stream(i::ScannerStream::For(
+ &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8, nullptr));
+ for (size_t i = 0; i < unicode_expected[c].size(); i++) {
+ CHECK_EQ(unicode_expected[c][i], stream->Advance());
+ }
+ CHECK_EQ(i::Utf16CharacterStream::kEndOfInput, stream->Advance());
+ stream->Seek(0);
+ for (size_t i = 0; i < unicode_expected[c].size(); i++) {
+ CHECK_EQ(unicode_expected[c][i], stream->Advance());
+ }
+ CHECK_EQ(i::Utf16CharacterStream::kEndOfInput, stream->Advance());
+ }
+}
+
TEST(Regress6377) {
const char* cases[] = {
"\xf0\x90\0" // first chunk - start of 4-byte seq
@@ -480,22 +499,54 @@ TEST(Regress6377) {
"a\xc3\0" // and an 'a' + start of 2-byte seq
"\xbf\0", // third chunk - end of 2-byte seq
};
- const std::vector<std::vector<uint16_t>> unicode = {
+ const std::vector<std::vector<uint16_t>> unicode_expected = {
{0xd800, 0xdc00, 97}, {0xfff, 97}, {0xff, 97}, {0xd800, 0xdc00, 97, 0xff},
};
- CHECK_EQ(unicode.size(), sizeof(cases) / sizeof(cases[0]));
- for (size_t c = 0; c < unicode.size(); ++c) {
- ChunkSource chunk_source(cases[c]);
- std::unique_ptr<i::Utf16CharacterStream> stream(i::ScannerStream::For(
- &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8, nullptr));
- for (size_t i = 0; i < unicode[c].size(); i++) {
- CHECK_EQ(unicode[c][i], stream->Advance());
- }
- CHECK_EQ(i::Utf16CharacterStream::kEndOfInput, stream->Advance());
- stream->Seek(0);
- for (size_t i = 0; i < unicode[c].size(); i++) {
- CHECK_EQ(unicode[c][i], stream->Advance());
- }
- CHECK_EQ(i::Utf16CharacterStream::kEndOfInput, stream->Advance());
- }
+ CHECK_EQ(unicode_expected.size(), arraysize(cases));
+ TestChunkStreamAgainstReference(cases, unicode_expected);
+}
+
+TEST(Regress6836) {
+ const char* cases[] = {
+ // 0xc2 is a lead byte, but there's no continuation. The bug occurs when
+ // this happens near the chunk end.
+ "X\xc2Y\0",
+ // Last chunk ends with a 2-byte char lead.
+ "X\xc2\0",
+ // Last chunk ends with a 3-byte char lead and only one continuation
+ // character.
+ "X\xe0\xbf\0",
+ };
+ const std::vector<std::vector<uint16_t>> unicode_expected = {
+ {0x58, 0xfffd, 0x59}, {0x58, 0xfffd}, {0x58, 0xfffd},
+ };
+ CHECK_EQ(unicode_expected.size(), arraysize(cases));
+ TestChunkStreamAgainstReference(cases, unicode_expected);
+}
+
+TEST(TestOverlongAndInvalidSequences) {
+ const char* cases[] = {
+ // Overlong 2-byte sequence.
+ "X\xc0\xbfY\0",
+ // Another overlong 2-byte sequence.
+ "X\xc1\xbfY\0",
+ // Overlong 3-byte sequence.
+ "X\xe0\x9f\xbfY\0",
+ // Overlong 4-byte sequence.
+ "X\xf0\x89\xbf\xbfY\0",
+ // Invalid 3-byte sequence (reserved for surrogates).
+ "X\xed\xa0\x80Y\0",
+ // Invalid 4-bytes sequence (value out of range).
+ "X\xf4\x90\x80\x80Y\0",
+ };
+ const std::vector<std::vector<uint16_t>> unicode_expected = {
+ {0x58, 0xfffd, 0xfffd, 0x59},
+ {0x58, 0xfffd, 0xfffd, 0x59},
+ {0x58, 0xfffd, 0xfffd, 0xfffd, 0x59},
+ {0x58, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0x59},
+ {0x58, 0xfffd, 0xfffd, 0xfffd, 0x59},
+ {0x58, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0x59},
+ };
+ CHECK_EQ(unicode_expected.size(), arraysize(cases));
+ TestChunkStreamAgainstReference(cases, unicode_expected);
}
diff --git a/deps/v8/test/cctest/parsing/test-scanner.cc b/deps/v8/test/cctest/parsing/test-scanner.cc
index ea7a8fbaa2..9c18bfb1ae 100644
--- a/deps/v8/test/cctest/parsing/test-scanner.cc
+++ b/deps/v8/test/cctest/parsing/test-scanner.cc
@@ -29,6 +29,7 @@ struct ScannerTestHelper {
std::unique_ptr<UnicodeCache> unicode_cache;
std::unique_ptr<Utf16CharacterStream> stream;
std::unique_ptr<Scanner> scanner;
+ int use_counts[v8::Isolate::kUseCounterFeatureCount];
Scanner* operator->() const { return scanner.get(); }
Scanner* get() const { return scanner.get(); }
@@ -38,8 +39,11 @@ ScannerTestHelper make_scanner(const char* src) {
ScannerTestHelper helper;
helper.unicode_cache = std::unique_ptr<UnicodeCache>(new UnicodeCache);
helper.stream = ScannerStream::ForTesting(src);
- helper.scanner =
- std::unique_ptr<Scanner>(new Scanner(helper.unicode_cache.get()));
+ for (int i = 0; i < v8::Isolate::kUseCounterFeatureCount; i++) {
+ helper.use_counts[i] = 0;
+ }
+ helper.scanner = std::unique_ptr<Scanner>(
+ new Scanner(helper.unicode_cache.get(), helper.use_counts));
helper.scanner->Initialize(helper.stream.get(), false);
return helper;
}
diff --git a/deps/v8/test/cctest/setup-isolate-for-tests.cc b/deps/v8/test/cctest/setup-isolate-for-tests.cc
index c09a362eff..ba9c4fb488 100644
--- a/deps/v8/test/cctest/setup-isolate-for-tests.cc
+++ b/deps/v8/test/cctest/setup-isolate-for-tests.cc
@@ -9,19 +9,25 @@
namespace v8 {
namespace internal {
-void SetupIsolateDelegateForTests::SetupBuiltins(Isolate* isolate,
- bool create_heap_objects) {
- if (create_heap_objects) {
+void SetupIsolateDelegateForTests::SetupBuiltins(Isolate* isolate) {
+ if (create_heap_objects_) {
SetupBuiltinsInternal(isolate);
}
}
void SetupIsolateDelegateForTests::SetupInterpreter(
- interpreter::Interpreter* interpreter, bool create_heap_objects) {
- if (create_heap_objects) {
+ interpreter::Interpreter* interpreter) {
+ if (create_heap_objects_) {
interpreter::SetupInterpreter::InstallBytecodeHandlers(interpreter);
}
}
+bool SetupIsolateDelegateForTests::SetupHeap(Heap* heap) {
+ if (create_heap_objects_) {
+ return SetupHeapInternal(heap);
+ }
+ return true;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/setup-isolate-for-tests.h b/deps/v8/test/cctest/setup-isolate-for-tests.h
index 6a49aa13cc..e3d34725f0 100644
--- a/deps/v8/test/cctest/setup-isolate-for-tests.h
+++ b/deps/v8/test/cctest/setup-isolate-for-tests.h
@@ -12,13 +12,15 @@ namespace internal {
class SetupIsolateDelegateForTests : public SetupIsolateDelegate {
public:
- SetupIsolateDelegateForTests() : SetupIsolateDelegate() {}
+ explicit SetupIsolateDelegateForTests(bool create_heap_objects)
+ : SetupIsolateDelegate(create_heap_objects) {}
virtual ~SetupIsolateDelegateForTests() {}
- void SetupBuiltins(Isolate* isolate, bool create_heap_objects) override;
+ void SetupBuiltins(Isolate* isolate) override;
- void SetupInterpreter(interpreter::Interpreter* interpreter,
- bool create_heap_objects) override;
+ void SetupInterpreter(interpreter::Interpreter* interpreter) override;
+
+ bool SetupHeap(Heap* heap) override;
};
} // namespace internal
diff --git a/deps/v8/test/cctest/test-accessor-assembler.cc b/deps/v8/test/cctest/test-accessor-assembler.cc
index 17617affee..0afdde390b 100644
--- a/deps/v8/test/cctest/test-accessor-assembler.cc
+++ b/deps/v8/test/cctest/test-accessor-assembler.cc
@@ -106,9 +106,9 @@ TEST(StubCacheSecondaryOffset) {
namespace {
-Handle<Code> CreateCodeWithFlags(Code::Flags flags) {
+Handle<Code> CreateCodeOfKind(Code::Kind kind) {
Isolate* isolate(CcTest::InitIsolateOnce());
- CodeAssemblerTester data(isolate, flags);
+ CodeAssemblerTester data(isolate, kind);
CodeStubAssembler m(data.state());
m.Return(m.UndefinedConstant());
return data.GenerateCodeCloseAndEscape();
@@ -124,8 +124,7 @@ TEST(TryProbeStubCache) {
CodeAssemblerTester data(isolate, kNumParams);
AccessorAssembler m(data.state());
- Code::Kind ic_kind = Code::LOAD_IC;
- StubCache stub_cache(isolate, ic_kind);
+ StubCache stub_cache(isolate);
stub_cache.Clear();
{
@@ -204,8 +203,7 @@ TEST(TryProbeStubCache) {
// Generate some number of handlers.
for (int i = 0; i < 30; i++) {
- Code::Flags flags = Code::ComputeHandlerFlags(ic_kind);
- handlers.push_back(CreateCodeWithFlags(flags));
+ handlers.push_back(CreateCodeOfKind(Code::STUB));
}
// Ensure that GC does happen because from now on we are going to fill our
diff --git a/deps/v8/test/cctest/test-allocation.cc b/deps/v8/test/cctest/test-allocation.cc
index c06dcc575a..f31b03670a 100644
--- a/deps/v8/test/cctest/test-allocation.cc
+++ b/deps/v8/test/cctest/test-allocation.cc
@@ -54,7 +54,7 @@ size_t GetHugeMemoryAmount() {
static size_t huge_memory = 0;
if (!huge_memory) {
for (int i = 0; i < 100; i++) {
- huge_memory |= bit_cast<size_t>(v8::base::OS::GetRandomMmapAddr());
+ huge_memory |= bit_cast<size_t>(v8::internal::GetRandomMmapAddr());
}
// Make it larger than the available address space.
huge_memory *= 2;
@@ -130,7 +130,7 @@ TEST(AlignedAllocOOM) {
TEST(AllocVirtualMemoryOOM) {
AllocationPlatform platform;
CHECK(!platform.oom_callback_called);
- v8::base::VirtualMemory result;
+ v8::internal::VirtualMemory result;
bool success =
v8::internal::AllocVirtualMemory(GetHugeMemoryAmount(), nullptr, &result);
// On a few systems, allocation somehow succeeds.
@@ -141,7 +141,7 @@ TEST(AllocVirtualMemoryOOM) {
TEST(AlignedAllocVirtualMemoryOOM) {
AllocationPlatform platform;
CHECK(!platform.oom_callback_called);
- v8::base::VirtualMemory result;
+ v8::internal::VirtualMemory result;
bool success = v8::internal::AlignedAllocVirtualMemory(
GetHugeMemoryAmount(), v8::base::OS::AllocateAlignment(), nullptr,
&result);
diff --git a/deps/v8/test/cctest/test-api-interceptors.cc b/deps/v8/test/cctest/test-api-interceptors.cc
index ca9b18016d..48809506f1 100644
--- a/deps/v8/test/cctest/test-api-interceptors.cc
+++ b/deps/v8/test/cctest/test-api-interceptors.cc
@@ -716,21 +716,20 @@ bool define_was_called_in_order = false;
void GetterCallbackOrder(Local<Name> property,
const v8::PropertyCallbackInfo<v8::Value>& info) {
get_was_called_in_order = true;
- CHECK(!define_was_called_in_order);
+ CHECK(define_was_called_in_order);
info.GetReturnValue().Set(property);
}
void DefinerCallbackOrder(Local<Name> property,
const v8::PropertyDescriptor& desc,
const v8::PropertyCallbackInfo<v8::Value>& info) {
- // Get called before DefineProperty because we query the descriptor first.
- CHECK(get_was_called_in_order);
+ CHECK(!get_was_called_in_order); // Define called before get.
define_was_called_in_order = true;
}
} // namespace
-// Check that getter callback is called before definer callback.
+// Check that definer callback is called before getter callback.
THREADED_TEST(DefinerCallbackGetAndDefine) {
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::FunctionTemplate> templ =
@@ -3327,12 +3326,18 @@ static void NamedEnum(const v8::PropertyCallbackInfo<v8::Array>& info) {
ApiTestFuzzer::Fuzz();
v8::Local<v8::Array> result = v8::Array::New(info.GetIsolate(), 3);
v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
- result->Set(context, v8::Integer::New(info.GetIsolate(), 0), v8_str("foo"))
- .FromJust();
- result->Set(context, v8::Integer::New(info.GetIsolate(), 1), v8_str("bar"))
- .FromJust();
- result->Set(context, v8::Integer::New(info.GetIsolate(), 2), v8_str("baz"))
- .FromJust();
+ CHECK(
+ result
+ ->Set(context, v8::Integer::New(info.GetIsolate(), 0), v8_str("foo"))
+ .FromJust());
+ CHECK(
+ result
+ ->Set(context, v8::Integer::New(info.GetIsolate(), 1), v8_str("bar"))
+ .FromJust());
+ CHECK(
+ result
+ ->Set(context, v8::Integer::New(info.GetIsolate(), 2), v8_str("baz"))
+ .FromJust());
info.GetReturnValue().Set(result);
}
@@ -3341,10 +3346,12 @@ static void IndexedEnum(const v8::PropertyCallbackInfo<v8::Array>& info) {
ApiTestFuzzer::Fuzz();
v8::Local<v8::Array> result = v8::Array::New(info.GetIsolate(), 2);
v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
- result->Set(context, v8::Integer::New(info.GetIsolate(), 0), v8_str("0"))
- .FromJust();
- result->Set(context, v8::Integer::New(info.GetIsolate(), 1), v8_str("1"))
- .FromJust();
+ CHECK(
+ result->Set(context, v8::Integer::New(info.GetIsolate(), 0), v8_str("0"))
+ .FromJust());
+ CHECK(
+ result->Set(context, v8::Integer::New(info.GetIsolate(), 1), v8_str("1"))
+ .FromJust());
info.GetReturnValue().Set(result);
}
@@ -4965,6 +4972,262 @@ THREADED_TEST(NonMaskingInterceptorPrototypePropertyIC) {
ExpectInt32("f(outer)", 4);
}
+namespace {
+
+void ConcatNamedPropertyGetter(
+ Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ info.GetReturnValue().Set(
+ // Return the property name concatenated with itself.
+ String::Concat(name.As<String>(), name.As<String>()));
+}
+
+void ConcatIndexedPropertyGetter(
+ uint32_t index, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ info.GetReturnValue().Set(
+ // Return the index doubled, as a JS number.
+ v8_num(index + index));
+}
+
+void EnumCallbackWithNames(const v8::PropertyCallbackInfo<v8::Array>& info) {
+ ApiTestFuzzer::Fuzz();
+ v8::Local<v8::Array> result = v8::Array::New(info.GetIsolate(), 4);
+ v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
+ CHECK(
+ result
+ ->Set(context, v8::Integer::New(info.GetIsolate(), 0), v8_str("foo"))
+ .FromJust());
+ CHECK(
+ result
+ ->Set(context, v8::Integer::New(info.GetIsolate(), 1), v8_str("bar"))
+ .FromJust());
+ CHECK(
+ result
+ ->Set(context, v8::Integer::New(info.GetIsolate(), 2), v8_str("baz"))
+ .FromJust());
+ CHECK(
+ result->Set(context, v8::Integer::New(info.GetIsolate(), 3), v8_str("10"))
+ .FromJust());
+
+ // Create a holey array.
+ CHECK(result->Delete(context, v8::Integer::New(info.GetIsolate(), 1))
+ .FromJust());
+ info.GetReturnValue().Set(result);
+}
+
+void EnumCallbackWithIndices(const v8::PropertyCallbackInfo<v8::Array>& info) {
+ ApiTestFuzzer::Fuzz();
+ v8::Local<v8::Array> result = v8::Array::New(info.GetIsolate(), 4);
+ v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
+
+ CHECK(result->Set(context, v8::Integer::New(info.GetIsolate(), 0), v8_num(10))
+ .FromJust());
+ CHECK(result->Set(context, v8::Integer::New(info.GetIsolate(), 1), v8_num(11))
+ .FromJust());
+ CHECK(result->Set(context, v8::Integer::New(info.GetIsolate(), 2), v8_num(12))
+ .FromJust());
+ CHECK(result->Set(context, v8::Integer::New(info.GetIsolate(), 3), v8_num(14))
+ .FromJust());
+
+ // Create a holey array.
+ CHECK(result->Delete(context, v8::Integer::New(info.GetIsolate(), 1))
+ .FromJust());
+ info.GetReturnValue().Set(result);
+}
+
+void RestrictiveNamedQuery(Local<Name> property,
+ const v8::PropertyCallbackInfo<v8::Integer>& info) {
+ // Only "foo" is enumerable.
+ if (v8_str("foo")
+ ->Equals(info.GetIsolate()->GetCurrentContext(), property)
+ .FromJust()) {
+ info.GetReturnValue().Set(v8::PropertyAttribute::None);
+ return;
+ }
+ info.GetReturnValue().Set(v8::PropertyAttribute::DontEnum);
+}
+
+void RestrictiveIndexedQuery(
+ uint32_t index, const v8::PropertyCallbackInfo<v8::Integer>& info) {
+ // Only indices 2 and 12 are enumerable.
+ if (index == 2 || index == 12) {
+ info.GetReturnValue().Set(v8::PropertyAttribute::None);
+ return;
+ }
+ info.GetReturnValue().Set(v8::PropertyAttribute::DontEnum);
+}
+} // namespace
+
+// Regression test for V8 bug 6627.
+// Object.keys() must return enumerable keys only.
+THREADED_TEST(EnumeratorsAndUnenumerableNamedProperties) {
+ // The enumerator interceptor returns a list of items that is then
+ // filtered according to the attributes reported by the query
+ // interceptor.
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
+ obj->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ ConcatNamedPropertyGetter, NULL, RestrictiveNamedQuery, NULL,
+ EnumCallbackWithNames));
+ LocalContext context;
+ context->Global()
+ ->Set(context.local(), v8_str("obj"),
+ obj->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+
+ ExpectInt32("Object.getOwnPropertyNames(obj).length", 3);
+ ExpectString("Object.getOwnPropertyNames(obj)[0]", "foo");
+ ExpectString("Object.getOwnPropertyNames(obj)[1]", "baz");
+ ExpectString("Object.getOwnPropertyNames(obj)[2]", "10");
+
+ ExpectTrue("Object.getOwnPropertyDescriptor(obj, 'foo').enumerable");
+ ExpectFalse("Object.getOwnPropertyDescriptor(obj, 'baz').enumerable");
+
+ ExpectInt32("Object.entries(obj).length", 1);
+ ExpectString("Object.entries(obj)[0][0]", "foo");
+ ExpectString("Object.entries(obj)[0][1]", "foofoo");
+
+ ExpectInt32("Object.keys(obj).length", 1);
+ ExpectString("Object.keys(obj)[0]", "foo");
+
+ ExpectInt32("Object.values(obj).length", 1);
+ ExpectString("Object.values(obj)[0]", "foofoo");
+}
+
+namespace {
+void QueryInterceptorForFoo(Local<Name> property,
+ const v8::PropertyCallbackInfo<v8::Integer>& info) {
+ // Don't intercept anything except "foo".
+ if (!v8_str("foo")
+ ->Equals(info.GetIsolate()->GetCurrentContext(), property)
+ .FromJust()) {
+ return;
+ }
+ // "foo" is enumerable.
+ info.GetReturnValue().Set(v8::PropertyAttribute::None);
+}
+} // namespace
+
+// Test that calls to the query interceptor are independent of each
+// other.
+THREADED_TEST(EnumeratorsAndUnenumerableNamedPropertiesWithoutSet) {
+ // The enumerator interceptor returns a list of items that is then
+ // filtered according to the attributes reported by the query
+ // interceptor.
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
+ obj->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ ConcatNamedPropertyGetter, NULL, QueryInterceptorForFoo, NULL,
+ EnumCallbackWithNames));
+ LocalContext context;
+ context->Global()
+ ->Set(context.local(), v8_str("obj"),
+ obj->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+
+ ExpectInt32("Object.getOwnPropertyNames(obj).length", 3);
+ ExpectString("Object.getOwnPropertyNames(obj)[0]", "foo");
+ ExpectString("Object.getOwnPropertyNames(obj)[1]", "baz");
+ ExpectString("Object.getOwnPropertyNames(obj)[2]", "10");
+
+ ExpectTrue("Object.getOwnPropertyDescriptor(obj, 'foo').enumerable");
+ ExpectInt32("Object.keys(obj).length", 1);
+}
+
+THREADED_TEST(EnumeratorsAndUnenumerableIndexedPropertiesArgumentsElements) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
+ obj->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+ ConcatIndexedPropertyGetter, NULL, RestrictiveIndexedQuery, NULL,
+ SloppyArgsIndexedPropertyEnumerator));
+ LocalContext context;
+ context->Global()
+ ->Set(context.local(), v8_str("obj"),
+ obj->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+
+ ExpectInt32("Object.getOwnPropertyNames(obj).length", 4);
+ ExpectString("Object.getOwnPropertyNames(obj)[0]", "0");
+ ExpectString("Object.getOwnPropertyNames(obj)[1]", "1");
+ ExpectString("Object.getOwnPropertyNames(obj)[2]", "2");
+ ExpectString("Object.getOwnPropertyNames(obj)[3]", "3");
+
+ ExpectTrue("Object.getOwnPropertyDescriptor(obj, '2').enumerable");
+
+ ExpectInt32("Object.entries(obj).length", 1);
+ ExpectString("Object.entries(obj)[0][0]", "2");
+ ExpectInt32("Object.entries(obj)[0][1]", 4);
+
+ ExpectInt32("Object.keys(obj).length", 1);
+ ExpectString("Object.keys(obj)[0]", "2");
+
+ ExpectInt32("Object.values(obj).length", 1);
+ ExpectInt32("Object.values(obj)[0]", 4);
+}
+
+THREADED_TEST(EnumeratorsAndUnenumerableIndexedProperties) {
+ // The enumerator interceptor returns a list of items that is then
+ // filtered according to the attributes reported by the query
+ // interceptor.
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
+ obj->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+ ConcatIndexedPropertyGetter, NULL, RestrictiveIndexedQuery, NULL,
+ EnumCallbackWithIndices));
+ LocalContext context;
+ context->Global()
+ ->Set(context.local(), v8_str("obj"),
+ obj->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+
+ ExpectInt32("Object.getOwnPropertyNames(obj).length", 3);
+ ExpectString("Object.getOwnPropertyNames(obj)[0]", "10");
+ ExpectString("Object.getOwnPropertyNames(obj)[1]", "12");
+ ExpectString("Object.getOwnPropertyNames(obj)[2]", "14");
+
+ ExpectFalse("Object.getOwnPropertyDescriptor(obj, '10').enumerable");
+ ExpectTrue("Object.getOwnPropertyDescriptor(obj, '12').enumerable");
+
+ ExpectInt32("Object.entries(obj).length", 1);
+ ExpectString("Object.entries(obj)[0][0]", "12");
+ ExpectInt32("Object.entries(obj)[0][1]", 24);
+
+ ExpectInt32("Object.keys(obj).length", 1);
+ ExpectString("Object.keys(obj)[0]", "12");
+
+ ExpectInt32("Object.values(obj).length", 1);
+ ExpectInt32("Object.values(obj)[0]", 24);
+}
+
+THREADED_TEST(EnumeratorsAndForIn) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
+ obj->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ ConcatNamedPropertyGetter, NULL, RestrictiveNamedQuery, NULL, NamedEnum));
+ LocalContext context;
+ context->Global()
+ ->Set(context.local(), v8_str("obj"),
+ obj->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+
+ ExpectInt32("Object.getOwnPropertyNames(obj).length", 3);
+ ExpectString("Object.getOwnPropertyNames(obj)[0]", "foo");
+
+ ExpectTrue("Object.getOwnPropertyDescriptor(obj, 'foo').enumerable");
+
+ CompileRun(
+ "let concat = '';"
+ "for(var prop in obj) {"
+ " concat += `key:${prop}:value:${obj[prop]}`;"
+ "}");
+
+ // Check that for...in only iterates over enumerable properties.
+ ExpectString("concat", "key:foo:value:foofoo");
+}
namespace {
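
The interceptor tests in this hunk all hinge on the same contract: the enumerator callback proposes candidate property names, and the query callback's attributes decide which of them survive into Object.keys(). A minimal sketch of that filtering, assuming nothing from V8 (Attr, KeysSketch, and the lambda below are illustrative names only):

    #include <functional>
    #include <string>
    #include <vector>

    enum Attr { kNone = 0, kDontEnum = 1 };

    std::vector<std::string> KeysSketch(
        const std::vector<std::string>& enumerated,
        const std::function<Attr(const std::string&)>& query) {
      std::vector<std::string> keys;
      for (const auto& name : enumerated) {
        if (query(name) == kNone) keys.push_back(name);  // Enumerable only.
      }
      return keys;
    }

    // KeysSketch({"foo", "baz", "10"}, [](const std::string& n) {
    //   return n == "foo" ? kNone : kDontEnum;
    // }) yields {"foo"}, matching the Object.keys assertions above.
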
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index cf5be0fa9c..99ab0bfaa1 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -2536,10 +2536,9 @@ THREADED_TEST(AccessorIsPreservedOnAttributeChange) {
LocalContext env;
v8::Local<v8::Value> res = CompileRun("var a = []; a;");
i::Handle<i::JSReceiver> a(v8::Utils::OpenHandle(v8::Object::Cast(*res)));
- CHECK(a->map()->instance_descriptors()->IsFixedArray());
- CHECK_GT(i::FixedArray::cast(a->map()->instance_descriptors())->length(), 0);
+ CHECK_EQ(1, a->map()->instance_descriptors()->number_of_descriptors());
CompileRun("Object.defineProperty(a, 'length', { writable: false });");
- CHECK_EQ(0, i::FixedArray::cast(a->map()->instance_descriptors())->length());
+ CHECK_EQ(0, a->map()->instance_descriptors()->number_of_descriptors());
// But we should still have an AccessorInfo.
i::Handle<i::String> name(v8::Utils::OpenHandle(*v8_str("length")));
i::LookupIterator it(a, name, i::LookupIterator::OWN_SKIP_INTERCEPTOR);
@@ -5781,23 +5780,55 @@ TEST(CustomErrorMessage) {
static void check_custom_rethrowing_message(v8::Local<v8::Message> message,
v8::Local<v8::Value> data) {
+ CHECK(data->IsExternal());
+ int* callcount = static_cast<int*>(data.As<v8::External>()->Value());
+ ++*callcount;
+
const char* uncaught_error = "Uncaught exception";
CHECK(message->Get()
->Equals(CcTest::isolate()->GetCurrentContext(),
v8_str(uncaught_error))
.FromJust());
+ // Test that compiling code inside a message handler works.
+ CHECK(CompileRunChecked(CcTest::isolate(), "(function(a) { return a; })(42)")
+ ->Equals(CcTest::isolate()->GetCurrentContext(),
+ v8::Integer::NewFromUnsigned(CcTest::isolate(), 42))
+ .FromJust());
}
TEST(CustomErrorRethrowsOnToString) {
+ int callcount = 0;
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- context->GetIsolate()->AddMessageListener(check_custom_rethrowing_message);
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ context->GetIsolate()->AddMessageListener(
+ check_custom_rethrowing_message, v8::External::New(isolate, &callcount));
CompileRun(
"var e = { toString: function() { throw e; } };"
"try { throw e; } finally {}");
+ CHECK_EQ(callcount, 1);
+ context->GetIsolate()->RemoveMessageListeners(
+ check_custom_rethrowing_message);
+}
+
+TEST(CustomErrorRethrowsOnToStringInsideVerboseTryCatch) {
+ int callcount = 0;
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::TryCatch try_catch(isolate);
+ try_catch.SetVerbose(true);
+ context->GetIsolate()->AddMessageListener(
+ check_custom_rethrowing_message, v8::External::New(isolate, &callcount));
+
+ CompileRun(
+ "var e = { toString: function() { throw e; } };"
+ "try { throw e; } finally {}");
+
+ CHECK_EQ(callcount, 1);
context->GetIsolate()->RemoveMessageListeners(
check_custom_rethrowing_message);
}
@@ -7613,6 +7644,28 @@ TEST(ErrorReporting) {
CHECK(last_location);
}
+static size_t dcheck_count;
+void DcheckErrorCallback(const char* file, int line, const char* message) {
+ last_message = message;
+ ++dcheck_count;
+}
+
+TEST(DcheckErrorHandler) {
+ V8::SetDcheckErrorHandler(DcheckErrorCallback);
+
+ last_message = nullptr;
+ dcheck_count = 0;
+
+ DCHECK(false && "w00t");
+#ifdef DEBUG
+ CHECK_EQ(dcheck_count, 1);
+ CHECK(last_message);
+ CHECK(std::string(last_message).find("w00t") != std::string::npos);
+#else
+ // The DCHECK should be a no-op in non-DEBUG builds.
+ CHECK_EQ(dcheck_count, 0);
+#endif
+}
static void MissingScriptInfoMessageListener(v8::Local<v8::Message> message,
v8::Local<Value> data) {
@@ -8490,6 +8543,66 @@ static void Utf16Helper(
}
}
+void TestUtf8DecodingAgainstReference(
+ const char* cases[],
+ const std::vector<std::vector<uint16_t>>& unicode_expected) {
+ for (size_t test_ix = 0; test_ix < unicode_expected.size(); ++test_ix) {
+ v8::Local<String> str = v8_str(cases[test_ix]);
+ CHECK_EQ(unicode_expected[test_ix].size(), str->Length());
+
+ std::unique_ptr<uint16_t[]> buffer(new uint16_t[str->Length()]);
+ str->Write(buffer.get(), 0, -1, String::NO_NULL_TERMINATION);
+
+ for (size_t i = 0; i < unicode_expected[test_ix].size(); ++i) {
+ CHECK_EQ(unicode_expected[test_ix][i], buffer[i]);
+ }
+ }
+}
+
+THREADED_TEST(OverlongSequencesAndSurrogates) {
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+
+ const char* cases[] = {
+ // Overlong 2-byte sequence.
+ "X\xc0\xbfY\0",
+ // Another overlong 2-byte sequence.
+ "X\xc1\xbfY\0",
+ // Overlong 3-byte sequence.
+ "X\xe0\x9f\xbfY\0",
+ // Overlong 4-byte sequence.
+ "X\xf0\x89\xbf\xbfY\0",
+ // Invalid 3-byte sequence (reserved for surrogates).
+ "X\xed\xa0\x80Y\0",
+ // Invalid 4-byte sequence (value out of range).
+ "X\xf4\x90\x80\x80Y\0",
+
+ // Start of an overlong 3-byte sequence but not enough continuation bytes.
+ "X\xe0\x9fY\0",
+ // Start of an overlong 4-byte sequence but not enough continuation bytes.
+ "X\xf0\x89\xbfY\0",
+ // Start of an invalid 3-byte sequence (reserved for surrogates) but not
+ // enough continuation bytes.
+ "X\xed\xa0Y\0",
+ // Start of an invalid 4-byte sequence (value out of range) but not
+ // enough continuation bytes.
+ "X\xf4\x90\x80Y\0",
+ };
+ const std::vector<std::vector<uint16_t>> unicode_expected = {
+ {0x58, 0xfffd, 0xfffd, 0x59},
+ {0x58, 0xfffd, 0xfffd, 0x59},
+ {0x58, 0xfffd, 0xfffd, 0xfffd, 0x59},
+ {0x58, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0x59},
+ {0x58, 0xfffd, 0xfffd, 0xfffd, 0x59},
+ {0x58, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0x59},
+ {0x58, 0xfffd, 0xfffd, 0x59},
+ {0x58, 0xfffd, 0xfffd, 0xfffd, 0x59},
+ {0x58, 0xfffd, 0xfffd, 0x59},
+ {0x58, 0xfffd, 0xfffd, 0xfffd, 0x59},
+ };
+ CHECK_EQ(unicode_expected.size(), arraysize(cases));
+ TestUtf8DecodingAgainstReference(cases, unicode_expected);
+}
THREADED_TEST(Utf16) {
LocalContext context;
@@ -17525,6 +17638,8 @@ int promise_reject_frame_count = -1;
void PromiseRejectCallback(v8::PromiseRejectMessage reject_message) {
v8::Local<v8::Object> global = CcTest::global();
v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
+ CHECK_EQ(v8::Promise::PromiseState::kRejected,
+ reject_message.GetPromise()->State());
if (reject_message.GetEvent() == v8::kPromiseRejectWithNoHandler) {
promise_reject_counter++;
global->Set(context, v8_str("rejected"), reject_message.GetPromise())
@@ -22227,21 +22342,19 @@ const char* kMegamorphicTestProgram =
"}\n";
void TestStubCache(bool primary) {
- using namespace i;
-
- FLAG_native_code_counters = true;
+ i::FLAG_native_code_counters = true;
if (primary) {
- FLAG_test_primary_stub_cache = true;
+ i::FLAG_test_primary_stub_cache = true;
} else {
- FLAG_test_secondary_stub_cache = true;
+ i::FLAG_test_secondary_stub_cache = true;
}
- FLAG_opt = false;
+ i::FLAG_opt = false;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
create_params.counter_lookup_callback = LookupCounter;
v8::Isolate* isolate = v8::Isolate::New(create_params);
- Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
{
v8::Isolate::Scope isolate_scope(isolate);
@@ -23177,16 +23290,14 @@ TEST(AccessCheckThrows) {
}
TEST(AccessCheckInIC) {
- using namespace i;
-
- FLAG_native_code_counters = true;
- FLAG_opt = false;
+ i::FLAG_native_code_counters = true;
+ i::FLAG_opt = false;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
create_params.counter_lookup_callback = LookupCounter;
v8::Isolate* isolate = v8::Isolate::New(create_params);
- Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
{
v8::Isolate::Scope isolate_scope(isolate);
@@ -26948,20 +27059,19 @@ THREADED_TEST(GlobalAccessorInfo) {
}
UNINITIALIZED_TEST(IncreaseHeapLimitForDebugging) {
- using namespace i;
v8::Isolate::CreateParams create_params;
create_params.constraints.set_max_old_space_size(16);
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
- Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
{
size_t limit_before = i_isolate->heap()->MaxOldGenerationSize();
- CHECK_EQ(16 * MB, limit_before);
+ CHECK_EQ(16 * i::MB, limit_before);
CHECK(!isolate->IsHeapLimitIncreasedForDebugging());
isolate->IncreaseHeapLimitForDebugging();
CHECK(isolate->IsHeapLimitIncreasedForDebugging());
size_t limit_after = i_isolate->heap()->MaxOldGenerationSize();
- CHECK_EQ(4 * 16 * MB, limit_after);
+ CHECK_EQ(4 * 16 * i::MB, limit_after);
isolate->RestoreOriginalHeapLimit();
CHECK(!isolate->IsHeapLimitIncreasedForDebugging());
CHECK_EQ(limit_before, i_isolate->heap()->MaxOldGenerationSize());
@@ -26995,12 +27105,11 @@ TEST(DeterministicRandomNumberGeneration) {
}
UNINITIALIZED_TEST(AllowAtomicsWait) {
- using namespace i;
v8::Isolate::CreateParams create_params;
create_params.allow_atomics_wait = false;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
- Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
{
CHECK_EQ(false, i_isolate->allow_atomics_wait());
isolate->SetAllowAtomicsWait(true);
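
The CustomErrorRethrows tests above thread a counter into the message listener through its v8::External data argument. The same pattern, reduced to a hedged standard-C++ sketch with made-up names (Listener, CountingListener, FireOnce):

    #include <cassert>

    typedef void (*Listener)(const char* message, void* data);

    void CountingListener(const char* /*message*/, void* data) {
      ++*static_cast<int*>(data);  // Bump the caller-owned counter.
    }

    void FireOnce(Listener listener, void* data) { listener("boom", data); }

    // int callcount = 0;
    // FireOnce(CountingListener, &callcount);
    // assert(callcount == 1);  // Mirrors CHECK_EQ(callcount, 1) above.
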
diff --git a/deps/v8/test/cctest/test-array-list.cc b/deps/v8/test/cctest/test-array-list.cc
index 9532554416..91882cd1da 100644
--- a/deps/v8/test/cctest/test-array-list.cc
+++ b/deps/v8/test/cctest/test-array-list.cc
@@ -4,22 +4,12 @@
#include <stdlib.h>
-#include "src/v8.h"
-
#include "src/factory.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/factory.h -> src/objects-inl.h
#include "src/objects-inl.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/feedback-vector.h ->
-// src/feedback-vector-inl.h
-#include "src/feedback-vector-inl.h"
#include "test/cctest/cctest.h"
-namespace {
-
-using namespace v8::internal;
-
+namespace v8 {
+namespace internal {
TEST(ArrayList) {
LocalContext context;
@@ -46,4 +36,5 @@ TEST(ArrayList) {
CHECK_EQ(200, Smi::ToInt(array->Get(1)));
}
-} // namespace
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index 5483698975..5f405548f0 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -38,9 +38,11 @@
#include "src/v8.h"
#include "test/cctest/cctest.h"
-using namespace v8::base;
-using namespace v8::internal;
+namespace v8 {
+namespace internal {
+namespace test_assembler_arm {
+using base::RandomNumberGenerator;
// Define these function prototypes to match JSEntryFunction in execution.cc.
typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
@@ -63,8 +65,8 @@ TEST(0) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -100,8 +102,8 @@ TEST(1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -146,8 +148,8 @@ TEST(2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -194,8 +196,8 @@ TEST(3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -326,8 +328,8 @@ TEST(4) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -389,8 +391,8 @@ TEST(5) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -421,8 +423,8 @@ TEST(6) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -490,8 +492,8 @@ static void TestRoundingMode(VCVTTypes types,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -673,8 +675,8 @@ TEST(8) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -783,8 +785,8 @@ TEST(9) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -889,8 +891,8 @@ TEST(10) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -984,8 +986,8 @@ TEST(11) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -1111,8 +1113,8 @@ TEST(13) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -1184,8 +1186,8 @@ TEST(14) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -2065,8 +2067,8 @@ TEST(15) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -2342,8 +2344,8 @@ TEST(16) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -2423,8 +2425,8 @@ TEST(sdiv) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -2487,8 +2489,8 @@ TEST(udiv) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -2518,8 +2520,8 @@ TEST(smmla) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2527,7 +2529,7 @@ TEST(smmla) {
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt(), y = rng->NextInt(), z = rng->NextInt();
Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, y, z, 0);
- CHECK_EQ(bits::SignedMulHighAndAdd32(x, y, z), r);
+ CHECK_EQ(base::bits::SignedMulHighAndAdd32(x, y, z), r);
USE(dummy);
}
}
@@ -2544,8 +2546,8 @@ TEST(smmul) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2553,7 +2555,7 @@ TEST(smmul) {
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt(), y = rng->NextInt();
Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, y, 0, 0);
- CHECK_EQ(bits::SignedMulHigh32(x, y), r);
+ CHECK_EQ(base::bits::SignedMulHigh32(x, y), r);
USE(dummy);
}
}
@@ -2570,8 +2572,8 @@ TEST(sxtb) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2596,8 +2598,8 @@ TEST(sxtab) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2622,8 +2624,8 @@ TEST(sxth) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2648,8 +2650,8 @@ TEST(sxtah) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2674,8 +2676,8 @@ TEST(uxtb) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2700,8 +2702,8 @@ TEST(uxtab) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2726,8 +2728,8 @@ TEST(uxth) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2752,8 +2754,8 @@ TEST(uxtah) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2794,8 +2796,8 @@ TEST(rbit) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
@@ -2876,8 +2878,8 @@ TEST(code_relative_offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), code_object);
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, code_object);
F1 f = FUNCTION_CAST<F1>(code->entry());
int res =
reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 21, 0, 0, 0, 0));
@@ -2916,8 +2918,8 @@ TEST(msr_mrs) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -3017,8 +3019,8 @@ TEST(ARMv8_float32_vrintX) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -3122,8 +3124,8 @@ TEST(ARMv8_vrintX) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -3262,8 +3264,8 @@ TEST(ARMv8_vsel) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -3356,8 +3358,8 @@ TEST(ARMv8_vminmax_f64) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -3438,8 +3440,8 @@ TEST(ARMv8_vminmax_f32) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -3570,8 +3572,8 @@ static F4 GenerateMacroFloatMinMax(MacroAssembler& assm) {
CodeDesc desc;
assm.GetCode(assm.isolate(), &desc);
- Handle<Code> code = assm.isolate()->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ assm.isolate()->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -3738,8 +3740,8 @@ TEST(unaligned_loads) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -3784,8 +3786,8 @@ TEST(unaligned_stores) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -3887,8 +3889,8 @@ TEST(vswp) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -3992,3 +3994,7 @@ TEST(use_scratch_register_scope) {
}
#undef __
+
+} // namespace test_assembler_arm
+} // namespace internal
+} // namespace v8
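
Each assembler TEST above follows one scaffold: assemble into a buffer, wrap the bytes via factory()->NewCode(desc, Code::STUB, ...), then FUNCTION_CAST the entry point and call it. A hedged analogue of that last step outside V8, assuming Linux/x86-64 and a platform that still permits writable+executable mappings (the byte sequence encodes "mov eax, 42; ret"):

    #include <cassert>
    #include <cstring>
    #include <sys/mman.h>

    int main() {
      const unsigned char code[] = {0xb8, 0x2a, 0x00, 0x00, 0x00, 0xc3};
      void* page = mmap(nullptr, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(page != MAP_FAILED);
      std::memcpy(page, code, sizeof(code));
      int (*f)() = reinterpret_cast<int (*)()>(page);
      assert(f() == 42);  // Same shape as CHECK_EQ(42, res) in the tests.
      munmap(page, 4096);
    }
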
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index 47b7c6c964..9ebe524a6f 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -46,7 +46,8 @@
#include "test/cctest/cctest.h"
#include "test/cctest/test-utils-arm64.h"
-using namespace v8::internal;
+namespace v8 {
+namespace internal {
// Test infrastructure.
//
@@ -12365,10 +12366,10 @@ static void PushPopJsspSimpleHelper(int reg_count,
reg_count = CountSetBits(allowed, kNumberOfRegisters);
}
// Work out which registers to use, based on reg_size.
- Register r[kNumberOfRegisters];
- Register x[kNumberOfRegisters];
- RegList list = PopulateRegisterArray(NULL, x, r, reg_size, reg_count,
- allowed);
+ auto r = CreateRegisterArray<Register, kNumberOfRegisters>();
+ auto x = CreateRegisterArray<Register, kNumberOfRegisters>();
+ RegList list = PopulateRegisterArray(NULL, x.data(), r.data(), reg_size,
+ reg_count, allowed);
// The literal base is chosen to have two useful properties:
// * When multiplied by small values (such as a register index), this value
@@ -12549,10 +12550,10 @@ static void PushPopFPJsspSimpleHelper(int reg_count,
reg_count = CountSetBits(allowed, kNumberOfVRegisters);
}
// Work out which registers to use, based on reg_size.
- VRegister v[kNumberOfRegisters];
- VRegister d[kNumberOfRegisters];
- RegList list =
- PopulateVRegisterArray(NULL, d, v, reg_size, reg_count, allowed);
+ auto v = CreateRegisterArray<VRegister, kNumberOfRegisters>();
+ auto d = CreateRegisterArray<VRegister, kNumberOfRegisters>();
+ RegList list = PopulateVRegisterArray(NULL, d.data(), v.data(), reg_size,
+ reg_count, allowed);
// The literal base is chosen to have two useful properties:
// * When multiplied (using an integer) by small values (such as a register
@@ -12718,9 +12719,9 @@ static void PushPopJsspMixedMethodsHelper(int claim, int reg_size) {
static RegList const allowed =
~(x8.bit() | x9.bit() | jssp.bit() | xzr.bit());
// Work out which registers to use, based on reg_size.
- Register r[10];
- Register x[10];
- PopulateRegisterArray(NULL, x, r, reg_size, 10, allowed);
+ auto r = CreateRegisterArray<Register, 10>();
+ auto x = CreateRegisterArray<Register, 10>();
+ PopulateRegisterArray(NULL, x.data(), r.data(), reg_size, 10, allowed);
// Calculate some handy register lists.
RegList r0_to_r3 = 0;
@@ -12823,9 +12824,10 @@ static void PushPopJsspWXOverlapHelper(int reg_count, int claim) {
if (reg_count == kPushPopJsspMaxRegCount) {
reg_count = CountSetBits(allowed, kNumberOfRegisters);
}
- Register w[kNumberOfRegisters];
- Register x[kNumberOfRegisters];
- RegList list = PopulateRegisterArray(w, x, NULL, 0, reg_count, allowed);
+ auto w = CreateRegisterArray<Register, kNumberOfRegisters>();
+ auto x = CreateRegisterArray<Register, kNumberOfRegisters>();
+ RegList list =
+ PopulateRegisterArray(w.data(), x.data(), NULL, 0, reg_count, allowed);
// The number of W-sized slots we expect to pop. When we pop, we alternate
// between W and X registers, so we need reg_count*1.5 W-sized slots.
@@ -12905,13 +12907,9 @@ static void PushPopJsspWXOverlapHelper(int reg_count, int claim) {
int times = i % 4 + 1;
if (i & 1) {
// Push odd-numbered registers as W registers.
- if (i & 2) {
- __ PushMultipleTimes(w[i], times);
- } else {
- // Use a register to specify the count.
- __ Mov(tmp.W(), times);
- __ PushMultipleTimes(w[i], tmp.W());
- }
+ __ Mov(tmp.W(), times);
+ __ PushMultipleTimes(w[i], tmp.W());
+
// Fill in the expected stack slots.
for (int j = 0; j < times; j++) {
if (w[i].Is(wzr)) {
@@ -12923,13 +12921,9 @@ static void PushPopJsspWXOverlapHelper(int reg_count, int claim) {
}
} else {
// Push even-numbered registers as X registers.
- if (i & 2) {
- __ PushMultipleTimes(x[i], times);
- } else {
- // Use a register to specify the count.
- __ Mov(tmp, times);
- __ PushMultipleTimes(x[i], tmp);
- }
+ __ Mov(tmp, times);
+ __ PushMultipleTimes(x[i], tmp);
+
// Fill in the expected stack slots.
for (int j = 0; j < times; j++) {
if (x[i].IsZero()) {
@@ -13903,23 +13897,23 @@ TEST(isvalid) {
CHECK(d31.IsValid());
CHECK(s31.IsValid());
- CHECK(x0.IsValidRegister());
- CHECK(w0.IsValidRegister());
- CHECK(xzr.IsValidRegister());
- CHECK(wzr.IsValidRegister());
- CHECK(csp.IsValidRegister());
- CHECK(wcsp.IsValidRegister());
- CHECK(!x0.IsValidVRegister());
- CHECK(!w0.IsValidVRegister());
- CHECK(!xzr.IsValidVRegister());
- CHECK(!wzr.IsValidVRegister());
- CHECK(!csp.IsValidVRegister());
- CHECK(!wcsp.IsValidVRegister());
-
- CHECK(d0.IsValidVRegister());
- CHECK(s0.IsValidVRegister());
- CHECK(!d0.IsValidRegister());
- CHECK(!s0.IsValidRegister());
+ CHECK(x0.IsRegister());
+ CHECK(w0.IsRegister());
+ CHECK(xzr.IsRegister());
+ CHECK(wzr.IsRegister());
+ CHECK(csp.IsRegister());
+ CHECK(wcsp.IsRegister());
+ CHECK(!x0.IsVRegister());
+ CHECK(!w0.IsVRegister());
+ CHECK(!xzr.IsVRegister());
+ CHECK(!wzr.IsVRegister());
+ CHECK(!csp.IsVRegister());
+ CHECK(!wcsp.IsVRegister());
+
+ CHECK(d0.IsVRegister());
+ CHECK(s0.IsVRegister());
+ CHECK(!d0.IsRegister());
+ CHECK(!s0.IsRegister());
// Test the same as before, but using CPURegister types. This shouldn't make
// any difference.
@@ -13938,23 +13932,23 @@ TEST(isvalid) {
CHECK(static_cast<CPURegister>(d31).IsValid());
CHECK(static_cast<CPURegister>(s31).IsValid());
- CHECK(static_cast<CPURegister>(x0).IsValidRegister());
- CHECK(static_cast<CPURegister>(w0).IsValidRegister());
- CHECK(static_cast<CPURegister>(xzr).IsValidRegister());
- CHECK(static_cast<CPURegister>(wzr).IsValidRegister());
- CHECK(static_cast<CPURegister>(csp).IsValidRegister());
- CHECK(static_cast<CPURegister>(wcsp).IsValidRegister());
- CHECK(!static_cast<CPURegister>(x0).IsValidVRegister());
- CHECK(!static_cast<CPURegister>(w0).IsValidVRegister());
- CHECK(!static_cast<CPURegister>(xzr).IsValidVRegister());
- CHECK(!static_cast<CPURegister>(wzr).IsValidVRegister());
- CHECK(!static_cast<CPURegister>(csp).IsValidVRegister());
- CHECK(!static_cast<CPURegister>(wcsp).IsValidVRegister());
-
- CHECK(static_cast<CPURegister>(d0).IsValidVRegister());
- CHECK(static_cast<CPURegister>(s0).IsValidVRegister());
- CHECK(!static_cast<CPURegister>(d0).IsValidRegister());
- CHECK(!static_cast<CPURegister>(s0).IsValidRegister());
+ CHECK(static_cast<CPURegister>(x0).IsRegister());
+ CHECK(static_cast<CPURegister>(w0).IsRegister());
+ CHECK(static_cast<CPURegister>(xzr).IsRegister());
+ CHECK(static_cast<CPURegister>(wzr).IsRegister());
+ CHECK(static_cast<CPURegister>(csp).IsRegister());
+ CHECK(static_cast<CPURegister>(wcsp).IsRegister());
+ CHECK(!static_cast<CPURegister>(x0).IsVRegister());
+ CHECK(!static_cast<CPURegister>(w0).IsVRegister());
+ CHECK(!static_cast<CPURegister>(xzr).IsVRegister());
+ CHECK(!static_cast<CPURegister>(wzr).IsVRegister());
+ CHECK(!static_cast<CPURegister>(csp).IsVRegister());
+ CHECK(!static_cast<CPURegister>(wcsp).IsVRegister());
+
+ CHECK(static_cast<CPURegister>(d0).IsVRegister());
+ CHECK(static_cast<CPURegister>(s0).IsVRegister());
+ CHECK(!static_cast<CPURegister>(d0).IsRegister());
+ CHECK(!static_cast<CPURegister>(s0).IsRegister());
}
TEST(areconsecutive) {
@@ -15324,7 +15318,8 @@ TEST(pool_size) {
HandleScope handle_scope(isolate);
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(desc, 0, masm.CodeObject());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, masm.CodeObject());
unsigned pool_count = 0;
int pool_mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL) |
@@ -15515,3 +15510,6 @@ TEST(internal_reference_linked) {
TEARDOWN();
}
+
+} // namespace internal
+} // namespace v8
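
The arm64 hunks above replace raw Register arrays with CreateRegisterArray<...>() so that .data() can be passed to PopulateRegisterArray. The real helper lives in V8's test utilities; a plausible stand-in (an assumption, not the actual signature) is simply a value-initialized std::array:

    #include <array>
    #include <cstddef>

    template <typename RegType, size_t N>
    std::array<RegType, N> CreateRegisterArraySketch() {
      return std::array<RegType, N>{};  // All elements value-initialized.
    }
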
diff --git a/deps/v8/test/cctest/test-assembler-ia32.cc b/deps/v8/test/cctest/test-assembler-ia32.cc
index 89908cbeb4..ab4a72f790 100644
--- a/deps/v8/test/cctest/test-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-assembler-ia32.cc
@@ -29,6 +29,7 @@
#include "src/v8.h"
+#include "src/assembler-inl.h"
#include "src/base/platform/platform.h"
#include "src/base/utils/random-number-generator.h"
#include "src/disassembler.h"
@@ -37,8 +38,8 @@
#include "src/ostreams.h"
#include "test/cctest/cctest.h"
-using namespace v8::internal;
-
+namespace v8 {
+namespace internal {
typedef int (*F0)();
typedef int (*F1)(int x);
@@ -61,8 +62,8 @@ TEST(AssemblerIa320) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -98,8 +99,8 @@ TEST(AssemblerIa321) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -139,8 +140,8 @@ TEST(AssemblerIa322) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -168,8 +169,8 @@ TEST(AssemblerIa323) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -197,8 +198,8 @@ TEST(AssemblerIa324) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -224,8 +225,8 @@ TEST(AssemblerIa325) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F0 f = FUNCTION_CAST<F0>(code->entry());
int res = f();
CHECK_EQ(42, res);
@@ -257,8 +258,8 @@ TEST(AssemblerIa326) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -289,8 +290,8 @@ TEST(AssemblerIa328) {
__ ret(0);
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -371,8 +372,8 @@ TEST(AssemblerMultiByteNop) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
CHECK(code->IsCode());
F0 f = FUNCTION_CAST<F0>(code->entry());
@@ -421,8 +422,8 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F0 f = FUNCTION_CAST<F0>(code->entry());
int res = f();
@@ -486,8 +487,8 @@ TEST(AssemblerIa32Extractps) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -526,8 +527,8 @@ TEST(AssemblerIa32SSE) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -755,8 +756,8 @@ TEST(AssemblerX64FMA_sd) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -984,8 +985,8 @@ TEST(AssemblerX64FMA_ss) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -1092,8 +1093,8 @@ TEST(AssemblerIa32BMI1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -1140,8 +1141,8 @@ TEST(AssemblerIa32LZCNT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -1188,8 +1189,8 @@ TEST(AssemblerIa32POPCNT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -1334,8 +1335,8 @@ TEST(AssemblerIa32BMI2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -1378,8 +1379,8 @@ TEST(AssemblerIa32JumpTables1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -1426,8 +1427,8 @@ TEST(AssemblerIa32JumpTables2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -1469,8 +1470,8 @@ TEST(Regress621926) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
@@ -1482,3 +1483,6 @@ TEST(Regress621926) {
}
#undef __
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc
index 212e97229b..e191b1eb63 100644
--- a/deps/v8/test/cctest/test-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-assembler-mips.cc
@@ -29,6 +29,7 @@
#include "src/v8.h"
+#include "src/assembler-inl.h"
#include "src/base/utils/random-number-generator.h"
#include "src/disassembler.h"
#include "src/factory.h"
@@ -38,9 +39,8 @@
#include "test/cctest/cctest.h"
-
-using namespace v8::internal;
-
+namespace v8 {
+namespace internal {
// Define these function prototypes to match JSEntryFunction in execution.cc.
typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
@@ -64,8 +64,8 @@ TEST(MIPS0) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
int res = reinterpret_cast<int>(
CALL_GENERATED_CODE(isolate, f, 0xab0, 0xc, 0, 0, 0));
@@ -100,8 +100,8 @@ TEST(MIPS1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F1 f = FUNCTION_CAST<F1>(code->entry());
int res = reinterpret_cast<int>(
CALL_GENERATED_CODE(isolate, f, 50, 0, 0, 0, 0));
@@ -238,8 +238,8 @@ TEST(MIPS2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
int res = reinterpret_cast<int>(
CALL_GENERATED_CODE(isolate, f, 0xab0, 0xc, 0, 0, 0));
@@ -340,8 +340,8 @@ TEST(MIPS3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
// Double test values.
t.a = 1.5e14;
@@ -444,8 +444,8 @@ TEST(MIPS4) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.a = 1.5e22;
t.b = 2.75e11;
@@ -507,8 +507,8 @@ TEST(MIPS5) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.a = 1.5e4;
t.b = 2.75e8;
@@ -577,8 +577,8 @@ TEST(MIPS6) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.ui = 0x11223344;
t.si = 0x99aabbcc;
@@ -670,8 +670,8 @@ TEST(MIPS7) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.a = 1.5e14;
t.b = 2.75e11;
@@ -768,8 +768,8 @@ TEST(MIPS8) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.input = 0x12345678;
Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0x0, 0, 0, 0);
@@ -814,8 +814,7 @@ TEST(MIPS9) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
}
@@ -865,8 +864,8 @@ TEST(MIPS10) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.a = 2.147483646e+09; // 0x7FFFFFFE -> 0xFF80000041DFFFFF as double.
t.b_word = 0x0ff00ff0; // 0x0FF00FF0 -> 0x as double.
@@ -993,8 +992,8 @@ TEST(MIPS11) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.reg_init = 0xaabbccdd;
t.mem_init = 0x11223344;
@@ -1119,8 +1118,8 @@ TEST(MIPS12) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.x = 1;
t.y = 2;
@@ -1173,8 +1172,8 @@ TEST(MIPS13) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.cvt_big_in = 0xFFFFFFFF;
@@ -1294,8 +1293,8 @@ TEST(MIPS14) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.round_up_in = 123.51;
@@ -1400,8 +1399,8 @@ TEST(seleqz_selnez) {
__ nop();
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
(CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
@@ -1515,8 +1514,8 @@ TEST(min_max) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
@@ -1626,8 +1625,8 @@ TEST(rint_d) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int j = 0; j < 4; j++) {
@@ -1674,8 +1673,8 @@ TEST(sel) {
__ nop();
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
const int test_size = 3;
@@ -1807,8 +1806,8 @@ TEST(rint_s) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int j = 0; j < 4; j++) {
@@ -1856,8 +1855,8 @@ TEST(Cvt_d_uw) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.input = inputs[i];
@@ -1938,8 +1937,8 @@ TEST(mina_maxa) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
@@ -2019,8 +2018,8 @@ TEST(trunc_l) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2100,8 +2099,8 @@ TEST(movz_movn) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2202,8 +2201,8 @@ TEST(movt_movd) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
(CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
@@ -2287,8 +2286,8 @@ TEST(cvt_w_d) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int j = 0; j < 4; j++) {
test.fcsr = fcsr_inputs[j];
@@ -2354,8 +2353,8 @@ TEST(trunc_w) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2423,8 +2422,8 @@ TEST(round_w) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2495,8 +2494,8 @@ TEST(round_l) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2568,8 +2567,8 @@ TEST(sub) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputfs_S[i];
@@ -2647,8 +2646,8 @@ TEST(sqrt_rsqrt_recip) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
@@ -2727,8 +2726,8 @@ TEST(neg) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_S[i];
@@ -2784,8 +2783,8 @@ TEST(mul) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputfs_S[i];
@@ -2840,8 +2839,8 @@ TEST(mov) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2907,8 +2906,8 @@ TEST(floor_w) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2979,8 +2978,8 @@ TEST(floor_l) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -3051,8 +3050,8 @@ TEST(ceil_w) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -3123,8 +3122,8 @@ TEST(ceil_l) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -3195,8 +3194,8 @@ TEST(jump_tables1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -3265,8 +3264,8 @@ TEST(jump_tables2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -3342,8 +3341,8 @@ TEST(jump_tables3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -3393,8 +3392,8 @@ TEST(BITSWAP) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.r1 = 0x781A15C3;
t.r2 = 0x8B71FCDE;
@@ -3528,8 +3527,8 @@ TEST(class_fmt) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.dSignalingNan = std::numeric_limits<double>::signaling_NaN();
@@ -3620,8 +3619,8 @@ TEST(ABS) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
test.a = -2.0;
test.b = -2.0;
@@ -3713,8 +3712,8 @@ TEST(ADD_FMT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
test.a = 2.0;
test.b = 3.0;
@@ -3868,8 +3867,8 @@ TEST(C_COND_FMT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
test.dOp1 = 2.0;
test.dOp2 = 3.0;
@@ -4069,8 +4068,8 @@ TEST(CMP_COND_FMT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
uint64_t dTrue = 0xFFFFFFFFFFFFFFFF;
uint64_t dFalse = 0x0000000000000000;
@@ -4255,8 +4254,8 @@ TEST(CVT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
test.cvt_d_s_in = -0.51;
@@ -4467,8 +4466,8 @@ TEST(DIV_FMT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
@@ -4559,8 +4558,8 @@ uint32_t run_align(uint32_t rs_value, uint32_t rt_value, uint8_t bp) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
@@ -4612,8 +4611,8 @@ uint32_t run_aluipc(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
PC = (uint32_t) f; // Set the program counter.
@@ -4666,8 +4665,8 @@ uint32_t run_auipc(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
PC = (uint32_t) f; // Set the program counter.
@@ -4742,8 +4741,8 @@ uint32_t run_lwpc(int offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
@@ -4826,8 +4825,8 @@ uint32_t run_jic(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
@@ -4898,8 +4897,8 @@ uint64_t run_beqzc(int32_t value, int32_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
@@ -4992,8 +4991,8 @@ uint32_t run_jialc(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
@@ -5041,8 +5040,8 @@ static uint32_t run_addiupc(int32_t imm19) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
PC = (uint32_t) f; // Set the program counter.
@@ -5124,8 +5123,8 @@ int32_t run_bc(int32_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
@@ -5206,8 +5205,8 @@ int32_t run_balc(int32_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
@@ -5231,8 +5230,8 @@ uint32_t run_aui(uint32_t rs, uint16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
@@ -5322,8 +5321,8 @@ uint32_t run_bal(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
@@ -5376,8 +5375,8 @@ TEST(Trampoline) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
int32_t res = reinterpret_cast<int32_t>(
@@ -5443,8 +5442,8 @@ void helper_madd_msub_maddf_msubf(F func) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
const size_t kTableLength = sizeof(test_cases) / sizeof(TestCaseMaddMsub<T>);
@@ -5529,8 +5528,8 @@ uint32_t run_Subu(uint32_t imm, int32_t num_instr) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
uint32_t res = reinterpret_cast<uint32_t>(
@@ -5587,6 +5586,65 @@ TEST(Subu) {
}
}
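+// Loads the two 64-bit words of |elements| into the four 32-bit lanes of the
+// MSA register |w|, using |t0| and |t1| as scratch registers.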
+void load_uint64_elements_of_vector(MacroAssembler& assm,
+ const uint64_t elements[], MSARegister w,
+ Register t0, Register t1) {
+ __ li(t0, static_cast<uint32_t>(elements[0] & 0xffffffff));
+ __ li(t1, static_cast<uint32_t>((elements[0] >> 32) & 0xffffffff));
+ __ insert_w(w, 0, t0);
+ __ insert_w(w, 1, t1);
+ __ li(t0, static_cast<uint32_t>(elements[1] & 0xffffffff));
+ __ li(t1, static_cast<uint32_t>((elements[1] >> 32) & 0xffffffff));
+ __ insert_w(w, 2, t0);
+ __ insert_w(w, 3, t1);
+}
+
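+// Loads |elements|, reinterpreted as four 32-bit words, into the 32-bit lanes
+// of |w|.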
+void load_uint32_elements_of_vector(MacroAssembler& assm,
+ const uint64_t elements[], MSARegister w,
+ Register t0, Register t1) {
+ const uint32_t* const element = reinterpret_cast<const uint32_t*>(elements);
+ __ li(t0, element[0]);
+ __ li(t1, element[1]);
+ __ insert_w(w, 0, t0);
+ __ insert_w(w, 1, t1);
+ __ li(t0, element[2]);
+ __ li(t1, element[3]);
+ __ insert_w(w, 2, t0);
+ __ insert_w(w, 3, t1);
+}
+
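+// Loads |elements|, reinterpreted as eight 16-bit halfwords, into the 16-bit
+// lanes of |w|.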
+void load_uint16_elements_of_vector(MacroAssembler& assm,
+ const uint64_t elements[], MSARegister w,
+ Register t0, Register t1) {
+ const uint16_t* const element = reinterpret_cast<const uint16_t*>(elements);
+ __ li(t0, element[0]);
+ __ li(t1, element[1]);
+ __ insert_h(w, 0, t0);
+ __ insert_h(w, 1, t1);
+ __ li(t0, element[2]);
+ __ li(t1, element[3]);
+ __ insert_h(w, 2, t0);
+ __ insert_h(w, 3, t1);
+ __ li(t0, element[4]);
+ __ li(t1, element[5]);
+ __ insert_h(w, 4, t0);
+ __ insert_h(w, 5, t1);
+ __ li(t0, element[6]);
+ __ li(t1, element[7]);
+ __ insert_h(w, 6, t0);
+ __ insert_h(w, 7, t1);
+}
+
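+// Each store helper writes the full 128-bit contents of |w| to memory at |a|;
+// |t| is accepted but unused.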
+inline void store_uint64_elements_of_vector(MacroAssembler& assm, MSARegister w,
+ Register a, Register t) {
+ __ st_d(w, MemOperand(a, 0));
+}
+
+inline void store_uint32_elements_of_vector(MacroAssembler& assm, MSARegister w,
+ Register a, Register t) {
+ __ st_w(w, MemOperand(a, 0));
+}
+
TEST(MSA_fill_copy) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -5634,8 +5692,8 @@ TEST(MSA_fill_copy) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -5703,8 +5761,8 @@ TEST(MSA_fill_copy_2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -5761,8 +5819,8 @@ TEST(MSA_fill_copy_3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -5807,22 +5865,15 @@ void run_msa_insert(int32_t rs_value, int n, msa_reg_t* w) {
UNREACHABLE();
}
- __ copy_u_w(t2, w0, 0);
- __ sw(t2, MemOperand(a0, 0));
- __ copy_u_w(t2, w0, 1);
- __ sw(t2, MemOperand(a0, 4));
- __ copy_u_w(t2, w0, 2);
- __ sw(t2, MemOperand(a0, 8));
- __ copy_u_w(t2, w0, 3);
- __ sw(t2, MemOperand(a0, 12));
+ store_uint64_elements_of_vector(assm, w0, a0, t2);
__ jr(ra);
__ nop();
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -5887,6 +5938,60 @@ TEST(MSA_insert) {
}
}
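+// Round-trips |value| through the MSA control register (ctcmsa/cfcmsa) and
+// checks the readback against the writable-bit mask; the original MSACSR
+// contents are restored before returning.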
+void run_msa_ctc_cfc(uint32_t value) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ CpuFeatureScope fscope(&assm, MIPS_SIMD);
+
+ MSAControlRegister msareg = {kMSACSRRegister};
+ __ li(t0, value);
+ __ li(t2, 0);
+ __ cfcmsa(t1, msareg);
+ __ ctcmsa(msareg, t0);
+ __ cfcmsa(t2, msareg);
+ __ ctcmsa(msareg, t1);
+ __ sw(t2, MemOperand(a0, 0));
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+
+ uint32_t res;
+ (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+
+ CHECK_EQ(value & 0x0167ffff, res);
+}
+
+TEST(MSA_cfc_ctc) {
+ if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const uint32_t mask_without_cause = 0xff9c0fff;
+ const uint32_t mask_always_zero = 0x0167ffff;
+ const uint32_t mask_enables = 0x00000f80;
+ uint32_t test_case[] = {0x2d5ede31, 0x07955425, 0x15b7dbe3, 0x2bf8bc37,
+ 0xe6aae923, 0x24d0f68d, 0x41afa84c, 0x2d6bf64f,
+ 0x925014bd, 0x4dba7e61};
+ for (unsigned i = 0; i < arraysize(test_case); i++) {
+    // Setting an enable bit together with its corresponding cause bit could
+    // raise an exception, so a cause bit is only set where the matching
+    // enable bit is clear.
+ test_case[i] = (~test_case[i] & mask_enables) << 5 |
+ (test_case[i] & mask_without_cause);
+ run_msa_ctc_cfc(test_case[i] & mask_always_zero);
+ }
+}
+
struct ExpResShf {
uint8_t i8;
uint64_t lo;
@@ -5954,14 +6059,7 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
UNREACHABLE();
}
- __ copy_u_w(t2, w2, 0);
- __ sw(t2, MemOperand(a0, 0));
- __ copy_u_w(t2, w2, 1);
- __ sw(t2, MemOperand(a0, 4));
- __ copy_u_w(t2, w2, 2);
- __ sw(t2, MemOperand(a0, 8));
- __ copy_u_w(t2, w2, 3);
- __ sw(t2, MemOperand(a0, 12));
+ store_uint64_elements_of_vector(assm, w2, a0, t2);
__ jr(ra);
__ nop();
@@ -5970,8 +6068,8 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6148,8 +6246,8 @@ uint32_t run_Ins(uint32_t imm, uint32_t source, uint16_t pos, uint16_t size) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
uint32_t res = reinterpret_cast<uint32_t>(
@@ -6199,8 +6297,8 @@ uint32_t run_Ext(uint32_t source, uint16_t pos, uint16_t size) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
uint32_t res = reinterpret_cast<uint32_t>(
@@ -6251,33 +6349,19 @@ void run_msa_i5(struct TestCaseMsaI5* input, bool i5_sign_ext,
int32_t i5 =
i5_sign_ext ? static_cast<int32_t>(input->i5 << 27) >> 27 : input->i5;
- __ li(t0, static_cast<uint32_t>(input->ws_lo & 0xffffffff));
- __ li(t1, static_cast<uint32_t>((input->ws_lo >> 32) & 0xffffffff));
- __ insert_w(w0, 0, t0);
- __ insert_w(w0, 1, t1);
- __ li(t0, static_cast<uint32_t>(input->ws_hi & 0xffffffff));
- __ li(t1, static_cast<uint32_t>((input->ws_hi >> 32) & 0xffffffff));
- __ insert_w(w0, 2, t0);
- __ insert_w(w0, 3, t1);
+ load_uint64_elements_of_vector(assm, &(input->ws_lo), w0, t0, t1);
GenerateI5InstructionFunc(assm, i5);
- __ copy_u_w(t2, w2, 0);
- __ sw(t2, MemOperand(a0, 0));
- __ copy_u_w(t2, w2, 1);
- __ sw(t2, MemOperand(a0, 4));
- __ copy_u_w(t2, w2, 2);
- __ sw(t2, MemOperand(a0, 8));
- __ copy_u_w(t2, w2, 3);
- __ sw(t2, MemOperand(a0, 12));
+ store_uint64_elements_of_vector(assm, w2, a0, t2);
__ jr(ra);
__ nop();
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6676,8 +6760,11 @@ struct TestCaseMsa2R {
uint64_t exp_res_hi;
};
-template <typename Func>
-void run_msa_2r(struct TestCaseMsa2R* input, Func Generate2RInstructionFunc) {
+template <typename Func, typename FuncLoad, typename FuncStore>
+void run_msa_2r(const struct TestCaseMsa2R* input,
+ Func Generate2RInstructionFunc,
+ FuncLoad load_elements_of_vector,
+ FuncStore store_elements_of_vector) {
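+  // The caller supplies matching load/store helpers so this one template
+  // handles 16-, 32- and 64-bit element layouts: |load_elements_of_vector|
+  // fills w0 from |input| and |store_elements_of_vector| writes w2 out
+  // through a0.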
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -6685,33 +6772,18 @@ void run_msa_2r(struct TestCaseMsa2R* input, Func Generate2RInstructionFunc) {
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
- __ li(t0, static_cast<uint32_t>(input->ws_lo & 0xffffffff));
- __ li(t1, static_cast<uint32_t>((input->ws_lo >> 32) & 0xffffffff));
- __ insert_w(w0, 0, t0);
- __ insert_w(w0, 1, t1);
- __ li(t0, static_cast<uint32_t>(input->ws_hi & 0xffffffff));
- __ li(t1, static_cast<uint32_t>((input->ws_hi >> 32) & 0xffffffff));
- __ insert_w(w0, 2, t0);
- __ insert_w(w0, 3, t1);
-
+ load_elements_of_vector(assm, reinterpret_cast<const uint64_t*>(input), w0,
+ t0, t1);
Generate2RInstructionFunc(assm);
-
- __ copy_u_w(t2, w2, 0);
- __ sw(t2, MemOperand(a0, 0));
- __ copy_u_w(t2, w2, 1);
- __ sw(t2, MemOperand(a0, 4));
- __ copy_u_w(t2, w2, 2);
- __ sw(t2, MemOperand(a0, 8));
- __ copy_u_w(t2, w2, 3);
- __ sw(t2, MemOperand(a0, 12));
+ store_elements_of_vector(assm, w2, a0, t2);
__ jr(ra);
__ nop();
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6719,8 +6791,17 @@ void run_msa_2r(struct TestCaseMsa2R* input, Func Generate2RInstructionFunc) {
(CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
- CHECK_EQ(input->exp_res_lo, res.d[0]);
- CHECK_EQ(input->exp_res_hi, res.d[1]);
+ if (store_elements_of_vector == store_uint64_elements_of_vector) {
+ CHECK_EQ(input->exp_res_lo, res.d[0]);
+ CHECK_EQ(input->exp_res_hi, res.d[1]);
+ } else if (store_elements_of_vector == store_uint32_elements_of_vector) {
+ const uint32_t* exp_res =
+ reinterpret_cast<const uint32_t*>(&input->exp_res_lo);
+ CHECK_EQ(exp_res[0], res.w[0]);
+ CHECK_EQ(exp_res[1], res.w[1]);
+ CHECK_EQ(exp_res[2], res.w[2]);
+ CHECK_EQ(exp_res[3], res.w[3]);
+ }
}
TEST(MSA_pcnt) {
@@ -6771,10 +6852,14 @@ TEST(MSA_pcnt) {
{0xf35862e13e38f8b0, 0x4f41ffdef2bfe636, 0x20, 0x2a}};
for (size_t i = 0; i < sizeof(tc_b) / sizeof(TestCaseMsa2R); ++i) {
- run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ pcnt_b(w2, w0); });
- run_msa_2r(&tc_h[i], [](MacroAssembler& assm) { __ pcnt_h(w2, w0); });
- run_msa_2r(&tc_w[i], [](MacroAssembler& assm) { __ pcnt_w(w2, w0); });
- run_msa_2r(&tc_d[i], [](MacroAssembler& assm) { __ pcnt_d(w2, w0); });
+ run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ pcnt_b(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ run_msa_2r(&tc_h[i], [](MacroAssembler& assm) { __ pcnt_h(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ run_msa_2r(&tc_w[i], [](MacroAssembler& assm) { __ pcnt_w(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ run_msa_2r(&tc_d[i], [](MacroAssembler& assm) { __ pcnt_d(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
}
}
@@ -6826,10 +6911,14 @@ TEST(MSA_nlzc) {
{0x00000000e338f8b0, 0x0754534acab32654, 0x20, 0x5}};
for (size_t i = 0; i < sizeof(tc_b) / sizeof(TestCaseMsa2R); ++i) {
- run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ nlzc_b(w2, w0); });
- run_msa_2r(&tc_h[i], [](MacroAssembler& assm) { __ nlzc_h(w2, w0); });
- run_msa_2r(&tc_w[i], [](MacroAssembler& assm) { __ nlzc_w(w2, w0); });
- run_msa_2r(&tc_d[i], [](MacroAssembler& assm) { __ nlzc_d(w2, w0); });
+ run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ nlzc_b(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ run_msa_2r(&tc_h[i], [](MacroAssembler& assm) { __ nlzc_h(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ run_msa_2r(&tc_w[i], [](MacroAssembler& assm) { __ nlzc_w(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ run_msa_2r(&tc_d[i], [](MacroAssembler& assm) { __ nlzc_d(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
}
}
@@ -6881,10 +6970,885 @@ TEST(MSA_nloc) {
{0xFFFFFFFF1CC7074F, 0xF8ABACB5354CD9AB, 0x20, 0x5}};
for (size_t i = 0; i < sizeof(tc_b) / sizeof(TestCaseMsa2R); ++i) {
- run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ nloc_b(w2, w0); });
- run_msa_2r(&tc_h[i], [](MacroAssembler& assm) { __ nloc_h(w2, w0); });
- run_msa_2r(&tc_w[i], [](MacroAssembler& assm) { __ nloc_w(w2, w0); });
- run_msa_2r(&tc_d[i], [](MacroAssembler& assm) { __ nloc_d(w2, w0); });
+ run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ nloc_b(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ run_msa_2r(&tc_h[i], [](MacroAssembler& assm) { __ nloc_h(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ run_msa_2r(&tc_w[i], [](MacroAssembler& assm) { __ nloc_w(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ run_msa_2r(&tc_d[i], [](MacroAssembler& assm) { __ nloc_d(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
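+// 2RF test vectors: four float inputs followed by the four expected 32-bit
+// results (one 128-bit MSA vector each); the _D_U variant holds two doubles
+// and two 64-bit results.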
+struct TestCaseMsa2RF_F_U {
+ float ws1;
+ float ws2;
+ float ws3;
+ float ws4;
+ uint32_t exp_res_1;
+ uint32_t exp_res_2;
+ uint32_t exp_res_3;
+ uint32_t exp_res_4;
+};
+
+struct TestCaseMsa2RF_D_U {
+ double ws1;
+ double ws2;
+ uint64_t exp_res_1;
+ uint64_t exp_res_2;
+};
+
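+// fclass_w/fclass_d produce, per element, a one-hot bit from the
+// SNAN..POS_ZERO set defined below, identifying the operand's IEEE-754 class.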
+TEST(MSA_fclass) {
+ if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+#define BIT(n) (0x1 << n)
+#define SNAN BIT(0)
+#define QNAN BIT(1)
+#define NEG_INFINITY BIT(2)
+#define NEG_NORMAL BIT(3)
+#define NEG_SUBNORMAL BIT(4)
+#define NEG_ZERO BIT(5)
+#define POS_INFINITY BIT(6)
+#define POS_NORMAL BIT(7)
+#define POS_SUBNORMAL BIT(8)
+#define POS_ZERO BIT(9)
+
+ const float inf_float = std::numeric_limits<float>::infinity();
+ const double inf_double = std::numeric_limits<double>::infinity();
+
+ const struct TestCaseMsa2RF_F_U tc_s[] = {
+ {1.f, -0.00001, 208e10f, -34.8e-30f, POS_NORMAL, NEG_NORMAL, POS_NORMAL,
+ NEG_NORMAL},
+ {inf_float, -inf_float, 0, -0.f, POS_INFINITY, NEG_INFINITY, POS_ZERO,
+ NEG_ZERO},
+ {3.036e-40f, -6.392e-43f, 1.41e-45f, -1.17e-38f, POS_SUBNORMAL,
+ NEG_SUBNORMAL, POS_SUBNORMAL, NEG_SUBNORMAL}};
+
+ const struct TestCaseMsa2RF_D_U tc_d[] = {
+ {1., -0.00000001, POS_NORMAL, NEG_NORMAL},
+ {208e10, -34.8e-300, POS_NORMAL, NEG_NORMAL},
+ {inf_double, -inf_double, POS_INFINITY, NEG_INFINITY},
+ {0, -0., POS_ZERO, NEG_ZERO},
+ {1.036e-308, -6.392e-309, POS_SUBNORMAL, NEG_SUBNORMAL},
+ {1.41e-323, -3.17e208, POS_SUBNORMAL, NEG_NORMAL}};
+
+ for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_U); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
+ [](MacroAssembler& assm) { __ fclass_w(w2, w0); },
+ load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ }
+ for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_U); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [](MacroAssembler& assm) { __ fclass_d(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ }
+
+#undef BIT
+#undef SNAN
+#undef QNAN
+#undef NEG_INFINITY
+#undef NEG_NORMAL
+#undef NEG_SUBNORMAL
+#undef NEG_ZERO
+#undef POS_INFINITY
+#undef POS_NORMAL
+#undef POS_SUBNORMAL
+#undef POS_ZERO
+}
+
+struct TestCaseMsa2RF_F_I {
+ float ws1;
+ float ws2;
+ float ws3;
+ float ws4;
+ int32_t exp_res_1;
+ int32_t exp_res_2;
+ int32_t exp_res_3;
+ int32_t exp_res_4;
+};
+
+struct TestCaseMsa2RF_D_I {
+ double ws1;
+ double ws2;
+ int64_t exp_res_1;
+ int64_t exp_res_2;
+};
+
+TEST(MSA_ftrunc_s) {
+ if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float inf_float = std::numeric_limits<float>::infinity();
+ const float qNaN_float = std::numeric_limits<float>::quiet_NaN();
+ const double inf_double = std::numeric_limits<double>::infinity();
+ const double qNaN_double = std::numeric_limits<double>::quiet_NaN();
+ const int32_t max_int32 = std::numeric_limits<int32_t>::max();
+ const int32_t min_int32 = std::numeric_limits<int32_t>::min();
+ const int64_t max_int64 = std::numeric_limits<int64_t>::max();
+ const int64_t min_int64 = std::numeric_limits<int64_t>::min();
+
+ const struct TestCaseMsa2RF_F_I tc_s[] = {
+ {inf_float, 2.345f, -324.9235f, 30004.51f, max_int32, 2, -324, 30004},
+ {-inf_float, -0.983f, 0.0832f, static_cast<float>(max_int32) * 3.f,
+ min_int32, 0, 0, max_int32},
+ {-23.125f, qNaN_float, 2 * static_cast<float>(min_int32), -0.f, -23, 0,
+ min_int32, 0}};
+
+ const struct TestCaseMsa2RF_D_I tc_d[] = {
+ {inf_double, 2.345, max_int64, 2},
+ {-324.9235, 246569139.51, -324, 246569139},
+ {-inf_double, -0.983, min_int64, 0},
+ {0.0832, 6 * static_cast<double>(max_int64), 0, max_int64},
+ {-21453889872.94, qNaN_double, -21453889872, 0},
+ {2 * static_cast<double>(min_int64), -0., min_int64, 0}};
+
+ for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_I); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
+ [](MacroAssembler& assm) { __ ftrunc_s_w(w2, w0); },
+ load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ }
+ for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_I); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [](MacroAssembler& assm) { __ ftrunc_s_d(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
+TEST(MSA_ftrunc_u) {
+ if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float inf_float = std::numeric_limits<float>::infinity();
+ const float qNaN_float = std::numeric_limits<float>::quiet_NaN();
+ const double inf_double = std::numeric_limits<double>::infinity();
+ const double qNaN_double = std::numeric_limits<double>::quiet_NaN();
+ const uint32_t max_uint32 = std::numeric_limits<uint32_t>::max();
+ const uint64_t max_uint64 = std::numeric_limits<uint64_t>::max();
+
+ const struct TestCaseMsa2RF_F_U tc_s[] = {
+ {inf_float, 2.345f, -324.9235f, 30004.51f, max_uint32, 2, 0, 30004},
+ {-inf_float, 0.983f, 0.0832f, static_cast<float>(max_uint32) * 3., 0, 0,
+ 0, max_uint32},
+ {23.125f, qNaN_float, -0.982, -0.f, 23, 0, 0, 0}};
+
+ const struct TestCaseMsa2RF_D_U tc_d[] = {
+ {inf_double, 2.345, max_uint64, 2},
+ {-324.9235, 246569139.51, 0, 246569139},
+ {-inf_double, -0.983, 0, 0},
+ {0.0832, 6 * static_cast<double>(max_uint64), 0, max_uint64},
+ {21453889872.94, qNaN_double, 21453889872, 0},
+ {0.9889, -0., 0, 0}};
+
+ for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_U); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
+ [](MacroAssembler& assm) { __ ftrunc_u_w(w2, w0); },
+ load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ }
+ for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_U); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [](MacroAssembler& assm) { __ ftrunc_u_d(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
+struct TestCaseMsa2RF_F_F {
+ float ws1;
+ float ws2;
+ float ws3;
+ float ws4;
+ float exp_res_1;
+ float exp_res_2;
+ float exp_res_3;
+ float exp_res_4;
+};
+
+struct TestCaseMsa2RF_D_D {
+ double ws1;
+ double ws2;
+ double exp_res_1;
+ double exp_res_2;
+};
+
+TEST(MSA_fsqrt) {
+ if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float inf_float = std::numeric_limits<float>::infinity();
+ const double inf_double = std::numeric_limits<double>::infinity();
+
+ const struct TestCaseMsa2RF_F_F tc_s[] = {
+ {81.f, 576.f, inf_float, -0.f, 9.f, 24.f, inf_float, -0.f}};
+
+ const struct TestCaseMsa2RF_D_D tc_d[] = {{81., inf_double, 9., inf_double},
+ {331776., -0., 576, -0.}};
+
+ for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_F); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
+ [](MacroAssembler& assm) { __ fsqrt_w(w2, w0); },
+ load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ }
+ for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_D); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [](MacroAssembler& assm) { __ fsqrt_d(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
+TEST(MSA_frsqrt) {
+ if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float inf_float = std::numeric_limits<float>::infinity();
+ const double inf_double = std::numeric_limits<double>::infinity();
+
+ const struct TestCaseMsa2RF_F_F tc_s[] = {
+ {81.f, 576.f, inf_float, -0.f, 1.f / 9.f, 1.f / 24.f, 0.f, -inf_float},
+ {0.f, 1.f / 576.f, 1.f / 81.f, 1.f / 4.f, inf_float, 24.f, 9.f, 2.f}};
+
+ const struct TestCaseMsa2RF_D_D tc_d[] = {
+ {81., inf_double, 1. / 9., 0.},
+ {331776., -0., 1. / 576., -inf_double},
+ {0., 1. / 81, inf_double, 9.}};
+
+ for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_F); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
+ [](MacroAssembler& assm) { __ frsqrt_w(w2, w0); },
+ load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ }
+ for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_D); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [](MacroAssembler& assm) { __ frsqrt_d(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
+TEST(MSA_frcp) {
+ if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float inf_float = std::numeric_limits<float>::infinity();
+ const double inf_double = std::numeric_limits<double>::infinity();
+
+ const struct TestCaseMsa2RF_F_F tc_s[] = {
+ {12.f, 576.f, inf_float, -0.f, 1.f / 12.f, 1.f / 576.f, 0.f, -inf_float},
+ {0.f, 1.f / 576.f, -inf_float, 1.f / 400.f, inf_float, 576.f, -0.f,
+ 400.f}};
+
+ const struct TestCaseMsa2RF_D_D tc_d[] = {
+ {81., inf_double, 1. / 81., 0.},
+ {331777., -0., 1. / 331777., -inf_double},
+ {0., 1. / 80, inf_double, 80.},
+ {1. / 40000., -inf_double, 40000., -0.}};
+
+ for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_F); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
+ [](MacroAssembler& assm) { __ frcp_w(w2, w0); },
+ load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ }
+ for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_D); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [](MacroAssembler& assm) { __ frcp_d(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
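+// Runs frint_w over the given test cases under |rounding_mode|, temporarily
+// installing the mode in MSACSR via ctcmsa and restoring the saved value
+// afterwards.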
+void test_frint_s(size_t data_size, TestCaseMsa2RF_F_F tc_d[],
+ int rounding_mode) {
+ for (size_t i = 0; i < data_size / sizeof(TestCaseMsa2RF_F_F); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [&rounding_mode](MacroAssembler& assm) {
+ MSAControlRegister msareg = {kMSACSRRegister};
+ __ li(t0, static_cast<uint32_t>(rounding_mode));
+ __ cfcmsa(t1, msareg);
+ __ ctcmsa(msareg, t0);
+ __ frint_w(w2, w0);
+ __ ctcmsa(msareg, t1);
+ },
+ load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ }
+}
+
+void test_frint_d(size_t data_size, TestCaseMsa2RF_D_D tc_d[],
+ int rounding_mode) {
+ for (size_t i = 0; i < data_size / sizeof(TestCaseMsa2RF_D_D); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [&rounding_mode](MacroAssembler& assm) {
+ MSAControlRegister msareg = {kMSACSRRegister};
+ __ li(t0, static_cast<uint32_t>(rounding_mode));
+ __ cfcmsa(t1, msareg);
+ __ ctcmsa(msareg, t0);
+ __ frint_d(w2, w0);
+ __ ctcmsa(msareg, t1);
+ },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
+TEST(MSA_frint) {
+ if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ struct TestCaseMsa2RF_F_F tc_s1[] = {
+ {0.f, 4.51f, 1.49f, -12.51f, 0.f, 5.f, 1.f, -13.f},
+ {-1.32f, -23.38f, 2.8f, -32.5f, -1.f, -23.f, 3.f, -32.f}};
+
+ struct TestCaseMsa2RF_D_D tc_d1[] = {{0., 4.51, 0., 5.},
+ {1.49, -12.51, 1., -13.},
+ {-1.32, -23.38, -1., -23.},
+ {2.8, -32.6, 3., -33.}};
+
+ test_frint_s(sizeof(tc_s1), tc_s1, kRoundToNearest);
+ test_frint_d(sizeof(tc_d1), tc_d1, kRoundToNearest);
+
+ struct TestCaseMsa2RF_F_F tc_s2[] = {
+ {0.f, 4.5f, 1.49f, -12.51f, 0.f, 4.f, 1.f, -12.f},
+ {-1.f, -23.38f, 2.8f, -32.6f, -1.f, -23.f, 2.f, -32.f}};
+
+ struct TestCaseMsa2RF_D_D tc_d2[] = {{0., 4.5, 0., 4.},
+ {1.49, -12.51, 1., -12.},
+ {-1., -23.38, -1., -23.},
+ {2.8, -32.6, 2., -32.}};
+
+ test_frint_s(sizeof(tc_s2), tc_s2, kRoundToZero);
+ test_frint_d(sizeof(tc_d2), tc_d2, kRoundToZero);
+
+ struct TestCaseMsa2RF_F_F tc_s3[] = {
+ {0.f, 4.5f, 1.49f, -12.51f, 0.f, 5.f, 2.f, -12.f},
+ {-1.f, -23.38f, 2.8f, -32.6f, -1.f, -23.f, 3.f, -32.f}};
+
+ struct TestCaseMsa2RF_D_D tc_d3[] = {{0., 4.5, 0., 5.},
+ {1.49, -12.51, 2., -12.},
+ {-1., -23.38, -1., -23.},
+ {2.8, -32.6, 3., -32.}};
+
+ test_frint_s(sizeof(tc_s3), tc_s3, kRoundToPlusInf);
+ test_frint_d(sizeof(tc_d3), tc_d3, kRoundToPlusInf);
+
+ struct TestCaseMsa2RF_F_F tc_s4[] = {
+ {0.f, 4.5f, 1.49f, -12.51f, 0.f, 4.f, 1.f, -13.f},
+ {-1.f, -23.38f, 2.8f, -32.6f, -1.f, -24.f, 2.f, -33.f}};
+
+ struct TestCaseMsa2RF_D_D tc_d4[] = {{0., 4.5, 0., 4.},
+ {1.49, -12.51, 1., -13.},
+ {-1., -23.38, -1., -24.},
+ {2.8, -32.6, 2., -33.}};
+
+ test_frint_s(sizeof(tc_s4), tc_s4, kRoundToMinusInf);
+ test_frint_d(sizeof(tc_d4), tc_d4, kRoundToMinusInf);
+}
+
+TEST(MSA_flog2) {
+ if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float inf_float = std::numeric_limits<float>::infinity();
+ const double inf_double = std::numeric_limits<double>::infinity();
+
+ struct TestCaseMsa2RF_F_F tc_s[] = {
+ {std::ldexp(0.58f, -48), std::ldexp(0.5f, 110), std::ldexp(1.11f, -130),
+ inf_float, -49.f, 109.f, -130.f, inf_float},
+ {0.f, -0.f, std::ldexp(0.89f, -12), std::ldexp(0.32f, 126), -inf_float,
+ -inf_float, -13.f, 124.f}};
+
+ struct TestCaseMsa2RF_D_D tc_d[] = {
+ {std::ldexp(0.58, -48), std::ldexp(0.5, 110), -49., 109.},
+ {std::ldexp(1.11, -1050), inf_double, -1050., inf_double},
+ {0., -0., -inf_double, -inf_double},
+ {std::ldexp(0.32, 1021), std::ldexp(1.23, -123), 1019., -123.}};
+
+ for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_F); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
+ [](MacroAssembler& assm) { __ flog2_w(w2, w0); },
+ load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ }
+
+ for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_D); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [](MacroAssembler& assm) { __ flog2_d(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
+void test_ftint_s_s(size_t data_size, TestCaseMsa2RF_F_I tc_d[],
+ int rounding_mode) {
+ for (size_t i = 0; i < data_size / sizeof(TestCaseMsa2RF_F_I); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [&rounding_mode](MacroAssembler& assm) {
+ MSAControlRegister msareg = {kMSACSRRegister};
+ __ li(t0, static_cast<uint32_t>(rounding_mode));
+ __ cfcmsa(t1, msareg);
+ __ ctcmsa(msareg, t0);
+ __ ftint_s_w(w2, w0);
+ __ ctcmsa(msareg, t1);
+ },
+ load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ }
+}
+
+void test_ftint_s_d(size_t data_size, TestCaseMsa2RF_D_I tc_d[],
+ int rounding_mode) {
+ for (size_t i = 0; i < data_size / sizeof(TestCaseMsa2RF_D_I); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [&rounding_mode](MacroAssembler& assm) {
+ MSAControlRegister msareg = {kMSACSRRegister};
+ __ li(t0, static_cast<uint32_t>(rounding_mode));
+ __ cfcmsa(t1, msareg);
+ __ ctcmsa(msareg, t0);
+ __ ftint_s_d(w2, w0);
+ __ ctcmsa(msareg, t1);
+ },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
+TEST(MSA_ftint_s) {
+ if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float inf_float = std::numeric_limits<float>::infinity();
+ const double inf_double = std::numeric_limits<double>::infinity();
+ const int32_t int32_max = std::numeric_limits<int32_t>::max();
+ const int32_t int32_min = std::numeric_limits<int32_t>::min();
+ const int64_t int64_max = std::numeric_limits<int64_t>::max();
+ const int64_t int64_min = std::numeric_limits<int64_t>::min();
+
+ struct TestCaseMsa2RF_F_I tc_s1[] = {
+ {0.f, 4.51f, 1.49f, -12.51f, 0, 5, 1, -13},
+ {-0.32f, -23.38f, 2.8f, -32.6f, 0, -23, 3, -33},
+ {inf_float, -inf_float, 3.f * int32_min, 4.f * int32_max, int32_max,
+ int32_min, int32_min, int32_max}};
+
+ struct TestCaseMsa2RF_D_I tc_d1[] = {
+ {0., 4.51, 0, 5},
+ {1.49, -12.51, 1, -13},
+ {-0.32, -23.38, 0, -23},
+ {2.8, -32.6, 3, -33},
+ {inf_double, -inf_double, int64_max, int64_min},
+ {33.23 * int64_min, 4000. * int64_max, int64_min, int64_max}};
+
+ test_ftint_s_s(sizeof(tc_s1), tc_s1, kRoundToNearest);
+ test_ftint_s_d(sizeof(tc_d1), tc_d1, kRoundToNearest);
+
+ struct TestCaseMsa2RF_F_I tc_s2[] = {
+ {0.f, 4.5f, 1.49f, -12.51f, 0, 4, 1, -12},
+ {-0.f, -23.38f, 2.8f, -32.6f, -0, -23, 2, -32},
+ {inf_float, -inf_float, 3.f * int32_min, 4.f * int32_max, int32_max,
+ int32_min, int32_min, int32_max}};
+
+ struct TestCaseMsa2RF_D_I tc_d2[] = {
+ {0., 4.5, 0, 4},
+ {1.49, -12.51, 1, -12},
+ {-0., -23.38, -0, -23},
+ {2.8, -32.6, 2, -32},
+ {inf_double, -inf_double, int64_max, int64_min},
+ {33.23 * int64_min, 4000. * int64_max, int64_min, int64_max}};
+
+ test_ftint_s_s(sizeof(tc_s2), tc_s2, kRoundToZero);
+ test_ftint_s_d(sizeof(tc_d2), tc_d2, kRoundToZero);
+
+ struct TestCaseMsa2RF_F_I tc_s3[] = {
+ {0.f, 4.5f, 1.49f, -12.51f, 0, 5, 2, -12},
+ {-0.f, -23.38f, 2.8f, -32.6f, -0, -23, 3, -32},
+ {inf_float, -inf_float, 3.f * int32_min, 4.f * int32_max, int32_max,
+ int32_min, int32_min, int32_max}};
+
+ struct TestCaseMsa2RF_D_I tc_d3[] = {
+ {0., 4.5, 0, 5},
+ {1.49, -12.51, 2, -12},
+ {-0., -23.38, -0, -23},
+ {2.8, -32.6, 3, -32},
+ {inf_double, -inf_double, int64_max, int64_min},
+ {33.23 * int64_min, 4000. * int64_max, int64_min, int64_max}};
+
+ test_ftint_s_s(sizeof(tc_s3), tc_s3, kRoundToPlusInf);
+ test_ftint_s_d(sizeof(tc_d3), tc_d3, kRoundToPlusInf);
+
+ struct TestCaseMsa2RF_F_I tc_s4[] = {
+ {0.f, 4.5f, 1.49f, -12.51f, 0, 4, 1, -13},
+ {-0.f, -23.38f, 2.8f, -32.6f, -0, -24, 2, -33},
+ {inf_float, -inf_float, 3.f * int32_min, 4.f * int32_max, int32_max,
+ int32_min, int32_min, int32_max}};
+
+ struct TestCaseMsa2RF_D_I tc_d4[] = {
+ {0., 4.5, 0, 4},
+ {1.49, -12.51, 1, -13},
+ {-0., -23.38, -0, -24},
+ {2.8, -32.6, 2, -33},
+ {inf_double, -inf_double, int64_max, int64_min},
+ {33.23 * int64_min, 4000. * int64_max, int64_min, int64_max}};
+
+ test_ftint_s_s(sizeof(tc_s4), tc_s4, kRoundToMinusInf);
+ test_ftint_s_d(sizeof(tc_d4), tc_d4, kRoundToMinusInf);
+}
+
+void test_ftint_u_s(size_t data_size, TestCaseMsa2RF_F_U tc_d[],
+ int rounding_mode) {
+ for (size_t i = 0; i < data_size / sizeof(TestCaseMsa2RF_F_U); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [&rounding_mode](MacroAssembler& assm) {
+ MSAControlRegister msareg = {kMSACSRRegister};
+ __ li(t0, static_cast<uint32_t>(rounding_mode));
+ __ cfcmsa(t1, msareg);
+ __ ctcmsa(msareg, t0);
+ __ ftint_u_w(w2, w0);
+ __ ctcmsa(msareg, t1);
+ },
+ load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ }
+}
+
+void test_ftint_u_d(size_t data_size, TestCaseMsa2RF_D_U tc_d[],
+ int rounding_mode) {
+ for (size_t i = 0; i < data_size / sizeof(TestCaseMsa2RF_D_U); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [&rounding_mode](MacroAssembler& assm) {
+ MSAControlRegister msareg = {kMSACSRRegister};
+ __ li(t0, static_cast<uint32_t>(rounding_mode));
+ __ cfcmsa(t1, msareg);
+ __ ctcmsa(msareg, t0);
+ __ ftint_u_d(w2, w0);
+ __ ctcmsa(msareg, t1);
+ },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
+TEST(MSA_ftint_u) {
+ if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float inf_float = std::numeric_limits<float>::infinity();
+ const double inf_double = std::numeric_limits<double>::infinity();
+ const uint32_t uint32_max = std::numeric_limits<uint32_t>::max();
+ const uint64_t uint64_max = std::numeric_limits<uint64_t>::max();
+
+ struct TestCaseMsa2RF_F_U tc_s1[] = {
+ {0.f, 4.51f, 1.49f, -12.51f, 0, 5, 1, 0},
+ {-0.32f, 23.38f, 2.8f, 32.6f, 0, 23, 3, 33},
+ {inf_float, -inf_float, 0, 4.f * uint32_max, uint32_max, 0, 0,
+ uint32_max}};
+
+ struct TestCaseMsa2RF_D_U tc_d1[] = {
+ {0., 4.51, 0, 5},
+ {1.49, -12.51, 1, 0},
+ {-0.32, 23.38, 0, 23},
+ {2.8, 32.6, 3, 33},
+ {inf_double, -inf_double, uint64_max, 0},
+ {-0., 4000. * uint64_max, 0, uint64_max}};
+
+ test_ftint_u_s(sizeof(tc_s1), tc_s1, kRoundToNearest);
+ test_ftint_u_d(sizeof(tc_d1), tc_d1, kRoundToNearest);
+
+ struct TestCaseMsa2RF_F_U tc_s2[] = {
+ {0.f, 4.5f, 1.49f, -12.51f, 0, 4, 1, 0},
+ {-0.f, 23.38f, 2.8f, 32.6f, 0, 23, 2, 32},
+ {inf_float, -inf_float, 0., 4.f * uint32_max, uint32_max, 0, 0,
+ uint32_max}};
+
+ struct TestCaseMsa2RF_D_U tc_d2[] = {
+ {0., 4.5, 0, 4},
+ {1.49, -12.51, 1, 0},
+ {-0., 23.38, 0, 23},
+ {2.8, 32.6, 2, 32},
+ {inf_double, -inf_double, uint64_max, 0},
+ {-0.2345, 4000. * uint64_max, 0, uint64_max}};
+
+ test_ftint_u_s(sizeof(tc_s2), tc_s2, kRoundToZero);
+ test_ftint_u_d(sizeof(tc_d2), tc_d2, kRoundToZero);
+
+ struct TestCaseMsa2RF_F_U tc_s3[] = {
+ {0.f, 4.5f, 1.49f, -12.51f, 0, 5, 2, 0},
+ {-0.f, 23.38f, 2.8f, 32.6f, 0, 24, 3, 33},
+ {inf_float, -inf_float, 0, 4.f * uint32_max, uint32_max, 0, 0,
+ uint32_max}};
+
+ struct TestCaseMsa2RF_D_U tc_d3[] = {
+ {0., 4.5, 0, 5},
+ {1.49, -12.51, 2, 0},
+ {-0., 23.38, -0, 24},
+ {2.8, 32.6, 3, 33},
+ {inf_double, -inf_double, uint64_max, 0},
+ {-0.5252, 4000. * uint64_max, 0, uint64_max}};
+
+ test_ftint_u_s(sizeof(tc_s3), tc_s3, kRoundToPlusInf);
+ test_ftint_u_d(sizeof(tc_d3), tc_d3, kRoundToPlusInf);
+
+ struct TestCaseMsa2RF_F_U tc_s4[] = {
+ {0.f, 4.5f, 1.49f, -12.51f, 0, 4, 1, 0},
+ {-0.f, 23.38f, 2.8f, 32.6f, 0, 23, 2, 32},
+ {inf_float, -inf_float, 0, 4.f * uint32_max, uint32_max, 0, 0,
+ uint32_max}};
+
+ struct TestCaseMsa2RF_D_U tc_d4[] = {
+ {0., 4.5, 0, 4},
+ {1.49, -12.51, 1, 0},
+ {-0., 23.38, -0, 23},
+ {2.8, 32.6, 2, 32},
+ {inf_double, -inf_double, uint64_max, 0},
+ {-0.098797, 4000. * uint64_max, 0, uint64_max}};
+
+ test_ftint_u_s(sizeof(tc_s4), tc_s4, kRoundToMinusInf);
+ test_ftint_u_d(sizeof(tc_d4), tc_d4, kRoundToMinusInf);
+}
+
+struct TestCaseMsa2RF_U_F {
+ uint32_t ws1;
+ uint32_t ws2;
+ uint32_t ws3;
+ uint32_t ws4;
+ float exp_res_1;
+ float exp_res_2;
+ float exp_res_3;
+ float exp_res_4;
+};
+
+struct TestCaseMsa2RF_U_D {
+ uint64_t ws1;
+ uint64_t ws2;
+ double exp_res_1;
+ double exp_res_2;
+};
+
+TEST(MSA_ffint_u) {
+ if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ struct TestCaseMsa2RF_U_F tc_s[] = {
+ {0, 345, 234, 1000, 0.f, 345.f, 234.f, 1000.f}};
+
+ struct TestCaseMsa2RF_U_D tc_d[] = {{0, 345, 0., 345.},
+ {234, 1000, 234., 1000.}};
+
+ for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U_F); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
+ [](MacroAssembler& assm) { __ ffint_u_w(w2, w0); },
+ load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ }
+ for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_U_D); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [](MacroAssembler& assm) { __ ffint_u_d(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
+struct TestCaseMsa2RF_I_F {
+ int32_t ws1;
+ int32_t ws2;
+ int32_t ws3;
+ int32_t ws4;
+ float exp_res_1;
+ float exp_res_2;
+ float exp_res_3;
+ float exp_res_4;
+};
+
+struct TestCaseMsa2RF_I_D {
+ int64_t ws1;
+ int64_t ws2;
+ double exp_res_1;
+ double exp_res_2;
+};
+
+TEST(MSA_ffint_s) {
+ if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ struct TestCaseMsa2RF_I_F tc_s[] = {
+ {0, 345, -234, 1000, 0.f, 345.f, -234.f, 1000.f}};
+
+ struct TestCaseMsa2RF_I_D tc_d[] = {{0, 345, 0., 345.},
+ {-234, 1000, -234., 1000.}};
+
+ for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_I_F); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
+ [](MacroAssembler& assm) { __ ffint_s_w(w2, w0); },
+ load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ }
+ for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_I_D); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [](MacroAssembler& assm) { __ ffint_s_d(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
+struct TestCaseMsa2RF_U16_F {
+ uint16_t ws1;
+ uint16_t ws2;
+ uint16_t ws3;
+ uint16_t ws4;
+ uint16_t ws5;
+ uint16_t ws6;
+ uint16_t ws7;
+ uint16_t ws8;
+ float exp_res_1;
+ float exp_res_2;
+ float exp_res_3;
+ float exp_res_4;
+};
+
+struct TestCaseMsa2RF_F_D {
+ float ws1;
+ float ws2;
+ float ws3;
+ float ws4;
+ double exp_res_1;
+ double exp_res_2;
+};
+
+TEST(MSA_fexupl) {
+ if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float inf_float = std::numeric_limits<float>::infinity();
+ const double inf_double = std::numeric_limits<double>::infinity();
+
+ struct TestCaseMsa2RF_U16_F tc_s[] = {
+ {1, 2, 0x7c00, 0x0c00, 0, 0x7c00, 0xfc00, 0x8000, 0.f, inf_float,
+ -inf_float, -0.f},
+ {0xfc00, 0xffff, 0x00ff, 0x8000, 0x81fe, 0x8000, 0x0345, 0xaaaa,
+ -3.0398368835e-5f, -0.f, 4.9889088e-5f, -5.2062988281e-2f},
+ {3, 4, 0x5555, 6, 0x2aaa, 0x8700, 0x7777, 0x6a8b, 5.2062988281e-2f,
+ -1.06811523458e-4f, 3.0576e4f, 3.35e3f}};
+
+ struct TestCaseMsa2RF_F_D tc_d[] = {
+ {0.f, 123.456f, inf_float, -0.f, inf_double, -0.},
+ {-inf_float, -3.f, 0.f, -inf_float, 0., -inf_double},
+ {2.3f, 3., 1.37747639043129518071e-41f, -3.22084585277826e35f,
+ 1.37747639043129518071e-41, -3.22084585277826e35}};
+
+ for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U16_F); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
+ [](MacroAssembler& assm) { __ fexupl_w(w2, w0); },
+ load_uint16_elements_of_vector, store_uint32_elements_of_vector);
+ }
+ for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_F_D); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [](MacroAssembler& assm) { __ fexupl_d(w2, w0); },
+ load_uint32_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
+TEST(MSA_fexupr) {
+ if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float inf_float = std::numeric_limits<float>::infinity();
+ const double inf_double = std::numeric_limits<double>::infinity();
+
+ struct TestCaseMsa2RF_U16_F tc_s[] = {
+ {0, 0x7c00, 0xfc00, 0x8000, 1, 2, 0x7c00, 0x0c00, 0.f, inf_float,
+ -inf_float, -0.f},
+ {0x81fe, 0x8000, 0x0345, 0xaaaa, 0xfc00, 0xffff, 0x00ff, 0x8000,
+ -3.0398368835e-5f, -0.f, 4.9889088e-5f, -5.2062988281e-2f},
+ {0x2aaa, 0x8700, 0x7777, 0x6a8b, 3, 4, 0x5555, 6, 5.2062988281e-2f,
+ -1.06811523458e-4f, 3.0576e4f, 3.35e3f}};
+
+ struct TestCaseMsa2RF_F_D tc_d[] = {
+ {inf_float, -0.f, 0.f, 123.456f, inf_double, -0.},
+ {0.f, -inf_float, -inf_float, -3.f, 0., -inf_double},
+ {1.37747639043129518071e-41f, -3.22084585277826e35f, 2.3f, 3.,
+ 1.37747639043129518071e-41, -3.22084585277826e35}};
+
+ for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U16_F); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
+ [](MacroAssembler& assm) { __ fexupr_w(w2, w0); },
+ load_uint16_elements_of_vector, store_uint32_elements_of_vector);
+ }
+ for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_F_D); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [](MacroAssembler& assm) { __ fexupr_d(w2, w0); },
+ load_uint32_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
+struct TestCaseMsa2RF_U32_D {
+ uint32_t ws1;
+ uint32_t ws2;
+ uint32_t ws3;
+ uint32_t ws4;
+ double exp_res_1;
+ double exp_res_2;
+};
+
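+// ffql/ffqr convert the left- and right-half fixed-point (Q15 or Q31) lanes
+// to floating point.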
+TEST(MSA_ffql) {
+ if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ struct TestCaseMsa2RF_U16_F tc_s[] = {{0, 3, 0xffff, 0x8000, 0x8000, 0xe000,
+ 0x0FF0, 0, -1.f, -0.25f,
+ 0.12451171875f, 0.f}};
+
+ struct TestCaseMsa2RF_U32_D tc_d[] = {
+ {0, 45, 0x80000000, 0xe0000000, -1., -0.25},
+ {0x28379, 0xaaaa5555, 0x024903d3, 0, 17.853239085525274277e-3, 0.}};
+
+ for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U16_F); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
+ [](MacroAssembler& assm) { __ ffql_w(w2, w0); },
+ load_uint16_elements_of_vector, store_uint32_elements_of_vector);
+ }
+ for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_U32_D); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [](MacroAssembler& assm) { __ ffql_d(w2, w0); },
+ load_uint32_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
+TEST(MSA_ffqr) {
+ if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ struct TestCaseMsa2RF_U16_F tc_s[] = {{0x8000, 0xe000, 0x0FF0, 0, 0, 3,
+ 0xffff, 0x8000, -1.f, -0.25f,
+ 0.12451171875f, 0.f}};
+
+ struct TestCaseMsa2RF_U32_D tc_d[] = {
+ {0x80000000, 0xe0000000, 0, 45, -1., -0.25},
+ {0x024903d3, 0, 0x28379, 0xaaaa5555, 17.853239085525274277e-3, 0.}};
+
+ for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U16_F); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
+ [](MacroAssembler& assm) { __ ffqr_w(w2, w0); },
+ load_uint16_elements_of_vector, store_uint32_elements_of_vector);
+ }
+ for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_U32_D); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [](MacroAssembler& assm) { __ ffqr_d(w2, w0); },
+ load_uint32_elements_of_vector, store_uint64_elements_of_vector);
}
}
@@ -6908,39 +7872,21 @@ void run_msa_vector(struct TestCaseMsaVector* input,
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
-#define LOAD_W_REG(lo, hi, w_reg) \
- __ li(t0, static_cast<uint32_t>(lo & 0xffffffff)); \
- __ li(t1, static_cast<uint32_t>((lo >> 32) & 0xffffffff)); \
- __ insert_w(w_reg, 0, t0); \
- __ insert_w(w_reg, 1, t1); \
- __ li(t0, static_cast<uint32_t>(hi & 0xffffffff)); \
- __ li(t1, static_cast<uint32_t>((hi >> 32) & 0xffffffff)); \
- __ insert_w(w_reg, 2, t0); \
- __ insert_w(w_reg, 3, t1)
-
- LOAD_W_REG(input->ws_lo, input->ws_hi, w0);
- LOAD_W_REG(input->wt_lo, input->wt_hi, w2);
- LOAD_W_REG(input->wd_lo, input->wd_hi, w4);
-#undef LOAD_W_REG
+ load_uint64_elements_of_vector(assm, &(input->ws_lo), w0, t0, t1);
+ load_uint64_elements_of_vector(assm, &(input->wt_lo), w2, t0, t1);
+ load_uint64_elements_of_vector(assm, &(input->wd_lo), w4, t0, t1);
GenerateVectorInstructionFunc(assm);
- __ copy_u_w(t2, w4, 0);
- __ sw(t2, MemOperand(a0, 0));
- __ copy_u_w(t2, w4, 1);
- __ sw(t2, MemOperand(a0, 4));
- __ copy_u_w(t2, w4, 2);
- __ sw(t2, MemOperand(a0, 8));
- __ copy_u_w(t2, w4, 3);
- __ sw(t2, MemOperand(a0, 12));
+ store_uint64_elements_of_vector(assm, w4, a0, t2);
__ jr(ra);
__ nop();
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -7015,38 +7961,20 @@ void run_msa_bit(struct TestCaseMsaBit* input, InstFunc GenerateInstructionFunc,
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
-#define LOAD_W_REG(lo, hi, w_reg) \
- __ li(t0, static_cast<uint32_t>(lo & 0xffffffff)); \
- __ li(t1, static_cast<uint32_t>((lo >> 32) & 0xffffffff)); \
- __ insert_w(w_reg, 0, t0); \
- __ insert_w(w_reg, 1, t1); \
- __ li(t0, static_cast<uint32_t>(hi & 0xffffffff)); \
- __ li(t1, static_cast<uint32_t>((hi >> 32) & 0xffffffff)); \
- __ insert_w(w_reg, 2, t0); \
- __ insert_w(w_reg, 3, t1)
-
- LOAD_W_REG(input->ws_lo, input->ws_hi, w0);
- LOAD_W_REG(input->wd_lo, input->wd_hi, w2);
-#undef LOAD_W_REG
+ load_uint64_elements_of_vector(assm, &(input->ws_lo), w0, t0, t1);
+ load_uint64_elements_of_vector(assm, &(input->wd_lo), w2, t0, t1);
GenerateInstructionFunc(assm, input->m);
- __ copy_u_w(t2, w2, 0);
- __ sw(t2, MemOperand(a0, 0));
- __ copy_u_w(t2, w2, 1);
- __ sw(t2, MemOperand(a0, 4));
- __ copy_u_w(t2, w2, 2);
- __ sw(t2, MemOperand(a0, 8));
- __ copy_u_w(t2, w2, 3);
- __ sw(t2, MemOperand(a0, 12));
+ store_uint64_elements_of_vector(assm, w2, a0, t2);
__ jr(ra);
__ nop();
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -7511,22 +8439,15 @@ void run_msa_i10(int32_t input, InstFunc GenerateVectorInstructionFunc,
GenerateVectorInstructionFunc(assm, input);
- __ copy_u_w(t2, w0, 0);
- __ sw(t2, MemOperand(a0, 0));
- __ copy_u_w(t2, w0, 1);
- __ sw(t2, MemOperand(a0, 4));
- __ copy_u_w(t2, w0, 2);
- __ sw(t2, MemOperand(a0, 8));
- __ copy_u_w(t2, w0, 3);
- __ sw(t2, MemOperand(a0, 12));
+ store_uint64_elements_of_vector(assm, w0, a0, t2);
__ jr(ra);
__ nop();
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -7603,8 +8524,8 @@ void run_msa_mi10(InstFunc GenerateVectorInstructionFunc) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -7649,4 +8570,783 @@ TEST(MSA_load_store_vector) {
#undef LDI_DF
}
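+
+// A 3R test case: the three 128-bit operands ws, wt and wd, each split into
+// 64-bit lo/hi halves.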
+struct TestCaseMsa3R {
+ uint64_t ws_lo;
+ uint64_t ws_hi;
+ uint64_t wt_lo;
+ uint64_t wt_hi;
+ uint64_t wd_lo;
+ uint64_t wd_hi;
+};
+
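+// Sentinel returned by the reference implementations below when the result
+// is implementation-defined (e.g. integer division by zero); run_msa_3r
+// skips the comparison in that case.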
+static const uint64_t Unpredictable = 0x312014017725ll;
+
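+// Loads wt, ws and wd into w0, w1 and w2, runs the instruction under test,
+// stores w2 back to memory and checks both 64-bit halves of the result
+// against the C++ reference implementation.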
+template <typename InstFunc, typename OperFunc>
+void run_msa_3r(struct TestCaseMsa3R* input, InstFunc Generate3RInstructionFunc,
+ OperFunc GenerateOperationFunc) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ CpuFeatureScope fscope(&assm, MIPS_SIMD);
+ msa_reg_t res;
+ uint64_t expected;
+
+ load_uint64_elements_of_vector(assm, &(input->wt_lo), w0, t0, t1);
+ load_uint64_elements_of_vector(assm, &(input->ws_lo), w1, t0, t1);
+ load_uint64_elements_of_vector(assm, &(input->wd_lo), w2, t0, t1);
+
+  Generate3RInstructionFunc(assm);
+
+ store_uint64_elements_of_vector(assm, w2, a0, t2);
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+
+ (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+
+ expected = GenerateOperationFunc(input->ws_lo, input->wt_lo, input->wd_lo);
+ if (expected != Unpredictable) {
+ CHECK_EQ(expected, res.d[0]);
+ }
+
+ expected = GenerateOperationFunc(input->ws_hi, input->wt_hi, input->wd_hi);
+ if (expected != Unpredictable) {
+ CHECK_EQ(expected, res.d[1]);
+ }
+}
+
+TEST(MSA_3R_instructions) {
+ if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ struct TestCaseMsa3R tc[] = {
+ {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x1169751bb9a7d9c3,
+ 0xf7a594aec8ef8a9c, 0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c},
+ {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0x2b665362c4e812df,
+ 0x3a0d80d68b3f8bc8, 0x2b665362c4e812df, 0x3a0d80d68b3f8bc8},
+ {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x1169751bb9a7d9c3,
+ 0xf7a594aec8ef8a9c, 0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c},
+ {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0x2b665362c4e812df,
+ 0x3a0d80d68b3f8bc8, 0x2b665362c4e812df, 0x3a0d80d68b3f8bc8},
+ {0xffab807f807fffcd, 0x7f23ff80ff567f80, 0xffab807f807fffcd,
+ 0x7f23ff80ff567f80, 0xffab807f807fffcd, 0x7f23ff80ff567f80},
+ {0x80ffefff7f12807f, 0x807f80ff7fdeff78, 0x80ffefff7f12807f,
+ 0x807f80ff7fdeff78, 0x80ffefff7f12807f, 0x807f80ff7fdeff78},
+ {0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff,
+ 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff},
+ {0x0000000000000000, 0xffffffffffffffff, 0xffffffffffffffff,
+ 0x0000000000000000, 0x0000000000000000, 0xffffffffffffffff},
+ {0xffff0000ffff0000, 0xffff0000ffff0000, 0xffff0000ffff0000,
+ 0xffff0000ffff0000, 0xffff0000ffff0000, 0xffff0000ffff0000},
+ {0xff00ff00ff00ff00, 0xff00ff00ff00ff00, 0xff00ff00ff00ff00,
+ 0xff00ff00ff00ff00, 0xff00ff00ff00ff00, 0xff00ff00ff00ff00},
+ {0xf0f0f0f0f0f0f0f0, 0xf0f0f0f0f0f0f0f0, 0xf0f0f0f0f0f0f0f0,
+ 0xf0f0f0f0f0f0f0f0, 0xf0f0f0f0f0f0f0f0, 0xf0f0f0f0f0f0f0f0},
+ {0xff0000ffff0000ff, 0xff0000ffff0000ff, 0xff0000ffff0000ff,
+ 0xff0000ffff0000ff, 0xff0000ffff0000ff, 0xff0000ffff0000ff},
+ {0xffff00000000ffff, 0xffff00000000ffff, 0xffff00000000ffff,
+ 0xffff00000000ffff, 0xffff00000000ffff, 0xffff00000000ffff}};
+
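+// Scalar reference implementations of the 3R instructions. Each macro splits
+// one 64-bit half of the vector into lanes of type T, applies the operation
+// per lane and reassembles the packed result.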
+#define SLL_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T src_op = static_cast<T>((ws >> shift) & mask); \
+ T shift_op = static_cast<T>((wt >> shift) & mask) % size_in_bits; \
+ res |= (static_cast<uint64_t>(src_op << shift_op) & mask) << shift; \
+ } \
+ return res
+
+#define SRA_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T src_op = static_cast<T>((ws >> shift) & mask); \
+ T shift_op = ((wt >> shift) & mask) % size_in_bits; \
+ res |= \
+ (static_cast<uint64_t>(ArithmeticShiftRight(src_op, shift_op) & mask)) \
+ << shift; \
+ } \
+ return res
+
+#define SRL_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T src_op = static_cast<T>((ws >> shift) & mask); \
+ T shift_op = static_cast<T>(((wt >> shift) & mask) % size_in_bits); \
+ res |= (static_cast<uint64_t>(src_op >> shift_op) & mask) << shift; \
+ } \
+ return res
+
+#define BCRL_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T src_op = static_cast<T>((ws >> shift) & mask); \
+ T shift_op = static_cast<T>(((wt >> shift) & mask) % size_in_bits); \
+ T r = (static_cast<T>(~(1ull << shift_op)) & src_op) & mask; \
+ res |= static_cast<uint64_t>(r) << shift; \
+ } \
+ return res
+
+#define BSET_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T src_op = static_cast<T>((ws >> shift) & mask); \
+ T shift_op = static_cast<T>(((wt >> shift) & mask) % size_in_bits); \
+ T r = (static_cast<T>(1ull << shift_op) | src_op) & mask; \
+ res |= static_cast<uint64_t>(r) << shift; \
+ } \
+ return res
+
+#define BNEG_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T src_op = static_cast<T>((ws >> shift) & mask); \
+ T shift_op = static_cast<T>(((wt >> shift) & mask) % size_in_bits); \
+ T r = (static_cast<T>(1ull << shift_op) ^ src_op) & mask; \
+ res |= static_cast<uint64_t>(r) << shift; \
+ } \
+ return res
+
+#define BINSL_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wd_op = static_cast<T>((wd >> shift) & mask); \
+ T shift_op = static_cast<T>(((wt >> shift) & mask) % size_in_bits); \
+ int bits = shift_op + 1; \
+ T r; \
+ if (bits == size_in_bits) { \
+ r = static_cast<T>(ws_op); \
+ } else { \
+ uint64_t mask2 = ((1ull << bits) - 1) << (size_in_bits - bits); \
+ r = static_cast<T>((static_cast<T>(mask2) & ws_op) | \
+ (static_cast<T>(~mask2) & wd_op)); \
+ } \
+ res |= static_cast<uint64_t>(r) << shift; \
+ } \
+ return res
+
+#define BINSR_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wd_op = static_cast<T>((wd >> shift) & mask); \
+ T shift_op = static_cast<T>(((wt >> shift) & mask) % size_in_bits); \
+ int bits = shift_op + 1; \
+ T r; \
+ if (bits == size_in_bits) { \
+ r = static_cast<T>(ws_op); \
+ } else { \
+ uint64_t mask2 = (1ull << bits) - 1; \
+ r = static_cast<T>((static_cast<T>(mask2) & ws_op) | \
+ (static_cast<T>(~mask2) & wd_op)); \
+ } \
+ res |= static_cast<uint64_t>(r) << shift; \
+ } \
+ return res
+
+#define ADDV_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(ws_op + wt_op) & mask) << shift; \
+ } \
+ return res
+
+#define SUBV_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(ws_op - wt_op) & mask) << shift; \
+ } \
+ return res
+
+#define MAX_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(Max<T>(ws_op, wt_op)) & mask) << shift; \
+ } \
+ return res
+
+#define MIN_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(Min<T>(ws_op, wt_op)) & mask) << shift; \
+ } \
+ return res
+
+#define MAXA_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(Nabs(ws_op) < Nabs(wt_op) ? ws_op : wt_op) & \
+ mask) \
+ << shift; \
+ } \
+ return res
+
+#define MINA_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(Nabs(ws_op) > Nabs(wt_op) ? ws_op : wt_op) & \
+ mask) \
+ << shift; \
+ } \
+ return res
+
+#define CEQ_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= \
+ (static_cast<uint64_t>(!Compare(ws_op, wt_op) ? -1ull : 0ull) & mask) \
+ << shift; \
+ } \
+ return res
+
+#define CLT_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= \
+ (static_cast<uint64_t>((Compare(ws_op, wt_op) == -1) ? -1ull : 0ull) & \
+ mask) \
+ << shift; \
+ } \
+ return res
+
+#define CLE_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= \
+ (static_cast<uint64_t>((Compare(ws_op, wt_op) != 1) ? -1ull : 0ull) & \
+ mask) \
+ << shift; \
+ } \
+ return res
+
+#define ADD_A_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(Abs(ws_op) + Abs(wt_op)) & mask) << shift; \
+ } \
+ return res
+
+#define ADDS_A_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = Nabs(static_cast<T>((ws >> shift) & mask)); \
+ T wt_op = Nabs(static_cast<T>((wt >> shift) & mask)); \
+ T r; \
+ if (ws_op < -std::numeric_limits<T>::max() - wt_op) { \
+ r = std::numeric_limits<T>::max(); \
+ } else { \
+ r = -(ws_op + wt_op); \
+ } \
+ res |= (static_cast<uint64_t>(r) & mask) << shift; \
+ } \
+ return res
+
+#define ADDS_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(SaturateAdd(ws_op, wt_op)) & mask) << shift; \
+ } \
+ return res
+
+#define AVE_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(((wt_op & ws_op) + ((ws_op ^ wt_op) >> 1)) & \
+ mask)) \
+ << shift; \
+ } \
+ return res
+
+#define AVER_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(((wt_op | ws_op) - ((ws_op ^ wt_op) >> 1)) & \
+ mask)) \
+ << shift; \
+ } \
+ return res
+
+#define SUBS_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(SaturateSub(ws_op, wt_op)) & mask) << shift; \
+ } \
+ return res
+
+#define SUBSUS_U_DF(T, lanes, mask) \
+ typedef typename std::make_unsigned<T>::type uT; \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ uT ws_op = static_cast<uT>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ T r; \
+ if (wt_op > 0) { \
+ uT wtu = static_cast<uT>(wt_op); \
+ if (wtu > ws_op) { \
+ r = 0; \
+ } else { \
+ r = static_cast<T>(ws_op - wtu); \
+ } \
+ } else { \
+ if (ws_op > std::numeric_limits<uT>::max() + wt_op) { \
+ r = static_cast<T>(std::numeric_limits<uT>::max()); \
+ } else { \
+ r = static_cast<T>(ws_op - wt_op); \
+ } \
+ } \
+ res |= (static_cast<uint64_t>(r) & mask) << shift; \
+ } \
+ return res
+
+#define SUBSUU_S_DF(T, lanes, mask) \
+ typedef typename std::make_unsigned<T>::type uT; \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ uT ws_op = static_cast<uT>((ws >> shift) & mask); \
+ uT wt_op = static_cast<uT>((wt >> shift) & mask); \
+ uT wdu; \
+ T r; \
+ if (ws_op > wt_op) { \
+ wdu = ws_op - wt_op; \
+ if (wdu > std::numeric_limits<T>::max()) { \
+ r = std::numeric_limits<T>::max(); \
+ } else { \
+ r = static_cast<T>(wdu); \
+ } \
+ } else { \
+ wdu = wt_op - ws_op; \
+ CHECK(-std::numeric_limits<T>::max() == \
+ std::numeric_limits<T>::min() + 1); \
+ if (wdu <= std::numeric_limits<T>::max()) { \
+ r = -static_cast<T>(wdu); \
+ } else { \
+ r = std::numeric_limits<T>::min(); \
+ } \
+ } \
+ res |= (static_cast<uint64_t>(r) & mask) << shift; \
+ } \
+ return res
+
+#define ASUB_S_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(Abs(ws_op - wt_op)) & mask) << shift; \
+ } \
+ return res
+
+#define ASUB_U_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(ws_op > wt_op ? ws_op - wt_op \
+ : wt_op - ws_op) & \
+ mask) \
+ << shift; \
+ } \
+ return res
+
+#define MULV_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(ws_op * wt_op) & mask) << shift; \
+ } \
+ return res
+
+#define MADDV_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ T wd_op = static_cast<T>((wd >> shift) & mask); \
+ res |= (static_cast<uint64_t>(wd_op + ws_op * wt_op) & mask) << shift; \
+ } \
+ return res
+
+#define MSUBV_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ T wd_op = static_cast<T>((wd >> shift) & mask); \
+ res |= (static_cast<uint64_t>(wd_op - ws_op * wt_op) & mask) << shift; \
+ } \
+ return res
+
+#define DIV_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ if (wt_op == 0) { \
+ res = Unpredictable; \
+ break; \
+ } \
+ res |= (static_cast<uint64_t>(ws_op / wt_op) & mask) << shift; \
+ } \
+ return res
+
+#define MOD_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ if (wt_op == 0) { \
+ res = Unpredictable; \
+ break; \
+ } \
+    res |= (static_cast<uint64_t>(ws_op % wt_op) & mask) << shift;          \
+ } \
+ return res
+
+#define SRAR_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T src_op = static_cast<T>((ws >> shift) & mask); \
+ T shift_op = ((wt >> shift) & mask) % size_in_bits; \
+ uint32_t bit = shift_op == 0 ? 0 : src_op >> (shift_op - 1) & 1; \
+ res |= \
+ (static_cast<uint64_t>(ArithmeticShiftRight(src_op, shift_op) + bit) & \
+ mask) \
+ << shift; \
+ } \
+ return res
+
+#define TEST_CASE(V) \
+ V(sll_b, SLL_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(sll_h, SLL_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(sll_w, SLL_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(sll_d, SLL_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(sra_b, SRA_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(sra_h, SRA_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(sra_w, SRA_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(sra_d, SRA_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(srl_b, SRL_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(srl_h, SRL_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(srl_w, SRL_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(srl_d, SRL_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(bclr_b, BCRL_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(bclr_h, BCRL_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(bclr_w, BCRL_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(bclr_d, BCRL_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(bset_b, BSET_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(bset_h, BSET_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(bset_w, BSET_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(bset_d, BSET_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(bneg_b, BNEG_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(bneg_h, BNEG_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(bneg_w, BNEG_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(bneg_d, BNEG_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(binsl_b, BINSL_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(binsl_h, BINSL_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(binsl_w, BINSL_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(binsl_d, BINSL_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(binsr_b, BINSR_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(binsr_h, BINSR_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(binsr_w, BINSR_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(binsr_d, BINSR_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(addv_b, ADDV_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(addv_h, ADDV_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(addv_w, ADDV_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(addv_d, ADDV_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(subv_b, SUBV_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(subv_h, SUBV_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(subv_w, SUBV_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(subv_d, SUBV_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(max_s_b, MAX_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(max_s_h, MAX_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(max_s_w, MAX_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(max_s_d, MAX_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(max_u_b, MAX_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(max_u_h, MAX_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(max_u_w, MAX_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(max_u_d, MAX_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(min_s_b, MIN_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(min_s_h, MIN_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(min_s_w, MIN_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(min_s_d, MIN_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(min_u_b, MIN_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(min_u_h, MIN_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(min_u_w, MIN_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(min_u_d, MIN_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(max_a_b, MAXA_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(max_a_h, MAXA_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(max_a_w, MAXA_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(max_a_d, MAXA_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(min_a_b, MINA_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(min_a_h, MINA_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(min_a_w, MINA_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(min_a_d, MINA_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(ceq_b, CEQ_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(ceq_h, CEQ_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(ceq_w, CEQ_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(ceq_d, CEQ_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(clt_s_b, CLT_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(clt_s_h, CLT_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(clt_s_w, CLT_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(clt_s_d, CLT_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(clt_u_b, CLT_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(clt_u_h, CLT_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(clt_u_w, CLT_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(clt_u_d, CLT_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(cle_s_b, CLE_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(cle_s_h, CLE_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(cle_s_w, CLE_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(cle_s_d, CLE_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(cle_u_b, CLE_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(cle_u_h, CLE_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(cle_u_w, CLE_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(cle_u_d, CLE_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(add_a_b, ADD_A_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(add_a_h, ADD_A_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(add_a_w, ADD_A_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(add_a_d, ADD_A_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(adds_a_b, ADDS_A_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(adds_a_h, ADDS_A_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(adds_a_w, ADDS_A_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(adds_a_d, ADDS_A_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(adds_s_b, ADDS_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(adds_s_h, ADDS_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(adds_s_w, ADDS_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(adds_s_d, ADDS_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(adds_u_b, ADDS_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(adds_u_h, ADDS_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(adds_u_w, ADDS_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(adds_u_d, ADDS_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(ave_s_b, AVE_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(ave_s_h, AVE_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(ave_s_w, AVE_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(ave_s_d, AVE_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(ave_u_b, AVE_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(ave_u_h, AVE_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(ave_u_w, AVE_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(ave_u_d, AVE_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(aver_s_b, AVER_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(aver_s_h, AVER_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(aver_s_w, AVER_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(aver_s_d, AVER_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(aver_u_b, AVER_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(aver_u_h, AVER_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(aver_u_w, AVER_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(aver_u_d, AVER_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(subs_s_b, SUBS_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(subs_s_h, SUBS_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(subs_s_w, SUBS_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(subs_s_d, SUBS_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(subs_u_b, SUBS_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(subs_u_h, SUBS_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(subs_u_w, SUBS_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(subs_u_d, SUBS_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(subsus_u_b, SUBSUS_U_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(subsus_u_h, SUBSUS_U_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(subsus_u_w, SUBSUS_U_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(subsus_u_d, SUBSUS_U_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(subsuu_s_b, SUBSUU_S_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(subsuu_s_h, SUBSUU_S_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(subsuu_s_w, SUBSUU_S_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(subsuu_s_d, SUBSUU_S_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(asub_s_b, ASUB_S_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(asub_s_h, ASUB_S_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(asub_s_w, ASUB_S_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(asub_s_d, ASUB_S_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(asub_u_b, ASUB_U_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(asub_u_h, ASUB_U_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(asub_u_w, ASUB_U_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(asub_u_d, ASUB_U_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(mulv_b, MULV_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(mulv_h, MULV_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(mulv_w, MULV_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(mulv_d, MULV_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(maddv_b, MADDV_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(maddv_h, MADDV_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(maddv_w, MADDV_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(maddv_d, MADDV_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(msubv_b, MSUBV_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(msubv_h, MSUBV_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(msubv_w, MSUBV_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(msubv_d, MSUBV_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(div_s_b, DIV_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(div_s_h, DIV_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(div_s_w, DIV_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(div_s_d, DIV_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(div_u_b, DIV_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(div_u_h, DIV_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(div_u_w, DIV_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(div_u_d, DIV_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(mod_s_b, MOD_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(mod_s_h, MOD_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(mod_s_w, MOD_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(mod_s_d, MOD_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(mod_u_b, MOD_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(mod_u_h, MOD_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(mod_u_w, MOD_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(mod_u_d, MOD_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(srar_b, SRAR_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(srar_h, SRAR_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(srar_w, SRAR_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(srar_d, SRAR_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(srlr_b, SRAR_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(srlr_h, SRAR_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(srlr_w, SRAR_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(srlr_d, SRAR_DF, uint64_t, kMSALanesDword, UINT64_MAX)
+
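+// Instantiates run_msa_3r for every instruction listed in TEST_CASE, pairing
+// the generated MSA instruction with its reference macro above.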
+#define RUN_TEST(instr, verify, type, lanes, mask) \
+ run_msa_3r(&tc[i], [](MacroAssembler& assm) { __ instr(w2, w1, w0); }, \
+ [](uint64_t ws, uint64_t wt, uint64_t wd) { \
+ verify(type, lanes, mask); \
+ });
+
+ for (size_t i = 0; i < arraysize(tc); ++i) {
+ TEST_CASE(RUN_TEST)
+ }
+
+#undef RUN_TEST
+#undef SLL_DF
+#undef SRA_DF
+#undef SRL_DF
+#undef BCRL_DF
+#undef BSET_DF
+#undef BNEG_DF
+#undef BINSL_DF
+#undef BINSR_DF
+#undef ADDV_DF
+#undef SUBV_DF
+#undef MAX_DF
+#undef MIN_DF
+#undef MAXA_DF
+#undef MINA_DF
+#undef CEQ_DF
+#undef CLT_DF
+#undef CLE_DF
+#undef ADD_A_DF
+#undef ADDS_A_DF
+#undef ADDS_DF
+#undef AVE_DF
+#undef AVER_DF
+#undef SUBS_DF
+#undef SUBSUS_U_DF
+#undef SUBSUU_S_DF
+#undef ASUB_S_DF
+#undef ASUB_U_DF
+#undef MULV_DF
+#undef MADDV_DF
+#undef MSUBV_DF
+#undef DIV_DF
+#undef MOD_DF
+#undef SRAR_DF
+}
+
#undef __
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-assembler-mips64.cc b/deps/v8/test/cctest/test-assembler-mips64.cc
index 4a828c9785..976bd02824 100644
--- a/deps/v8/test/cctest/test-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-assembler-mips64.cc
@@ -29,6 +29,7 @@
#include "src/v8.h"
+#include "src/assembler-inl.h"
#include "src/base/utils/random-number-generator.h"
#include "src/disassembler.h"
#include "src/factory.h"
@@ -38,8 +39,8 @@
#include "test/cctest/cctest.h"
-using namespace v8::internal;
-
+namespace v8 {
+namespace internal {
// Define these function prototypes to match JSEntryFunction in execution.cc.
typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
@@ -64,8 +65,8 @@ TEST(MIPS0) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
int64_t res = reinterpret_cast<int64_t>(
CALL_GENERATED_CODE(isolate, f, 0xab0, 0xc, 0, 0, 0));
@@ -100,8 +101,8 @@ TEST(MIPS1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F1 f = FUNCTION_CAST<F1>(code->entry());
int64_t res = reinterpret_cast<int64_t>(
CALL_GENERATED_CODE(isolate, f, 50, 0, 0, 0, 0));
@@ -246,8 +247,8 @@ TEST(MIPS2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
int64_t res = reinterpret_cast<int64_t>(
CALL_GENERATED_CODE(isolate, f, 0xab0, 0xc, 0, 0, 0));
@@ -349,8 +350,8 @@ TEST(MIPS3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
// Double test values.
t.a = 1.5e14;
@@ -443,8 +444,8 @@ TEST(MIPS4) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.a = 1.5e22;
t.b = 2.75e11;
@@ -509,8 +510,8 @@ TEST(MIPS5) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.a = 1.5e4;
t.b = 2.75e8;
@@ -579,8 +580,8 @@ TEST(MIPS6) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.ui = 0x11223344;
t.si = 0x99aabbcc;
@@ -666,8 +667,8 @@ TEST(MIPS7) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.a = 1.5e14;
t.b = 2.75e11;
@@ -764,8 +765,8 @@ TEST(MIPS8) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.input = 0x12345678;
Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0x0, 0, 0, 0);
@@ -810,8 +811,7 @@ TEST(MIPS9) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
}
@@ -886,8 +886,8 @@ TEST(MIPS10) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.a = 2.147483647e9; // 0x7fffffff -> 0x41DFFFFFFFC00000 as double.
t.b_long_hi = 0x000000ff; // 0xFF00FF00FF -> 0x426FE01FE01FE000 as double.
@@ -1022,8 +1022,8 @@ TEST(MIPS11) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.reg_init = 0xaabbccdd;
t.mem_init = 0x11223344;
@@ -1147,8 +1147,8 @@ TEST(MIPS12) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.x = 1;
t.y = 2;
@@ -1201,8 +1201,8 @@ TEST(MIPS13) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.cvt_big_in = 0xFFFFFFFF;
@@ -1322,8 +1322,8 @@ TEST(MIPS14) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.round_up_in = 123.51;
@@ -1452,8 +1452,8 @@ TEST(MIPS16) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.ui = 0x44332211;
t.si = 0x99aabbcc;
@@ -1581,8 +1581,8 @@ TEST(seleqz_selnez) {
__ nop();
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
(CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
@@ -1697,8 +1697,8 @@ TEST(min_max) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 4; i < kTableLength; i++) {
test.a = inputsa[i];
@@ -1806,8 +1806,8 @@ TEST(rint_d) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int j = 0; j < 4; j++) {
@@ -1854,8 +1854,8 @@ TEST(sel) {
__ nop();
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
const int test_size = 3;
@@ -1987,8 +1987,8 @@ TEST(rint_s) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int j = 0; j < 4; j++) {
@@ -2073,8 +2073,8 @@ TEST(mina_maxa) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
@@ -2155,8 +2155,8 @@ TEST(trunc_l) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2236,8 +2236,8 @@ TEST(movz_movn) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2337,8 +2337,8 @@ TEST(movt_movd) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
(CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
@@ -2423,8 +2423,8 @@ TEST(cvt_w_d) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int j = 0; j < 4; j++) {
test.fcsr = fcsr_inputs[j];
@@ -2490,8 +2490,8 @@ TEST(trunc_w) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2559,8 +2559,8 @@ TEST(round_w) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2630,8 +2630,8 @@ TEST(round_l) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2702,8 +2702,8 @@ TEST(sub) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputfs_S[i];
@@ -2774,8 +2774,8 @@ TEST(sqrt_rsqrt_recip) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
@@ -2852,8 +2852,8 @@ TEST(neg) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_S[i];
@@ -2910,8 +2910,8 @@ TEST(mul) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputfs_S[i];
@@ -2965,8 +2965,8 @@ TEST(mov) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -3032,8 +3032,8 @@ TEST(floor_w) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -3103,8 +3103,8 @@ TEST(floor_l) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -3174,8 +3174,8 @@ TEST(ceil_w) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -3245,8 +3245,8 @@ TEST(ceil_l) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -3316,8 +3316,8 @@ TEST(jump_tables1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -3386,8 +3386,8 @@ TEST(jump_tables2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -3466,8 +3466,8 @@ TEST(jump_tables3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -3540,8 +3540,8 @@ TEST(BITSWAP) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.r1 = 0x00102100781A15C3;
t.r2 = 0x001021008B71FCDE;
@@ -3683,8 +3683,8 @@ TEST(class_fmt) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
// Double test values.
@@ -3777,8 +3777,8 @@ TEST(ABS) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
test.a = -2.0;
test.b = -2.0;
@@ -3870,8 +3870,8 @@ TEST(ADD_FMT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
test.a = 2.0;
test.b = 3.0;
@@ -4025,8 +4025,8 @@ TEST(C_COND_FMT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
test.dOp1 = 2.0;
test.dOp2 = 3.0;
@@ -4226,8 +4226,8 @@ TEST(CMP_COND_FMT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
uint64_t dTrue = 0xFFFFFFFFFFFFFFFF;
uint64_t dFalse = 0x0000000000000000;
@@ -4404,8 +4404,8 @@ TEST(CVT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
test.cvt_d_s_in = -0.51;
@@ -4575,8 +4575,8 @@ TEST(DIV_FMT) {
__ nop();
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
(CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
@@ -4666,8 +4666,8 @@ uint64_t run_align(uint64_t rs_value, uint64_t rt_value, uint8_t bp) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F4 f = FUNCTION_CAST<F4>(code->entry());
@@ -4719,8 +4719,8 @@ uint64_t run_dalign(uint64_t rs_value, uint64_t rt_value, uint8_t bp) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F4 f = FUNCTION_CAST<F4>(code->entry());
uint64_t res = reinterpret_cast<uint64_t>(
@@ -4777,8 +4777,8 @@ uint64_t run_aluipc(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
PC = (uint64_t) f; // Set the program counter.
@@ -4831,8 +4831,8 @@ uint64_t run_auipc(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
PC = (uint64_t) f; // Set the program counter.
@@ -4886,8 +4886,8 @@ uint64_t run_aui(uint64_t rs, uint16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
@@ -4912,8 +4912,8 @@ uint64_t run_daui(uint64_t rs, uint16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
@@ -4938,8 +4938,8 @@ uint64_t run_dahi(uint64_t rs, uint16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
@@ -4964,8 +4964,8 @@ uint64_t run_dati(uint64_t rs, uint16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
@@ -5069,8 +5069,8 @@ uint64_t run_li_macro(uint64_t imm, LiFlags mode, int32_t num_instr = 0) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -5282,8 +5282,8 @@ uint64_t run_lwpc(int offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
@@ -5358,8 +5358,8 @@ uint64_t run_lwupc(int offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
@@ -5442,8 +5442,8 @@ uint64_t run_jic(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
@@ -5514,8 +5514,8 @@ uint64_t run_beqzc(int32_t value, int32_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
@@ -5608,8 +5608,8 @@ uint64_t run_jialc(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
@@ -5660,8 +5660,8 @@ uint64_t run_addiupc(int32_t imm19) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
PC = (uint64_t) f; // Set the program counter.
@@ -5736,8 +5736,8 @@ uint64_t run_ldpc(int offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
@@ -5825,8 +5825,8 @@ int64_t run_bc(int32_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
@@ -5907,8 +5907,8 @@ int64_t run_balc(int32_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
@@ -5957,8 +5957,8 @@ uint64_t run_dsll(uint64_t rt_value, uint16_t sa_value) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F4 f = FUNCTION_CAST<F4>(code->entry());
@@ -6013,8 +6013,8 @@ uint64_t run_bal(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
@@ -6067,8 +6067,8 @@ TEST(Trampoline) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
int64_t res = reinterpret_cast<int64_t>(
@@ -6134,8 +6134,8 @@ void helper_madd_msub_maddf_msubf(F func) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
const size_t kTableLength = sizeof(test_cases) / sizeof(TestCaseMaddMsub<T>);
@@ -6217,8 +6217,8 @@ uint64_t run_Subu(uint64_t imm, int32_t num_instr) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6299,8 +6299,8 @@ uint64_t run_Dsubu(uint64_t imm, int32_t num_instr) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6394,8 +6394,8 @@ uint64_t run_Dins(uint64_t imm, uint64_t source, uint16_t pos, uint16_t size) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
uint64_t res = reinterpret_cast<uint64_t>(
@@ -6453,8 +6453,8 @@ uint64_t run_Ins(uint64_t imm, uint64_t source, uint16_t pos, uint16_t size) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
uint64_t res = reinterpret_cast<uint64_t>(
@@ -6522,8 +6522,8 @@ uint64_t run_Ext(uint64_t source, uint16_t pos, uint16_t size) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
uint64_t res = reinterpret_cast<uint64_t>(
@@ -6555,6 +6555,63 @@ TEST(Ext) {
CHECK_EQ(run_Ext(0x0000000040000000, 31, 1), 0x0000000000000000);
}
+// Load elements into MSA vector register w; t0 and t1 are scratch registers.
+void load_uint64_elements_of_vector(MacroAssembler& assm,
+ const uint64_t elements[], MSARegister w,
+ Register t0, Register t1) {
+ __ li(t0, elements[0]);
+ __ li(t1, elements[1]);
+ __ insert_d(w, 0, t0);
+ __ insert_d(w, 1, t1);
+}
+
+void load_uint32_elements_of_vector(MacroAssembler& assm,
+ const uint64_t elements[], MSARegister w,
+ Register t0, Register t1) {
+ const uint32_t* const element = reinterpret_cast<const uint32_t*>(elements);
+ __ li(t0, element[0]);
+ __ li(t1, element[1]);
+ __ insert_w(w, 0, t0);
+ __ insert_w(w, 1, t1);
+ __ li(t0, element[2]);
+ __ li(t1, element[3]);
+ __ insert_w(w, 2, t0);
+ __ insert_w(w, 3, t1);
+}
+
+void load_uint16_elements_of_vector(MacroAssembler& assm,
+ const uint64_t elements[], MSARegister w,
+ Register t0, Register t1) {
+ const uint16_t* const element = reinterpret_cast<const uint16_t*>(elements);
+ __ li(t0, element[0]);
+ __ li(t1, element[1]);
+ __ insert_h(w, 0, t0);
+ __ insert_h(w, 1, t1);
+ __ li(t0, element[2]);
+ __ li(t1, element[3]);
+ __ insert_h(w, 2, t0);
+ __ insert_h(w, 3, t1);
+ __ li(t0, element[4]);
+ __ li(t1, element[5]);
+ __ insert_h(w, 4, t0);
+ __ insert_h(w, 5, t1);
+ __ li(t0, element[6]);
+ __ li(t1, element[7]);
+ __ insert_h(w, 6, t0);
+ __ insert_h(w, 7, t1);
+}
+
+// Store the vector elements of MSA register w to the memory pointed to by a.
+void store_uint64_elements_of_vector(MacroAssembler& assm, MSARegister w,
+ Register a) {
+ __ st_d(w, MemOperand(a, 0));
+}
+
+void store_uint32_elements_of_vector(MacroAssembler& assm, MSARegister w,
+ Register a) {
+ __ st_w(w, MemOperand(a, 0));
+}
+
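+// Typical usage of these helpers in the MSA tests that follow (a sketch;
+// GenerateInstructionFunc stands for whichever generator a test passes in,
+// and the load helpers clobber the scratch registers t0 and t1):
+//
+//   load_uint64_elements_of_vector(assm, &input->ws_lo, w0, t0, t1);
+//   GenerateInstructionFunc(assm);
+//   store_uint64_elements_of_vector(assm, w2, a0);
+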
TEST(MSA_fill_copy) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -6606,8 +6663,8 @@ TEST(MSA_fill_copy) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6670,8 +6727,8 @@ TEST(MSA_fill_copy_2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6724,8 +6781,8 @@ TEST(MSA_fill_copy_3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6773,22 +6830,15 @@ void run_msa_insert(int64_t rs_value, int n, msa_reg_t* w) {
UNREACHABLE();
}
- __ copy_u_w(t2, w0, 0);
- __ sw(t2, MemOperand(a0, 0));
- __ copy_u_w(t2, w0, 1);
- __ sw(t2, MemOperand(a0, 4));
- __ copy_u_w(t2, w0, 2);
- __ sw(t2, MemOperand(a0, 8));
- __ copy_u_w(t2, w0, 3);
- __ sw(t2, MemOperand(a0, 12));
+ store_uint64_elements_of_vector(assm, w0, a0);
__ jr(ra);
__ nop();
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6865,6 +6915,64 @@ TEST(MSA_insert) {
}
}
+void run_msa_ctc_cfc(uint64_t value) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ CpuFeatureScope fscope(&assm, MIPS_SIMD);
+
+ MSAControlRegister msareg = {kMSACSRRegister};
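+ // Save the original MSACSR in t1, write the test value from t0, read it
+ // back into t2, then restore the saved control register before returning
+ // the read-back value through a0.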
+ __ li(t0, value);
+ __ li(t2, 0);
+ __ cfcmsa(t1, msareg);
+ __ ctcmsa(msareg, t0);
+ __ cfcmsa(t2, msareg);
+ __ ctcmsa(msareg, t1);
+ __ sd(t2, MemOperand(a0, 0));
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+
+ uint64_t res;
+ (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+
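+ // cfcmsa returns the 32-bit MSACSR sign-extended to 64 bits, and bits
+ // outside 0x0167ffff always read as zero; build the expected value the
+ // same way.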
+ CHECK_EQ(bit_cast<uint64_t>(static_cast<int64_t>(
+ bit_cast<int32_t>(static_cast<uint32_t>(value & 0x0167ffff)))),
+ res);
+}
+
+TEST(MSA_cfc_ctc) {
+ if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const uint64_t mask_without_cause = 0xffffffffff9c0fff;
+ const uint64_t mask_always_zero = 0x0167ffff;
+ const uint64_t mask_enables = 0x0000000000000f80;
+ uint64_t test_case[] = {0x30c6f6352d5ede31, 0xefc9fed507955425,
+ 0x64f2a3ff15b7dbe3, 0x6aa069352bf8bc37,
+ 0x7ea7ab2ae6aae923, 0xa10f5d4c24d0f68d,
+ 0x6dd14c9441afa84c, 0xc366373b2d6bf64f,
+ 0x6b35fb04925014bd, 0x9e3ea39a4dba7e61};
+ for (unsigned i = 0; i < arraysize(test_case); i++) {
+ // Setting an enable bit together with its corresponding cause bit could
+ // raise an exception, so mask the test value to keep that from happening.
+ test_case[i] = (~test_case[i] & mask_enables) << 5 |
+ (test_case[i] & mask_without_cause);
+ run_msa_ctc_cfc(test_case[i] & mask_always_zero);
+ }
+}
+
struct ExpResShf {
uint8_t i8;
uint64_t lo;
@@ -6928,14 +7036,7 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
UNREACHABLE();
}
- __ copy_u_w(t2, w2, 0);
- __ sw(t2, MemOperand(a0, 0));
- __ copy_u_w(t2, w2, 1);
- __ sw(t2, MemOperand(a0, 4));
- __ copy_u_w(t2, w2, 2);
- __ sw(t2, MemOperand(a0, 8));
- __ copy_u_w(t2, w2, 3);
- __ sw(t2, MemOperand(a0, 12));
+ store_uint64_elements_of_vector(assm, w2, a0);
__ jr(ra);
__ nop();
@@ -6944,8 +7045,8 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -7127,29 +7228,19 @@ void run_msa_i5(struct TestCaseMsaI5* input, bool i5_sign_ext,
int32_t i5 =
i5_sign_ext ? static_cast<int32_t>(input->i5 << 27) >> 27 : input->i5;
- __ li(t0, input->ws_lo);
- __ li(t1, input->ws_hi);
- __ insert_d(w0, 0, t0);
- __ insert_d(w0, 1, t1);
+ load_uint64_elements_of_vector(assm, &(input->ws_lo), w0, t0, t1);
GenerateI5InstructionFunc(assm, i5);
- __ copy_u_w(t2, w2, 0);
- __ sw(t2, MemOperand(a0, 0));
- __ copy_u_w(t2, w2, 1);
- __ sw(t2, MemOperand(a0, 4));
- __ copy_u_w(t2, w2, 2);
- __ sw(t2, MemOperand(a0, 8));
- __ copy_u_w(t2, w2, 3);
- __ sw(t2, MemOperand(a0, 12));
+ store_uint64_elements_of_vector(assm, w2, a0);
__ jr(ra);
__ nop();
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -7548,8 +7639,11 @@ struct TestCaseMsa2R {
uint64_t exp_res_hi;
};
-template <typename Func>
-void run_msa_2r(struct TestCaseMsa2R* input, Func Generate2RInstructionFunc) {
+template <typename Func, typename FuncLoad, typename FuncStore>
+void run_msa_2r(const struct TestCaseMsa2R* input,
+ Func Generate2RInstructionFunc,
+ FuncLoad load_elements_of_vector,
+ FuncStore store_elements_of_vector) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -7557,29 +7651,18 @@ void run_msa_2r(struct TestCaseMsa2R* input, Func Generate2RInstructionFunc) {
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
- __ li(t0, input->ws_lo);
- __ li(t1, input->ws_hi);
- __ insert_d(w0, 0, t0);
- __ insert_d(w0, 1, t1);
-
+ load_elements_of_vector(assm, reinterpret_cast<const uint64_t*>(input), w0,
+ t0, t1);
Generate2RInstructionFunc(assm);
-
- __ copy_u_w(t2, w2, 0);
- __ sw(t2, MemOperand(a0, 0));
- __ copy_u_w(t2, w2, 1);
- __ sw(t2, MemOperand(a0, 4));
- __ copy_u_w(t2, w2, 2);
- __ sw(t2, MemOperand(a0, 8));
- __ copy_u_w(t2, w2, 3);
- __ sw(t2, MemOperand(a0, 12));
+ store_elements_of_vector(assm, w2, a0);
__ jr(ra);
__ nop();
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -7587,8 +7670,17 @@ void run_msa_2r(struct TestCaseMsa2R* input, Func Generate2RInstructionFunc) {
(CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
- CHECK_EQ(input->exp_res_lo, res.d[0]);
- CHECK_EQ(input->exp_res_hi, res.d[1]);
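+ // Check at the granularity of the store helper that was used: 64-bit
+ // lanes for store_uint64_elements_of_vector, 32-bit lanes otherwise.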
+ if (store_elements_of_vector == store_uint64_elements_of_vector) {
+ CHECK_EQ(input->exp_res_lo, res.d[0]);
+ CHECK_EQ(input->exp_res_hi, res.d[1]);
+ } else if (store_elements_of_vector == store_uint32_elements_of_vector) {
+ const uint32_t* exp_res =
+ reinterpret_cast<const uint32_t*>(&input->exp_res_lo);
+ CHECK_EQ(exp_res[0], res.w[0]);
+ CHECK_EQ(exp_res[1], res.w[1]);
+ CHECK_EQ(exp_res[2], res.w[2]);
+ CHECK_EQ(exp_res[3], res.w[3]);
+ }
}
TEST(MSA_pcnt) {
@@ -7639,10 +7731,14 @@ TEST(MSA_pcnt) {
{0xf35862e13e38f8b0, 0x4f41ffdef2bfe636, 0x20, 0x2a}};
for (size_t i = 0; i < sizeof(tc_b) / sizeof(TestCaseMsa2R); ++i) {
- run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ pcnt_b(w2, w0); });
- run_msa_2r(&tc_h[i], [](MacroAssembler& assm) { __ pcnt_h(w2, w0); });
- run_msa_2r(&tc_w[i], [](MacroAssembler& assm) { __ pcnt_w(w2, w0); });
- run_msa_2r(&tc_d[i], [](MacroAssembler& assm) { __ pcnt_d(w2, w0); });
+ run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ pcnt_b(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ run_msa_2r(&tc_h[i], [](MacroAssembler& assm) { __ pcnt_h(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ run_msa_2r(&tc_w[i], [](MacroAssembler& assm) { __ pcnt_w(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ run_msa_2r(&tc_d[i], [](MacroAssembler& assm) { __ pcnt_d(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
}
}
@@ -7694,10 +7790,14 @@ TEST(MSA_nlzc) {
{0x00000000e338f8b0, 0x0754534acab32654, 0x20, 0x5}};
for (size_t i = 0; i < sizeof(tc_b) / sizeof(TestCaseMsa2R); ++i) {
- run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ nlzc_b(w2, w0); });
- run_msa_2r(&tc_h[i], [](MacroAssembler& assm) { __ nlzc_h(w2, w0); });
- run_msa_2r(&tc_w[i], [](MacroAssembler& assm) { __ nlzc_w(w2, w0); });
- run_msa_2r(&tc_d[i], [](MacroAssembler& assm) { __ nlzc_d(w2, w0); });
+ run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ nlzc_b(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ run_msa_2r(&tc_h[i], [](MacroAssembler& assm) { __ nlzc_h(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ run_msa_2r(&tc_w[i], [](MacroAssembler& assm) { __ nlzc_w(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ run_msa_2r(&tc_d[i], [](MacroAssembler& assm) { __ nlzc_d(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
}
}
@@ -7749,10 +7849,885 @@ TEST(MSA_nloc) {
{0xFFFFFFFF1CC7074F, 0xF8ABACB5354CD9AB, 0x20, 0x5}};
for (size_t i = 0; i < sizeof(tc_b) / sizeof(TestCaseMsa2R); ++i) {
- run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ nloc_b(w2, w0); });
- run_msa_2r(&tc_h[i], [](MacroAssembler& assm) { __ nloc_h(w2, w0); });
- run_msa_2r(&tc_w[i], [](MacroAssembler& assm) { __ nloc_w(w2, w0); });
- run_msa_2r(&tc_d[i], [](MacroAssembler& assm) { __ nloc_d(w2, w0); });
+ run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ nloc_b(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ run_msa_2r(&tc_h[i], [](MacroAssembler& assm) { __ nloc_h(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ run_msa_2r(&tc_w[i], [](MacroAssembler& assm) { __ nloc_w(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ run_msa_2r(&tc_d[i], [](MacroAssembler& assm) { __ nloc_d(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
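+// The TestCaseMsa2RF_* structs below mirror TestCaseMsa2R's memory layout
+// (16 bytes of source vector followed by 16 bytes of expected result), which
+// is why the tests can reinterpret_cast them when calling run_msa_2r.
+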
+struct TestCaseMsa2RF_F_U {
+ float ws1;
+ float ws2;
+ float ws3;
+ float ws4;
+ uint32_t exp_res_1;
+ uint32_t exp_res_2;
+ uint32_t exp_res_3;
+ uint32_t exp_res_4;
+};
+
+struct TestCaseMsa2RF_D_U {
+ double ws1;
+ double ws2;
+ uint64_t exp_res_1;
+ uint64_t exp_res_2;
+};
+
+TEST(MSA_fclass) {
+ if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+#define BIT(n) (0x1 << (n))
+#define SNAN BIT(0)
+#define QNAN BIT(1)
+#define NEG_INFINITY BIT(2)
+#define NEG_NORMAL BIT(3)
+#define NEG_SUBNORMAL BIT(4)
+#define NEG_ZERO BIT(5)
+#define POS_INFINITY BIT(6)
+#define POS_NORMAL BIT(7)
+#define POS_SUBNORMAL BIT(8)
+#define POS_ZERO BIT(9)
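+// fclass_w/fclass_d set exactly one of the class bits above per lane,
+// identifying the IEEE-754 class of the operand.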
+
+ const float inf_float = std::numeric_limits<float>::infinity();
+ const double inf_double = std::numeric_limits<double>::infinity();
+
+ const struct TestCaseMsa2RF_F_U tc_s[] = {
+ {1.f, -0.00001, 208e10f, -34.8e-30f, POS_NORMAL, NEG_NORMAL, POS_NORMAL,
+ NEG_NORMAL},
+ {inf_float, -inf_float, 0, -0.f, POS_INFINITY, NEG_INFINITY, POS_ZERO,
+ NEG_ZERO},
+ {3.036e-40f, -6.392e-43f, 1.41e-45f, -1.17e-38f, POS_SUBNORMAL,
+ NEG_SUBNORMAL, POS_SUBNORMAL, NEG_SUBNORMAL}};
+
+ const struct TestCaseMsa2RF_D_U tc_d[] = {
+ {1., -0.00000001, POS_NORMAL, NEG_NORMAL},
+ {208e10, -34.8e-300, POS_NORMAL, NEG_NORMAL},
+ {inf_double, -inf_double, POS_INFINITY, NEG_INFINITY},
+ {0, -0., POS_ZERO, NEG_ZERO},
+ {1.036e-308, -6.392e-309, POS_SUBNORMAL, NEG_SUBNORMAL},
+ {1.41e-323, -3.17e208, POS_SUBNORMAL, NEG_NORMAL}};
+
+ for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_U); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
+ [](MacroAssembler& assm) { __ fclass_w(w2, w0); },
+ load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ }
+ for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_U); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [](MacroAssembler& assm) { __ fclass_d(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ }
+
+#undef BIT
+#undef SNAN
+#undef QNAN
+#undef NEG_INFINITY
+#undef NEG_NORMAL
+#undef NEG_SUBNORMAL
+#undef NEG_ZERO
+#undef POS_INFINITY
+#undef POS_NORMAL
+#undef POS_SUBNORMAL
+#undef POS_ZERO
+}
+
+struct TestCaseMsa2RF_F_I {
+ float ws1;
+ float ws2;
+ float ws3;
+ float ws4;
+ int32_t exp_res_1;
+ int32_t exp_res_2;
+ int32_t exp_res_3;
+ int32_t exp_res_4;
+};
+
+struct TestCaseMsa2RF_D_I {
+ double ws1;
+ double ws2;
+ int64_t exp_res_1;
+ int64_t exp_res_2;
+};
+
+TEST(MSA_ftrunc_s) {
+ if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float inf_float = std::numeric_limits<float>::infinity();
+ const float qNaN_float = std::numeric_limits<float>::quiet_NaN();
+ const double inf_double = std::numeric_limits<double>::infinity();
+ const double qNaN_double = std::numeric_limits<double>::quiet_NaN();
+ const int32_t max_int32 = std::numeric_limits<int32_t>::max();
+ const int32_t min_int32 = std::numeric_limits<int32_t>::min();
+ const int64_t max_int64 = std::numeric_limits<int64_t>::max();
+ const int64_t min_int64 = std::numeric_limits<int64_t>::min();
+
+ const struct TestCaseMsa2RF_F_I tc_s[] = {
+ {inf_float, 2.345f, -324.9235f, 30004.51f, max_int32, 2, -324, 30004},
+ {-inf_float, -0.983f, 0.0832f, static_cast<float>(max_int32) * 3.f,
+ min_int32, 0, 0, max_int32},
+ {-23.125f, qNaN_float, 2 * static_cast<float>(min_int32), -0.f, -23, 0,
+ min_int32, 0}};
+
+ const struct TestCaseMsa2RF_D_I tc_d[] = {
+ {inf_double, 2.345, max_int64, 2},
+ {-324.9235, 246569139.51, -324, 246569139},
+ {-inf_double, -0.983, min_int64, 0},
+ {0.0832, 6 * static_cast<double>(max_int64), 0, max_int64},
+ {-21453889872.94, qNaN_double, -21453889872, 0},
+ {2 * static_cast<double>(min_int64), -0., min_int64, 0}};
+
+ for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_I); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
+ [](MacroAssembler& assm) { __ ftrunc_s_w(w2, w0); },
+ load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ }
+ for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_I); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [](MacroAssembler& assm) { __ ftrunc_s_d(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
+TEST(MSA_ftrunc_u) {
+ if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float inf_float = std::numeric_limits<float>::infinity();
+ const float qNaN_float = std::numeric_limits<float>::quiet_NaN();
+ const double inf_double = std::numeric_limits<double>::infinity();
+ const double qNaN_double = std::numeric_limits<double>::quiet_NaN();
+ const uint32_t max_uint32 = std::numeric_limits<uint32_t>::max();
+ const uint64_t max_uint64 = std::numeric_limits<uint64_t>::max();
+
+ const struct TestCaseMsa2RF_F_U tc_s[] = {
+ {inf_float, 2.345f, -324.9235f, 30004.51f, max_uint32, 2, 0, 30004},
+ {-inf_float, 0.983f, 0.0832f, static_cast<float>(max_uint32) * 3., 0, 0,
+ 0, max_uint32},
+ {23.125f, qNaN_float, -0.982, -0.f, 23, 0, 0, 0}};
+
+ const struct TestCaseMsa2RF_D_U tc_d[] = {
+ {inf_double, 2.345, max_uint64, 2},
+ {-324.9235, 246569139.51, 0, 246569139},
+ {-inf_double, -0.983, 0, 0},
+ {0.0832, 6 * static_cast<double>(max_uint64), 0, max_uint64},
+ {21453889872.94, qNaN_double, 21453889872, 0},
+ {0.9889, -0., 0, 0}};
+
+ for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_U); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
+ [](MacroAssembler& assm) { __ ftrunc_u_w(w2, w0); },
+ load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ }
+ for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_U); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [](MacroAssembler& assm) { __ ftrunc_u_d(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
+struct TestCaseMsa2RF_F_F {
+ float ws1;
+ float ws2;
+ float ws3;
+ float ws4;
+ float exp_res_1;
+ float exp_res_2;
+ float exp_res_3;
+ float exp_res_4;
+};
+
+struct TestCaseMsa2RF_D_D {
+ double ws1;
+ double ws2;
+ double exp_res_1;
+ double exp_res_2;
+};
+
+TEST(MSA_fsqrt) {
+ if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float inf_float = std::numeric_limits<float>::infinity();
+ const double inf_double = std::numeric_limits<double>::infinity();
+
+ const struct TestCaseMsa2RF_F_F tc_s[] = {
+ {81.f, 576.f, inf_float, -0.f, 9.f, 24.f, inf_float, -0.f}};
+
+ const struct TestCaseMsa2RF_D_D tc_d[] = {{81., inf_double, 9., inf_double},
+ {331776., -0., 576, -0.}};
+
+ for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_F); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
+ [](MacroAssembler& assm) { __ fsqrt_w(w2, w0); },
+ load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ }
+ for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_D); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [](MacroAssembler& assm) { __ fsqrt_d(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
+TEST(MSA_frsqrt) {
+ if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float inf_float = std::numeric_limits<float>::infinity();
+ const double inf_double = std::numeric_limits<double>::infinity();
+
+ const struct TestCaseMsa2RF_F_F tc_s[] = {
+ {81.f, 576.f, inf_float, -0.f, 1.f / 9.f, 1.f / 24.f, 0.f, -inf_float},
+ {0.f, 1.f / 576.f, 1.f / 81.f, 1.f / 4.f, inf_float, 24.f, 9.f, 2.f}};
+
+ const struct TestCaseMsa2RF_D_D tc_d[] = {
+ {81., inf_double, 1. / 9., 0.},
+ {331776., -0., 1. / 576., -inf_double},
+ {0., 1. / 81, inf_double, 9.}};
+
+ for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_F); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
+ [](MacroAssembler& assm) { __ frsqrt_w(w2, w0); },
+ load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ }
+ for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_D); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [](MacroAssembler& assm) { __ frsqrt_d(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
+TEST(MSA_frcp) {
+ if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float inf_float = std::numeric_limits<float>::infinity();
+ const double inf_double = std::numeric_limits<double>::infinity();
+
+ const struct TestCaseMsa2RF_F_F tc_s[] = {
+ {12.f, 576.f, inf_float, -0.f, 1.f / 12.f, 1.f / 576.f, 0.f, -inf_float},
+ {0.f, 1.f / 576.f, -inf_float, 1.f / 400.f, inf_float, 576.f, -0.f,
+ 400.f}};
+
+ const struct TestCaseMsa2RF_D_D tc_d[] = {
+ {81., inf_double, 1. / 81., 0.},
+ {331777., -0., 1. / 331777., -inf_double},
+ {0., 1. / 80, inf_double, 80.},
+ {1. / 40000., -inf_double, 40000., -0.}};
+
+ for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_F); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
+ [](MacroAssembler& assm) { __ frcp_w(w2, w0); },
+ load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ }
+ for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_D); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [](MacroAssembler& assm) { __ frcp_d(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
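+// frint rounds according to the rounding-mode field of the MSACSR, so these
+// helpers install the requested mode around the instruction and then restore
+// the original control register.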
+void test_frint_s(size_t data_size, TestCaseMsa2RF_F_F tc_d[],
+ int rounding_mode) {
+ for (size_t i = 0; i < data_size / sizeof(TestCaseMsa2RF_F_F); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [&rounding_mode](MacroAssembler& assm) {
+ MSAControlRegister msareg = {kMSACSRRegister};
+ __ li(t0, static_cast<uint32_t>(rounding_mode));
+ __ cfcmsa(t1, msareg);
+ __ ctcmsa(msareg, t0);
+ __ frint_w(w2, w0);
+ __ ctcmsa(msareg, t1);
+ },
+ load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ }
+}
+
+void test_frint_d(size_t data_size, TestCaseMsa2RF_D_D tc_d[],
+ int rounding_mode) {
+ for (size_t i = 0; i < data_size / sizeof(TestCaseMsa2RF_D_D); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [&rounding_mode](MacroAssembler& assm) {
+ MSAControlRegister msareg = {kMSACSRRegister};
+ __ li(t0, static_cast<uint32_t>(rounding_mode));
+ __ cfcmsa(t1, msareg);
+ __ ctcmsa(msareg, t0);
+ __ frint_d(w2, w0);
+ __ ctcmsa(msareg, t1);
+ },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
+TEST(MSA_frint) {
+ if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ struct TestCaseMsa2RF_F_F tc_s1[] = {
+ {0.f, 4.51f, 1.49f, -12.51f, 0.f, 5.f, 1.f, -13.f},
+ {-1.32f, -23.38f, 2.8f, -32.6f, -1.f, -23.f, 3.f, -33.f}};
+
+ struct TestCaseMsa2RF_D_D tc_d1[] = {{0., 4.51, 0., 5.},
+ {1.49, -12.51, 1., -13.},
+ {-1.32, -23.38, -1., -23.},
+ {2.8, -32.6, 3., -33.}};
+
+ test_frint_s(sizeof(tc_s1), tc_s1, kRoundToNearest);
+ test_frint_d(sizeof(tc_d1), tc_d1, kRoundToNearest);
+
+ struct TestCaseMsa2RF_F_F tc_s2[] = {
+ {0.f, 4.5f, 1.49f, -12.51f, 0.f, 4.f, 1.f, -12.f},
+ {-1.f, -23.38f, 2.8f, -32.6f, -1.f, -23.f, 2.f, -32.f}};
+
+ struct TestCaseMsa2RF_D_D tc_d2[] = {{0., 4.5, 0., 4.},
+ {1.49, -12.51, 1., -12.},
+ {-1., -23.38, -1., -23.},
+ {2.8, -32.6, 2., -32.}};
+
+ test_frint_s(sizeof(tc_s2), tc_s2, kRoundToZero);
+ test_frint_d(sizeof(tc_d2), tc_d2, kRoundToZero);
+
+ struct TestCaseMsa2RF_F_F tc_s3[] = {
+ {0.f, 4.5f, 1.49f, -12.51f, 0.f, 5.f, 2.f, -12.f},
+ {-1.f, -23.38f, 2.8f, -32.6f, -1.f, -23.f, 3.f, -32.f}};
+
+ struct TestCaseMsa2RF_D_D tc_d3[] = {{0., 4.5, 0., 5.},
+ {1.49, -12.51, 2., -12.},
+ {-1., -23.38, -1., -23.},
+ {2.8, -32.6, 3., -32.}};
+
+ test_frint_s(sizeof(tc_s3), tc_s3, kRoundToPlusInf);
+ test_frint_d(sizeof(tc_d3), tc_d3, kRoundToPlusInf);
+
+ struct TestCaseMsa2RF_F_F tc_s4[] = {
+ {0.f, 4.5f, 1.49f, -12.51f, 0.f, 4.f, 1.f, -13.f},
+ {-1.f, -23.38f, 2.8f, -32.6f, -1.f, -24.f, 2.f, -33.f}};
+
+ struct TestCaseMsa2RF_D_D tc_d4[] = {{0., 4.5, 0., 4.},
+ {1.49, -12.51, 1., -13.},
+ {-1., -23.38, -1., -24.},
+ {2.8, -32.6, 2., -33.}};
+
+ test_frint_s(sizeof(tc_s4), tc_s4, kRoundToMinusInf);
+ test_frint_d(sizeof(tc_d4), tc_d4, kRoundToMinusInf);
+}
+
+TEST(MSA_flog2) {
+ if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float inf_float = std::numeric_limits<float>::infinity();
+ const double inf_double = std::numeric_limits<double>::infinity();
+
+ struct TestCaseMsa2RF_F_F tc_s[] = {
+ {std::ldexp(0.58f, -48), std::ldexp(0.5f, 110), std::ldexp(1.11f, -130),
+ inf_float, -49.f, 109.f, -130.f, inf_float},
+ {0.f, -0.f, std::ldexp(0.89f, -12), std::ldexp(0.32f, 126), -inf_float,
+ -inf_float, -13.f, 124.f}};
+
+ struct TestCaseMsa2RF_D_D tc_d[] = {
+ {std::ldexp(0.58, -48), std::ldexp(0.5, 110), -49., 109.},
+ {std::ldexp(1.11, -1050), inf_double, -1050., inf_double},
+ {0., -0., -inf_double, -inf_double},
+ {std::ldexp(0.32, 1021), std::ldexp(1.23, -123), 1019., -123.}};
+
+ for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_F); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
+ [](MacroAssembler& assm) { __ flog2_w(w2, w0); },
+ load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ }
+
+ for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_D); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [](MacroAssembler& assm) { __ flog2_d(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
+void test_ftint_s_s(size_t data_size, TestCaseMsa2RF_F_I tc_d[],
+ int rounding_mode) {
+ for (size_t i = 0; i < data_size / sizeof(TestCaseMsa2RF_F_I); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [&rounding_mode](MacroAssembler& assm) {
+ MSAControlRegister msareg = {kMSACSRRegister};
+ __ li(t0, static_cast<uint32_t>(rounding_mode));
+ __ cfcmsa(t1, msareg);
+ __ ctcmsa(msareg, t0);
+ __ ftint_s_w(w2, w0);
+ __ ctcmsa(msareg, t1);
+ },
+ load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ }
+}
+
+void test_ftint_s_d(size_t data_size, TestCaseMsa2RF_D_I tc_d[],
+ int rounding_mode) {
+ for (size_t i = 0; i < data_size / sizeof(TestCaseMsa2RF_D_I); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [&rounding_mode](MacroAssembler& assm) {
+ MSAControlRegister msareg = {kMSACSRRegister};
+ __ li(t0, static_cast<uint32_t>(rounding_mode));
+ __ cfcmsa(t1, msareg);
+ __ ctcmsa(msareg, t0);
+ __ ftint_s_d(w2, w0);
+ __ ctcmsa(msareg, t1);
+ },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
+TEST(MSA_ftint_s) {
+ if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float inf_float = std::numeric_limits<float>::infinity();
+ const double inf_double = std::numeric_limits<double>::infinity();
+ const int32_t int32_max = std::numeric_limits<int32_t>::max();
+ const int32_t int32_min = std::numeric_limits<int32_t>::min();
+ const int64_t int64_max = std::numeric_limits<int64_t>::max();
+ const int64_t int64_min = std::numeric_limits<int64_t>::min();
+
+ struct TestCaseMsa2RF_F_I tc_s1[] = {
+ {0.f, 4.51f, 1.49f, -12.51f, 0, 5, 1, -13},
+ {-0.32f, -23.38f, 2.8f, -32.6f, 0, -23, 3, -33},
+ {inf_float, -inf_float, 3.f * int32_min, 4.f * int32_max, int32_max,
+ int32_min, int32_min, int32_max}};
+
+ struct TestCaseMsa2RF_D_I tc_d1[] = {
+ {0., 4.51, 0, 5},
+ {1.49, -12.51, 1, -13},
+ {-0.32, -23.38, 0, -23},
+ {2.8, -32.6, 3, -33},
+ {inf_double, -inf_double, int64_max, int64_min},
+ {33.23 * int64_min, 4000. * int64_max, int64_min, int64_max}};
+
+ test_ftint_s_s(sizeof(tc_s1), tc_s1, kRoundToNearest);
+ test_ftint_s_d(sizeof(tc_d1), tc_d1, kRoundToNearest);
+
+ struct TestCaseMsa2RF_F_I tc_s2[] = {
+ {0.f, 4.5f, 1.49f, -12.51f, 0, 4, 1, -12},
+ {-0.f, -23.38f, 2.8f, -32.6f, -0, -23, 2, -32},
+ {inf_float, -inf_float, 3.f * int32_min, 4.f * int32_max, int32_max,
+ int32_min, int32_min, int32_max}};
+
+ struct TestCaseMsa2RF_D_I tc_d2[] = {
+ {0., 4.5, 0, 4},
+ {1.49, -12.51, 1, -12},
+ {-0., -23.38, -0, -23},
+ {2.8, -32.6, 2, -32},
+ {inf_double, -inf_double, int64_max, int64_min},
+ {33.23 * int64_min, 4000. * int64_max, int64_min, int64_max}};
+
+ test_ftint_s_s(sizeof(tc_s2), tc_s2, kRoundToZero);
+ test_ftint_s_d(sizeof(tc_d2), tc_d2, kRoundToZero);
+
+ struct TestCaseMsa2RF_F_I tc_s3[] = {
+ {0.f, 4.5f, 1.49f, -12.51f, 0, 5, 2, -12},
+ {-0.f, -23.38f, 2.8f, -32.6f, -0, -23, 3, -32},
+ {inf_float, -inf_float, 3.f * int32_min, 4.f * int32_max, int32_max,
+ int32_min, int32_min, int32_max}};
+
+ struct TestCaseMsa2RF_D_I tc_d3[] = {
+ {0., 4.5, 0, 5},
+ {1.49, -12.51, 2, -12},
+ {-0., -23.38, -0, -23},
+ {2.8, -32.6, 3, -32},
+ {inf_double, -inf_double, int64_max, int64_min},
+ {33.23 * int64_min, 4000. * int64_max, int64_min, int64_max}};
+
+ test_ftint_s_s(sizeof(tc_s3), tc_s3, kRoundToPlusInf);
+ test_ftint_s_d(sizeof(tc_d3), tc_d3, kRoundToPlusInf);
+
+ struct TestCaseMsa2RF_F_I tc_s4[] = {
+ {0.f, 4.5f, 1.49f, -12.51f, 0, 4, 1, -13},
+ {-0.f, -23.38f, 2.8f, -32.6f, -0, -24, 2, -33},
+ {inf_float, -inf_float, 3.f * int32_min, 4.f * int32_max, int32_max,
+ int32_min, int32_min, int32_max}};
+
+ struct TestCaseMsa2RF_D_I tc_d4[] = {
+ {0., 4.5, 0, 4},
+ {1.49, -12.51, 1, -13},
+ {-0., -23.38, -0, -24},
+ {2.8, -32.6, 2, -33},
+ {inf_double, -inf_double, int64_max, int64_min},
+ {33.23 * int64_min, 4000. * int64_max, int64_min, int64_max}};
+
+ test_ftint_s_s(sizeof(tc_s4), tc_s4, kRoundToMinusInf);
+ test_ftint_s_d(sizeof(tc_d4), tc_d4, kRoundToMinusInf);
+}
+
+void test_ftint_u_s(size_t data_size, TestCaseMsa2RF_F_U tc_d[],
+ int rounding_mode) {
+ for (size_t i = 0; i < data_size / sizeof(TestCaseMsa2RF_F_U); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [&rounding_mode](MacroAssembler& assm) {
+ MSAControlRegister msareg = {kMSACSRRegister};
+ __ li(t0, static_cast<uint32_t>(rounding_mode));
+ __ cfcmsa(t1, msareg);
+ __ ctcmsa(msareg, t0);
+ __ ftint_u_w(w2, w0);
+ __ ctcmsa(msareg, t1);
+ },
+ load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ }
+}
+
+void test_ftint_u_d(size_t data_size, TestCaseMsa2RF_D_U tc_d[],
+ int rounding_mode) {
+ for (size_t i = 0; i < data_size / sizeof(TestCaseMsa2RF_D_U); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [&rounding_mode](MacroAssembler& assm) {
+ MSAControlRegister msareg = {kMSACSRRegister};
+ __ li(t0, static_cast<uint32_t>(rounding_mode));
+ __ cfcmsa(t1, msareg);
+ __ ctcmsa(msareg, t0);
+ __ ftint_u_d(w2, w0);
+ __ ctcmsa(msareg, t1);
+ },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
+TEST(MSA_ftint_u) {
+ if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float inf_float = std::numeric_limits<float>::infinity();
+ const double inf_double = std::numeric_limits<double>::infinity();
+ const uint32_t uint32_max = std::numeric_limits<uint32_t>::max();
+ const uint64_t uint64_max = std::numeric_limits<uint64_t>::max();
+
+ struct TestCaseMsa2RF_F_U tc_s1[] = {
+ {0.f, 4.51f, 1.49f, -12.51f, 0, 5, 1, 0},
+ {-0.32f, 23.38f, 2.8f, 32.6f, 0, 23, 3, 33},
+ {inf_float, -inf_float, 0, 4.f * uint32_max, uint32_max, 0, 0,
+ uint32_max}};
+
+ struct TestCaseMsa2RF_D_U tc_d1[] = {
+ {0., 4.51, 0, 5},
+ {1.49, -12.51, 1, 0},
+ {-0.32, 23.38, 0, 23},
+ {2.8, 32.6, 3, 33},
+ {inf_double, -inf_double, uint64_max, 0},
+ {-0., 4000. * uint64_max, 0, uint64_max}};
+
+ test_ftint_u_s(sizeof(tc_s1), tc_s1, kRoundToNearest);
+ test_ftint_u_d(sizeof(tc_d1), tc_d1, kRoundToNearest);
+
+ struct TestCaseMsa2RF_F_U tc_s2[] = {
+ {0.f, 4.5f, 1.49f, -12.51f, 0, 4, 1, 0},
+ {-0.f, 23.38f, 2.8f, 32.6f, 0, 23, 2, 32},
+ {inf_float, -inf_float, 0., 4.f * uint32_max, uint32_max, 0, 0,
+ uint32_max}};
+
+ struct TestCaseMsa2RF_D_U tc_d2[] = {
+ {0., 4.5, 0, 4},
+ {1.49, -12.51, 1, 0},
+ {-0., 23.38, 0, 23},
+ {2.8, 32.6, 2, 32},
+ {inf_double, -inf_double, uint64_max, 0},
+ {-0.2345, 4000. * uint64_max, 0, uint64_max}};
+
+ test_ftint_u_s(sizeof(tc_s2), tc_s2, kRoundToZero);
+ test_ftint_u_d(sizeof(tc_d2), tc_d2, kRoundToZero);
+
+ struct TestCaseMsa2RF_F_U tc_s3[] = {
+ {0.f, 4.5f, 1.49f, -12.51f, 0, 5, 2, 0},
+ {-0.f, 23.38f, 2.8f, 32.6f, 0, 24, 3, 33},
+ {inf_float, -inf_float, 0, 4.f * uint32_max, uint32_max, 0, 0,
+ uint32_max}};
+
+ struct TestCaseMsa2RF_D_U tc_d3[] = {
+ {0., 4.5, 0, 5},
+ {1.49, -12.51, 2, 0},
+ {-0., 23.38, -0, 24},
+ {2.8, 32.6, 3, 33},
+ {inf_double, -inf_double, uint64_max, 0},
+ {-0.5252, 4000. * uint64_max, 0, uint64_max}};
+
+ test_ftint_u_s(sizeof(tc_s3), tc_s3, kRoundToPlusInf);
+ test_ftint_u_d(sizeof(tc_d3), tc_d3, kRoundToPlusInf);
+
+ struct TestCaseMsa2RF_F_U tc_s4[] = {
+ {0.f, 4.5f, 1.49f, -12.51f, 0, 4, 1, 0},
+ {-0.f, 23.38f, 2.8f, 32.6f, 0, 23, 2, 32},
+ {inf_float, -inf_float, 0, 4.f * uint32_max, uint32_max, 0, 0,
+ uint32_max}};
+
+ struct TestCaseMsa2RF_D_U tc_d4[] = {
+ {0., 4.5, 0, 4},
+ {1.49, -12.51, 1, 0},
+ {-0., 23.38, -0, 23},
+ {2.8, 32.6, 2, 32},
+ {inf_double, -inf_double, uint64_max, 0},
+ {-0.098797, 4000. * uint64_max, 0, uint64_max}};
+
+ test_ftint_u_s(sizeof(tc_s4), tc_s4, kRoundToMinusInf);
+ test_ftint_u_d(sizeof(tc_d4), tc_d4, kRoundToMinusInf);
+}
+
+struct TestCaseMsa2RF_U_F {
+ uint32_t ws1;
+ uint32_t ws2;
+ uint32_t ws3;
+ uint32_t ws4;
+ float exp_res_1;
+ float exp_res_2;
+ float exp_res_3;
+ float exp_res_4;
+};
+
+struct TestCaseMsa2RF_U_D {
+ uint64_t ws1;
+ uint64_t ws2;
+ double exp_res_1;
+ double exp_res_2;
+};
+
+TEST(MSA_ffint_u) {
+ if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ struct TestCaseMsa2RF_U_F tc_s[] = {
+ {0, 345, 234, 1000, 0.f, 345.f, 234.f, 1000.f}};
+
+ struct TestCaseMsa2RF_U_D tc_d[] = {{0, 345, 0., 345.},
+ {234, 1000, 234., 1000.}};
+
+ for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U_F); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
+ [](MacroAssembler& assm) { __ ffint_u_w(w2, w0); },
+ load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ }
+ for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_U_D); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [](MacroAssembler& assm) { __ ffint_u_d(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
+struct TestCaseMsa2RF_I_F {
+ int32_t ws1;
+ int32_t ws2;
+ int32_t ws3;
+ int32_t ws4;
+ float exp_res_1;
+ float exp_res_2;
+ float exp_res_3;
+ float exp_res_4;
+};
+
+struct TestCaseMsa2RF_I_D {
+ int64_t ws1;
+ int64_t ws2;
+ double exp_res_1;
+ double exp_res_2;
+};
+
+TEST(MSA_ffint_s) {
+ if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ struct TestCaseMsa2RF_I_F tc_s[] = {
+ {0, 345, -234, 1000, 0.f, 345.f, -234.f, 1000.f}};
+
+ struct TestCaseMsa2RF_I_D tc_d[] = {{0, 345, 0., 345.},
+ {-234, 1000, -234., 1000.}};
+
+ for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_I_F); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
+ [](MacroAssembler& assm) { __ ffint_s_w(w2, w0); },
+ load_uint32_elements_of_vector, store_uint32_elements_of_vector);
+ }
+ for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_I_D); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [](MacroAssembler& assm) { __ ffint_s_d(w2, w0); },
+ load_uint64_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
+struct TestCaseMsa2RF_U16_F {
+ uint16_t ws1;
+ uint16_t ws2;
+ uint16_t ws3;
+ uint16_t ws4;
+ uint16_t ws5;
+ uint16_t ws6;
+ uint16_t ws7;
+ uint16_t ws8;
+ float exp_res_1;
+ float exp_res_2;
+ float exp_res_3;
+ float exp_res_4;
+};
+
+struct TestCaseMsa2RF_F_D {
+ float ws1;
+ float ws2;
+ float ws3;
+ float ws4;
+ double exp_res_1;
+ double exp_res_2;
+};
+
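+// fexupl/fexupr widen each element of the left/right half of the source
+// vector to the next wider floating-point format (half -> single precision
+// for the _w form, single -> double for _d), e.g. half 0x7c00 -> +infinity.
+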
+TEST(MSA_fexupl) {
+ if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float inf_float = std::numeric_limits<float>::infinity();
+ const double inf_double = std::numeric_limits<double>::infinity();
+
+ struct TestCaseMsa2RF_U16_F tc_s[] = {
+ {1, 2, 0x7c00, 0x0c00, 0, 0x7c00, 0xfc00, 0x8000, 0.f, inf_float,
+ -inf_float, -0.f},
+ {0xfc00, 0xffff, 0x00ff, 0x8000, 0x81fe, 0x8000, 0x0345, 0xaaaa,
+ -3.0398368835e-5f, -0.f, 4.9889088e-5f, -5.2062988281e-2f},
+ {3, 4, 0x5555, 6, 0x2aaa, 0x8700, 0x7777, 0x6a8b, 5.2062988281e-2f,
+ -1.06811523458e-4f, 3.0576e4f, 3.35e3f}};
+
+ struct TestCaseMsa2RF_F_D tc_d[] = {
+ {0.f, 123.456f, inf_float, -0.f, inf_double, -0.},
+ {-inf_float, -3.f, 0.f, -inf_float, 0., -inf_double},
+ {2.3f, 3., 1.37747639043129518071e-41f, -3.22084585277826e35f,
+ 1.37747639043129518071e-41, -3.22084585277826e35}};
+
+ for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U16_F); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
+ [](MacroAssembler& assm) { __ fexupl_w(w2, w0); },
+ load_uint16_elements_of_vector, store_uint32_elements_of_vector);
+ }
+ for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_F_D); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [](MacroAssembler& assm) { __ fexupl_d(w2, w0); },
+ load_uint32_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
+TEST(MSA_fexupr) {
+ if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ const float inf_float = std::numeric_limits<float>::infinity();
+ const double inf_double = std::numeric_limits<double>::infinity();
+
+ struct TestCaseMsa2RF_U16_F tc_s[] = {
+ {0, 0x7c00, 0xfc00, 0x8000, 1, 2, 0x7c00, 0x0c00, 0.f, inf_float,
+ -inf_float, -0.f},
+ {0x81fe, 0x8000, 0x0345, 0xaaaa, 0xfc00, 0xffff, 0x00ff, 0x8000,
+ -3.0398368835e-5f, -0.f, 4.9889088e-5f, -5.2062988281e-2f},
+ {0x2aaa, 0x8700, 0x7777, 0x6a8b, 3, 4, 0x5555, 6, 5.2062988281e-2f,
+ -1.06811523458e-4f, 3.0576e4f, 3.35e3f}};
+
+ struct TestCaseMsa2RF_F_D tc_d[] = {
+ {inf_float, -0.f, 0.f, 123.456f, inf_double, -0.},
+ {0.f, -inf_float, -inf_float, -3.f, 0., -inf_double},
+ {1.37747639043129518071e-41f, -3.22084585277826e35f, 2.3f, 3.,
+ 1.37747639043129518071e-41, -3.22084585277826e35}};
+
+ for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U16_F); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
+ [](MacroAssembler& assm) { __ fexupr_w(w2, w0); },
+ load_uint16_elements_of_vector, store_uint32_elements_of_vector);
+ }
+ for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_F_D); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [](MacroAssembler& assm) { __ fexupr_d(w2, w0); },
+ load_uint32_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
+struct TestCaseMsa2RF_U32_D {
+ uint32_t ws1;
+ uint32_t ws2;
+ uint32_t ws3;
+ uint32_t ws4;
+ double exp_res_1;
+ double exp_res_2;
+};
+
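+// ffql/ffqr convert Q15 (halfword) and Q31 (word) fixed-point elements to
+// floating point; ffql reads the left (upper) half of the source vector and
+// ffqr the right (lower) half, e.g. Q15 0x8000 -> -1.0f.
+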
+TEST(MSA_ffql) {
+ if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ struct TestCaseMsa2RF_U16_F tc_s[] = {{0, 3, 0xffff, 0x8000, 0x8000, 0xe000,
+ 0x0FF0, 0, -1.f, -0.25f,
+ 0.12451171875f, 0.f}};
+
+ struct TestCaseMsa2RF_U32_D tc_d[] = {
+ {0, 45, 0x80000000, 0xe0000000, -1., -0.25},
+ {0x28379, 0xaaaa5555, 0x024903d3, 0, 17.853239085525274277e-3, 0.}};
+
+ for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U16_F); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
+ [](MacroAssembler& assm) { __ ffql_w(w2, w0); },
+ load_uint16_elements_of_vector, store_uint32_elements_of_vector);
+ }
+ for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_U32_D); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [](MacroAssembler& assm) { __ ffql_d(w2, w0); },
+ load_uint32_elements_of_vector, store_uint64_elements_of_vector);
+ }
+}
+
+TEST(MSA_ffqr) {
+ if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
+ return;
+
+ CcTest::InitializeVM();
+
+ struct TestCaseMsa2RF_U16_F tc_s[] = {{0x8000, 0xe000, 0x0FF0, 0, 0, 3,
+ 0xffff, 0x8000, -1.f, -0.25f,
+ 0.12451171875f, 0.f}};
+
+ struct TestCaseMsa2RF_U32_D tc_d[] = {
+ {0x80000000, 0xe0000000, 0, 45, -1., -0.25},
+ {0x024903d3, 0, 0x28379, 0xaaaa5555, 17.853239085525274277e-3, 0.}};
+
+ for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U16_F); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_s[i]),
+ [](MacroAssembler& assm) { __ ffqr_w(w2, w0); },
+ load_uint16_elements_of_vector, store_uint32_elements_of_vector);
+ }
+ for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_U32_D); ++i) {
+ run_msa_2r(reinterpret_cast<const TestCaseMsa2R*>(&tc_d[i]),
+ [](MacroAssembler& assm) { __ ffqr_d(w2, w0); },
+ load_uint32_elements_of_vector, store_uint64_elements_of_vector);
}
}
@@ -7776,35 +8751,21 @@ void run_msa_vector(struct TestCaseMsaVector* input,
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
-#define LOAD_W_REG(lo, hi, w_reg) \
- __ li(t0, lo); \
- __ li(t1, hi); \
- __ insert_d(w_reg, 0, t0); \
- __ insert_d(w_reg, 1, t1)
-
- LOAD_W_REG(input->ws_lo, input->ws_hi, w0);
- LOAD_W_REG(input->wt_lo, input->wt_hi, w2);
- LOAD_W_REG(input->wd_lo, input->wd_hi, w4);
-#undef LOAD_W_REG
+ load_uint64_elements_of_vector(assm, &(input->ws_lo), w0, t0, t1);
+ load_uint64_elements_of_vector(assm, &(input->wt_lo), w2, t0, t1);
+ load_uint64_elements_of_vector(assm, &(input->wd_lo), w4, t0, t1);
GenerateVectorInstructionFunc(assm);
- __ copy_u_w(t2, w4, 0);
- __ sw(t2, MemOperand(a0, 0));
- __ copy_u_w(t2, w4, 1);
- __ sw(t2, MemOperand(a0, 4));
- __ copy_u_w(t2, w4, 2);
- __ sw(t2, MemOperand(a0, 8));
- __ copy_u_w(t2, w4, 3);
- __ sw(t2, MemOperand(a0, 12));
+ store_uint64_elements_of_vector(assm, w4, a0);
__ jr(ra);
__ nop();
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -7879,34 +8840,20 @@ void run_msa_bit(struct TestCaseMsaBit* input, InstFunc GenerateInstructionFunc,
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
-#define LOAD_W_REG(lo, hi, w_reg) \
- __ li(t0, lo); \
- __ li(t1, hi); \
- __ insert_d(w_reg, 0, t0); \
- __ insert_d(w_reg, 1, t1)
-
- LOAD_W_REG(input->ws_lo, input->ws_hi, w0);
- LOAD_W_REG(input->wd_lo, input->wd_hi, w2);
-#undef LOAD_W_REG
+ load_uint64_elements_of_vector(assm, &(input->ws_lo), w0, t0, t1);
+ load_uint64_elements_of_vector(assm, &(input->wd_lo), w2, t0, t1);
GenerateInstructionFunc(assm, input->m);
- __ copy_u_w(t2, w2, 0);
- __ sw(t2, MemOperand(a0, 0));
- __ copy_u_w(t2, w2, 1);
- __ sw(t2, MemOperand(a0, 4));
- __ copy_u_w(t2, w2, 2);
- __ sw(t2, MemOperand(a0, 8));
- __ copy_u_w(t2, w2, 3);
- __ sw(t2, MemOperand(a0, 12));
+ store_uint64_elements_of_vector(assm, w2, a0);
__ jr(ra);
__ nop();
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -8371,22 +9318,15 @@ void run_msa_i10(int32_t input, InstFunc GenerateVectorInstructionFunc,
GenerateVectorInstructionFunc(assm, input);
- __ copy_u_w(t2, w0, 0);
- __ sw(t2, MemOperand(a0, 0));
- __ copy_u_w(t2, w0, 1);
- __ sw(t2, MemOperand(a0, 4));
- __ copy_u_w(t2, w0, 2);
- __ sw(t2, MemOperand(a0, 8));
- __ copy_u_w(t2, w0, 3);
- __ sw(t2, MemOperand(a0, 12));
+ store_uint64_elements_of_vector(assm, w0, a0);
__ jr(ra);
__ nop();
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -8463,8 +9403,8 @@ void run_msa_mi10(InstFunc GenerateVectorInstructionFunc) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -8509,4 +9449,782 @@ TEST(MSA_load_store_vector) {
#undef LDI_DF
}
+struct TestCaseMsa3R {
+ uint64_t ws_lo;
+ uint64_t ws_hi;
+ uint64_t wt_lo;
+ uint64_t wt_hi;
+ uint64_t wd_lo;
+ uint64_t wd_hi;
+};
+
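+// Sentinel value returned by the expected-result generators for lanes whose
+// result is architecturally unpredictable; such lanes are not checked.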
+static const uint64_t Unpredictable = 0x312014017725ll;
+
+template <typename InstFunc, typename OperFunc>
+void run_msa_3r(struct TestCaseMsa3R* input, InstFunc Generate3RInstructionFunc,
+ OperFunc GenerateOperationFunc) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ CpuFeatureScope fscope(&assm, MIPS_SIMD);
+ msa_reg_t res;
+ uint64_t expected;
+
+ load_uint64_elements_of_vector(assm, &(input->wt_lo), w0, t0, t1);
+ load_uint64_elements_of_vector(assm, &(input->ws_lo), w1, t0, t1);
+ load_uint64_elements_of_vector(assm, &(input->wd_lo), w2, t0, t1);
+
+ Generate3RInstructionFunc(assm);
+
+ store_uint64_elements_of_vector(assm, w2, a0);
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+
+ (CALL_GENERATED_CODE(isolate, f, &res, 0, 0, 0, 0));
+
+ expected = GenerateOperationFunc(input->ws_lo, input->wt_lo, input->wd_lo);
+ if (expected != Unpredictable) {
+ CHECK_EQ(expected, res.d[0]);
+ }
+
+ expected = GenerateOperationFunc(input->ws_hi, input->wt_hi, input->wd_hi);
+ if (expected != Unpredictable) {
+ CHECK_EQ(expected, res.d[1]);
+ }
+}
+
+TEST(MSA_3R_instructions) {
+ if (kArchVariant == kMips64r6 || !CpuFeatures::IsSupported(MIPS_SIMD)) return;
+
+ CcTest::InitializeVM();
+
+ struct TestCaseMsa3R tc[] = {
+ {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x1169751bb9a7d9c3,
+ 0xf7a594aec8ef8a9c, 0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c},
+ {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0x2b665362c4e812df,
+ 0x3a0d80d68b3f8bc8, 0x2b665362c4e812df, 0x3a0d80d68b3f8bc8},
+ {0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c, 0x1169751bb9a7d9c3,
+ 0xf7a594aec8ef8a9c, 0x1169751bb9a7d9c3, 0xf7a594aec8ef8a9c},
+ {0x2b665362c4e812df, 0x3a0d80d68b3f8bc8, 0x2b665362c4e812df,
+ 0x3a0d80d68b3f8bc8, 0x2b665362c4e812df, 0x3a0d80d68b3f8bc8},
+ {0xffab807f807fffcd, 0x7f23ff80ff567f80, 0xffab807f807fffcd,
+ 0x7f23ff80ff567f80, 0xffab807f807fffcd, 0x7f23ff80ff567f80},
+ {0x80ffefff7f12807f, 0x807f80ff7fdeff78, 0x80ffefff7f12807f,
+ 0x807f80ff7fdeff78, 0x80ffefff7f12807f, 0x807f80ff7fdeff78},
+ {0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff,
+ 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff},
+ {0x0000000000000000, 0xffffffffffffffff, 0xffffffffffffffff,
+ 0x0000000000000000, 0x0000000000000000, 0xffffffffffffffff},
+ {0xffff0000ffff0000, 0xffff0000ffff0000, 0xffff0000ffff0000,
+ 0xffff0000ffff0000, 0xffff0000ffff0000, 0xffff0000ffff0000},
+ {0xff00ff00ff00ff00, 0xff00ff00ff00ff00, 0xff00ff00ff00ff00,
+ 0xff00ff00ff00ff00, 0xff00ff00ff00ff00, 0xff00ff00ff00ff00},
+ {0xf0f0f0f0f0f0f0f0, 0xf0f0f0f0f0f0f0f0, 0xf0f0f0f0f0f0f0f0,
+ 0xf0f0f0f0f0f0f0f0, 0xf0f0f0f0f0f0f0f0, 0xf0f0f0f0f0f0f0f0},
+ {0xff0000ffff0000ff, 0xff0000ffff0000ff, 0xff0000ffff0000ff,
+ 0xff0000ffff0000ff, 0xff0000ffff0000ff, 0xff0000ffff0000ff},
+ {0xffff00000000ffff, 0xffff00000000ffff, 0xffff00000000ffff,
+ 0xffff00000000ffff, 0xffff00000000ffff, 0xffff00000000ffff}};
+
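+// Each *_DF macro below emulates one MSA instruction on a single 64-bit half
+// of the 128-bit vector, hence the loop bound of lanes / 2: every lane is
+// extracted with a shift and mask, operated on in the lane type T, and then
+// packed back into the 64-bit result.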
+#define SLL_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T src_op = static_cast<T>((ws >> shift) & mask); \
+ T shift_op = static_cast<T>((wt >> shift) & mask) % size_in_bits; \
+ res |= (static_cast<uint64_t>(src_op << shift_op) & mask) << shift; \
+ } \
+ return res
+
+#define SRA_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T src_op = static_cast<T>((ws >> shift) & mask); \
+ int shift_op = ((wt >> shift) & mask) % size_in_bits; \
+ res |= \
+ (static_cast<uint64_t>(ArithmeticShiftRight(src_op, shift_op) & mask)) \
+ << shift; \
+ } \
+ return res
+
+#define SRL_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T src_op = static_cast<T>((ws >> shift) & mask); \
+ T shift_op = static_cast<T>(((wt >> shift) & mask) % size_in_bits); \
+ res |= (static_cast<uint64_t>(src_op >> shift_op) & mask) << shift; \
+ } \
+ return res
+
+#define BCLR_DF(T, lanes, mask)                                               \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T src_op = static_cast<T>((ws >> shift) & mask); \
+ T shift_op = static_cast<T>(((wt >> shift) & mask) % size_in_bits); \
+ T r = (static_cast<T>(~(1ull << shift_op)) & src_op) & mask; \
+ res |= static_cast<uint64_t>(r) << shift; \
+ } \
+ return res
+
+#define BSET_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T src_op = static_cast<T>((ws >> shift) & mask); \
+ T shift_op = static_cast<T>(((wt >> shift) & mask) % size_in_bits); \
+ T r = (static_cast<T>(1ull << shift_op) | src_op) & mask; \
+ res |= static_cast<uint64_t>(r) << shift; \
+ } \
+ return res
+
+#define BNEG_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T src_op = static_cast<T>((ws >> shift) & mask); \
+ T shift_op = static_cast<T>(((wt >> shift) & mask) % size_in_bits); \
+ T r = (static_cast<T>(1ull << shift_op) ^ src_op) & mask; \
+ res |= static_cast<uint64_t>(r) << shift; \
+ } \
+ return res
+
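+// BINSL copies the shift_op + 1 most significant bits of each ws lane into
+// the wd lane; BINSR below is the mirror image for the least significant bits.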
+#define BINSL_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wd_op = static_cast<T>((wd >> shift) & mask); \
+ int shift_op = static_cast<int>(((wt >> shift) & mask) % size_in_bits); \
+ int bits = shift_op + 1; \
+ T r; \
+ if (bits == size_in_bits) { \
+ r = static_cast<T>(ws_op); \
+ } else { \
+ uint64_t mask2 = ((1ull << bits) - 1) << (size_in_bits - bits); \
+ r = static_cast<T>((static_cast<T>(mask2) & ws_op) | \
+ (static_cast<T>(~mask2) & wd_op)); \
+ } \
+ res |= static_cast<uint64_t>(r) << shift; \
+ } \
+ return res
+
+#define BINSR_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wd_op = static_cast<T>((wd >> shift) & mask); \
+ int shift_op = static_cast<int>(((wt >> shift) & mask) % size_in_bits); \
+ int bits = shift_op + 1; \
+ T r; \
+ if (bits == size_in_bits) { \
+ r = static_cast<T>(ws_op); \
+ } else { \
+ uint64_t mask2 = (1ull << bits) - 1; \
+ r = static_cast<T>((static_cast<T>(mask2) & ws_op) | \
+ (static_cast<T>(~mask2) & wd_op)); \
+ } \
+ res |= static_cast<uint64_t>(r) << shift; \
+ } \
+ return res
+
+#define ADDV_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(ws_op + wt_op) & mask) << shift; \
+ } \
+ return res
+
+#define SUBV_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(ws_op - wt_op) & mask) << shift; \
+ } \
+ return res
+
+#define MAX_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(Max<T>(ws_op, wt_op)) & mask) << shift; \
+ } \
+ return res
+
+#define MIN_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(Min<T>(ws_op, wt_op)) & mask) << shift; \
+ } \
+ return res
+
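+// MAXA/MINA compare magnitudes through Nabs (negated absolute value), which
+// is representable even for the minimum signed value, where Abs would
+// overflow.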
+#define MAXA_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(Nabs(ws_op) < Nabs(wt_op) ? ws_op : wt_op) & \
+ mask) \
+ << shift; \
+ } \
+ return res
+
+#define MINA_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(Nabs(ws_op) > Nabs(wt_op) ? ws_op : wt_op) & \
+ mask) \
+ << shift; \
+ } \
+ return res
+
+#define CEQ_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= \
+ (static_cast<uint64_t>(!Compare(ws_op, wt_op) ? -1ull : 0ull) & mask) \
+ << shift; \
+ } \
+ return res
+
+#define CLT_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= \
+ (static_cast<uint64_t>((Compare(ws_op, wt_op) == -1) ? -1ull : 0ull) & \
+ mask) \
+ << shift; \
+ } \
+ return res
+
+#define CLE_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= \
+ (static_cast<uint64_t>((Compare(ws_op, wt_op) != 1) ? -1ull : 0ull) & \
+ mask) \
+ << shift; \
+ } \
+ return res
+
+#define ADD_A_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(Abs(ws_op) + Abs(wt_op)) & mask) << shift; \
+ } \
+ return res
+
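+// ADDS_A: saturating sum of absolute values, again computed through Nabs so
+// that the magnitude of the minimum signed value never has to be represented.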
+#define ADDS_A_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = Nabs(static_cast<T>((ws >> shift) & mask)); \
+ T wt_op = Nabs(static_cast<T>((wt >> shift) & mask)); \
+ T r; \
+ if (ws_op < -std::numeric_limits<T>::max() - wt_op) { \
+ r = std::numeric_limits<T>::max(); \
+ } else { \
+ r = -(ws_op + wt_op); \
+ } \
+ res |= (static_cast<uint64_t>(r) & mask) << shift; \
+ } \
+ return res
+
+#define ADDS_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(SaturateAdd(ws_op, wt_op)) & mask) << shift; \
+ } \
+ return res
+
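+// Overflow-free averages: (a & b) + ((a ^ b) >> 1) is floor((a + b) / 2) and
+// AVER's (a | b) - ((a ^ b) >> 1) is the rounded-up average, neither of which
+// ever forms the full-width sum a + b.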
+#define AVE_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(((wt_op & ws_op) + ((ws_op ^ wt_op) >> 1)) & \
+ mask)) \
+ << shift; \
+ } \
+ return res
+
+#define AVER_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(((wt_op | ws_op) - ((ws_op ^ wt_op) >> 1)) & \
+ mask)) \
+ << shift; \
+ } \
+ return res
+
+#define SUBS_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(SaturateSub(ws_op, wt_op)) & mask) << shift; \
+ } \
+ return res
+
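+// SUBSUS_U: subtract a signed wt lane from an unsigned ws lane, saturating
+// the result to the unsigned range [0, max(uT)].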
+#define SUBSUS_U_DF(T, lanes, mask) \
+ typedef typename std::make_unsigned<T>::type uT; \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ uT ws_op = static_cast<uT>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ T r; \
+ if (wt_op > 0) { \
+ uT wtu = static_cast<uT>(wt_op); \
+ if (wtu > ws_op) { \
+ r = 0; \
+ } else { \
+ r = static_cast<T>(ws_op - wtu); \
+ } \
+ } else { \
+ if (ws_op > std::numeric_limits<uT>::max() + wt_op) { \
+ r = static_cast<T>(std::numeric_limits<uT>::max()); \
+ } else { \
+ r = static_cast<T>(ws_op - wt_op); \
+ } \
+ } \
+ res |= (static_cast<uint64_t>(r) & mask) << shift; \
+ } \
+ return res
+
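+// SUBSUU_S: signed difference of two unsigned lanes, saturated to the signed
+// range [min(T), max(T)].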
+#define SUBSUU_S_DF(T, lanes, mask) \
+ typedef typename std::make_unsigned<T>::type uT; \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ uT ws_op = static_cast<uT>((ws >> shift) & mask); \
+ uT wt_op = static_cast<uT>((wt >> shift) & mask); \
+ uT wdu; \
+ T r; \
+ if (ws_op > wt_op) { \
+ wdu = ws_op - wt_op; \
+ if (wdu > std::numeric_limits<T>::max()) { \
+ r = std::numeric_limits<T>::max(); \
+ } else { \
+ r = static_cast<T>(wdu); \
+ } \
+ } else { \
+ wdu = wt_op - ws_op; \
+ CHECK(-std::numeric_limits<T>::max() == \
+ std::numeric_limits<T>::min() + 1); \
+ if (wdu <= std::numeric_limits<T>::max()) { \
+ r = -static_cast<T>(wdu); \
+ } else { \
+ r = std::numeric_limits<T>::min(); \
+ } \
+ } \
+ res |= (static_cast<uint64_t>(r) & mask) << shift; \
+ } \
+ return res
+
+#define ASUB_S_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(Abs(ws_op - wt_op)) & mask) << shift; \
+ } \
+ return res
+
+#define ASUB_U_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(ws_op > wt_op ? ws_op - wt_op \
+ : wt_op - ws_op) & \
+ mask) \
+ << shift; \
+ } \
+ return res
+
+#define MULV_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ res |= (static_cast<uint64_t>(ws_op * wt_op) & mask) << shift; \
+ } \
+ return res
+
+#define MADDV_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ T wd_op = static_cast<T>((wd >> shift) & mask); \
+ res |= (static_cast<uint64_t>(wd_op + ws_op * wt_op) & mask) << shift; \
+ } \
+ return res
+
+#define MSUBV_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ T wd_op = static_cast<T>((wd >> shift) & mask); \
+ res |= (static_cast<uint64_t>(wd_op - ws_op * wt_op) & mask) << shift; \
+ } \
+ return res
+
+#define DIV_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ if (wt_op == 0) { \
+ res = Unpredictable; \
+ break; \
+ } \
+ res |= (static_cast<uint64_t>(ws_op / wt_op) & mask) << shift; \
+ } \
+ return res
+
+#define MOD_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T ws_op = static_cast<T>((ws >> shift) & mask); \
+ T wt_op = static_cast<T>((wt >> shift) & mask); \
+ if (wt_op == 0) { \
+ res = Unpredictable; \
+ break; \
+ } \
+    res |= (static_cast<uint64_t>(ws_op % wt_op) & mask) << shift;            \
+ } \
+ return res
+
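+// Rounding shift right: the last bit shifted out (bit shift_op - 1) is added
+// back, so results round to nearest instead of truncating. The same macro
+// serves srar and srlr below via signed vs. unsigned lane types.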
+#define SRAR_DF(T, lanes, mask) \
+ uint64_t res = 0; \
+ int size_in_bits = kMSARegSize / lanes; \
+ for (int i = 0; i < lanes / 2; ++i) { \
+ uint64_t shift = size_in_bits * i; \
+ T src_op = static_cast<T>((ws >> shift) & mask); \
+ int shift_op = ((wt >> shift) & mask) % size_in_bits; \
+ uint32_t bit = shift_op == 0 ? 0 : src_op >> (shift_op - 1) & 1; \
+ res |= \
+ (static_cast<uint64_t>(ArithmeticShiftRight(src_op, shift_op) + bit) & \
+ mask) \
+ << shift; \
+ } \
+ return res
+
+#define TEST_CASE(V) \
+ V(sll_b, SLL_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(sll_h, SLL_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(sll_w, SLL_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(sll_d, SLL_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(sra_b, SRA_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(sra_h, SRA_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(sra_w, SRA_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(sra_d, SRA_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(srl_b, SRL_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(srl_h, SRL_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(srl_w, SRL_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(srl_d, SRL_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+  V(bclr_b, BCLR_DF, uint8_t, kMSALanesByte, UINT8_MAX)                       \
+  V(bclr_h, BCLR_DF, uint16_t, kMSALanesHalf, UINT16_MAX)                     \
+  V(bclr_w, BCLR_DF, uint32_t, kMSALanesWord, UINT32_MAX)                     \
+  V(bclr_d, BCLR_DF, uint64_t, kMSALanesDword, UINT64_MAX)                    \
+ V(bset_b, BSET_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(bset_h, BSET_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(bset_w, BSET_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(bset_d, BSET_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(bneg_b, BNEG_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(bneg_h, BNEG_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(bneg_w, BNEG_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(bneg_d, BNEG_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(binsl_b, BINSL_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(binsl_h, BINSL_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(binsl_w, BINSL_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(binsl_d, BINSL_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(binsr_b, BINSR_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(binsr_h, BINSR_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(binsr_w, BINSR_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(binsr_d, BINSR_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(addv_b, ADDV_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(addv_h, ADDV_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(addv_w, ADDV_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(addv_d, ADDV_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(subv_b, SUBV_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(subv_h, SUBV_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(subv_w, SUBV_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(subv_d, SUBV_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(max_s_b, MAX_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(max_s_h, MAX_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(max_s_w, MAX_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(max_s_d, MAX_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(max_u_b, MAX_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(max_u_h, MAX_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(max_u_w, MAX_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(max_u_d, MAX_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(min_s_b, MIN_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(min_s_h, MIN_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(min_s_w, MIN_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(min_s_d, MIN_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(min_u_b, MIN_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(min_u_h, MIN_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(min_u_w, MIN_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(min_u_d, MIN_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(max_a_b, MAXA_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(max_a_h, MAXA_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(max_a_w, MAXA_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(max_a_d, MAXA_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(min_a_b, MINA_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(min_a_h, MINA_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(min_a_w, MINA_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(min_a_d, MINA_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(ceq_b, CEQ_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(ceq_h, CEQ_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(ceq_w, CEQ_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(ceq_d, CEQ_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(clt_s_b, CLT_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(clt_s_h, CLT_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(clt_s_w, CLT_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(clt_s_d, CLT_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(clt_u_b, CLT_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(clt_u_h, CLT_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(clt_u_w, CLT_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(clt_u_d, CLT_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(cle_s_b, CLE_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(cle_s_h, CLE_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(cle_s_w, CLE_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(cle_s_d, CLE_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(cle_u_b, CLE_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(cle_u_h, CLE_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(cle_u_w, CLE_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(cle_u_d, CLE_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(add_a_b, ADD_A_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(add_a_h, ADD_A_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(add_a_w, ADD_A_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(add_a_d, ADD_A_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(adds_a_b, ADDS_A_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(adds_a_h, ADDS_A_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(adds_a_w, ADDS_A_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(adds_a_d, ADDS_A_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(adds_s_b, ADDS_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(adds_s_h, ADDS_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(adds_s_w, ADDS_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(adds_s_d, ADDS_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(adds_u_b, ADDS_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(adds_u_h, ADDS_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(adds_u_w, ADDS_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(adds_u_d, ADDS_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(ave_s_b, AVE_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(ave_s_h, AVE_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(ave_s_w, AVE_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(ave_s_d, AVE_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(ave_u_b, AVE_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(ave_u_h, AVE_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(ave_u_w, AVE_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(ave_u_d, AVE_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(aver_s_b, AVER_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(aver_s_h, AVER_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(aver_s_w, AVER_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(aver_s_d, AVER_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(aver_u_b, AVER_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(aver_u_h, AVER_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(aver_u_w, AVER_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(aver_u_d, AVER_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(subs_s_b, SUBS_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(subs_s_h, SUBS_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(subs_s_w, SUBS_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(subs_s_d, SUBS_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(subs_u_b, SUBS_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(subs_u_h, SUBS_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(subs_u_w, SUBS_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(subs_u_d, SUBS_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(subsus_u_b, SUBSUS_U_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(subsus_u_h, SUBSUS_U_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(subsus_u_w, SUBSUS_U_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(subsus_u_d, SUBSUS_U_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(subsuu_s_b, SUBSUU_S_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(subsuu_s_h, SUBSUU_S_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(subsuu_s_w, SUBSUU_S_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(subsuu_s_d, SUBSUU_S_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(asub_s_b, ASUB_S_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(asub_s_h, ASUB_S_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(asub_s_w, ASUB_S_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(asub_s_d, ASUB_S_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(asub_u_b, ASUB_U_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(asub_u_h, ASUB_U_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(asub_u_w, ASUB_U_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(asub_u_d, ASUB_U_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(mulv_b, MULV_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(mulv_h, MULV_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(mulv_w, MULV_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(mulv_d, MULV_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(maddv_b, MADDV_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(maddv_h, MADDV_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(maddv_w, MADDV_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(maddv_d, MADDV_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(msubv_b, MSUBV_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(msubv_h, MSUBV_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(msubv_w, MSUBV_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(msubv_d, MSUBV_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(div_s_b, DIV_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(div_s_h, DIV_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(div_s_w, DIV_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(div_s_d, DIV_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(div_u_b, DIV_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(div_u_h, DIV_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(div_u_w, DIV_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(div_u_d, DIV_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(mod_s_b, MOD_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(mod_s_h, MOD_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(mod_s_w, MOD_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(mod_s_d, MOD_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(mod_u_b, MOD_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(mod_u_h, MOD_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(mod_u_w, MOD_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(mod_u_d, MOD_DF, uint64_t, kMSALanesDword, UINT64_MAX) \
+ V(srar_b, SRAR_DF, int8_t, kMSALanesByte, UINT8_MAX) \
+ V(srar_h, SRAR_DF, int16_t, kMSALanesHalf, UINT16_MAX) \
+ V(srar_w, SRAR_DF, int32_t, kMSALanesWord, UINT32_MAX) \
+ V(srar_d, SRAR_DF, int64_t, kMSALanesDword, UINT64_MAX) \
+ V(srlr_b, SRAR_DF, uint8_t, kMSALanesByte, UINT8_MAX) \
+ V(srlr_h, SRAR_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \
+ V(srlr_w, SRAR_DF, uint32_t, kMSALanesWord, UINT32_MAX) \
+ V(srlr_d, SRAR_DF, uint64_t, kMSALanesDword, UINT64_MAX)
+
+#define RUN_TEST(instr, verify, type, lanes, mask) \
+ run_msa_3r(&tc[i], [](MacroAssembler& assm) { __ instr(w2, w1, w0); }, \
+ [](uint64_t ws, uint64_t wt, uint64_t wd) { \
+ verify(type, lanes, mask); \
+ });
+
+ for (size_t i = 0; i < arraysize(tc); ++i) {
+ TEST_CASE(RUN_TEST)
+ }
+
+#undef RUN_TEST
+#undef TEST_CASE
+#undef SLL_DF
+#undef SRA_DF
+#undef SRL_DF
+#undef BCLR_DF
+#undef BSET_DF
+#undef BNEG_DF
+#undef BINSL_DF
+#undef BINSR_DF
+#undef ADDV_DF
+#undef SUBV_DF
+#undef MAX_DF
+#undef MIN_DF
+#undef MAXA_DF
+#undef MINA_DF
+#undef CEQ_DF
+#undef CLT_DF
+#undef CLE_DF
+#undef ADD_A_DF
+#undef ADDS_A_DF
+#undef ADDS_DF
+#undef AVE_DF
+#undef AVER_DF
+#undef SUBS_DF
+#undef SUBSUS_U_DF
+#undef SUBSUU_S_DF
+#undef ASUB_S_DF
+#undef ASUB_U_DF
+#undef MULV_DF
+#undef MADDV_DF
+#undef MSUBV_DF
+#undef DIV_DF
+#undef MOD_DF
+#undef SRAR_DF
+}
+
#undef __
+
+} // namespace internal
+} // namespace v8
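
The *_DF verification macros above all instantiate one per-lane pattern:
extract each lane of a 64-bit register half with a shift and mask, apply the
scalar operation in the lane type, and pack the result back. As a rough
standalone sketch of that pattern for addv (plain C++; EmulateAddvHalf and
its names are illustrative, not part of this patch):

    #include <cstdint>

    // Emulate addv.df on one 64-bit half of a 128-bit MSA register.
    // T is the lane type, e.g. uint8_t for addv_b (8 lanes per half).
    template <typename T>
    uint64_t EmulateAddvHalf(uint64_t ws, uint64_t wt) {
      constexpr int kLaneBits = static_cast<int>(sizeof(T)) * 8;
      constexpr uint64_t kMask =
          kLaneBits == 64 ? ~0ull : (1ull << kLaneBits) - 1;
      uint64_t res = 0;
      for (int i = 0; i < 64 / kLaneBits; ++i) {
        int shift = kLaneBits * i;
        T ws_op = static_cast<T>((ws >> shift) & kMask);  // extract lane
        T wt_op = static_cast<T>((wt >> shift) & kMask);
        // Wrap-around add in the lane type, then repack.
        res |= (static_cast<uint64_t>(ws_op + wt_op) & kMask) << shift;
      }
      return res;
    }

run_msa_3r() compares such a C++ result lane-for-lane against what the
generated instruction stored, skipping lanes flagged as Unpredictable.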
diff --git a/deps/v8/test/cctest/test-assembler-ppc.cc b/deps/v8/test/cctest/test-assembler-ppc.cc
index 06bbb8966a..9a11523605 100644
--- a/deps/v8/test/cctest/test-assembler-ppc.cc
+++ b/deps/v8/test/cctest/test-assembler-ppc.cc
@@ -33,8 +33,8 @@
#include "src/ppc/simulator-ppc.h"
#include "test/cctest/cctest.h"
-using namespace v8::internal;
-
+namespace v8 {
+namespace internal {
// Define these function prototypes to match JSEntryFunction in execution.cc.
typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
@@ -60,8 +60,8 @@ TEST(0) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
code->Print();
#endif
@@ -99,8 +99,8 @@ TEST(1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
code->Print();
#endif
@@ -150,8 +150,8 @@ TEST(2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
code->Print();
#endif
@@ -223,8 +223,8 @@ TEST(3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
code->Print();
#endif
@@ -338,7 +338,7 @@ TEST(4) {
assm.GetCode(isolate, &desc);
Object* code = isolate->heap()->CreateCode(
desc,
- Code::ComputeFlags(Code::STUB),
+ Code::STUB,
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
@@ -399,7 +399,7 @@ TEST(5) {
assm.GetCode(isolate, &desc);
Object* code = isolate->heap()->CreateCode(
desc,
- Code::ComputeFlags(Code::STUB),
+ Code::STUB,
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
@@ -435,7 +435,7 @@ TEST(6) {
assm.GetCode(isolate, &desc);
Object* code = isolate->heap()->CreateCode(
desc,
- Code::ComputeFlags(Code::STUB),
+ Code::STUB,
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
@@ -511,7 +511,7 @@ static void TestRoundingMode(VCVTTypes types,
assm.GetCode(isolate, &desc);
Object* code = isolate->heap()->CreateCode(
desc,
- Code::ComputeFlags(Code::STUB),
+ Code::STUB,
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
@@ -699,7 +699,7 @@ TEST(8) {
assm.GetCode(isolate, &desc);
Object* code = isolate->heap()->CreateCode(
desc,
- Code::ComputeFlags(Code::STUB),
+ Code::STUB,
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
@@ -815,7 +815,7 @@ TEST(9) {
assm.GetCode(isolate, &desc);
Object* code = isolate->heap()->CreateCode(
desc,
- Code::ComputeFlags(Code::STUB),
+ Code::STUB,
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
@@ -927,7 +927,7 @@ TEST(10) {
assm.GetCode(isolate, &desc);
Object* code = isolate->heap()->CreateCode(
desc,
- Code::ComputeFlags(Code::STUB),
+ Code::STUB,
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
@@ -1025,7 +1025,7 @@ TEST(11) {
assm.GetCode(isolate, &desc);
Object* code = isolate->heap()->CreateCode(
desc,
- Code::ComputeFlags(Code::STUB),
+ Code::STUB,
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
@@ -1058,3 +1058,6 @@ TEST(12) {
#endif
#undef __
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-assembler-s390.cc b/deps/v8/test/cctest/test-assembler-s390.cc
index 5cc6856483..da2727402e 100644
--- a/deps/v8/test/cctest/test-assembler-s390.cc
+++ b/deps/v8/test/cctest/test-assembler-s390.cc
@@ -34,7 +34,8 @@
#include "src/s390/simulator-s390.h"
#include "test/cctest/cctest.h"
-using namespace v8::internal;
+namespace v8 {
+namespace internal {
// Define these function prototypes to match JSEntryFunction in execution.cc.
typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
@@ -60,8 +61,8 @@ TEST(0) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
code->Print();
#endif
@@ -100,8 +101,8 @@ TEST(1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
code->Print();
#endif
@@ -152,8 +153,8 @@ TEST(2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
code->Print();
#endif
@@ -208,8 +209,8 @@ TEST(3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
code->Print();
#endif
@@ -250,7 +251,7 @@ TEST(4) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
code->Print();
#endif
@@ -278,7 +279,7 @@ TEST(5) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
code->Print();
#endif
@@ -312,7 +313,7 @@ TEST(6) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
code->Print();
#endif
@@ -344,7 +345,7 @@ TEST(7) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
code->Print();
#endif
@@ -375,7 +376,7 @@ TEST(8) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
code->Print();
#endif
@@ -402,7 +403,7 @@ TEST(9) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
code->Print();
#endif
@@ -486,8 +487,8 @@ TEST(10) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
code->Print();
#endif
@@ -499,3 +500,6 @@ TEST(10) {
}
#undef __
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-assembler-x64.cc b/deps/v8/test/cctest/test-assembler-x64.cc
index f57a1c113c..8e7af93b2e 100644
--- a/deps/v8/test/cctest/test-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-assembler-x64.cc
@@ -186,7 +186,7 @@ TEST(Regression684407) {
Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
Address before = assm.pc();
__ cmpl(Operand(arg1, 0),
- Immediate(0, RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
+ Immediate(0, RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE));
Address after = assm.pc();
size_t instruction_size = static_cast<size_t>(after - before);
// Check that the immediate is not encoded as uint8.
@@ -789,8 +789,8 @@ TEST(AssemblerMultiByteNop) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F0 f = FUNCTION_CAST<F0>(code->entry());
int res = f();
@@ -845,8 +845,8 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F0 f = FUNCTION_CAST<F0>(code->entry());
int res = f();
@@ -908,8 +908,8 @@ TEST(AssemblerX64Extractps) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -946,10 +946,8 @@ TEST(AssemblerX64SSE) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -1174,8 +1172,8 @@ TEST(AssemblerX64FMA_sd) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -1400,8 +1398,8 @@ TEST(AssemblerX64FMA_ss) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -1475,8 +1473,8 @@ TEST(AssemblerX64SSE_ss) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -1560,8 +1558,8 @@ TEST(AssemblerX64AVX_ss) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -1799,8 +1797,8 @@ TEST(AssemblerX64AVX_sd) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -1991,8 +1989,8 @@ TEST(AssemblerX64BMI1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -2051,8 +2049,8 @@ TEST(AssemblerX64LZCNT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -2111,8 +2109,8 @@ TEST(AssemblerX64POPCNT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -2374,8 +2372,8 @@ TEST(AssemblerX64BMI2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
@@ -2419,8 +2417,8 @@ TEST(AssemblerX64JumpTables1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2468,8 +2466,8 @@ TEST(AssemblerX64JumpTables2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2527,8 +2525,8 @@ TEST(AssemblerX64vmovups) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
OFStream os(stdout);
code->Print(os);
diff --git a/deps/v8/test/cctest/test-atomicops.cc b/deps/v8/test/cctest/test-atomicops.cc
index 07445cb2d1..add819f771 100644
--- a/deps/v8/test/cctest/test-atomicops.cc
+++ b/deps/v8/test/cctest/test-atomicops.cc
@@ -30,9 +30,8 @@
#include "src/base/atomicops.h"
#include "test/cctest/cctest.h"
-using namespace v8::base;
-using namespace v8::internal;
-
+namespace v8 {
+namespace base {
#define CHECK_EQU(v1, v2) \
CHECK_EQ(static_cast<int64_t>(v1), static_cast<int64_t>(v2))
@@ -287,3 +286,6 @@ TEST(Load) {
TestLoad<Atomic32>();
TestLoad<AtomicWord>();
}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-bignum-dtoa.cc b/deps/v8/test/cctest/test-bignum-dtoa.cc
index df84dcbf9b..42562958e6 100644
--- a/deps/v8/test/cctest/test-bignum-dtoa.cc
+++ b/deps/v8/test/cctest/test-bignum-dtoa.cc
@@ -40,6 +40,7 @@
namespace v8 {
namespace internal {
+namespace test_bignum_dtoa {
// Removes trailing '0' digits.
// Can return the empty string if all digits are 0.
@@ -314,5 +315,6 @@ TEST(BignumDtoaGayPrecision) {
}
}
+} // namespace test_bignum_dtoa
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-bignum.cc b/deps/v8/test/cctest/test-bignum.cc
index 8cd74eeb30..d9721b06e1 100644
--- a/deps/v8/test/cctest/test-bignum.cc
+++ b/deps/v8/test/cctest/test-bignum.cc
@@ -35,6 +35,7 @@
namespace v8 {
namespace internal {
+namespace test_bignum {
static const int kBufferSize = 1024;
@@ -1541,5 +1542,6 @@ TEST(AssignPowerUInt16) {
buffer));
}
+} // namespace test_bignum
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-code-cache.cc b/deps/v8/test/cctest/test-code-cache.cc
deleted file mode 100644
index 51052bd0c6..0000000000
--- a/deps/v8/test/cctest/test-code-cache.cc
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/factory.h"
-#include "src/isolate.h"
-#include "src/objects.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/factory.h -> src/objects-inl.h
-#include "src/objects-inl.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/feedback-vector.h ->
-// src/feedback-vector-inl.h
-#include "src/feedback-vector-inl.h"
-#include "test/cctest/cctest.h"
-
-namespace v8 {
-namespace internal {
-
-namespace {
-
-static Handle<Code> GetDummyCode(Isolate* isolate) {
- CodeDesc desc = {nullptr, // buffer
- 0, // buffer_size
- 0, // instr_size
- 0, // reloc_size
- 0, // constant_pool_size
- nullptr, // unwinding_info
- 0, // unwinding_info_size
- nullptr}; // origin
- Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, kNoExtraICState);
- Handle<Code> self_ref;
- return isolate->factory()->NewCode(desc, flags, self_ref);
-}
-
-} // namespace
-
-TEST(CodeCache) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- Factory* factory = isolate->factory();
- HandleScope handle_scope(isolate);
-
- Handle<Map> map =
- factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize, PACKED_ELEMENTS);
-
- // This number should be large enough to cause the code cache to use its
- // hash table storage format.
- static const int kEntries = 150;
-
- // Prepare name/code pairs.
- std::vector<Handle<Name>> names;
- std::vector<Handle<Code>> codes;
- names.reserve(kEntries);
- codes.reserve(kEntries);
- for (int i = 0; i < kEntries; i++) {
- names.push_back(isolate->factory()->NewSymbol());
- codes.push_back(GetDummyCode(isolate));
- }
- Handle<Name> bad_name = isolate->factory()->NewSymbol();
- Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, kNoExtraICState);
-
- // Cache name/code pairs.
- for (int i = 0; i < kEntries; i++) {
- Handle<Name> name = names.at(i);
- Handle<Code> code = codes.at(i);
- Map::UpdateCodeCache(map, name, code);
- CHECK_EQ(*code, map->LookupInCodeCache(*name, code->flags()));
- }
- CHECK_NULL(map->LookupInCodeCache(*bad_name, flags));
-
- // Check that lookup works not only right after storing.
- for (int i = 0; i < kEntries; i++) {
- Handle<Name> name = names.at(i);
- Handle<Code> code = codes.at(i);
- CHECK_EQ(*code, map->LookupInCodeCache(*name, code->flags()));
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/cctest/test-code-layout.cc b/deps/v8/test/cctest/test-code-layout.cc
index e4ee251465..caeeaf3283 100644
--- a/deps/v8/test/cctest/test-code-layout.cc
+++ b/deps/v8/test/cctest/test-code-layout.cc
@@ -4,14 +4,7 @@
#include "src/factory.h"
#include "src/isolate.h"
-#include "src/objects.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/factory.h -> src/objects-inl.h
#include "src/objects-inl.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/feedback-vector.h ->
-// src/feedback-vector-inl.h
-#include "src/feedback-vector-inl.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -39,7 +32,7 @@ TEST(CodeLayoutWithoutUnwindingInfo) {
code_desc.unwinding_info_size = 0;
Handle<Code> code = CcTest::i_isolate()->factory()->NewCode(
- code_desc, 0, Handle<Object>::null());
+ code_desc, Code::STUB, Handle<Object>::null());
CHECK(!code->has_unwinding_info());
CHECK_EQ(code->instruction_size(), buffer_size);
@@ -76,7 +69,7 @@ TEST(CodeLayoutWithUnwindingInfo) {
code_desc.unwinding_info_size = unwinding_info_size;
Handle<Code> code = CcTest::i_isolate()->factory()->NewCode(
- code_desc, 0, Handle<Object>::null());
+ code_desc, Code::STUB, Handle<Object>::null());
CHECK(code->has_unwinding_info());
CHECK_EQ(code->instruction_size(), buffer_size);
diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc
index e34b232244..1c48225a14 100644
--- a/deps/v8/test/cctest/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/test-code-stub-assembler.cc
@@ -7,6 +7,8 @@
#include "src/api.h"
#include "src/base/utils/random-number-generator.h"
#include "src/builtins/builtins-promise-gen.h"
+#include "src/builtins/builtins-string-gen.h"
+#include "src/char-predicates.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
#include "src/compiler/node.h"
@@ -78,7 +80,8 @@ TEST(CallCFunction3WithCallerSavedRegisters) {
Node* const result = m.CallCFunction3WithCallerSavedRegisters(
type_intptr, type_intptr, type_intptr, type_intptr, fun_constant,
- m.IntPtrConstant(0), m.IntPtrConstant(1), m.IntPtrConstant(2));
+ m.IntPtrConstant(0), m.IntPtrConstant(1), m.IntPtrConstant(2),
+ kSaveFPRegs);
m.Return(m.SmiTag(result));
}
@@ -2263,7 +2266,6 @@ TEST(AllocateFunctionWithMapAndContext) {
CHECK_EQ(isolate->heap()->the_hole_value(), fun->prototype_or_initial_map());
CHECK_EQ(*isolate->promise_resolve_shared_fun(), fun->shared());
CHECK_EQ(isolate->promise_resolve_shared_fun()->code(), fun->code());
- CHECK_EQ(isolate->heap()->undefined_value(), fun->next_function_link());
}
TEST(CreatePromiseGetCapabilitiesExecutorContext) {
@@ -2276,14 +2278,14 @@ TEST(CreatePromiseGetCapabilitiesExecutorContext) {
Node* const context = m.Parameter(kNumParams + 2);
Node* const native_context = m.LoadNativeContext(context);
- Node* const map = m.LoadRoot(Heap::kJSPromiseCapabilityMapRootIndex);
- Node* const capability = m.AllocateJSObjectFromMap(map);
+ Node* const map = m.LoadRoot(Heap::kPromiseCapabilityMapRootIndex);
+ Node* const capability = m.AllocateStruct(map);
m.StoreObjectFieldNoWriteBarrier(
- capability, JSPromiseCapability::kPromiseOffset, m.UndefinedConstant());
+ capability, PromiseCapability::kPromiseOffset, m.UndefinedConstant());
m.StoreObjectFieldNoWriteBarrier(
- capability, JSPromiseCapability::kResolveOffset, m.UndefinedConstant());
- m.StoreObjectFieldNoWriteBarrier(
- capability, JSPromiseCapability::kRejectOffset, m.UndefinedConstant());
+ capability, PromiseCapability::kResolveOffset, m.UndefinedConstant());
+ m.StoreObjectFieldNoWriteBarrier(capability, PromiseCapability::kRejectOffset,
+ m.UndefinedConstant());
Node* const executor_context =
m.CreatePromiseGetCapabilitiesExecutorContext(capability, native_context);
m.Return(executor_context);
@@ -2299,7 +2301,7 @@ TEST(CreatePromiseGetCapabilitiesExecutorContext) {
CHECK_EQ(isolate->heap()->the_hole_value(), context_js->extension());
CHECK_EQ(*isolate->native_context(), context_js->native_context());
CHECK(context_js->get(PromiseBuiltinsAssembler::kCapabilitySlot)
- ->IsJSPromiseCapability());
+ ->IsPromiseCapability());
}
TEST(NewPromiseCapability) {
@@ -2323,9 +2325,9 @@ TEST(NewPromiseCapability) {
Handle<Object> result_obj =
ft.Call(isolate->factory()->undefined_value()).ToHandleChecked();
- CHECK(result_obj->IsJSPromiseCapability());
- Handle<JSPromiseCapability> result =
- Handle<JSPromiseCapability>::cast(result_obj);
+ CHECK(result_obj->IsPromiseCapability());
+ Handle<PromiseCapability> result =
+ Handle<PromiseCapability>::cast(result_obj);
CHECK(result->promise()->IsJSPromise());
CHECK(result->resolve()->IsJSFunction());
@@ -2376,9 +2378,9 @@ TEST(NewPromiseCapability) {
Handle<Object> result_obj =
ft.Call(isolate->factory()->undefined_value(), constructor_fn)
.ToHandleChecked();
- CHECK(result_obj->IsJSPromiseCapability());
- Handle<JSPromiseCapability> result =
- Handle<JSPromiseCapability>::cast(result_obj);
+ CHECK(result_obj->IsPromiseCapability());
+ Handle<PromiseCapability> result =
+ Handle<PromiseCapability>::cast(result_obj);
CHECK(result->promise()->IsJSObject());
Handle<JSObject> promise(JSObject::cast(result->promise()));
@@ -2606,6 +2608,246 @@ TEST(LoadJSArrayElementsMap) {
}
}
+TEST(AllocateStruct) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ const int kNumParams = 3;
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeStubAssembler m(asm_tester.state());
+
+ {
+ Node* map = m.Parameter(0);
+ Node* result = m.AllocateStruct(map);
+
+ m.Return(result);
+ }
+
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+
+ Handle<Map> maps[] = {
+ handle(isolate->heap()->promise_capability_map(), isolate),
+ handle(isolate->heap()->tuple2_map(), isolate),
+ };
+
+ {
+ for (size_t i = 0; i < 2; i++) {
+ Handle<Map> map = maps[i];
+ Handle<Struct> result =
+ Handle<Struct>::cast(ft.Call(map).ToHandleChecked());
+ CHECK_EQ(result->map(), *map);
+#ifdef VERIFY_HEAP
+ isolate->heap()->Verify();
+#endif
+ }
+ }
+}
+
+TEST(GotoIfNotWhiteSpaceOrLineTerminator) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ const int kNumParams = 1;
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ StringTrimAssembler m(asm_tester.state());
+
+ { // Returns true if whitespace, false otherwise.
+ Label if_not_whitespace(&m);
+
+ m.GotoIfNotWhiteSpaceOrLineTerminator(m.SmiToWord32(m.Parameter(0)),
+ &if_not_whitespace);
+ m.Return(m.TrueConstant());
+
+ m.BIND(&if_not_whitespace);
+ m.Return(m.FalseConstant());
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+
+ Handle<Object> true_value = ft.true_value();
+ Handle<Object> false_value = ft.false_value();
+
+ for (uc16 c = 0; c < 0xFFFF; c++) {
+ Handle<Object> expected_value =
+ WhiteSpaceOrLineTerminator::Is(c) ? true_value : false_value;
+ ft.CheckCall(expected_value, handle(Smi::FromInt(c), isolate));
+ }
+}
+
+TEST(BranchIfNumericRelationalComparison) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ Factory* f = isolate->factory();
+ const int kNumParams = 2;
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ {
+ CodeStubAssembler m(asm_tester.state());
+ Label return_true(&m), return_false(&m);
+ m.BranchIfNumericRelationalComparison(
+ CodeStubAssembler::kGreaterThanOrEqual, m.Parameter(0), m.Parameter(1),
+ &return_true, &return_false);
+ m.BIND(&return_true);
+ m.Return(m.BooleanConstant(true));
+ m.BIND(&return_false);
+ m.Return(m.BooleanConstant(false));
+ }
+
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+
+ ft.CheckTrue(f->NewNumber(0), f->NewNumber(0));
+ ft.CheckTrue(f->NewNumber(1), f->NewNumber(0));
+ ft.CheckTrue(f->NewNumber(1), f->NewNumber(1));
+ ft.CheckFalse(f->NewNumber(0), f->NewNumber(1));
+ ft.CheckFalse(f->NewNumber(-1), f->NewNumber(0));
+ ft.CheckTrue(f->NewNumber(-1), f->NewNumber(-1));
+
+ ft.CheckTrue(f->NewNumber(-1), f->NewNumber(-1.5));
+ ft.CheckFalse(f->NewNumber(-1.5), f->NewNumber(-1));
+ ft.CheckTrue(f->NewNumber(-1.5), f->NewNumber(-1.5));
+}
+
+TEST(IsNumberArrayIndex) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ const int kNumParams = 1;
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ {
+ CodeStubAssembler m(asm_tester.state());
+ m.Return(m.SmiFromWord32(m.IsNumberArrayIndex(m.Parameter(0))));
+ }
+
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+
+ double indices[] = {Smi::kMinValue,
+ -11,
+ -1,
+ 0,
+ 1,
+ 2,
+ Smi::kMaxValue,
+ -11.0,
+ -11.1,
+ -2.0,
+ -1.0,
+ -0.0,
+ 0.0,
+ 0.00001,
+ 0.1,
+ 1,
+ 2,
+ Smi::kMinValue - 1.0,
+ Smi::kMinValue + 1.0,
+ Smi::kMinValue + 1.2,
+ kMaxInt + 1.2,
+ kMaxInt - 10.0,
+ kMaxInt - 1.0,
+ kMaxInt,
+ kMaxInt + 1.0,
+ kMaxInt + 10.0};
+
+ for (size_t i = 0; i < arraysize(indices); i++) {
+ Handle<Object> index = isolate->factory()->NewNumber(indices[i]);
+ uint32_t array_index;
+ CHECK_EQ(index->ToArrayIndex(&array_index),
+ (ft.CallChecked<Smi>(index)->value() == 1));
+ }
+}
+
+TEST(NumberMinMax) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ const int kNumParams = 2;
+ CodeAssemblerTester asm_tester_min(isolate, kNumParams);
+ {
+ CodeStubAssembler m(asm_tester_min.state());
+ m.Return(m.NumberMin(m.Parameter(0), m.Parameter(1)));
+ }
+ FunctionTester ft_min(asm_tester_min.GenerateCode(), kNumParams);
+
+ CodeAssemblerTester asm_tester_max(isolate, kNumParams);
+ {
+ CodeStubAssembler m(asm_tester_max.state());
+ m.Return(m.NumberMax(m.Parameter(0), m.Parameter(1)));
+ }
+ FunctionTester ft_max(asm_tester_max.GenerateCode(), kNumParams);
+
+ // Test smi values.
+ Handle<Smi> smi_1(Smi::FromInt(1), isolate);
+ Handle<Smi> smi_2(Smi::FromInt(2), isolate);
+ Handle<Smi> smi_5(Smi::FromInt(5), isolate);
+ CHECK_EQ(ft_min.CallChecked<Smi>(smi_1, smi_2)->value(), 1);
+ CHECK_EQ(ft_min.CallChecked<Smi>(smi_2, smi_1)->value(), 1);
+ CHECK_EQ(ft_max.CallChecked<Smi>(smi_1, smi_2)->value(), 2);
+ CHECK_EQ(ft_max.CallChecked<Smi>(smi_2, smi_1)->value(), 2);
+
+ // Test double values.
+ Handle<Object> double_a = isolate->factory()->NewNumber(2.5);
+ Handle<Object> double_b = isolate->factory()->NewNumber(3.5);
+ Handle<Object> nan =
+ isolate->factory()->NewNumber(std::numeric_limits<double>::quiet_NaN());
+ Handle<Object> infinity = isolate->factory()->NewNumber(V8_INFINITY);
+
+ CHECK_EQ(ft_min.CallChecked<HeapNumber>(double_a, double_b)->value(), 2.5);
+ CHECK_EQ(ft_min.CallChecked<HeapNumber>(double_b, double_a)->value(), 2.5);
+ CHECK_EQ(ft_min.CallChecked<HeapNumber>(infinity, double_a)->value(), 2.5);
+ CHECK_EQ(ft_min.CallChecked<HeapNumber>(double_a, infinity)->value(), 2.5);
+ CHECK(std::isnan(ft_min.CallChecked<HeapNumber>(nan, double_a)->value()));
+ CHECK(std::isnan(ft_min.CallChecked<HeapNumber>(double_a, nan)->value()));
+
+ CHECK_EQ(ft_max.CallChecked<HeapNumber>(double_a, double_b)->value(), 3.5);
+ CHECK_EQ(ft_max.CallChecked<HeapNumber>(double_b, double_a)->value(), 3.5);
+ CHECK_EQ(ft_max.CallChecked<HeapNumber>(infinity, double_a)->value(),
+ V8_INFINITY);
+ CHECK_EQ(ft_max.CallChecked<HeapNumber>(double_a, infinity)->value(),
+ V8_INFINITY);
+ CHECK(std::isnan(ft_max.CallChecked<HeapNumber>(nan, double_a)->value()));
+ CHECK(std::isnan(ft_max.CallChecked<HeapNumber>(double_a, nan)->value()));
+
+ // Mixed smi/double values.
+ CHECK_EQ(ft_max.CallChecked<HeapNumber>(smi_1, double_b)->value(), 3.5);
+ CHECK_EQ(ft_max.CallChecked<HeapNumber>(double_b, smi_1)->value(), 3.5);
+ CHECK_EQ(ft_min.CallChecked<HeapNumber>(smi_5, double_b)->value(), 3.5);
+ CHECK_EQ(ft_min.CallChecked<HeapNumber>(double_b, smi_5)->value(), 3.5);
+}
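
The checks above pin down the Math.min/Math.max-style semantics of NumberMin and NumberMax: NaN is contagious, infinities order normally, and smi and heap-number inputs compare uniformly. A scalar model of the expected results (a sketch only; it ignores the -0.0 versus +0.0 ordering detail, which these cases do not exercise):

    #include <cmath>

    double ModelNumberMin(double a, double b) {
      if (std::isnan(a) || std::isnan(b)) return std::nan("");  // NaN wins
      return a < b ? a : b;
    }
    double ModelNumberMax(double a, double b) {
      if (std::isnan(a) || std::isnan(b)) return std::nan("");
      return a > b ? a : b;
    }
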
+
+TEST(NumberAddSub) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ const int kNumParams = 2;
+ CodeAssemblerTester asm_tester_add(isolate, kNumParams);
+ {
+ CodeStubAssembler m(asm_tester_add.state());
+ m.Return(m.NumberAdd(m.Parameter(0), m.Parameter(1)));
+ }
+ FunctionTester ft_add(asm_tester_add.GenerateCode(), kNumParams);
+
+ CodeAssemblerTester asm_tester_sub(isolate, kNumParams);
+ {
+ CodeStubAssembler m(asm_tester_sub.state());
+ m.Return(m.NumberSub(m.Parameter(0), m.Parameter(1)));
+ }
+ FunctionTester ft_sub(asm_tester_sub.GenerateCode(), kNumParams);
+
+ // Test smi values.
+ Handle<Smi> smi_1(Smi::FromInt(1), isolate);
+ Handle<Smi> smi_2(Smi::FromInt(2), isolate);
+ CHECK_EQ(ft_add.CallChecked<Smi>(smi_1, smi_2)->value(), 3);
+ CHECK_EQ(ft_sub.CallChecked<Smi>(smi_2, smi_1)->value(), 1);
+
+ // Test double values.
+ Handle<Object> double_a = isolate->factory()->NewNumber(2.5);
+ Handle<Object> double_b = isolate->factory()->NewNumber(3.0);
+ CHECK_EQ(ft_add.CallChecked<HeapNumber>(double_a, double_b)->value(), 5.5);
+ CHECK_EQ(ft_sub.CallChecked<HeapNumber>(double_a, double_b)->value(), -.5);
+
+ // Test overflow.
+ Handle<Smi> smi_max(Smi::FromInt(Smi::kMaxValue), isolate);
+ Handle<Smi> smi_min(Smi::FromInt(Smi::kMinValue), isolate);
+ CHECK_EQ(ft_add.CallChecked<HeapNumber>(smi_max, smi_1)->value(),
+ static_cast<double>(Smi::kMaxValue) + 1);
+ CHECK_EQ(ft_sub.CallChecked<HeapNumber>(smi_min, smi_1)->value(),
+ static_cast<double>(Smi::kMinValue) - 1);
+
+ // Test mixed smi/double values.
+ CHECK_EQ(ft_add.CallChecked<HeapNumber>(smi_1, double_a)->value(), 3.5);
+ CHECK_EQ(ft_add.CallChecked<HeapNumber>(double_a, smi_1)->value(), 3.5);
+ CHECK_EQ(ft_sub.CallChecked<HeapNumber>(smi_1, double_a)->value(), -1.5);
+ CHECK_EQ(ft_sub.CallChecked<HeapNumber>(double_a, smi_1)->value(), 1.5);
+}
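
The overflow cases are the interesting ones: smis are tagged small integers whose range is narrower than int32 (2^30 with 32-bit tagging; the exact width depends on pointer size and smi scheme), so Smi::kMaxValue + 1 cannot stay a smi and the result must be boxed as a HeapNumber -- hence CallChecked<HeapNumber> rather than CallChecked<Smi> for those lines. A sketch of the fits-in-smi decision, assuming 31-bit smi payloads:

    #include <cstdint>

    constexpr int64_t kSmiMinSketch = -(int64_t{1} << 30);  // assumed width
    constexpr int64_t kSmiMaxSketch = (int64_t{1} << 30) - 1;

    // If an arithmetic result leaves this range, allocate a HeapNumber.
    bool FitsInSmi(int64_t value) {
      return value >= kSmiMinSketch && value <= kSmiMaxSketch;
    }
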
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-code-stubs-arm.cc b/deps/v8/test/cctest/test-code-stubs-arm.cc
index 1a34e7d4ab..cb80382901 100644
--- a/deps/v8/test/cctest/test-code-stubs-arm.cc
+++ b/deps/v8/test/cctest/test-code-stubs-arm.cc
@@ -38,7 +38,8 @@
#include "test/cctest/cctest.h"
#include "test/cctest/test-code-stubs.h"
-using namespace v8::internal;
+namespace v8 {
+namespace internal {
#define __ masm.
@@ -71,7 +72,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Push the double argument.
__ sub(sp, sp, Operand(kDoubleSize));
__ vstr(d0, sp, 0);
- if (!source_reg.is(sp)) {
+ if (source_reg != sp) {
__ mov(source_reg, sp);
}
@@ -81,7 +82,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
for (; reg_num < Register::kNumRegisters; ++reg_num) {
if (RegisterConfiguration::Default()->IsAllocatableGeneralCode(reg_num)) {
Register reg = Register::from_code(reg_num);
- if (!reg.is(destination_reg)) {
+ if (reg != destination_reg) {
__ push(reg);
source_reg_offset += kPointerSize;
}
@@ -96,7 +97,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
if (inline_fastpath) {
__ vldr(d0, MemOperand(source_reg));
__ TryInlineTruncateDoubleToI(destination_reg, d0, &done);
- if (destination_reg.is(source_reg) && !source_reg.is(sp)) {
+ if (destination_reg == source_reg && source_reg != sp) {
// Restore clobbered source_reg.
__ add(source_reg, sp, Operand(source_reg_offset));
}
@@ -110,7 +111,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
for (--reg_num; reg_num >= 0; --reg_num) {
if (RegisterConfiguration::Default()->IsAllocatableGeneralCode(reg_num)) {
Register reg = Register::from_code(reg_num);
- if (!reg.is(destination_reg)) {
+ if (reg != destination_reg) {
__ ldr(ip, MemOperand(sp, 0));
__ cmp(reg, ip);
__ Assert(eq, kRegisterWasClobbered);
@@ -121,8 +122,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
__ add(sp, sp, Operand(kDoubleSize));
- if (!destination_reg.is(r0))
- __ mov(r0, destination_reg);
+ if (destination_reg != r0) __ mov(r0, destination_reg);
// Restore callee save registers.
__ Pop(lr);
@@ -188,3 +188,6 @@ TEST(ConvertDToI) {
}
}
}
+
+} // namespace internal
+} // namespace v8
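
The mechanical change running through this and the following files replaces the old Register::is() predicate with ==/!= comparisons, which read more naturally at call sites. Presumably the Register types grew comparison operators; a minimal analogous definition, for illustration only:

    // Not V8's Register -- an illustrative stand-in showing the operators
    // the rewritten call sites rely on.
    struct Reg {
      int code;
    };
    constexpr bool operator==(Reg a, Reg b) { return a.code == b.code; }
    constexpr bool operator!=(Reg a, Reg b) { return !(a == b); }
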
diff --git a/deps/v8/test/cctest/test-code-stubs-arm64.cc b/deps/v8/test/cctest/test-code-stubs-arm64.cc
index 2b668cc277..64435703c9 100644
--- a/deps/v8/test/cctest/test-code-stubs-arm64.cc
+++ b/deps/v8/test/cctest/test-code-stubs-arm64.cc
@@ -38,7 +38,8 @@
#include "test/cctest/cctest.h"
#include "test/cctest/test-code-stubs.h"
-using namespace v8::internal;
+namespace v8 {
+namespace internal {
#define __ masm.
@@ -193,3 +194,6 @@ TEST(ConvertDToI) {
}
}
}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-code-stubs-ia32.cc b/deps/v8/test/cctest/test-code-stubs-ia32.cc
index 66ddf46f2a..3ddf7323ad 100644
--- a/deps/v8/test/cctest/test-code-stubs-ia32.cc
+++ b/deps/v8/test/cctest/test-code-stubs-ia32.cc
@@ -35,10 +35,12 @@
#include "src/code-stubs.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
+#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-code-stubs.h"
-using namespace v8::internal;
+namespace v8 {
+namespace internal {
#define __ assm.
@@ -53,8 +55,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
HandleScope handles(isolate);
MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size),
v8::internal::CodeObjectRequired::kYes);
- int offset =
- source_reg.is(esp) ? 0 : (HeapNumber::kValueOffset - kSmiTagSize);
+ int offset = source_reg == esp ? 0 : (HeapNumber::kValueOffset - kSmiTagSize);
DoubleToIStub stub(isolate, source_reg, destination_reg, offset, true);
byte* start = stub.GetCode()->instruction_start();
@@ -64,7 +65,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
__ push(esi);
__ push(edi);
- if (!source_reg.is(esp)) {
+ if (source_reg != esp) {
__ lea(source_reg, MemOperand(esp, 6 * kPointerSize - offset));
}
@@ -74,7 +75,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
for (; reg_num < Register::kNumRegisters; ++reg_num) {
if (RegisterConfiguration::Default()->IsAllocatableGeneralCode(reg_num)) {
Register reg = Register::from_code(reg_num);
- if (!reg.is(esp) && !reg.is(ebp) && !reg.is(destination_reg)) {
+ if (reg != esp && reg != ebp && reg != destination_reg) {
__ push(reg);
param_offset += kPointerSize;
}
@@ -94,7 +95,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
for (--reg_num; reg_num >= 0; --reg_num) {
if (RegisterConfiguration::Default()->IsAllocatableGeneralCode(reg_num)) {
Register reg = Register::from_code(reg_num);
- if (!reg.is(esp) && !reg.is(ebp) && !reg.is(destination_reg)) {
+ if (reg != esp && reg != ebp && reg != destination_reg) {
__ cmp(reg, MemOperand(esp, 0));
__ Assert(equal, kRegisterWasClobbered);
__ add(esp, Immediate(kPointerSize));
@@ -151,3 +152,6 @@ TEST(ConvertDToI) {
}
}
}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-code-stubs-mips.cc b/deps/v8/test/cctest/test-code-stubs-mips.cc
index 78f70cf601..39aa88bea2 100644
--- a/deps/v8/test/cctest/test-code-stubs-mips.cc
+++ b/deps/v8/test/cctest/test-code-stubs-mips.cc
@@ -34,12 +34,14 @@
#include "src/factory.h"
#include "src/macro-assembler.h"
#include "src/mips/constants-mips.h"
+#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/simulator.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-code-stubs.h"
-using namespace v8::internal;
+namespace v8 {
+namespace internal {
#define __ masm.
@@ -84,7 +86,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
for (; reg_num < Register::kNumRegisters; ++reg_num) {
if (RegisterConfiguration::Default()->IsAllocatableGeneralCode(reg_num)) {
Register reg = Register::from_code(reg_num);
- if (!reg.is(destination_reg)) {
+ if (reg != destination_reg) {
__ push(reg);
source_reg_offset += kPointerSize;
}
@@ -99,7 +101,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
if (inline_fastpath) {
__ Ldc1(f12, MemOperand(source_reg));
__ TryInlineTruncateDoubleToI(destination_reg, f12, &done);
- if (destination_reg.is(source_reg) && !source_reg.is(sp)) {
+ if (destination_reg == source_reg && source_reg != sp) {
// Restore clobbered source_reg.
__ Addu(source_reg, sp, Operand(source_reg_offset));
}
@@ -113,7 +115,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
for (--reg_num; reg_num >= 2; --reg_num) {
if (RegisterConfiguration::Default()->IsAllocatableGeneralCode(reg_num)) {
Register reg = Register::from_code(reg_num);
- if (!reg.is(destination_reg)) {
+ if (reg != destination_reg) {
__ lw(at, MemOperand(sp, 0));
__ Assert(eq, kRegisterWasClobbered, reg, Operand(at));
__ Addu(sp, sp, Operand(kPointerSize));
@@ -201,3 +203,6 @@ TEST(ConvertDToI) {
}
}
}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-code-stubs-mips64.cc b/deps/v8/test/cctest/test-code-stubs-mips64.cc
index 6048678029..ee6388e316 100644
--- a/deps/v8/test/cctest/test-code-stubs-mips64.cc
+++ b/deps/v8/test/cctest/test-code-stubs-mips64.cc
@@ -34,12 +34,14 @@
#include "src/factory.h"
#include "src/macro-assembler.h"
#include "src/mips64/constants-mips64.h"
+#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/simulator.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-code-stubs.h"
-using namespace v8::internal;
+namespace v8 {
+namespace internal {
#define __ masm.
@@ -84,7 +86,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (; reg_num < config->num_allocatable_general_registers(); ++reg_num) {
Register reg = Register::from_code(reg_num);
- if (!reg.is(destination_reg)) {
+ if (reg != destination_reg) {
__ push(reg);
source_reg_offset += kPointerSize;
}
@@ -98,7 +100,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
if (inline_fastpath) {
__ Ldc1(f12, MemOperand(source_reg));
__ TryInlineTruncateDoubleToI(destination_reg, f12, &done);
- if (destination_reg.is(source_reg) && !source_reg.is(sp)) {
+ if (destination_reg == source_reg && source_reg != sp) {
// Restore clobbered source_reg.
__ Daddu(source_reg, sp, Operand(source_reg_offset));
}
@@ -111,7 +113,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Make sure no registers have been unexpectedly clobbered
for (--reg_num; reg_num >= 2; --reg_num) {
Register reg = Register::from_code(reg_num);
- if (!reg.is(destination_reg)) {
+ if (reg != destination_reg) {
__ Ld(at, MemOperand(sp, 0));
__ Assert(eq, kRegisterWasClobbered, reg, Operand(at));
__ Daddu(sp, sp, Operand(kPointerSize));
@@ -199,3 +201,6 @@ TEST(ConvertDToI) {
}
}
}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-code-stubs-x64.cc b/deps/v8/test/cctest/test-code-stubs-x64.cc
index a58cb6949a..bd29e7ab5c 100644
--- a/deps/v8/test/cctest/test-code-stubs-x64.cc
+++ b/deps/v8/test/cctest/test-code-stubs-x64.cc
@@ -38,8 +38,9 @@
#include "test/cctest/cctest.h"
#include "test/cctest/test-code-stubs.h"
-using namespace v8::internal;
-
+namespace v8 {
+namespace internal {
+namespace test_code_stubs_x64 {
#define __ assm.
@@ -54,8 +55,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
HandleScope handles(isolate);
MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size),
v8::internal::CodeObjectRequired::kYes);
- int offset =
- source_reg.is(rsp) ? 0 : (HeapNumber::kValueOffset - kSmiTagSize);
+ int offset = source_reg == rsp ? 0 : (HeapNumber::kValueOffset - kSmiTagSize);
DoubleToIStub stub(isolate, source_reg, destination_reg, offset, true);
byte* start = stub.GetCode()->instruction_start();
@@ -66,7 +66,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
__ pushq(rdi);
const RegisterConfiguration* config = RegisterConfiguration::Default();
- if (!source_reg.is(rsp)) {
+ if (source_reg != rsp) {
// The argument we pass to the stub is not a heap number, but instead
// stack-allocated and offset-wise made to look like a heap number for
// the stub. We create that "heap number" after pushing all allocatable
@@ -82,7 +82,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
for (; reg_num < config->num_allocatable_general_registers(); ++reg_num) {
Register reg =
Register::from_code(config->GetAllocatableGeneralCode(reg_num));
- if (!reg.is(rsp) && !reg.is(rbp) && !reg.is(destination_reg)) {
+ if (reg != rsp && reg != rbp && reg != destination_reg) {
__ pushq(reg);
}
}
@@ -100,7 +100,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
for (--reg_num; reg_num >= 0; --reg_num) {
Register reg =
Register::from_code(config->GetAllocatableGeneralCode(reg_num));
- if (!reg.is(rsp) && !reg.is(rbp) && !reg.is(destination_reg)) {
+ if (reg != rsp && reg != rbp && reg != destination_reg) {
__ cmpq(reg, MemOperand(rsp, 0));
__ Assert(equal, kRegisterWasClobbered);
__ addq(rsp, Immediate(kPointerSize));
@@ -156,3 +156,7 @@ TEST(ConvertDToI) {
}
}
}
+
+} // namespace test_code_stubs_x64
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-code-stubs.cc b/deps/v8/test/cctest/test-code-stubs.cc
index 0a8b24e4d3..27f411c56c 100644
--- a/deps/v8/test/cctest/test-code-stubs.cc
+++ b/deps/v8/test/cctest/test-code-stubs.cc
@@ -40,8 +40,8 @@
#include "test/cctest/cctest.h"
#include "test/cctest/test-code-stubs.h"
-using namespace v8::internal;
-
+namespace v8 {
+namespace internal {
int STDCALL ConvertDToICVersion(double d) {
#if defined(V8_TARGET_BIG_ENDIAN)
@@ -199,3 +199,6 @@ TEST(CodeStubMajorKeys) {
}
CODE_STUB_LIST(CHECK_STUB);
}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-code-stubs.h b/deps/v8/test/cctest/test-code-stubs.h
index 0cfa0ec7c8..54182d0c45 100644
--- a/deps/v8/test/cctest/test-code-stubs.h
+++ b/deps/v8/test/cctest/test-code-stubs.h
@@ -28,6 +28,9 @@
#ifndef V8_TEST_CODE_STUBS_H_
#define V8_TEST_CODE_STUBS_H_
+namespace v8 {
+namespace internal {
+
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
#if __GNUC__
#define STDCALL __attribute__((stdcall))
@@ -50,4 +53,7 @@ void RunAllTruncationTests(ConvertDToIFunc func);
void RunAllTruncationTests(ConvertDToICallWrapper callWrapper,
ConvertDToIFunc func);
+} // namespace internal
+} // namespace v8
+
#endif
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index 8667aaf458..8a50ff0f57 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -63,11 +63,13 @@ static Handle<JSFunction> Compile(const char* source) {
Isolate* isolate = CcTest::i_isolate();
Handle<String> source_code = isolate->factory()->NewStringFromUtf8(
CStrVector(source)).ToHandleChecked();
- Handle<SharedFunctionInfo> shared = Compiler::GetSharedFunctionInfoForScript(
- source_code, Handle<String>(), 0, 0, v8::ScriptOriginOptions(),
- Handle<Object>(), Handle<Context>(isolate->native_context()), NULL, NULL,
- v8::ScriptCompiler::kNoCompileOptions, NOT_NATIVES_CODE,
- Handle<FixedArray>());
+ Handle<SharedFunctionInfo> shared =
+ Compiler::GetSharedFunctionInfoForScript(
+ source_code, MaybeHandle<String>(), 0, 0, v8::ScriptOriginOptions(),
+ MaybeHandle<Object>(), Handle<Context>(isolate->native_context()),
+ NULL, NULL, v8::ScriptCompiler::kNoCompileOptions, NOT_NATIVES_CODE,
+ MaybeHandle<FixedArray>())
+ .ToHandleChecked();
return isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared, isolate->native_context());
}
@@ -632,63 +634,6 @@ TEST(CompileFunctionInContextHarmonyFunctionToString) {
#undef CHECK_NOT_CAUGHT
}
-#ifdef ENABLE_DISASSEMBLER
-static Handle<JSFunction> GetJSFunction(v8::Local<v8::Object> obj,
- const char* property_name) {
- v8::Local<v8::Function> fun = v8::Local<v8::Function>::Cast(
- obj->Get(CcTest::isolate()->GetCurrentContext(), v8_str(property_name))
- .ToLocalChecked());
- return Handle<JSFunction>::cast(v8::Utils::OpenHandle(*fun));
-}
-
-
-static void CheckCodeForUnsafeLiteral(Handle<JSFunction> f) {
- // Create a disassembler with default name lookup.
- disasm::NameConverter name_converter;
- disasm::Disassembler d(name_converter);
-
- if (f->code()->kind() == Code::FUNCTION) {
- Address pc = f->code()->instruction_start();
- int decode_size =
- Min(f->code()->instruction_size(),
- static_cast<int>(f->code()->back_edge_table_offset()));
- if (FLAG_enable_embedded_constant_pool) {
- decode_size = Min(decode_size, f->code()->constant_pool_offset());
- }
- Address end = pc + decode_size;
-
- v8::internal::EmbeddedVector<char, 128> decode_buffer;
- v8::internal::EmbeddedVector<char, 128> smi_hex_buffer;
- Smi* smi = Smi::FromInt(12345678);
- SNPrintF(smi_hex_buffer, "0x%" V8PRIxPTR, reinterpret_cast<intptr_t>(smi));
- while (pc < end) {
- int num_const = d.ConstantPoolSizeAt(pc);
- if (num_const >= 0) {
- pc += (num_const + 1) * kPointerSize;
- } else {
- pc += d.InstructionDecode(decode_buffer, pc);
- CHECK(strstr(decode_buffer.start(), smi_hex_buffer.start()) == NULL);
- }
- }
- }
-}
-
-
-TEST(SplitConstantsInFullCompiler) {
- LocalContext context;
- v8::HandleScope scope(CcTest::isolate());
-
- CompileRun("function f() { a = 12345678 }; f();");
- CheckCodeForUnsafeLiteral(GetJSFunction(context->Global(), "f"));
- CompileRun("function f(x) { a = 12345678 + x}; f(1);");
- CheckCodeForUnsafeLiteral(GetJSFunction(context->Global(), "f"));
- CompileRun("function f(x) { var arguments = 1; x += 12345678}; f(1);");
- CheckCodeForUnsafeLiteral(GetJSFunction(context->Global(), "f"));
- CompileRun("function f(x) { var arguments = 1; x = 12345678}; f(1);");
- CheckCodeForUnsafeLiteral(GetJSFunction(context->Global(), "f"));
-}
-#endif
-
TEST(InvocationCount) {
FLAG_allow_natives_syntax = true;
FLAG_always_opt = false;
diff --git a/deps/v8/test/cctest/test-conversions.cc b/deps/v8/test/cctest/test-conversions.cc
index db080d2215..dc6e9fcb9d 100644
--- a/deps/v8/test/cctest/test-conversions.cc
+++ b/deps/v8/test/cctest/test-conversions.cc
@@ -29,16 +29,9 @@
#include "src/base/platform/platform.h"
#include "src/conversions.h"
-#include "src/factory.h"
+#include "src/factory-inl.h"
#include "src/isolate.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/factory.h -> src/objects-inl.h
-#include "src/objects-inl.h"
#include "src/objects.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/feedback-vector.h ->
-// src/feedback-vector-inl.h
-#include "src/feedback-vector-inl.h"
#include "src/unicode-cache.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 689305f30e..0a297d9f0c 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -44,17 +44,9 @@
#include "include/libplatform/v8-tracing.h"
#include "src/tracing/trace-event.h"
-using i::CodeEntry;
-using i::CpuProfile;
-using i::CpuProfiler;
-using i::CpuProfilesCollection;
-using i::Heap;
-using i::ProfileGenerator;
-using i::ProfileNode;
-using i::ProfilerEventsProcessor;
-using i::ProfilerListener;
-using i::ScopedVector;
-using i::Vector;
+namespace v8 {
+namespace internal {
+namespace test_cpu_profiler {
// Helper methods
static v8::Local<v8::Function> GetFunction(v8::Local<v8::Context> env,
@@ -786,11 +778,12 @@ class TestApiCallbacks {
private:
void Wait() {
if (is_warming_up_) return;
- double start = v8::base::OS::TimeCurrentMillis();
+ v8::Platform* platform = v8::internal::V8::GetCurrentPlatform();
+ double start = platform->CurrentClockTimeMillis();
double duration = 0;
while (duration < min_duration_ms_) {
v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(1));
- duration = v8::base::OS::TimeCurrentMillis() - start;
+ duration = platform->CurrentClockTimeMillis() - start;
}
}
@@ -2191,3 +2184,59 @@ TEST(TracingCpuProfiler) {
i::V8::SetPlatformForTesting(old_platform);
}
+
+TEST(Issue763073) {
+ class AllowNativesSyntax {
+ public:
+ AllowNativesSyntax()
+ : allow_natives_syntax_(i::FLAG_allow_natives_syntax),
+ trace_deopt_(i::FLAG_trace_deopt) {
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_trace_deopt = true;
+ }
+
+ ~AllowNativesSyntax() {
+ i::FLAG_allow_natives_syntax = allow_natives_syntax_;
+ i::FLAG_trace_deopt = trace_deopt_;
+ }
+
+ private:
+ bool allow_natives_syntax_;
+ bool trace_deopt_;
+ };
+
+ AllowNativesSyntax allow_natives_syntax_scope;
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+
+ CompileRun(
+ "function f() { return function g(x) { }; }"
+ // Create first closure, optimize it, and deoptimize it.
+ "var g = f();"
+ "g(1);"
+ "%OptimizeFunctionOnNextCall(g);"
+ "g(1);"
+ "%DeoptimizeFunction(g);"
+ // Create second closure and optimize it. This will create another
+ // optimized code object and put it in the (shared) type feedback vector.
+ "var h = f();"
+ "h(1);"
+ "%OptimizeFunctionOnNextCall(h);"
+ "h(1);");
+
+ // Start profiling.
+ v8::CpuProfiler* cpu_profiler = v8::CpuProfiler::New(env->GetIsolate());
+ v8::Local<v8::String> profile_name = v8_str("test");
+
+ // Here we test that the heap iteration upon profiling start is not
+ // confused by having a deoptimized code object for a closure while
+ // having a different optimized code object in the type feedback vector.
+ cpu_profiler->StartProfiling(profile_name);
+ v8::CpuProfile* p = cpu_profiler->StopProfiling(profile_name);
+ p->Delete();
+ cpu_profiler->Dispose();
+}
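
The AllowNativesSyntax helper above is a small RAII guard: it flips two flags for the duration of the test, and the destructor restores the saved values on every exit path. The pattern generalizes to any flag; a sketch (this template is not a V8 utility):

    // Generic RAII flag saver, sketched from the pattern above.
    template <typename T>
    class FlagScopeSketch {
     public:
      FlagScopeSketch(T* flag, T value) : flag_(flag), old_(*flag) {
        *flag_ = value;
      }
      ~FlagScopeSketch() { *flag_ = old_; }  // restore on scope exit

     private:
      T* flag_;
      T old_;
    };
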
+
+} // namespace test_cpu_profiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 8c3818e8e9..794bc9c841 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -6518,22 +6518,115 @@ TEST(DebugCoverage) {
CHECK_EQ(2, function_data.Count());
}
+namespace {
+v8::debug::Coverage::ScriptData GetScriptDataAndDeleteCoverage(
+ v8::Isolate* isolate) {
+ v8::debug::Coverage coverage = v8::debug::Coverage::CollectPrecise(isolate);
+ CHECK_EQ(1u, coverage.ScriptCount());
+ return coverage.GetScriptData(0);
+}
+} // namespace
+
+TEST(DebugCoverageWithCoverageOutOfScope) {
+ i::FLAG_always_opt = false;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::debug::Coverage::SelectMode(isolate, v8::debug::Coverage::kPreciseCount);
+ v8::Local<v8::String> source = v8_str(
+ "function f() {\n"
+ "}\n"
+ "f();\n"
+ "f();");
+ CompileRun(source);
+ v8::debug::Coverage::ScriptData script_data =
+ GetScriptDataAndDeleteCoverage(isolate);
+ v8::Local<v8::debug::Script> script = script_data.GetScript();
+ CHECK(script->Source()
+ .ToLocalChecked()
+ ->Equals(env.local(), source)
+ .FromMaybe(false));
+
+ CHECK_EQ(2u, script_data.FunctionCount());
+ v8::debug::Coverage::FunctionData function_data =
+ script_data.GetFunctionData(0);
+
+ CHECK_EQ(0, function_data.StartOffset());
+ CHECK_EQ(26, function_data.EndOffset());
+
+ v8::debug::Location start =
+ script->GetSourceLocation(function_data.StartOffset());
+ v8::debug::Location end =
+ script->GetSourceLocation(function_data.EndOffset());
+ CHECK_EQ(0, start.GetLineNumber());
+ CHECK_EQ(0, start.GetColumnNumber());
+ CHECK_EQ(3, end.GetLineNumber());
+ CHECK_EQ(4, end.GetColumnNumber());
+ CHECK_EQ(1, function_data.Count());
+
+ function_data = script_data.GetFunctionData(1);
+ start = script->GetSourceLocation(function_data.StartOffset());
+ end = script->GetSourceLocation(function_data.EndOffset());
+
+ CHECK_EQ(0, function_data.StartOffset());
+ CHECK_EQ(16, function_data.EndOffset());
+
+ CHECK_EQ(0, start.GetLineNumber());
+ CHECK_EQ(0, start.GetColumnNumber());
+ CHECK_EQ(1, end.GetLineNumber());
+ CHECK_EQ(1, end.GetColumnNumber());
+ CHECK_EQ(2, function_data.Count());
+}
+
+namespace {
+v8::debug::Coverage::FunctionData GetFunctionDataAndDeleteCoverage(
+ v8::Isolate* isolate) {
+ v8::debug::Coverage coverage = v8::debug::Coverage::CollectPrecise(isolate);
+ CHECK_EQ(1u, coverage.ScriptCount());
+
+ v8::debug::Coverage::ScriptData script_data = coverage.GetScriptData(0);
+
+ CHECK_EQ(2u, script_data.FunctionCount());
+ v8::debug::Coverage::FunctionData function_data =
+ script_data.GetFunctionData(0);
+ CHECK_EQ(1, function_data.Count());
+ CHECK_EQ(0, function_data.StartOffset());
+ CHECK_EQ(26, function_data.EndOffset());
+ return function_data;
+}
+} // namespace
+
+TEST(DebugCoverageWithScriptDataOutOfScope) {
+ i::FLAG_always_opt = false;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::debug::Coverage::SelectMode(isolate, v8::debug::Coverage::kPreciseCount);
+ v8::Local<v8::String> source = v8_str(
+ "function f() {\n"
+ "}\n"
+ "f();\n"
+ "f();");
+ CompileRun(source);
+
+ v8::debug::Coverage::FunctionData function_data =
+ GetFunctionDataAndDeleteCoverage(isolate);
+ CHECK_EQ(1, function_data.Count());
+ CHECK_EQ(0, function_data.StartOffset());
+ CHECK_EQ(26, function_data.EndOffset());
+}
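
Both new tests pin down a lifetime guarantee: ScriptData and FunctionData must stay valid after the owning v8::debug::Coverage object is destroyed (the helpers deliberately let the Coverage instance die before returning). Condensed from the tests, the collection shape is (assuming an isolate with precise-count coverage already selected, as above):

    v8::debug::Coverage coverage = v8::debug::Coverage::CollectPrecise(isolate);
    v8::debug::Coverage::ScriptData script_data = coverage.GetScriptData(0);
    v8::debug::Coverage::FunctionData function_data =
        script_data.GetFunctionData(0);
    // script_data and function_data remain usable once |coverage| is gone.
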
+
TEST(BuiltinsExceptionPrediction) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
v8::Context::New(isolate);
- // TODO(gsathya): Fix catch prediction for the following.
- std::set<int> whitelist(
- {i::Builtins::kPromiseThenFinally, i::Builtins::kPromiseCatchFinally});
-
i::Builtins* builtins = CcTest::i_isolate()->builtins();
bool fail = false;
for (int i = 0; i < i::Builtins::builtin_count; i++) {
- Code* builtin = builtins->builtin(static_cast<i::Builtins::Name>(i));
+ Code* builtin = builtins->builtin(i);
if (builtin->kind() != Code::BUILTIN) continue;
- if (whitelist.find(i) != whitelist.end()) continue;
auto prediction = builtin->GetBuiltinCatchPrediction();
USE(prediction);
@@ -6574,19 +6667,19 @@ TEST(DebugEvaluateNoSideEffect) {
LocalContext env;
i::Isolate* isolate = CcTest::i_isolate();
i::HandleScope scope(isolate);
- i::List<i::Handle<i::JSFunction>> list;
+ std::vector<i::Handle<i::JSFunction>> all_functions;
{
i::HeapIterator iterator(isolate->heap());
while (i::HeapObject* obj = iterator.next()) {
if (!obj->IsJSFunction()) continue;
i::JSFunction* fun = i::JSFunction::cast(obj);
- list.Add(i::Handle<i::JSFunction>(fun));
+ all_functions.emplace_back(fun);
}
}
// Perform side effect check on all built-in functions. The side effect check
// itself contains additional sanity checks.
- for (i::Handle<i::JSFunction> fun : list) {
+ for (i::Handle<i::JSFunction> fun : all_functions) {
bool failed = false;
{
i::NoSideEffectScope scope(isolate, true);
diff --git a/deps/v8/test/cctest/test-disasm-arm.cc b/deps/v8/test/cctest/test-disasm-arm.cc
index d73e1ac139..c8c9daa0e2 100644
--- a/deps/v8/test/cctest/test-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-disasm-arm.cc
@@ -40,7 +40,8 @@
#include "src/v8.h"
#include "test/cctest/cctest.h"
-using namespace v8::internal;
+namespace v8 {
+namespace internal {
template <typename... S>
bool DisassembleAndCompare(byte* begin, S... expected_strings) {
@@ -1577,3 +1578,6 @@ TEST(LoadStoreExclusive) {
VERIFY_RUN();
}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-disasm-arm64.cc b/deps/v8/test/cctest/test-disasm-arm64.cc
index 313c18d338..efebac208c 100644
--- a/deps/v8/test/cctest/test-disasm-arm64.cc
+++ b/deps/v8/test/cctest/test-disasm-arm64.cc
@@ -42,7 +42,8 @@
#include "src/arm64/macro-assembler-arm64.h"
#include "src/arm64/utils-arm64.h"
-using namespace v8::internal;
+namespace v8 {
+namespace internal {
#define TEST_(name) TEST(DISASM_##name)
@@ -4958,3 +4959,6 @@ TEST(neon_shift_immediate) {
CLEANUP();
}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index eae020023d..84940c51b7 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -37,8 +37,8 @@
#include "src/macro-assembler.h"
#include "test/cctest/cctest.h"
-using namespace v8::internal;
-
+namespace v8 {
+namespace internal {
#define __ assm.
@@ -846,8 +846,8 @@ TEST(DisasmIa320) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
USE(code);
#ifdef OBJECT_PRINT
OFStream os(stdout);
@@ -859,3 +859,6 @@ TEST(DisasmIa320) {
}
#undef __
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-disasm-mips.cc b/deps/v8/test/cctest/test-disasm-mips.cc
index f525d44985..04f007fcb3 100644
--- a/deps/v8/test/cctest/test-disasm-mips.cc
+++ b/deps/v8/test/cctest/test-disasm-mips.cc
@@ -37,7 +37,8 @@
#include "src/macro-assembler.h"
#include "test/cctest/cctest.h"
-using namespace v8::internal;
+namespace v8 {
+namespace internal {
bool prev_instr_compact_branch = false;
@@ -1814,3 +1815,6 @@ TEST(MSA_BIT) {
}
VERIFY_RUN();
}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-disasm-mips64.cc b/deps/v8/test/cctest/test-disasm-mips64.cc
index 9ccf630c35..0405a82e5d 100644
--- a/deps/v8/test/cctest/test-disasm-mips64.cc
+++ b/deps/v8/test/cctest/test-disasm-mips64.cc
@@ -37,7 +37,8 @@
#include "src/macro-assembler.h"
#include "test/cctest/cctest.h"
-using namespace v8::internal;
+namespace v8 {
+namespace internal {
bool prev_instr_compact_branch = false;
@@ -2017,3 +2018,6 @@ TEST(MSA_BIT) {
}
VERIFY_RUN();
}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-disasm-ppc.cc b/deps/v8/test/cctest/test-disasm-ppc.cc
index 8abba45643..9650fee7a6 100644
--- a/deps/v8/test/cctest/test-disasm-ppc.cc
+++ b/deps/v8/test/cctest/test-disasm-ppc.cc
@@ -37,8 +37,8 @@
#include "src/macro-assembler.h"
#include "test/cctest/cctest.h"
-using namespace v8::internal;
-
+namespace v8 {
+namespace internal {
bool DisassembleAndCompare(byte* pc, const char* compare_string) {
disasm::NameConverter converter;
@@ -153,3 +153,6 @@ TEST(DisasmPPC) {
VERIFY_RUN();
}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-disasm-s390.cc b/deps/v8/test/cctest/test-disasm-s390.cc
index 5ff343cf13..e4f8c55307 100644
--- a/deps/v8/test/cctest/test-disasm-s390.cc
+++ b/deps/v8/test/cctest/test-disasm-s390.cc
@@ -37,7 +37,8 @@
#include "src/macro-assembler.h"
#include "test/cctest/cctest.h"
-using namespace v8::internal;
+namespace v8 {
+namespace internal {
bool DisassembleAndCompare(byte* pc, const char* compare_string) {
disasm::NameConverter converter;
@@ -296,3 +297,6 @@ TEST(SixBytes) {
VERIFY_RUN();
}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index 1a4e577ad2..0c7ebbc1a7 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -946,8 +946,8 @@ TEST(DisasmX64) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
USE(code);
#ifdef OBJECT_PRINT
OFStream os(stdout);
diff --git a/deps/v8/test/cctest/test-dtoa.cc b/deps/v8/test/cctest/test-dtoa.cc
index ff917b0815..8f156d9f2a 100644
--- a/deps/v8/test/cctest/test-dtoa.cc
+++ b/deps/v8/test/cctest/test-dtoa.cc
@@ -40,6 +40,7 @@
namespace v8 {
namespace internal {
+namespace test_dtoa {
// Removes trailing '0' digits.
static void TrimRepresentation(Vector<char> representation) {
@@ -329,5 +330,6 @@ TEST(DtoaGayPrecision) {
}
}
+} // namespace test_dtoa
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-elements-kind.cc b/deps/v8/test/cctest/test-elements-kind.cc
index e23d889072..b1d86e6367 100644
--- a/deps/v8/test/cctest/test-elements-kind.cc
+++ b/deps/v8/test/cctest/test-elements-kind.cc
@@ -18,6 +18,7 @@
namespace v8 {
namespace internal {
+namespace test_elements_kind {
//
// Helper functions.
@@ -479,5 +480,6 @@ TEST(JSArrayAddingElementsGeneralizingiFastDoubleElements) {
CHECK_EQ(array->map(), *previous_map);
}
+} // namespace test_elements_kind
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-fast-dtoa.cc b/deps/v8/test/cctest/test-fast-dtoa.cc
index ad9b7aaff5..c063d6348b 100644
--- a/deps/v8/test/cctest/test-fast-dtoa.cc
+++ b/deps/v8/test/cctest/test-fast-dtoa.cc
@@ -39,6 +39,7 @@
namespace v8 {
namespace internal {
+namespace test_fast_dtoa {
static const int kBufferSize = 100;
@@ -293,5 +294,6 @@ TEST(FastDtoaGayPrecision) {
CHECK_GT(succeeded_15*1.0/total_15, 0.9999);
}
+} // namespace test_fast_dtoa
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-feedback-vector.cc b/deps/v8/test/cctest/test-feedback-vector.cc
index 47b0d81649..119ca4c150 100644
--- a/deps/v8/test/cctest/test-feedback-vector.cc
+++ b/deps/v8/test/cctest/test-feedback-vector.cc
@@ -43,7 +43,7 @@ TEST(VectorStructure) {
{
FeedbackVectorSpec one_slot(&zone);
- one_slot.AddGeneralSlot();
+ one_slot.AddForInSlot();
vector = NewFeedbackVector(isolate, &one_slot);
FeedbackVectorHelper helper(vector);
CHECK_EQ(1, helper.slot_count());
@@ -60,7 +60,7 @@ TEST(VectorStructure) {
{
FeedbackVectorSpec spec(&zone);
for (int i = 0; i < 3; i++) {
- spec.AddGeneralSlot();
+ spec.AddForInSlot();
}
for (int i = 0; i < 5; i++) {
spec.AddCallICSlot();
@@ -87,9 +87,9 @@ TEST(VectorStructure) {
{
FeedbackVectorSpec spec(&zone);
- spec.AddGeneralSlot();
+ spec.AddForInSlot();
spec.AddCreateClosureSlot();
- spec.AddGeneralSlot();
+ spec.AddForInSlot();
vector = NewFeedbackVector(isolate, &spec);
FeedbackVectorHelper helper(vector);
CHECK_EQ(1,
@@ -113,7 +113,7 @@ TEST(VectorICMetadata) {
for (int i = 0; i < 40; i++) {
switch (i % 4) {
case 0:
- spec.AddGeneralSlot();
+ spec.AddForInSlot();
break;
case 1:
spec.AddCallICSlot();
@@ -140,7 +140,7 @@ TEST(VectorICMetadata) {
FeedbackSlotKind kind = vector->GetKind(helper.slot(i));
switch (i % 4) {
case 0:
- CHECK_EQ(FeedbackSlotKind::kGeneral, kind);
+ CHECK_EQ(FeedbackSlotKind::kForIn, kind);
break;
case 1:
CHECK_EQ(FeedbackSlotKind::kCall, kind);
@@ -156,43 +156,6 @@ TEST(VectorICMetadata) {
}
-TEST(VectorSlotClearing) {
- LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- Isolate* isolate = CcTest::i_isolate();
- Factory* factory = isolate->factory();
- Zone zone(isolate->allocator(), ZONE_NAME);
-
- CompileRun("function f() {};");
- Handle<JSFunction> f = GetFunction("f");
-
- // We only test clearing of a FeedbackSlotKind::kGeneral slots because all
- // the other slot kinds require a host function for clearing.
- FeedbackVectorSpec spec(&zone);
- for (int i = 0; i < 5; i++) {
- spec.AddGeneralSlot();
- }
- Handle<FeedbackVector> vector = NewFeedbackVector(isolate, &spec);
- FeedbackVectorHelper helper(vector);
-
- // Fill with information
- vector->Set(helper.slot(0), Smi::FromInt(1));
- Handle<WeakCell> cell = factory->NewWeakCell(factory->fixed_array_map());
- vector->Set(helper.slot(1), *cell);
- Handle<AllocationSite> site = factory->NewAllocationSite();
- vector->Set(helper.slot(2), *site);
-
- vector->ClearSlots(*f);
-
- // The feedback vector slots are cleared. AllocationSites are still granted
- // an exemption from clearing, as are smis.
- CHECK_EQ(Smi::FromInt(1), vector->Get(helper.slot(0)));
- CHECK_EQ(*FeedbackVector::UninitializedSentinel(isolate),
- vector->Get(helper.slot(1)));
- CHECK(vector->Get(helper.slot(2))->IsAllocationSite());
-}
-
-
TEST(VectorCallICStates) {
if (i::FLAG_always_opt) return;
CcTest::InitializeVM();
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index 3310c5c1d1..89845e9bf5 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -24,6 +24,7 @@
namespace v8 {
namespace internal {
+namespace test_field_type_tracking {
// TODO(ishell): fix this once TransitionToPrototype stops generalizing
// all field representations (similar to crbug/448711 where elements kind
@@ -109,6 +110,20 @@ class Expectations {
CHECK(index < MAX_PROPERTIES);
kinds_[index] = kind;
locations_[index] = location;
+ if (kind == kData && location == kField &&
+ IsTransitionableFastElementsKind(elements_kind_) &&
+ Map::IsInplaceGeneralizableField(constness, representation,
+ FieldType::cast(*value))) {
+ // Maps with transitionable elements kinds must not have fields that
+ // can be generalized in-place.
+ if (FLAG_track_constant_fields && FLAG_modify_map_inplace &&
+ constness == kConst) {
+ constness = kMutable;
+ }
+ if (representation.IsHeapObject() && !FieldType::cast(*value)->IsAny()) {
+ value = FieldType::Any(isolate_);
+ }
+ }
constnesses_[index] = constness;
attributes_[index] = attributes;
representations_[index] = representation;
@@ -317,27 +332,14 @@ class Expectations {
Handle<Map> AddDataField(Handle<Map> map, PropertyAttributes attributes,
PropertyConstness constness,
Representation representation,
- Handle<FieldType> heap_type) {
+ Handle<FieldType> field_type) {
CHECK_EQ(number_of_properties_, map->NumberOfOwnDescriptors());
int property_index = number_of_properties_++;
- PropertyConstness expected_constness = constness;
- Representation expected_representation = representation;
- Handle<FieldType> expected_heap_type = heap_type;
- if (IsTransitionableFastElementsKind(map->elements_kind())) {
- // Maps with transitionable elements kinds must have non in-place
- // generalizable fields.
- if (FLAG_track_constant_fields && FLAG_modify_map_inplace) {
- expected_constness = kMutable;
- }
- if (representation.IsHeapObject() && heap_type->IsClass()) {
- expected_heap_type = FieldType::Any(isolate_);
- }
- }
- SetDataField(property_index, attributes, expected_constness,
- expected_representation, expected_heap_type);
+ SetDataField(property_index, attributes, constness, representation,
+ field_type);
Handle<String> name = MakeName("prop", property_index);
- return Map::CopyWithField(map, name, heap_type, attributes, constness,
+ return Map::CopyWithField(map, name, field_type, attributes, constness,
representation, INSERT_TRANSITION)
.ToHandleChecked();
}
@@ -1715,6 +1717,7 @@ static void TestReconfigureElementsKind_GeneralizeField(
// Create a map, add required properties to it and initialize expectations.
Handle<Map> initial_map = Map::Create(isolate, 0);
+ initial_map->set_instance_type(JS_ARRAY_TYPE);
initial_map->set_elements_kind(PACKED_SMI_ELEMENTS);
Handle<Map> map = initial_map;
@@ -1808,6 +1811,7 @@ static void TestReconfigureElementsKind_GeneralizeFieldTrivial(
// Create a map, add required properties to it and initialize expectations.
Handle<Map> initial_map = Map::Create(isolate, 0);
+ initial_map->set_instance_type(JS_ARRAY_TYPE);
initial_map->set_elements_kind(PACKED_SMI_ELEMENTS);
Handle<Map> map = initial_map;
@@ -1990,7 +1994,6 @@ TEST(ReconfigureElementsKind_GeneralizeHeapObjFieldToHeapObj) {
{kConst, Representation::HeapObject(), current_type},
{kConst, Representation::HeapObject(), new_type},
{kConst, Representation::HeapObject(), expected_type});
-
if (FLAG_modify_map_inplace) {
// kConst to kMutable migration does not create a new map, therefore
// trivial generalization.
@@ -2742,5 +2745,6 @@ TEST(HoleyMutableHeapNumber) {
CHECK_EQ(kHoleNanInt64, HeapNumber::cast(*obj)->value_as_bits());
}
+} // namespace test_field_type_tracking
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-fuzz-arm64.cc b/deps/v8/test/cctest/test-fuzz-arm64.cc
index bdf143c055..059eda46fd 100644
--- a/deps/v8/test/cctest/test-fuzz-arm64.cc
+++ b/deps/v8/test/cctest/test-fuzz-arm64.cc
@@ -29,7 +29,8 @@
#include "src/arm64/decoder-arm64-inl.h"
#include "src/arm64/disasm-arm64.h"
-using namespace v8::internal;
+namespace v8 {
+namespace internal {
TEST(FUZZ_decoder) {
// Feed noise into the decoder to check that it doesn't crash.
@@ -69,3 +70,6 @@ TEST(FUZZ_disasm) {
decoder.Decode(buffer);
}
}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-global-handles.cc b/deps/v8/test/cctest/test-global-handles.cc
index 4550618de6..d3e229530f 100644
--- a/deps/v8/test/cctest/test-global-handles.cc
+++ b/deps/v8/test/cctest/test-global-handles.cc
@@ -185,5 +185,42 @@ TEST(PhatomHandlesWithoutCallbacks) {
CHECK_EQ(0u, isolate->NumberOfPhantomHandleResetsSinceLastCall());
}
+namespace {
+
+void ResurrectingFinalizer(
+ const v8::WeakCallbackInfo<v8::Global<v8::Object>>& data) {
+ data.GetParameter()->ClearWeak();
+}
+
+} // namespace
+
+TEST(Regress772299) {
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+
+ v8::Global<v8::Object> g1, g2;
+ {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Object> o1 =
+ v8::Local<v8::Object>::New(isolate, v8::Object::New(isolate));
+ v8::Local<v8::Object> o2 =
+ v8::Local<v8::Object>::New(isolate, v8::Object::New(isolate));
+ o1->Set(isolate->GetCurrentContext(), v8_str("link"), o2).FromJust();
+ g1.Reset(isolate, o1);
+ g2.Reset(isolate, o2);
+ // g1 will be finalized but resurrected.
+ g1.SetWeak(&g1, ResurrectingFinalizer, v8::WeakCallbackType::kFinalizer);
+ // g2 will be a phantom handle that should not be reset as g1 transitively
+ // keeps it alive.
+ g2.SetWeak();
+ }
+
+ CcTest::CollectAllAvailableGarbage();
+ // Both g1 and g2 should stay alive as the finalizer resurrects the root
+ // object that transitively keeps the other one alive.
+ CHECK(!g1.IsEmpty());
+ CHECK(!g2.IsEmpty());
+}
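
The regression test exercises the two flavors of weakness on v8::Global: a kFinalizer callback may resurrect its handle by calling ClearWeak(), while a parameterless SetWeak() creates a phantom handle that the GC resets only once the object is truly unreachable. Lifted from the test, the essential shape is:

    // A finalizer that cancels its own weakness, resurrecting the object.
    void Resurrect(const v8::WeakCallbackInfo<v8::Global<v8::Object>>& data) {
      data.GetParameter()->ClearWeak();
    }
    // g1.SetWeak(&g1, Resurrect, v8::WeakCallbackType::kFinalizer);
    // g2.SetWeak();  // phantom: reset only when truly unreachable
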
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-hashmap.cc b/deps/v8/test/cctest/test-hashmap.cc
index b1adc7b005..163bc09f19 100644
--- a/deps/v8/test/cctest/test-hashmap.cc
+++ b/deps/v8/test/cctest/test-hashmap.cc
@@ -32,12 +32,12 @@
#include "src/base/hashmap.h"
-using namespace v8::internal;
-
+namespace v8 {
+namespace internal {
+namespace test_hashmap {
typedef uint32_t (*IntKeyHash)(uint32_t key);
-
class IntSet {
public:
explicit IntSet(IntKeyHash hash) : hash_(hash) {}
@@ -173,3 +173,7 @@ TEST(HashSet) {
TestSet(Hash, 100);
TestSet(CollisionHash, 50);
}
+
+} // namespace test_hashmap
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index ebc78a5ed4..b089fa8521 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -2433,14 +2433,14 @@ static AllocationTraceNode* FindNode(
AllocationTraceNode* node = tracker->trace_tree()->root();
for (int i = 0; node != NULL && i < names.length(); i++) {
const char* name = names[i];
- Vector<AllocationTraceNode*> children = node->children();
+ const std::vector<AllocationTraceNode*>& children = node->children();
node = NULL;
- for (int j = 0; j < children.length(); j++) {
- unsigned index = children[j]->function_info_index();
+ for (AllocationTraceNode* child : children) {
+ unsigned index = child->function_info_index();
AllocationTracker::FunctionInfo* info =
tracker->function_info_list()[index];
if (info && strcmp(info->name, name) == 0) {
- node = children[j];
+ node = child;
break;
}
}
@@ -3063,3 +3063,77 @@ TEST(SamplingHeapProfilerLeftTrimming) {
heap_profiler->StopSamplingHeapProfiler();
}
+
+TEST(SamplingHeapProfilerPretenuredInlineAllocations) {
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_expose_gc = true;
+
+ CcTest::InitializeVM();
+ if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
+ if (i::FLAG_gc_global || i::FLAG_stress_compaction ||
+ i::FLAG_stress_incremental_marking) {
+ return;
+ }
+
+ v8::HandleScope scope(v8::Isolate::GetCurrent());
+ LocalContext env;
+ v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+
+ // Suppress randomness to avoid flakiness in tests.
+ v8::internal::FLAG_sampling_heap_profiler_suppress_randomness = true;
+
+ // Grow new space until maximum capacity is reached.
+ while (!CcTest::heap()->new_space()->IsAtMaximumCapacity()) {
+ CcTest::heap()->new_space()->Grow();
+ }
+
+ i::ScopedVector<char> source(1024);
+ i::SNPrintF(source,
+ "var number_elements = %d;"
+ "var elements = new Array(number_elements);"
+ "function f() {"
+ " for (var i = 0; i < number_elements; i++) {"
+ " elements[i] = [{}, {}, {}];"
+ " }"
+ " return elements[number_elements - 1];"
+ "};"
+ "f(); gc();"
+ "f(); f();"
+ "%%OptimizeFunctionOnNextCall(f);"
+ "f();"
+ "f;",
+ i::AllocationSite::kPretenureMinimumCreated + 1);
+
+ v8::Local<v8::Function> f =
+ v8::Local<v8::Function>::Cast(CompileRun(source.start()));
+
+ // Make sure the function is producing pre-tenured objects.
+ auto res = f->Call(env.local(), env->Global(), 0, NULL).ToLocalChecked();
+ i::Handle<i::JSObject> o = i::Handle<i::JSObject>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res)));
+ CHECK(CcTest::heap()->InOldSpace(o->elements()));
+ CHECK(CcTest::heap()->InOldSpace(*o));
+
+ // Call the function and profile it.
+ heap_profiler->StartSamplingHeapProfiler(64);
+ for (int i = 0; i < 100; ++i) {
+ f->Call(env.local(), env->Global(), 0, NULL).ToLocalChecked();
+ }
+
+ std::unique_ptr<v8::AllocationProfile> profile(
+ heap_profiler->GetAllocationProfile());
+ CHECK(profile);
+ heap_profiler->StopSamplingHeapProfiler();
+
+ const char* names[] = {"f"};
+ auto node_f = FindAllocationProfileNode(env->GetIsolate(), *profile,
+ ArrayVector(names));
+ CHECK(node_f);
+
+ int count = 0;
+ for (auto allocation : node_f->allocations) {
+ count += allocation.count;
+ }
+
+ CHECK_GE(count, 9000);
+}
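
The sampling heap profiler is driven by the three calls visible above. Condensed from the test (the 64-byte sample interval, together with the suppressed-randomness flag, makes sampling effectively exhaustive; |heap_profiler|, |f|, and |env| are as in the test):

    heap_profiler->StartSamplingHeapProfiler(64);  // small interval: dense samples
    for (int i = 0; i < 100; ++i) {
      f->Call(env.local(), env->Global(), 0, NULL).ToLocalChecked();
    }
    std::unique_ptr<v8::AllocationProfile> profile(
        heap_profiler->GetAllocationProfile());
    heap_profiler->StopSamplingHeapProfiler();
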
diff --git a/deps/v8/test/cctest/test-identity-map.cc b/deps/v8/test/cctest/test-identity-map.cc
index f7d1c313e2..8a0d99d1dc 100644
--- a/deps/v8/test/cctest/test-identity-map.cc
+++ b/deps/v8/test/cctest/test-identity-map.cc
@@ -4,19 +4,11 @@
#include <set>
-#include "src/factory.h"
+#include "src/factory-inl.h"
#include "src/identity-map.h"
#include "src/isolate.h"
#include "src/objects.h"
#include "src/zone/zone.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/factory.h -> src/objects-inl.h
-#include "src/objects-inl.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/feedback-vector.h ->
-// src/feedback-vector-inl.h
-#include "src/feedback-vector-inl.h"
-#include "src/v8.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -773,6 +765,7 @@ TEST(CanonicalHandleScope) {
}
TEST(GCShortCutting) {
+ ManualGCScope manual_gc_scope;
IdentityMapTester t;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
diff --git a/deps/v8/test/cctest/test-inobject-slack-tracking.cc b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
index e8b20a7702..9586d72456 100644
--- a/deps/v8/test/cctest/test-inobject-slack-tracking.cc
+++ b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
@@ -13,8 +13,9 @@
#include "test/cctest/cctest.h"
-using namespace v8::base;
-using namespace v8::internal;
+namespace v8 {
+namespace internal {
+namespace test_inobject_slack_tracking {
static const int kMaxInobjectProperties = JSObject::kMaxInObjectProperties;
@@ -62,15 +63,13 @@ Handle<T> GetLexical(const std::string& name) {
return GetLexical<T>(name.c_str());
}
-
template <typename T>
-static inline Handle<T> Run(v8::Local<v8::Script> script) {
+static inline Handle<T> RunI(v8::Local<v8::Script> script) {
return OpenHandle<T>(Run(script));
}
-
template <typename T>
-static inline Handle<T> CompileRun(const char* script) {
+static inline Handle<T> CompileRunI(const char* script) {
return OpenHandle<T>(CompileRun(script));
}
@@ -135,7 +134,7 @@ TEST(JSObjectBasic) {
v8::Local<v8::Script> new_A_script = v8_compile("new A();");
- Handle<JSObject> obj = Run<JSObject>(new_A_script);
+ Handle<JSObject> obj = RunI<JSObject>(new_A_script);
CHECK(func->has_initial_map());
Handle<Map> initial_map(func->initial_map());
@@ -155,7 +154,7 @@ TEST(JSObjectBasic) {
// Create several objects to complete the tracking.
for (int i = 1; i < Map::kGenerousAllocationCount; i++) {
CHECK(initial_map->IsInobjectSlackTrackingInProgress());
- Handle<JSObject> tmp = Run<JSObject>(new_A_script);
+ Handle<JSObject> tmp = RunI<JSObject>(new_A_script);
CHECK_EQ(initial_map->IsInobjectSlackTrackingInProgress(),
IsObjectShrinkable(*tmp));
}
@@ -194,9 +193,9 @@ TEST(JSObjectComplex) {
// Zero instances were created so far.
CHECK(!func->has_initial_map());
- Handle<JSObject> obj1 = CompileRun<JSObject>("new A(1);");
- Handle<JSObject> obj3 = CompileRun<JSObject>("new A(3);");
- Handle<JSObject> obj5 = CompileRun<JSObject>("new A(5);");
+ Handle<JSObject> obj1 = CompileRunI<JSObject>("new A(1);");
+ Handle<JSObject> obj3 = CompileRunI<JSObject>("new A(3);");
+ Handle<JSObject> obj5 = CompileRunI<JSObject>("new A(5);");
CHECK(func->has_initial_map());
Handle<Map> initial_map(func->initial_map());
@@ -237,9 +236,9 @@ TEST(JSObjectComplex) {
CHECK_EQ(0, obj5->map()->unused_property_fields());
// Since slack tracking is complete, the new objects should not be shrinkable.
- obj1 = CompileRun<JSObject>("new A(1);");
- obj3 = CompileRun<JSObject>("new A(3);");
- obj5 = CompileRun<JSObject>("new A(5);");
+ obj1 = CompileRunI<JSObject>("new A(1);");
+ obj3 = CompileRunI<JSObject>("new A(3);");
+ obj5 = CompileRunI<JSObject>("new A(5);");
CHECK(!IsObjectShrinkable(*obj1));
CHECK(!IsObjectShrinkable(*obj3));
@@ -281,7 +280,7 @@ TEST(JSGeneratorObjectBasic) {
v8::Local<v8::Script> new_A_script = v8_compile("CreateGenerator();");
- Handle<JSObject> obj = Run<JSObject>(new_A_script);
+ Handle<JSObject> obj = RunI<JSObject>(new_A_script);
CHECK(func->has_initial_map());
Handle<Map> initial_map(func->initial_map());
@@ -301,7 +300,7 @@ TEST(JSGeneratorObjectBasic) {
// Create several objects to complete the tracking.
for (int i = 1; i < Map::kGenerousAllocationCount; i++) {
CHECK(initial_map->IsInobjectSlackTrackingInProgress());
- Handle<JSObject> tmp = Run<JSObject>(new_A_script);
+ Handle<JSObject> tmp = RunI<JSObject>(new_A_script);
CHECK_EQ(initial_map->IsInobjectSlackTrackingInProgress(),
IsObjectShrinkable(*tmp));
}
@@ -357,7 +356,7 @@ TEST(SubclassBasicNoBaseClassInstances) {
v8::Local<v8::Script> new_B_script = v8_compile("new B();");
- Handle<JSObject> obj = Run<JSObject>(new_B_script);
+ Handle<JSObject> obj = RunI<JSObject>(new_B_script);
CHECK(a_func->has_initial_map());
Handle<Map> a_initial_map(a_func->initial_map());
@@ -388,7 +387,7 @@ TEST(SubclassBasicNoBaseClassInstances) {
// Create several subclass instances to complete the tracking.
for (int i = 1; i < Map::kGenerousAllocationCount; i++) {
CHECK(b_initial_map->IsInobjectSlackTrackingInProgress());
- Handle<JSObject> tmp = Run<JSObject>(new_B_script);
+ Handle<JSObject> tmp = RunI<JSObject>(new_B_script);
CHECK_EQ(b_initial_map->IsInobjectSlackTrackingInProgress(),
IsObjectShrinkable(*tmp));
}
@@ -451,8 +450,8 @@ TEST(SubclassBasic) {
v8::Local<v8::Script> new_A_script = v8_compile("new A();");
v8::Local<v8::Script> new_B_script = v8_compile("new B();");
- Handle<JSObject> a_obj = Run<JSObject>(new_A_script);
- Handle<JSObject> b_obj = Run<JSObject>(new_B_script);
+ Handle<JSObject> a_obj = RunI<JSObject>(new_A_script);
+ Handle<JSObject> b_obj = RunI<JSObject>(new_B_script);
CHECK(a_func->has_initial_map());
Handle<Map> a_initial_map(a_func->initial_map());
@@ -473,7 +472,7 @@ TEST(SubclassBasic) {
// Create several base class instances to complete the tracking.
for (int i = 1; i < Map::kGenerousAllocationCount; i++) {
CHECK(a_initial_map->IsInobjectSlackTrackingInProgress());
- Handle<JSObject> tmp = Run<JSObject>(new_A_script);
+ Handle<JSObject> tmp = RunI<JSObject>(new_A_script);
CHECK_EQ(a_initial_map->IsInobjectSlackTrackingInProgress(),
IsObjectShrinkable(*tmp));
}
@@ -496,7 +495,7 @@ TEST(SubclassBasic) {
// Create several subclass instances to complete the tracking.
for (int i = 1; i < Map::kGenerousAllocationCount; i++) {
CHECK(b_initial_map->IsInobjectSlackTrackingInProgress());
- Handle<JSObject> tmp = Run<JSObject>(new_B_script);
+ Handle<JSObject> tmp = RunI<JSObject>(new_B_script);
CHECK_EQ(b_initial_map->IsInobjectSlackTrackingInProgress(),
IsObjectShrinkable(*tmp));
}
@@ -575,7 +574,7 @@ static void TestClassHierarchy(const std::vector<int>& hierarchy_desc, int n) {
Handle<JSFunction> func = GetLexical<JSFunction>(class_name);
- Handle<JSObject> obj = Run<JSObject>(new_script);
+ Handle<JSObject> obj = RunI<JSObject>(new_script);
CHECK(func->has_initial_map());
Handle<Map> initial_map(func->initial_map());
@@ -594,7 +593,7 @@ static void TestClassHierarchy(const std::vector<int>& hierarchy_desc, int n) {
// Create several instances to complete the tracking.
for (int i = 1; i < Map::kGenerousAllocationCount; i++) {
CHECK(initial_map->IsInobjectSlackTrackingInProgress());
- Handle<JSObject> tmp = Run<JSObject>(new_script);
+ Handle<JSObject> tmp = RunI<JSObject>(new_script);
CHECK_EQ(initial_map->IsInobjectSlackTrackingInProgress(),
IsObjectShrinkable(*tmp));
CHECK_EQ(Map::kSlackTrackingCounterStart - i - 1,
@@ -675,7 +674,7 @@ TEST(InobjectPropetiesCountOverflowInSubclass) {
Handle<JSFunction> func = GetLexical<JSFunction>(class_name);
- Handle<JSObject> obj = Run<JSObject>(new_script);
+ Handle<JSObject> obj = RunI<JSObject>(new_script);
CHECK(func->has_initial_map());
Handle<Map> initial_map(func->initial_map());
@@ -692,7 +691,7 @@ TEST(InobjectPropetiesCountOverflowInSubclass) {
// Create several instances to complete the tracking.
for (int i = 1; i < Map::kGenerousAllocationCount; i++) {
CHECK(initial_map->IsInobjectSlackTrackingInProgress());
- Handle<JSObject> tmp = Run<JSObject>(new_script);
+ Handle<JSObject> tmp = RunI<JSObject>(new_script);
CHECK(!IsObjectShrinkable(*tmp));
}
CHECK(!initial_map->IsInobjectSlackTrackingInProgress());
@@ -859,7 +858,7 @@ TEST(SlowModeSubclass) {
Handle<JSFunction> func = GetLexical<JSFunction>(class_name);
- Handle<JSObject> obj = Run<JSObject>(new_script);
+ Handle<JSObject> obj = RunI<JSObject>(new_script);
CHECK(func->has_initial_map());
Handle<Map> initial_map(func->initial_map());
@@ -876,7 +875,7 @@ TEST(SlowModeSubclass) {
// Create several instances to complete the tracking.
for (int i = 1; i < Map::kGenerousAllocationCount; i++) {
CHECK(initial_map->IsInobjectSlackTrackingInProgress());
- Handle<JSObject> tmp = Run<JSObject>(new_script);
+ Handle<JSObject> tmp = RunI<JSObject>(new_script);
CHECK(!IsObjectShrinkable(*tmp));
}
CHECK(!initial_map->IsInobjectSlackTrackingInProgress());
@@ -925,7 +924,7 @@ static void TestSubclassBuiltin(const char* subclass_name,
new_script = v8_compile(os.str().c_str());
}
- Run<JSObject>(new_script);
+ RunI<JSObject>(new_script);
CHECK(func->has_initial_map());
Handle<Map> initial_map(func->initial_map());
@@ -939,7 +938,7 @@ static void TestSubclassBuiltin(const char* subclass_name,
// Create two instances in order to ensure that |obj|.o is a data field
// in case of Function subclassing.
- Handle<JSObject> obj = Run<JSObject>(new_script);
+ Handle<JSObject> obj = RunI<JSObject>(new_script);
// Two instances of a subclass created.
CHECK_EQ(Map::kSlackTrackingCounterStart - 2,
@@ -956,7 +955,7 @@ static void TestSubclassBuiltin(const char* subclass_name,
// Create several subclass instances to complete the tracking.
for (int i = 2; i < Map::kGenerousAllocationCount; i++) {
CHECK(initial_map->IsInobjectSlackTrackingInProgress());
- Handle<JSObject> tmp = Run<JSObject>(new_script);
+ Handle<JSObject> tmp = RunI<JSObject>(new_script);
CHECK_EQ(initial_map->IsInobjectSlackTrackingInProgress(),
IsObjectShrinkable(*tmp));
}
@@ -1203,3 +1202,7 @@ TEST(SubclassPromiseBuiltinNoInlineNew) {
FLAG_inline_new = false;
TestSubclassPromiseBuiltin();
}
+
+} // namespace test_inobject_slack_tracking
+} // namespace internal
+} // namespace v8
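The Run -> RunI rename above follows from moving these tests into namespace v8::internal: a bare Run becomes ambiguous there, and the I suffix marks that the helper returns an internal Handle. A minimal sketch of such a helper, assuming the cctest environment (the real one lives in the cctest sources):

    template <typename T>
    static Handle<T> RunI(v8::Local<v8::Script> script) {
      v8::Local<v8::Value> result =
          script->Run(v8::Isolate::GetCurrent()->GetCurrentContext())
              .ToLocalChecked();
      // Utils::OpenHandle crosses from the public API value to the internal
      // heap object, which Handle<T>::cast then narrows.
      return Handle<T>::cast(v8::Utils::OpenHandle(*result));
    }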
diff --git a/deps/v8/test/cctest/test-javascript-arm64.cc b/deps/v8/test/cctest/test-javascript-arm64.cc
index d23b63e0a9..3b1f1a1d12 100644
--- a/deps/v8/test/cctest/test-javascript-arm64.cc
+++ b/deps/v8/test/cctest/test-javascript-arm64.cc
@@ -39,40 +39,23 @@
#include "src/utils.h"
#include "test/cctest/cctest.h"
-using ::v8::Context;
-using ::v8::Extension;
-using ::v8::Function;
-using ::v8::FunctionTemplate;
-using ::v8::HandleScope;
-using ::v8::Local;
-using ::v8::Message;
-using ::v8::MessageCallback;
-using ::v8::Object;
-using ::v8::ObjectTemplate;
-using ::v8::Persistent;
-using ::v8::Script;
-using ::v8::StackTrace;
-using ::v8::String;
-using ::v8::TryCatch;
-using ::v8::Undefined;
-using ::v8::V8;
-using ::v8::Value;
-
-static void ExpectBoolean(Local<Context> context, bool expected,
+namespace v8 {
+namespace internal {
+namespace test_javascript_arm64 {
+
+static void ExpectBoolean(Local<v8::Context> context, bool expected,
Local<Value> result) {
CHECK(result->IsBoolean());
CHECK_EQ(expected, result->BooleanValue(context).FromJust());
}
-
-static void ExpectInt32(Local<Context> context, int32_t expected,
+static void ExpectInt32(Local<v8::Context> context, int32_t expected,
Local<Value> result) {
CHECK(result->IsInt32());
CHECK_EQ(expected, result->Int32Value(context).FromJust());
}
-
-static void ExpectNumber(Local<Context> context, double expected,
+static void ExpectNumber(Local<v8::Context> context, double expected,
Local<Value> result) {
CHECK(result->IsNumber());
CHECK_EQ(expected, result->NumberValue(context).FromJust());
@@ -125,7 +108,7 @@ TEST(binary_op) {
ExpectInt32(env.local(), 0x2468, result);
}
-static void if_comparison_testcontext_helper(Local<Context> context,
+static void if_comparison_testcontext_helper(Local<v8::Context> context,
char const* op, char const* lhs,
char const* rhs, int expect) {
char buffer[256];
@@ -139,7 +122,7 @@ static void if_comparison_testcontext_helper(Local<Context> context,
ExpectInt32(context, expect, result);
}
-static void if_comparison_effectcontext_helper(Local<Context> context,
+static void if_comparison_effectcontext_helper(Local<v8::Context> context,
char const* op, char const* lhs,
char const* rhs, int expect) {
char buffer[256];
@@ -154,7 +137,7 @@ static void if_comparison_effectcontext_helper(Local<Context> context,
ExpectInt32(context, expect, result);
}
-static void if_comparison_helper(Local<Context> context, char const* op,
+static void if_comparison_helper(Local<v8::Context> context, char const* op,
int expect_when_lt, int expect_when_eq,
int expect_when_gt) {
// TODO(all): Non-SMI tests.
@@ -259,3 +242,7 @@ TEST(unary_not) {
result = CompileRun("var a = 1; if ( !a ) { 1; } else { 0; }");
ExpectInt32(env.local(), 0, result);
}
+
+} // namespace test_javascript_arm64
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-js-arm64-variables.cc b/deps/v8/test/cctest/test-js-arm64-variables.cc
index dbcf8f94ac..442407a79e 100644
--- a/deps/v8/test/cctest/test-js-arm64-variables.cc
+++ b/deps/v8/test/cctest/test-js-arm64-variables.cc
@@ -41,26 +41,11 @@
#include "src/utils.h"
#include "test/cctest/cctest.h"
-using ::v8::Context;
-using ::v8::Extension;
-using ::v8::Function;
-using ::v8::FunctionTemplate;
-using ::v8::HandleScope;
-using ::v8::Local;
-using ::v8::Message;
-using ::v8::MessageCallback;
-using ::v8::Object;
-using ::v8::ObjectTemplate;
-using ::v8::Persistent;
-using ::v8::Script;
-using ::v8::StackTrace;
-using ::v8::String;
-using ::v8::TryCatch;
-using ::v8::Undefined;
-using ::v8::V8;
-using ::v8::Value;
-
-static void ExpectInt32(Local<Context> context, int32_t expected,
+namespace v8 {
+namespace internal {
+namespace test_js_arm64_variables {
+
+static void ExpectInt32(Local<v8::Context> context, int32_t expected,
Local<Value> result) {
CHECK(result->IsInt32());
CHECK_EQ(expected, result->Int32Value(context).FromJust());
@@ -140,3 +125,7 @@ TEST(lookup_slots) {
"f5(5);");
ExpectInt32(env.local(), 5, result);
}
+
+} // namespace test_js_arm64_variables
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-list.cc b/deps/v8/test/cctest/test-list.cc
deleted file mode 100644
index c943d10e26..0000000000
--- a/deps/v8/test/cctest/test-list.cc
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-#include <string.h>
-#include "src/list-inl.h"
-#include "src/list.h"
-#include "src/v8.h"
-#include "test/cctest/cctest.h"
-
-namespace v8 {
-namespace internal {
-
-// Use a testing allocator that clears memory before deletion.
-class ZeroingAllocationPolicy {
- public:
- void* New(size_t size) {
- // Stash the size in the first word to use for Delete.
- size_t true_size = size + sizeof(size_t);
- size_t* result = reinterpret_cast<size_t*>(malloc(true_size));
- if (result == NULL) return result;
- *result = true_size;
- return result + 1;
- }
-
- static void Delete(void* ptr) {
- size_t* true_ptr = reinterpret_cast<size_t*>(ptr) - 1;
- memset(true_ptr, 0, *true_ptr);
- free(true_ptr);
- }
-};
-
-
-// Check that we can add (a reference to) an element of the list
-// itself.
-TEST(ListAdd) {
- // Add elements to the list to grow it to its capacity.
- List<int, ZeroingAllocationPolicy> list(4);
- list.Add(1);
- list.Add(2);
- list.Add(3);
- list.Add(4);
-
- // Add an existing element, the backing store should have to grow.
- list.Add(list[0]);
- CHECK_EQ(1, list[4]);
-}
-
-
-// Test that we can add all elements from a list to another list.
-TEST(ListAddAll) {
- List<int, ZeroingAllocationPolicy> list(4);
- list.Add(0);
- list.Add(1);
- list.Add(2);
-
- CHECK_EQ(3, list.length());
- for (int i = 0; i < 3; i++) {
- CHECK_EQ(i, list[i]);
- }
-
- List<int, ZeroingAllocationPolicy> other_list(4);
-
- // Add no elements to list since other_list is empty.
- list.AddAll(other_list);
- CHECK_EQ(3, list.length());
- for (int i = 0; i < 3; i++) {
- CHECK_EQ(i, list[i]);
- }
-
- // Add three elements to other_list.
- other_list.Add(0);
- other_list.Add(1);
- other_list.Add(2);
-
- // Copy the three elements from other_list to list.
- list.AddAll(other_list);
- CHECK_EQ(6, list.length());
- for (int i = 0; i < 6; i++) {
- CHECK_EQ(i % 3, list[i]);
- }
-}
-
-
-TEST(RemoveLast) {
- List<int> list(4);
- CHECK_EQ(0, list.length());
- list.Add(1);
- CHECK_EQ(1, list.length());
- CHECK_EQ(1, list.last());
- list.RemoveLast();
- CHECK_EQ(0, list.length());
- list.Add(2);
- list.Add(3);
- CHECK_EQ(2, list.length());
- CHECK_EQ(3, list.last());
- list.RemoveLast();
- CHECK_EQ(1, list.length());
- CHECK_EQ(2, list.last());
- list.RemoveLast();
- CHECK_EQ(0, list.length());
-
- const int kElements = 100;
- for (int i = 0; i < kElements; i++) list.Add(i);
- for (int j = kElements - 1; j >= 0; j--) {
- CHECK_EQ(j + 1, list.length());
- CHECK_EQ(j, list.last());
- list.RemoveLast();
- CHECK_EQ(j, list.length());
- }
-}
-
-
-TEST(Allocate) {
- List<int> list(4);
- list.Add(1);
- CHECK_EQ(1, list.length());
- list.Allocate(100);
- CHECK_EQ(100, list.length());
- CHECK_LE(100, list.capacity());
- list[99] = 123;
- CHECK_EQ(123, list[99]);
-}
-
-
-TEST(Clear) {
- List<int> list(4);
- CHECK_EQ(0, list.length());
- for (int i = 0; i < 4; ++i) list.Add(i);
- CHECK_EQ(4, list.length());
- list.Clear();
- CHECK_EQ(0, list.length());
-}
-
-
-TEST(DeleteEmpty) {
- {
- List<int>* list = new List<int>(0);
- delete list;
- }
- {
- List<int> list(0);
- }
-}
-
-} // namespace internal
-} // namespace v8
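test-list.cc is deleted outright, presumably because List<T> itself is being retired in favor of standard containers in this V8 version. For reference, the behaviors the removed tests exercised map onto std::vector as follows (illustrative only, not part of the patch):

    #include <cassert>
    #include <vector>

    void ListOperationsWithVector() {
      std::vector<int> list;
      list.reserve(4);
      for (int i = 1; i <= 4; ++i) list.push_back(i);  // List::Add
      // Appending an element of the vector to itself is well-defined even if
      // push_back reallocates (cf. the deleted ListAdd test).
      list.push_back(list[0]);
      assert(list[4] == 1);
      list.pop_back();   // List::RemoveLast
      list.resize(100);  // List::Allocate
      list.clear();      // List::Clear
    }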
diff --git a/deps/v8/test/cctest/test-lockers.cc b/deps/v8/test/cctest/test-lockers.cc
index a310bfd684..ebfbe88de2 100644
--- a/deps/v8/test/cctest/test-lockers.cc
+++ b/deps/v8/test/cctest/test-lockers.cc
@@ -41,19 +41,247 @@
#include "src/utils.h"
#include "test/cctest/cctest.h"
-using ::v8::Context;
-using ::v8::Extension;
-using ::v8::Function;
-using ::v8::HandleScope;
-using ::v8::Local;
-using ::v8::Object;
-using ::v8::ObjectTemplate;
-using ::v8::Persistent;
-using ::v8::Script;
-using ::v8::String;
-using ::v8::Value;
-using ::v8::V8;
+namespace {
+class DeoptimizeCodeThread : public v8::base::Thread {
+ public:
+ DeoptimizeCodeThread(v8::Isolate* isolate, v8::Local<v8::Context> context,
+ const char* trigger)
+ : Thread(Options("DeoptimizeCodeThread")),
+ isolate_(isolate),
+ context_(isolate, context),
+ source_(trigger) {}
+
+ void Run() {
+ v8::Locker locker(isolate_);
+ isolate_->Enter();
+ v8::HandleScope handle_scope(isolate_);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(isolate_, context_);
+ v8::Context::Scope context_scope(context);
+ CHECK_EQ(isolate_, v8::Isolate::GetCurrent());
+ // This code triggers deoptimization of some function that will be
+ // used in a different thread.
+ CompileRun(source_);
+ isolate_->Exit();
+ }
+
+ private:
+ v8::Isolate* isolate_;
+ v8::Persistent<v8::Context> context_;
+ // The code that triggers the deoptimization.
+ const char* source_;
+};
+
+void UnlockForDeoptimization(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ // Gets the pointer to the thread that will trigger the deoptimization of the
+ // code.
+ DeoptimizeCodeThread* deoptimizer =
+ reinterpret_cast<DeoptimizeCodeThread*>(isolate->GetData(0));
+ {
+ // Exits and unlocks the isolate.
+ isolate->Exit();
+ v8::Unlocker unlocker(isolate);
+ // Starts the deoptimizing thread.
+ deoptimizer->Start();
+ // Waits for deoptimization to finish.
+ deoptimizer->Join();
+ }
+ // The deoptimizing thread has finished its work, and the isolate
+ // will now be used by the current thread.
+ isolate->Enter();
+}
+
+void UnlockForDeoptimizationIfReady(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ bool* ready_to_deoptimize = reinterpret_cast<bool*>(isolate->GetData(1));
+ if (*ready_to_deoptimize) {
+ // The test should enter here only once, so put the flag back to false.
+ *ready_to_deoptimize = false;
+ // Gets the pointer to the thread that will trigger the deoptimization of
+ // the code.
+ DeoptimizeCodeThread* deoptimizer =
+ reinterpret_cast<DeoptimizeCodeThread*>(isolate->GetData(0));
+ {
+      // Exits and unlocks the isolate.
+ isolate->Exit();
+ v8::Unlocker unlocker(isolate);
+ // Starts the thread that deoptimizes the function.
+ deoptimizer->Start();
+ // Waits for the deoptimizing thread to finish.
+ deoptimizer->Join();
+ }
+ // The deoptimizing thread has finished its work, and the isolate
+ // will now be used by the current thread.
+ isolate->Enter();
+ }
+}
+} // namespace
+
+namespace v8 {
+namespace internal {
+namespace test_lockers {
+
+TEST(LazyDeoptimizationMultithread) {
+ i::FLAG_allow_natives_syntax = true;
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ {
+ v8::Locker locker(isolate);
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ const char* trigger_deopt = "obj = { y: 0, x: 1 };";
+
+ // We use the isolate to pass arguments to the UnlockForDeoptimization
+ // function. Namely, we pass a pointer to the deoptimizing thread.
+ DeoptimizeCodeThread deoptimize_thread(isolate, context, trigger_deopt);
+ isolate->SetData(0, &deoptimize_thread);
+ v8::Context::Scope context_scope(context);
+
+    // Create the function template for C++ code that is invoked from
+ // JavaScript code.
+ Local<v8::FunctionTemplate> fun_templ =
+ v8::FunctionTemplate::New(isolate, UnlockForDeoptimization);
+ Local<Function> fun = fun_templ->GetFunction(context).ToLocalChecked();
+ CHECK(context->Global()
+ ->Set(context, v8_str("unlock_for_deoptimization"), fun)
+ .FromJust());
+
+ // Optimizes a function f, which will be deoptimized in another
+ // thread.
+ CompileRun(
+ "var b = false; var obj = { x: 1 };"
+ "function f() { g(); return obj.x; }"
+ "function g() { if (b) { unlock_for_deoptimization(); } }"
+ "%NeverOptimizeFunction(g);"
+ "f(); f(); %OptimizeFunctionOnNextCall(f);"
+ "f();");
+
+ // Trigger the unlocking.
+ Local<Value> v = CompileRun("b = true; f();");
+
+    // Once the isolate has been unlocked, this thread waits for the other
+    // thread to finish its task. It then continues with the execution of
+    // the function g, which returns to f. The function f should also have
+    // been deoptimized; if the code replacement did not happen on this
+    // thread's stack, the test will fail here.
+ CHECK(v->IsNumber());
+ CHECK_EQ(1, static_cast<int>(v->NumberValue(context).FromJust()));
+ }
+ isolate->Dispose();
+}
+
+TEST(LazyDeoptimizationMultithreadWithNatives) {
+ i::FLAG_allow_natives_syntax = true;
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ {
+ v8::Locker locker(isolate);
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ const char* trigger_deopt = "%DeoptimizeFunction(f);";
+
+    // We use the isolate to pass arguments to UnlockForDeoptimizationIfReady.
+    // Namely, we pass a pointer to the deoptimizing thread.
+ DeoptimizeCodeThread deoptimize_thread(isolate, context, trigger_deopt);
+ isolate->SetData(0, &deoptimize_thread);
+ bool ready_to_deopt = false;
+ isolate->SetData(1, &ready_to_deopt);
+ v8::Context::Scope context_scope(context);
+
+    // Create the function template for C++ code that is invoked from
+ // JavaScript code.
+ Local<v8::FunctionTemplate> fun_templ =
+ v8::FunctionTemplate::New(isolate, UnlockForDeoptimizationIfReady);
+ Local<Function> fun = fun_templ->GetFunction(context).ToLocalChecked();
+ CHECK(context->Global()
+ ->Set(context, v8_str("unlock_for_deoptimization"), fun)
+ .FromJust());
+
+ // Optimizes a function f, which will be deoptimized in another
+ // thread.
+ CompileRun(
+ "var obj = { x: 1 };"
+ "function f() { g(); return obj.x;}"
+ "function g() { "
+ " unlock_for_deoptimization(); }"
+ "%NeverOptimizeFunction(g);"
+ "f(); f(); %OptimizeFunctionOnNextCall(f);");
+
+ // Trigger the unlocking.
+ ready_to_deopt = true;
+ isolate->SetData(1, &ready_to_deopt);
+ Local<Value> v = CompileRun("f();");
+
+    // Once the isolate has been unlocked, this thread waits for the other
+    // thread to finish its task. It then continues with the execution of
+    // the function g, which returns to f. The function f should also have
+    // been deoptimized; otherwise, the test will fail here.
+ CHECK(v->IsNumber());
+ CHECK_EQ(1, static_cast<int>(v->NumberValue(context).FromJust()));
+ }
+ isolate->Dispose();
+}
+
+TEST(EagerDeoptimizationMultithread) {
+ i::FLAG_allow_natives_syntax = true;
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ {
+ v8::Locker locker(isolate);
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ const char* trigger_deopt = "f({y: 0, x: 1});";
+
+    // We use the isolate to pass arguments to UnlockForDeoptimizationIfReady.
+    // Namely, we pass a pointer to the deoptimizing thread.
+ DeoptimizeCodeThread deoptimize_thread(isolate, context, trigger_deopt);
+ isolate->SetData(0, &deoptimize_thread);
+ bool ready_to_deopt = false;
+ isolate->SetData(1, &ready_to_deopt);
+ v8::Context::Scope context_scope(context);
+
+    // Create the function template for C++ code that is invoked from
+ // JavaScript code.
+ Local<v8::FunctionTemplate> fun_templ =
+ v8::FunctionTemplate::New(isolate, UnlockForDeoptimizationIfReady);
+ Local<Function> fun = fun_templ->GetFunction(context).ToLocalChecked();
+ CHECK(context->Global()
+ ->Set(context, v8_str("unlock_for_deoptimization"), fun)
+ .FromJust());
+
+ // Optimizes a function f, which will be deoptimized by another thread.
+ CompileRun(
+ "function f(obj) { unlock_for_deoptimization(); return obj.x; }"
+ "f({x: 1}); f({x: 1});"
+ "%OptimizeFunctionOnNextCall(f);"
+ "f({x: 1});");
+
+ // Trigger the unlocking.
+ ready_to_deopt = true;
+ isolate->SetData(1, &ready_to_deopt);
+ Local<Value> v = CompileRun("f({x: 1});");
+
+    // Once the isolate has been unlocked, this thread waits for the other
+    // thread to finish its task. It then resumes the execution of the
+    // function f, which should have been deoptimized in the meantime;
+    // otherwise, the test will fail here.
+ CHECK(v->IsNumber());
+ CHECK_EQ(1, static_cast<int>(v->NumberValue(context).FromJust()));
+ }
+ isolate->Dispose();
+}
// Migrating an isolate
class KangarooThread : public v8::base::Thread {
@@ -92,7 +320,7 @@ class KangarooThread : public v8::base::Thread {
private:
v8::Isolate* isolate_;
- Persistent<v8::Context> context_;
+ v8::Persistent<v8::Context> context_;
};
@@ -332,7 +560,7 @@ class LockIsolateAndCalculateFibSharedContextThread : public JoinableThread {
virtual void Run() {
v8::Locker lock(isolate_);
v8::Isolate::Scope isolate_scope(isolate_);
- HandleScope handle_scope(isolate_);
+ v8::HandleScope handle_scope(isolate_);
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(isolate_, context_);
v8::Context::Scope context_scope(context);
@@ -340,7 +568,7 @@ class LockIsolateAndCalculateFibSharedContextThread : public JoinableThread {
}
private:
v8::Isolate* isolate_;
- Persistent<v8::Context> context_;
+ v8::Persistent<v8::Context> context_;
};
class LockerUnlockerThread : public JoinableThread {
@@ -661,12 +889,12 @@ TEST(Regress1433) {
v8::Locker lock(isolate);
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
- v8::Local<Context> context = v8::Context::New(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
- v8::Local<String> source = v8_str("1+1");
- v8::Local<Script> script =
+ v8::Local<v8::String> source = v8_str("1+1");
+ v8::Local<v8::Script> script =
v8::Script::Compile(context, source).ToLocalChecked();
- v8::Local<Value> result = script->Run(context).ToLocalChecked();
+ v8::Local<v8::Value> result = script->Run(context).ToLocalChecked();
v8::String::Utf8Value utf8(isolate, result);
}
isolate->Dispose();
@@ -744,3 +972,7 @@ TEST(ExtensionsRegistration) {
}
StartJoinAndDeleteThreads(threads);
}
+
+} // namespace test_lockers
+} // namespace internal
+} // namespace v8
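The three new multithreaded deoptimization tests share one handoff protocol: the JavaScript thread exits and unlocks the isolate, lets the deoptimizer thread take the lock and run its trigger, then reacquires the isolate. Distilled from UnlockForDeoptimization above (names illustrative):

    void HandOffIsolate(v8::Isolate* isolate, v8::base::Thread* deoptimizer) {
      isolate->Exit();                   // leave the isolate on this thread
      {
        v8::Unlocker unlocker(isolate);  // release the lock for this scope
        deoptimizer->Start();            // other thread takes a v8::Locker
        deoptimizer->Join();             // wait until the deopt is done
      }                                  // lock is reacquired here
      isolate->Enter();                  // re-enter before touching V8 again
    }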
diff --git a/deps/v8/test/cctest/test-macro-assembler-arm.cc b/deps/v8/test/cctest/test-macro-assembler-arm.cc
index 2bbcb7a566..97579674c0 100644
--- a/deps/v8/test/cctest/test-macro-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-arm.cc
@@ -34,7 +34,9 @@
#include "src/v8.h"
#include "test/cctest/cctest.h"
-using namespace v8::internal;
+namespace v8 {
+namespace internal {
+namespace test_macro_assembler_arm {
typedef void* (*F)(int x, int y, int p2, int p3, int p4);
@@ -125,8 +127,8 @@ TEST(LoadAndStoreWithRepresentation) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
// Call the function from C++.
F5 f = FUNCTION_CAST<F5>(code->entry());
@@ -235,8 +237,8 @@ TEST(ExtractLane) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -370,8 +372,8 @@ TEST(ReplaceLane) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
@@ -404,3 +406,7 @@ TEST(ReplaceLane) {
}
#undef __
+
+} // namespace test_macro_assembler_arm
+} // namespace internal
+} // namespace v8
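The mechanical change repeated through the macro-assembler tests is the Factory::NewCode signature: the code kind is now passed directly rather than pre-baked into flags. Side by side (sketch of the two call shapes seen in this patch):

    // Before: flags precomputed from the kind.
    Handle<Code> code = isolate->factory()->NewCode(
        desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
    // After: the kind itself is the argument.
    Handle<Code> code2 =
        isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());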
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips.cc b/deps/v8/test/cctest/test-macro-assembler-mips.cc
index c00b8e65a1..f1a0b2a1a6 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips.cc
@@ -28,17 +28,18 @@
#include <stdlib.h>
#include <iostream> // NOLINT(readability/streams)
+#include "src/api.h"
#include "src/base/utils/random-number-generator.h"
#include "src/macro-assembler.h"
#include "src/mips/macro-assembler-mips.h"
#include "src/mips/simulator-mips.h"
+#include "src/objects-inl.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
+namespace v8 {
+namespace internal {
-using namespace v8::internal;
-
-typedef void* (*F)(int x, int y, int p2, int p3, int p4);
typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
typedef Object* (*F3)(void* p, int p1, int p2, int p3, int p4);
typedef Object* (*F4)(void* p0, void* p1, int p2, int p3, int p4);
@@ -93,9 +94,9 @@ TEST(BYTESWAP) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
- ::F3 f = FUNCTION_CAST<::F3>(code->entry());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
t.r1 = 0x781A15C3;
t.r2 = 0x2CDE;
t.r3 = 0x9F;
@@ -202,8 +203,8 @@ TEST(jump_tables4) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -269,8 +270,8 @@ TEST(jump_tables5) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -360,8 +361,8 @@ TEST(jump_tables6) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -387,8 +388,8 @@ static uint32_t run_lsa(uint32_t rt, uint32_t rs, int8_t sa) {
CodeDesc desc;
assembler.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F1 f = FUNCTION_CAST<F1>(code->entry());
@@ -517,8 +518,8 @@ RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
@@ -622,8 +623,8 @@ static bool runOverflow(IN_TYPE valLeft, IN_TYPE valRight,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
@@ -1051,9 +1052,9 @@ TEST(min_max_nan) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
- ::F3 f = FUNCTION_CAST<::F3>(code->entry());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
test.b = inputsb[i];
@@ -1087,8 +1088,8 @@ bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
@@ -1335,8 +1336,8 @@ bool run_Sltu(uint32_t rs, uint32_t rd, Func GenerateSltuInstructionFunc) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
int32_t res = reinterpret_cast<int32_t>(
@@ -1366,7 +1367,7 @@ TEST(Sltu) {
}
template <typename T, typename Inputs, typename Results>
-static ::F4 GenerateMacroFloat32MinMax(MacroAssembler* masm) {
+static F4 GenerateMacroFloat32MinMax(MacroAssembler* masm) {
T a = T::from_code(4); // f4
T b = T::from_code(6); // f6
T c = T::from_code(8); // f8
@@ -1430,13 +1431,13 @@ static ::F4 GenerateMacroFloat32MinMax(MacroAssembler* masm) {
CodeDesc desc;
masm->GetCode(masm->isolate(), &desc);
- Handle<Code> code = masm->isolate()->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ masm->isolate()->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
#endif
- return FUNCTION_CAST<::F4>(code->entry());
+ return FUNCTION_CAST<F4>(code->entry());
}
TEST(macro_float_minmax_f32) {
@@ -1465,7 +1466,7 @@ TEST(macro_float_minmax_f32) {
float max_aba_;
};
- ::F4 f = GenerateMacroFloat32MinMax<FPURegister, Inputs, Results>(masm);
+ F4 f = GenerateMacroFloat32MinMax<FPURegister, Inputs, Results>(masm);
Object* dummy = nullptr;
USE(dummy);
@@ -1509,7 +1510,7 @@ TEST(macro_float_minmax_f32) {
}
template <typename T, typename Inputs, typename Results>
-static ::F4 GenerateMacroFloat64MinMax(MacroAssembler* masm) {
+static F4 GenerateMacroFloat64MinMax(MacroAssembler* masm) {
T a = T::from_code(4); // f4
T b = T::from_code(6); // f6
T c = T::from_code(8); // f8
@@ -1573,13 +1574,13 @@ static ::F4 GenerateMacroFloat64MinMax(MacroAssembler* masm) {
CodeDesc desc;
masm->GetCode(masm->isolate(), &desc);
- Handle<Code> code = masm->isolate()->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ masm->isolate()->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
#endif
- return FUNCTION_CAST<::F4>(code->entry());
+ return FUNCTION_CAST<F4>(code->entry());
}
TEST(macro_float_minmax_f64) {
@@ -1608,7 +1609,7 @@ TEST(macro_float_minmax_f64) {
double max_aba_;
};
- ::F4 f = GenerateMacroFloat64MinMax<DoubleRegister, Inputs, Results>(masm);
+ F4 f = GenerateMacroFloat64MinMax<DoubleRegister, Inputs, Results>(masm);
Object* dummy = nullptr;
USE(dummy);
@@ -1652,3 +1653,6 @@ TEST(macro_float_minmax_f64) {
}
#undef __
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips64.cc b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
index 9432584c4d..58e5b32dd6 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
@@ -35,11 +35,12 @@
#include "src/macro-assembler.h"
#include "src/mips64/macro-assembler-mips64.h"
#include "src/mips64/simulator-mips64.h"
+#include "src/objects-inl.h"
+namespace v8 {
+namespace internal {
-using namespace v8::internal;
-
-typedef void* (*F)(int64_t x, int64_t y, int p2, int p3, int p4);
+typedef void* (*FV)(int64_t x, int64_t y, int p2, int p3, int p4);
typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
typedef Object* (*F3)(void* p, int p1, int p2, int p3, int p4);
typedef Object* (*F4)(void* p0, void* p1, int p2, int p3, int p4);
@@ -108,9 +109,9 @@ TEST(BYTESWAP) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
- ::F3 f = FUNCTION_CAST<::F3>(code->entry());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
t.r1 = 0x5612FFCD9D327ACC;
t.r2 = 0x781A15C3;
t.r3 = 0xFCDE;
@@ -160,10 +161,10 @@ TEST(LoadConstants) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- ::F f = FUNCTION_CAST< ::F>(code->entry());
+ FV f = FUNCTION_CAST<FV>(code->entry());
(void)CALL_GENERATED_CODE(isolate, f, reinterpret_cast<int64_t>(result), 0, 0,
0, 0);
// Check results.
@@ -205,10 +206,10 @@ TEST(LoadAddress) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- ::F f = FUNCTION_CAST< ::F>(code->entry());
+ FV f = FUNCTION_CAST<FV>(code->entry());
(void)CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0);
// Check results.
}
@@ -263,8 +264,8 @@ TEST(jump_tables4) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -337,8 +338,8 @@ TEST(jump_tables5) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -429,8 +430,8 @@ TEST(jump_tables6) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -456,8 +457,8 @@ static uint64_t run_lsa(uint32_t rt, uint32_t rs, int8_t sa) {
CodeDesc desc;
assembler.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F1 f = FUNCTION_CAST<F1>(code->entry());
@@ -538,10 +539,10 @@ static uint64_t run_dlsa(uint64_t rt, uint64_t rs, int8_t sa) {
CodeDesc desc;
assembler.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- ::F f = FUNCTION_CAST<::F>(code->entry());
+ FV f = FUNCTION_CAST<FV>(code->entry());
uint64_t res = reinterpret_cast<uint64_t>(
CALL_GENERATED_CODE(isolate, f, rt, rs, 0, 0, 0));
@@ -690,8 +691,8 @@ RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
@@ -853,8 +854,8 @@ static bool runOverflow(IN_TYPE valLeft, IN_TYPE valRight,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
@@ -1209,9 +1210,9 @@ TEST(min_max_nan) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
- ::F3 f = FUNCTION_CAST<::F3>(code->entry());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
test.b = inputsb[i];
@@ -1245,8 +1246,8 @@ bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
@@ -1610,8 +1611,8 @@ bool run_Sltu(uint64_t rs, uint64_t rd, Func GenerateSltuInstructionFunc) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
int64_t res = reinterpret_cast<int64_t>(
@@ -1641,7 +1642,7 @@ TEST(Sltu) {
}
template <typename T, typename Inputs, typename Results>
-static ::F4 GenerateMacroFloat32MinMax(MacroAssembler* masm) {
+static F4 GenerateMacroFloat32MinMax(MacroAssembler* masm) {
T a = T::from_code(4); // f4
T b = T::from_code(6); // f6
T c = T::from_code(8); // f8
@@ -1705,13 +1706,13 @@ static ::F4 GenerateMacroFloat32MinMax(MacroAssembler* masm) {
CodeDesc desc;
masm->GetCode(masm->isolate(), &desc);
- Handle<Code> code = masm->isolate()->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ masm->isolate()->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
#endif
- return FUNCTION_CAST<::F4>(code->entry());
+ return FUNCTION_CAST<F4>(code->entry());
}
TEST(macro_float_minmax_f32) {
@@ -1740,7 +1741,7 @@ TEST(macro_float_minmax_f32) {
float max_aba_;
};
- ::F4 f = GenerateMacroFloat32MinMax<FPURegister, Inputs, Results>(masm);
+ F4 f = GenerateMacroFloat32MinMax<FPURegister, Inputs, Results>(masm);
Object* dummy = nullptr;
USE(dummy);
@@ -1784,7 +1785,7 @@ TEST(macro_float_minmax_f32) {
}
template <typename T, typename Inputs, typename Results>
-static ::F4 GenerateMacroFloat64MinMax(MacroAssembler* masm) {
+static F4 GenerateMacroFloat64MinMax(MacroAssembler* masm) {
T a = T::from_code(4); // f4
T b = T::from_code(6); // f6
T c = T::from_code(8); // f8
@@ -1848,13 +1849,13 @@ static ::F4 GenerateMacroFloat64MinMax(MacroAssembler* masm) {
CodeDesc desc;
masm->GetCode(masm->isolate(), &desc);
- Handle<Code> code = masm->isolate()->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ masm->isolate()->factory()->NewCode(desc, Code::STUB, Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
#endif
- return FUNCTION_CAST<::F4>(code->entry());
+ return FUNCTION_CAST<F4>(code->entry());
}
TEST(macro_float_minmax_f64) {
@@ -1883,7 +1884,7 @@ TEST(macro_float_minmax_f64) {
double max_aba_;
};
- ::F4 f = GenerateMacroFloat64MinMax<DoubleRegister, Inputs, Results>(masm);
+ F4 f = GenerateMacroFloat64MinMax<DoubleRegister, Inputs, Results>(masm);
Object* dummy = nullptr;
USE(dummy);
@@ -1927,3 +1928,6 @@ TEST(macro_float_minmax_f64) {
}
#undef __
+
+} // namespace internal
+} // namespace v8
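The typedef churn in the two MIPS files is another namespace-move fallout: ::F named a file-local typedef at global scope, which no longer resolves once the file sits inside v8::internal, and the mips64 variant is renamed to FV, presumably to avoid clashing with another F now visible in that namespace. The resulting pattern:

    // Inside namespace v8::internal, so no :: qualification is needed.
    typedef void* (*FV)(int64_t x, int64_t y, int p2, int p3, int p4);
    FV f = FUNCTION_CAST<FV>(code->entry());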
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index 08839193cb..6da2ee492f 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -35,71 +35,9 @@
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
-namespace i = v8::internal;
-using i::Address;
-using i::Assembler;
-using i::CodeDesc;
-using i::Condition;
-using i::FUNCTION_CAST;
-using i::HandleScope;
-using i::Immediate;
-using i::Isolate;
-using i::Label;
-using i::MacroAssembler;
-using i::Operand;
-using i::RelocInfo;
-using i::Representation;
-using i::Smi;
-using i::SmiIndex;
-using i::byte;
-using i::carry;
-using i::greater;
-using i::greater_equal;
-using i::kIntSize;
-using i::kFloatSize;
-using i::kDoubleSize;
-using i::kPointerSize;
-using i::kSimd128Size;
-using i::kSmiTagMask;
-using i::kSmiValueSize;
-using i::less_equal;
-using i::negative;
-using i::not_carry;
-using i::not_equal;
-using i::equal;
-using i::not_zero;
-using i::positive;
-using i::r11;
-using i::r13;
-using i::r14;
-using i::r15;
-using i::r8;
-using i::r9;
-using i::rax;
-using i::rbp;
-using i::rbx;
-using i::rcx;
-using i::rdi;
-using i::rdx;
-using i::rsi;
-using i::rsp;
-using i::xmm0;
-using i::xmm1;
-using i::xmm2;
-using i::xmm3;
-using i::xmm4;
-using i::xmm5;
-using i::xmm6;
-using i::xmm7;
-using i::xmm8;
-using i::xmm9;
-using i::xmm10;
-using i::xmm11;
-using i::xmm12;
-using i::xmm13;
-using i::xmm14;
-using i::xmm15;
-using i::times_pointer_size;
+namespace v8 {
+namespace internal {
+namespace test_macro_assembler_x64 {
// Test the x64 assembler by compiling some simple functions into
// a buffer and executing them. These tests do not initialize the
@@ -117,15 +55,11 @@ typedef int (*F0)();
static void EntryCode(MacroAssembler* masm) {
// Smi constant register is callee save.
- __ pushq(i::kRootRegister);
+ __ pushq(kRootRegister);
__ InitializeRootRegister();
}
-
-static void ExitCode(MacroAssembler* masm) {
- __ popq(i::kRootRegister);
-}
-
+static void ExitCode(MacroAssembler* masm) { __ popq(kRootRegister); }
TEST(Smi) {
// Check that C++ Smi operations work as expected.
@@ -461,39 +395,6 @@ TEST(SmiCheck) {
cond = masm->CheckSmi(rcx);
__ j(cond, &exit);
- // CheckBothSmi
-
- __ incq(rax);
- __ movq(rcx, Immediate(Smi::kMaxValue));
- __ Integer32ToSmi(rcx, rcx);
- __ movq(rdx, Immediate(Smi::kMinValue));
- __ Integer32ToSmi(rdx, rdx);
- cond = masm->CheckBothSmi(rcx, rdx);
- __ j(NegateCondition(cond), &exit);
-
- __ incq(rax);
- __ xorq(rcx, Immediate(kSmiTagMask));
- cond = masm->CheckBothSmi(rcx, rdx);
- __ j(cond, &exit);
-
- __ incq(rax);
- __ xorq(rdx, Immediate(kSmiTagMask));
- cond = masm->CheckBothSmi(rcx, rdx);
- __ j(cond, &exit);
-
- __ incq(rax);
- __ xorq(rcx, Immediate(kSmiTagMask));
- cond = masm->CheckBothSmi(rcx, rdx);
- __ j(cond, &exit);
-
- __ incq(rax);
- cond = masm->CheckBothSmi(rcx, rcx);
- __ j(NegateCondition(cond), &exit);
-
- __ incq(rax);
- cond = masm->CheckBothSmi(rdx, rdx);
- __ j(cond, &exit);
-
// Success
__ xorq(rax, rax);
@@ -545,9 +446,9 @@ static void SmiAddTest(MacroAssembler* masm,
__ movl(rcx, Immediate(first));
__ Integer32ToSmi(rcx, rcx);
- i::SmiOperationConstraints constraints =
- i::SmiOperationConstraint::kPreserveSourceRegister |
- i::SmiOperationConstraint::kBailoutOnOverflow;
+ SmiOperationConstraints constraints =
+ SmiOperationConstraint::kPreserveSourceRegister |
+ SmiOperationConstraint::kBailoutOnOverflow;
__ incq(rax);
__ SmiAddConstant(r9, rcx, Smi::FromInt(second), constraints, exit);
__ cmpq(r9, r8);
@@ -561,8 +462,8 @@ static void SmiAddTest(MacroAssembler* masm,
__ movl(rcx, Immediate(first));
__ Integer32ToSmi(rcx, rcx);
- constraints = i::SmiOperationConstraint::kPreserveSourceRegister |
- i::SmiOperationConstraint::kBailoutOnNoOverflow;
+ constraints = SmiOperationConstraint::kPreserveSourceRegister |
+ SmiOperationConstraint::kBailoutOnNoOverflow;
Label done;
__ incq(rax);
__ SmiAddConstant(rcx, rcx, Smi::FromInt(second), constraints, &done);
@@ -607,9 +508,9 @@ static void SmiAddOverflowTest(MacroAssembler* masm,
__ j(not_equal, exit);
}
- i::SmiOperationConstraints constraints =
- i::SmiOperationConstraint::kPreserveSourceRegister |
- i::SmiOperationConstraint::kBailoutOnOverflow;
+ SmiOperationConstraints constraints =
+ SmiOperationConstraint::kPreserveSourceRegister |
+ SmiOperationConstraint::kBailoutOnOverflow;
__ movq(rcx, r11);
{
Label overflow_ok;
@@ -669,7 +570,7 @@ static void SmiAddOverflowTest(MacroAssembler* masm,
__ j(not_equal, exit);
}
- constraints = i::SmiOperationConstraint::kBailoutOnOverflow;
+ constraints = SmiOperationConstraint::kBailoutOnOverflow;
{
Label overflow_ok;
__ incq(rax);
@@ -760,9 +661,9 @@ static void SmiSubTest(MacroAssembler* masm,
__ cmpq(rcx, r8);
__ j(not_equal, exit);
- i::SmiOperationConstraints constraints =
- i::SmiOperationConstraint::kPreserveSourceRegister |
- i::SmiOperationConstraint::kBailoutOnOverflow;
+ SmiOperationConstraints constraints =
+ SmiOperationConstraint::kPreserveSourceRegister |
+ SmiOperationConstraint::kBailoutOnOverflow;
__ Move(rcx, Smi::FromInt(first));
__ incq(rax); // Test 4.
__ SmiSubConstant(rcx, rcx, Smi::FromInt(second), constraints, exit);
@@ -775,8 +676,8 @@ static void SmiSubTest(MacroAssembler* masm,
__ cmpq(r9, r8);
__ j(not_equal, exit);
- constraints = i::SmiOperationConstraint::kPreserveSourceRegister |
- i::SmiOperationConstraint::kBailoutOnNoOverflow;
+ constraints = SmiOperationConstraint::kPreserveSourceRegister |
+ SmiOperationConstraint::kBailoutOnNoOverflow;
__ Move(rcx, Smi::FromInt(first));
Label done;
__ incq(rax); // Test 6.
@@ -822,9 +723,9 @@ static void SmiSubOverflowTest(MacroAssembler* masm,
__ j(not_equal, exit);
}
- i::SmiOperationConstraints constraints =
- i::SmiOperationConstraint::kPreserveSourceRegister |
- i::SmiOperationConstraint::kBailoutOnOverflow;
+ SmiOperationConstraints constraints =
+ SmiOperationConstraint::kPreserveSourceRegister |
+ SmiOperationConstraint::kBailoutOnOverflow;
__ movq(rcx, r11);
{
@@ -885,7 +786,7 @@ static void SmiSubOverflowTest(MacroAssembler* masm,
__ j(not_equal, exit);
}
- constraints = i::SmiOperationConstraint::kBailoutOnOverflow;
+ constraints = SmiOperationConstraint::kBailoutOnOverflow;
__ movq(rcx, r11);
{
Label overflow_ok;
@@ -951,7 +852,7 @@ void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
for (int i = 0; i < 8; i++) {
__ Move(rcx, Smi::FromInt(x));
SmiIndex index = masm->SmiToIndex(rdx, rcx, i);
- CHECK(index.reg.is(rcx) || index.reg.is(rdx));
+ CHECK(index.reg == rcx || index.reg == rdx);
__ shlq(index.reg, Immediate(index.scale));
__ Set(r8, static_cast<intptr_t>(x) << i);
__ cmpq(index.reg, r8);
@@ -959,7 +860,7 @@ void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
__ incq(rax);
__ Move(rcx, Smi::FromInt(x));
index = masm->SmiToIndex(rcx, rcx, i);
- CHECK(index.reg.is(rcx));
+ CHECK(index.reg == rcx);
__ shlq(rcx, Immediate(index.scale));
__ Set(r8, static_cast<intptr_t>(x) << i);
__ cmpq(rcx, r8);
@@ -1001,75 +902,6 @@ TEST(SmiIndex) {
CHECK_EQ(0, result);
}
-void TestSelectNonSmi(MacroAssembler* masm, Label* exit, int id, int x, int y) {
- __ movl(rax, Immediate(id));
- __ Move(rcx, Smi::FromInt(x));
- __ Move(rdx, Smi::FromInt(y));
- __ xorq(rdx, Immediate(kSmiTagMask));
- __ SelectNonSmi(r9, rcx, rdx, exit);
-
- __ incq(rax);
- __ cmpq(r9, rdx);
- __ j(not_equal, exit);
-
- __ incq(rax);
- __ Move(rcx, Smi::FromInt(x));
- __ Move(rdx, Smi::FromInt(y));
- __ xorq(rcx, Immediate(kSmiTagMask));
- __ SelectNonSmi(r9, rcx, rdx, exit);
-
- __ incq(rax);
- __ cmpq(r9, rcx);
- __ j(not_equal, exit);
-
- __ incq(rax);
- Label fail_ok;
- __ Move(rcx, Smi::FromInt(x));
- __ Move(rdx, Smi::FromInt(y));
- __ xorq(rcx, Immediate(kSmiTagMask));
- __ xorq(rdx, Immediate(kSmiTagMask));
- __ SelectNonSmi(r9, rcx, rdx, &fail_ok);
- __ jmp(exit);
- __ bind(&fail_ok);
-}
-
-TEST(SmiSelectNonSmi) {
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize * 2, &actual_size, true));
- CHECK(buffer);
- Isolate* isolate = CcTest::i_isolate();
- HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
- v8::internal::CodeObjectRequired::kYes);
-
- MacroAssembler* masm = &assembler;
- EntryCode(masm);
- Label exit;
-
- TestSelectNonSmi(masm, &exit, 0x10, 0, 0);
- TestSelectNonSmi(masm, &exit, 0x20, 0, 1);
- TestSelectNonSmi(masm, &exit, 0x30, 1, 0);
- TestSelectNonSmi(masm, &exit, 0x40, 0, -1);
- TestSelectNonSmi(masm, &exit, 0x50, -1, 0);
- TestSelectNonSmi(masm, &exit, 0x60, -1, -1);
- TestSelectNonSmi(masm, &exit, 0x70, 1, 1);
- TestSelectNonSmi(masm, &exit, 0x80, Smi::kMinValue, Smi::kMaxValue);
- TestSelectNonSmi(masm, &exit, 0x90, Smi::kMinValue, Smi::kMinValue);
-
- __ xorq(rax, rax); // Success.
- __ bind(&exit);
- ExitCode(masm);
- __ ret(0);
-
- CodeDesc desc;
- masm->GetCode(isolate, &desc);
- // Call the function from C++.
- int result = FUNCTION_CAST<F0>(buffer)();
- CHECK_EQ(0, result);
-}
-
void TestPositiveSmiPowerUp(MacroAssembler* masm, Label* exit, int id, int x) {
CHECK(x >= 0);
int powers[] = { 0, 1, 2, 3, 8, 16, 24, 31 };
@@ -1791,3 +1623,7 @@ TEST(SIMDMacros) {
}
#undef __
+
+} // namespace test_macro_assembler_x64
+} // namespace internal
+} // namespace v8
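Besides dropping the block of using declarations, this file also picks up the new Register comparison: the member predicate is() gives way to operator==. In isolation:

    SmiIndex index = masm->SmiToIndex(rdx, rcx, i);
    // CHECK(index.reg.is(rcx) || index.reg.is(rdx));  // old spelling
    CHECK(index.reg == rcx || index.reg == rdx);       // new spelling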
diff --git a/deps/v8/test/cctest/test-managed.cc b/deps/v8/test/cctest/test-managed.cc
index bfba366663..11eb7bad9a 100644
--- a/deps/v8/test/cctest/test-managed.cc
+++ b/deps/v8/test/cctest/test-managed.cc
@@ -38,7 +38,7 @@ TEST(ManagedCollect) {
isolate->RegisterForReleaseAtTeardown(&finalizer);
{
HandleScope scope(isolate);
- auto handle = Managed<DeleteRecorder>::New(isolate, d1);
+ auto handle = Managed<DeleteRecorder>::From(isolate, d1);
USE(handle);
}
@@ -65,7 +65,7 @@ TEST(DisposeCollect) {
DeleteRecorder* d2 = new DeleteRecorder(&deleted2);
{
HandleScope scope(i_isolate);
- auto handle = Managed<DeleteRecorder>::New(i_isolate, d1);
+ auto handle = Managed<DeleteRecorder>::From(i_isolate, d1);
USE(handle);
}
Isolate::ManagedObjectFinalizer finalizer(d2, DeleteRecorder::Deleter);
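test-managed.cc only picks up the Managed<T> factory rename; call sites are otherwise unchanged. Usage sketch, assuming the DeleteRecorder fixture and an Isolate* isolate from the surrounding tests:

    bool deleted = false;
    DeleteRecorder* raw = new DeleteRecorder(&deleted);
    {
      HandleScope scope(isolate);
      auto handle = Managed<DeleteRecorder>::From(isolate, raw);  // was ::New
      USE(handle);
    }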
diff --git a/deps/v8/test/cctest/test-mementos.cc b/deps/v8/test/cctest/test-mementos.cc
index d60dc23c8c..d5a9a9c4cd 100644
--- a/deps/v8/test/cctest/test-mementos.cc
+++ b/deps/v8/test/cctest/test-mementos.cc
@@ -28,13 +28,7 @@
#include "src/factory.h"
#include "src/heap/heap.h"
#include "src/isolate.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/factory.h -> src/objects-inl.h
#include "src/objects-inl.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/feedback-vector.h ->
-// src/feedback-vector-inl.h
-#include "src/feedback-vector-inl.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/test-object.cc b/deps/v8/test/cctest/test-object.cc
index 0efc67e9e6..088803ec36 100644
--- a/deps/v8/test/cctest/test-object.cc
+++ b/deps/v8/test/cctest/test-object.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/api.h"
#include "src/factory.h"
#include "src/handles-inl.h"
#include "src/handles.h"
@@ -60,8 +61,8 @@ TEST(NoSideEffectsToString) {
CheckObject(isolate, factory->null_value(), "null");
CheckObject(isolate, factory->error_to_string(), "[object Error]");
- CheckObject(isolate, factory->stack_trace_symbol(),
- "Symbol(stack_trace_symbol)");
+ CheckObject(isolate, factory->unscopables_symbol(),
+ "Symbol(Symbol.unscopables)");
CheckObject(isolate, factory->NewError(isolate->error_function(),
factory->empty_string()),
"Error");
@@ -73,5 +74,179 @@ TEST(NoSideEffectsToString) {
"#<Object>");
}
+TEST(EnumCache) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ i::Factory* factory = CcTest::i_isolate()->factory();
+ v8::HandleScope scope(isolate);
+
+ // Create a nice transition tree:
+ // (a) --> (b) --> (c) shared DescriptorArray 1
+ // |
+ // +---> (cc) shared DescriptorArray 2
+ CompileRun(
+ "function O(a) { this.a = 1 };"
+
+ "a = new O();"
+
+ "b = new O();"
+ "b.b = 2;"
+
+ "c = new O();"
+ "c.b = 2;"
+ "c.c = 3;"
+
+ "cc = new O();"
+ "cc.b = 2;"
+ "cc.cc = 4;");
+
+ Handle<JSObject> a = Handle<JSObject>::cast(v8::Utils::OpenHandle(
+ *env->Global()->Get(env.local(), v8_str("a")).ToLocalChecked()));
+ Handle<JSObject> b = Handle<JSObject>::cast(v8::Utils::OpenHandle(
+ *env->Global()->Get(env.local(), v8_str("b")).ToLocalChecked()));
+ Handle<JSObject> c = Handle<JSObject>::cast(v8::Utils::OpenHandle(
+ *env->Global()->Get(env.local(), v8_str("c")).ToLocalChecked()));
+ Handle<JSObject> cc = Handle<JSObject>::cast(v8::Utils::OpenHandle(
+ *env->Global()->Get(env.local(), v8_str("cc")).ToLocalChecked()));
+
+ // Check the transition tree.
+ CHECK_EQ(a->map()->instance_descriptors(), b->map()->instance_descriptors());
+ CHECK_EQ(b->map()->instance_descriptors(), c->map()->instance_descriptors());
+ CHECK_NE(c->map()->instance_descriptors(), cc->map()->instance_descriptors());
+ CHECK_NE(b->map()->instance_descriptors(), cc->map()->instance_descriptors());
+
+ // Check that the EnumLength is unset.
+ CHECK_EQ(a->map()->EnumLength(), kInvalidEnumCacheSentinel);
+ CHECK_EQ(b->map()->EnumLength(), kInvalidEnumCacheSentinel);
+ CHECK_EQ(c->map()->EnumLength(), kInvalidEnumCacheSentinel);
+ CHECK_EQ(cc->map()->EnumLength(), kInvalidEnumCacheSentinel);
+
+ // Check that the EnumCache is empty.
+ CHECK_EQ(a->map()->instance_descriptors()->GetEnumCache(),
+ *factory->empty_enum_cache());
+ CHECK_EQ(b->map()->instance_descriptors()->GetEnumCache(),
+ *factory->empty_enum_cache());
+ CHECK_EQ(c->map()->instance_descriptors()->GetEnumCache(),
+ *factory->empty_enum_cache());
+ CHECK_EQ(cc->map()->instance_descriptors()->GetEnumCache(),
+ *factory->empty_enum_cache());
+
+ // The EnumCache is shared on the DescriptorArray, creating it on {cc} has no
+ // effect on the other maps.
+ CompileRun("var s = 0; for (let key in cc) { s += cc[key] };");
+ {
+ CHECK_EQ(a->map()->EnumLength(), kInvalidEnumCacheSentinel);
+ CHECK_EQ(b->map()->EnumLength(), kInvalidEnumCacheSentinel);
+ CHECK_EQ(c->map()->EnumLength(), kInvalidEnumCacheSentinel);
+ CHECK_EQ(cc->map()->EnumLength(), 3);
+
+ CHECK_EQ(a->map()->instance_descriptors()->GetEnumCache(),
+ *factory->empty_enum_cache());
+ CHECK_EQ(b->map()->instance_descriptors()->GetEnumCache(),
+ *factory->empty_enum_cache());
+ CHECK_EQ(c->map()->instance_descriptors()->GetEnumCache(),
+ *factory->empty_enum_cache());
+
+ EnumCache* enum_cache = cc->map()->instance_descriptors()->GetEnumCache();
+ CHECK_NE(enum_cache, *factory->empty_enum_cache());
+ CHECK_EQ(enum_cache->keys()->length(), 3);
+ CHECK_EQ(enum_cache->indices()->length(), 3);
+ }
+
+  // Initializing the EnumCache for the topmost map {a} will not create the
+ // cache for the other maps.
+ CompileRun("var s = 0; for (let key in a) { s += a[key] };");
+ {
+ CHECK_EQ(a->map()->EnumLength(), 1);
+ CHECK_EQ(b->map()->EnumLength(), kInvalidEnumCacheSentinel);
+ CHECK_EQ(c->map()->EnumLength(), kInvalidEnumCacheSentinel);
+ CHECK_EQ(cc->map()->EnumLength(), 3);
+
+ // The enum cache is shared on the descriptor array of maps {a}, {b} and
+ // {c} only.
+ EnumCache* enum_cache = a->map()->instance_descriptors()->GetEnumCache();
+ CHECK_NE(enum_cache, *factory->empty_enum_cache());
+ CHECK_NE(cc->map()->instance_descriptors()->GetEnumCache(),
+ *factory->empty_enum_cache());
+ CHECK_NE(cc->map()->instance_descriptors()->GetEnumCache(), enum_cache);
+ CHECK_EQ(a->map()->instance_descriptors()->GetEnumCache(), enum_cache);
+ CHECK_EQ(b->map()->instance_descriptors()->GetEnumCache(), enum_cache);
+ CHECK_EQ(c->map()->instance_descriptors()->GetEnumCache(), enum_cache);
+
+ CHECK_EQ(enum_cache->keys()->length(), 1);
+ CHECK_EQ(enum_cache->indices()->length(), 1);
+ }
+
+ // Creating the EnumCache for {c} will create a new EnumCache on the shared
+ // DescriptorArray.
+ Handle<EnumCache> previous_enum_cache(
+ a->map()->instance_descriptors()->GetEnumCache());
+ Handle<FixedArray> previous_keys(previous_enum_cache->keys());
+ Handle<FixedArray> previous_indices(previous_enum_cache->indices());
+ CompileRun("var s = 0; for (let key in c) { s += c[key] };");
+ {
+ CHECK_EQ(a->map()->EnumLength(), 1);
+ CHECK_EQ(b->map()->EnumLength(), kInvalidEnumCacheSentinel);
+ CHECK_EQ(c->map()->EnumLength(), 3);
+ CHECK_EQ(cc->map()->EnumLength(), 3);
+
+ EnumCache* enum_cache = c->map()->instance_descriptors()->GetEnumCache();
+ CHECK_NE(enum_cache, *factory->empty_enum_cache());
+ // The keys and indices caches are updated.
+ CHECK_EQ(enum_cache, *previous_enum_cache);
+ CHECK_NE(enum_cache->keys(), *previous_keys);
+ CHECK_NE(enum_cache->indices(), *previous_indices);
+ CHECK_EQ(previous_keys->length(), 1);
+ CHECK_EQ(previous_indices->length(), 1);
+ CHECK_EQ(enum_cache->keys()->length(), 3);
+ CHECK_EQ(enum_cache->indices()->length(), 3);
+
+ // The enum cache is shared on the descriptor array of maps {a}, {b} and
+ // {c} only.
+ CHECK_NE(cc->map()->instance_descriptors()->GetEnumCache(),
+ *factory->empty_enum_cache());
+ CHECK_NE(cc->map()->instance_descriptors()->GetEnumCache(), enum_cache);
+ CHECK_NE(cc->map()->instance_descriptors()->GetEnumCache(),
+ *previous_enum_cache);
+ CHECK_EQ(a->map()->instance_descriptors()->GetEnumCache(), enum_cache);
+ CHECK_EQ(b->map()->instance_descriptors()->GetEnumCache(), enum_cache);
+ CHECK_EQ(c->map()->instance_descriptors()->GetEnumCache(), enum_cache);
+ }
+
+ // {b} can reuse the existing EnumCache, hence we only need to set the correct
+ // EnumLength on the map without modifying the cache itself.
+ previous_enum_cache =
+ handle(a->map()->instance_descriptors()->GetEnumCache());
+ previous_keys = handle(previous_enum_cache->keys());
+ previous_indices = handle(previous_enum_cache->indices());
+ CompileRun("var s = 0; for (let key in b) { s += b[key] };");
+ {
+ CHECK_EQ(a->map()->EnumLength(), 1);
+ CHECK_EQ(b->map()->EnumLength(), 2);
+ CHECK_EQ(c->map()->EnumLength(), 3);
+ CHECK_EQ(cc->map()->EnumLength(), 3);
+
+ EnumCache* enum_cache = c->map()->instance_descriptors()->GetEnumCache();
+ CHECK_NE(enum_cache, *factory->empty_enum_cache());
+ // The keys and indices caches are not updated.
+ CHECK_EQ(enum_cache, *previous_enum_cache);
+ CHECK_EQ(enum_cache->keys(), *previous_keys);
+ CHECK_EQ(enum_cache->indices(), *previous_indices);
+ CHECK_EQ(enum_cache->keys()->length(), 3);
+ CHECK_EQ(enum_cache->indices()->length(), 3);
+
+ // The enum cache is shared on the descriptor array of maps {a}, {b} and
+ // {c} only.
+ CHECK_NE(cc->map()->instance_descriptors()->GetEnumCache(),
+ *factory->empty_enum_cache());
+ CHECK_NE(cc->map()->instance_descriptors()->GetEnumCache(), enum_cache);
+ CHECK_NE(cc->map()->instance_descriptors()->GetEnumCache(),
+ *previous_enum_cache);
+ CHECK_EQ(a->map()->instance_descriptors()->GetEnumCache(), enum_cache);
+ CHECK_EQ(b->map()->instance_descriptors()->GetEnumCache(), enum_cache);
+ CHECK_EQ(c->map()->instance_descriptors()->GetEnumCache(), enum_cache);
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-orderedhashtable.cc b/deps/v8/test/cctest/test-orderedhashtable.cc
index 6e6fe5348a..d4809f4562 100644
--- a/deps/v8/test/cctest/test-orderedhashtable.cc
+++ b/deps/v8/test/cctest/test-orderedhashtable.cc
@@ -9,6 +9,7 @@
namespace v8 {
namespace internal {
+namespace test_orderedhashtable {
static Isolate* GetIsolateFrom(LocalContext* context) {
return reinterpret_cast<Isolate*>((*context)->GetIsolate());
@@ -896,5 +897,6 @@ TEST(OrderedHashSetDuplicateHashCodeDeletion) {
CHECK(!OrderedHashSet::HasKey(isolate, *set, *key2));
}
+} // namespace test_orderedhashtable
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 8c7d168778..e604040ffd 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -33,6 +33,7 @@
#include "src/v8.h"
+#include "src/api.h"
#include "src/ast/ast-numbering.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/ast.h"
@@ -56,6 +57,21 @@
#include "test/cctest/scope-test-helper.h"
#include "test/cctest/unicode-helpers.h"
+namespace v8 {
+namespace internal {
+namespace test_parsing {
+
+namespace {
+
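+// Routes use-counter ticks into a test-owned array so tests can assert on
+// individual v8::Isolate::UseCounterFeature counts.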
+int* global_use_counts = NULL;
+
+void MockUseCounterCallback(v8::Isolate* isolate,
+ v8::Isolate::UseCounterFeature feature) {
+ ++global_use_counts[feature];
+}
+
+} // namespace
+
TEST(ScanKeywords) {
struct KeywordToken {
const char* keyword;
@@ -77,7 +93,7 @@ TEST(ScanKeywords) {
CHECK(static_cast<int>(sizeof(buffer)) >= length);
{
auto stream = i::ScannerStream::ForTesting(keyword, length);
- i::Scanner scanner(&unicode_cache);
+ i::Scanner scanner(&unicode_cache, global_use_counts);
scanner.Initialize(stream.get(), false);
CHECK_EQ(key_token.token, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -85,7 +101,7 @@ TEST(ScanKeywords) {
// Removing characters will make keyword matching fail.
{
auto stream = i::ScannerStream::ForTesting(keyword, length - 1);
- i::Scanner scanner(&unicode_cache);
+ i::Scanner scanner(&unicode_cache, global_use_counts);
scanner.Initialize(stream.get(), false);
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -96,7 +112,7 @@ TEST(ScanKeywords) {
i::MemMove(buffer, keyword, length);
buffer[length] = chars_to_append[j];
auto stream = i::ScannerStream::ForTesting(buffer, length + 1);
- i::Scanner scanner(&unicode_cache);
+ i::Scanner scanner(&unicode_cache, global_use_counts);
scanner.Initialize(stream.get(), false);
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -106,7 +122,7 @@ TEST(ScanKeywords) {
i::MemMove(buffer, keyword, length);
buffer[length - 1] = '_';
auto stream = i::ScannerStream::ForTesting(buffer, length);
- i::Scanner scanner(&unicode_cache);
+ i::Scanner scanner(&unicode_cache, global_use_counts);
scanner.Initialize(stream.get(), false);
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -172,7 +188,7 @@ TEST(ScanHTMLEndComments) {
for (int i = 0; tests[i]; i++) {
const char* source = tests[i];
auto stream = i::ScannerStream::ForTesting(source);
- i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
+ i::Scanner scanner(CcTest::i_isolate()->unicode_cache(), global_use_counts);
scanner.Initialize(stream.get(), false);
i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(
@@ -191,7 +207,7 @@ TEST(ScanHTMLEndComments) {
for (int i = 0; fail_tests[i]; i++) {
const char* source = fail_tests[i];
auto stream = i::ScannerStream::ForTesting(source);
- i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
+ i::Scanner scanner(CcTest::i_isolate()->unicode_cache(), global_use_counts);
scanner.Initialize(stream.get(), false);
i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(
@@ -216,7 +232,7 @@ TEST(ScanHtmlComments) {
// Disallow HTML comments.
{
auto stream = i::ScannerStream::ForTesting(src);
- i::Scanner scanner(&unicode_cache);
+ i::Scanner scanner(&unicode_cache, global_use_counts);
scanner.Initialize(stream.get(), true);
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::ILLEGAL, scanner.Next());
@@ -225,7 +241,7 @@ TEST(ScanHtmlComments) {
// Skip HTML comments:
{
auto stream = i::ScannerStream::ForTesting(src);
- i::Scanner scanner(&unicode_cache);
+ i::Scanner scanner(&unicode_cache, global_use_counts);
scanner.Initialize(stream.get(), false);
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -384,7 +400,7 @@ TEST(StandAlonePreParser) {
uintptr_t stack_limit = CcTest::i_isolate()->stack_guard()->real_climit();
for (int i = 0; programs[i]; i++) {
auto stream = i::ScannerStream::ForTesting(programs[i]);
- i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
+ i::Scanner scanner(CcTest::i_isolate()->unicode_cache(), global_use_counts);
scanner.Initialize(stream.get(), false);
i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
@@ -420,7 +436,7 @@ TEST(StandAlonePreParserNoNatives) {
uintptr_t stack_limit = isolate->stack_guard()->real_climit();
for (int i = 0; programs[i]; i++) {
auto stream = i::ScannerStream::ForTesting(programs[i]);
- i::Scanner scanner(isolate->unicode_cache());
+ i::Scanner scanner(isolate->unicode_cache(), global_use_counts);
scanner.Initialize(stream.get(), false);
// Preparser defaults to disallowing natives syntax.
@@ -490,7 +506,7 @@ TEST(RegressChromium62639) {
// failed in debug mode, and sometimes crashed in release mode.
auto stream = i::ScannerStream::ForTesting(program);
- i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
+ i::Scanner scanner(CcTest::i_isolate()->unicode_cache(), global_use_counts);
scanner.Initialize(stream.get(), false);
i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(
@@ -565,7 +581,7 @@ TEST(PreParseOverflow) {
uintptr_t stack_limit = isolate->stack_guard()->real_climit();
auto stream = i::ScannerStream::ForTesting(program.get(), kProgramSize);
- i::Scanner scanner(isolate->unicode_cache());
+ i::Scanner scanner(isolate->unicode_cache(), global_use_counts);
scanner.Initialize(stream.get(), false);
i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
@@ -585,7 +601,7 @@ void TestStreamScanner(i::Utf16CharacterStream* stream,
i::Token::Value* expected_tokens,
int skip_pos = 0, // Zero means not skipping.
int skip_to = 0) {
- i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
+ i::Scanner scanner(CcTest::i_isolate()->unicode_cache(), global_use_counts);
scanner.Initialize(stream, false);
int i = 0;
@@ -663,7 +679,7 @@ TEST(StreamScanner) {
void TestScanRegExp(const char* re_source, const char* expected) {
auto stream = i::ScannerStream::ForTesting(re_source);
i::HandleScope scope(CcTest::i_isolate());
- i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
+ i::Scanner scanner(CcTest::i_isolate()->unicode_cache(), global_use_counts);
scanner.Initialize(stream.get(), false);
i::Token::Value start = scanner.peek();
@@ -853,8 +869,14 @@ TEST(ScopeUsesArgumentsSuperThis) {
!scope->AsDeclarationScope()->is_arrow_scope()) {
CHECK_NOT_NULL(scope->AsDeclarationScope()->arguments());
}
- CHECK_EQ((source_data[i].expected & SUPER_PROPERTY) != 0,
- scope->AsDeclarationScope()->uses_super_property());
+ if (IsClassConstructor(scope->AsDeclarationScope()->function_kind())) {
+ CHECK_EQ((source_data[i].expected & SUPER_PROPERTY) != 0 ||
+ (source_data[i].expected & EVAL) != 0,
+ scope->AsDeclarationScope()->NeedsHomeObject());
+ } else {
+ CHECK_EQ((source_data[i].expected & SUPER_PROPERTY) != 0,
+ scope->AsDeclarationScope()->NeedsHomeObject());
+ }
if ((source_data[i].expected & THIS) != 0) {
// Currently the is_used() flag is conservative; all variables in a
// script scope are marked as used.
@@ -1275,6 +1297,7 @@ enum ParserFlag {
kAllowHarmonyDynamicImport,
kAllowHarmonyAsyncIteration,
kAllowHarmonyTemplateEscapes,
+ kAllowHarmonyImportMeta,
};
enum ParserSyncTestResult {
@@ -1292,6 +1315,7 @@ void SetGlobalFlags(i::EnumSet<ParserFlag> flags) {
i::FLAG_harmony_object_rest_spread =
flags.Contains(kAllowHarmonyObjectRestSpread);
i::FLAG_harmony_dynamic_import = flags.Contains(kAllowHarmonyDynamicImport);
+ i::FLAG_harmony_import_meta = flags.Contains(kAllowHarmonyImportMeta);
i::FLAG_harmony_async_iteration = flags.Contains(kAllowHarmonyAsyncIteration);
i::FLAG_harmony_template_escapes =
flags.Contains(kAllowHarmonyTemplateEscapes);
@@ -1309,6 +1333,8 @@ void SetParserFlags(i::PreParser* parser, i::EnumSet<ParserFlag> flags) {
flags.Contains(kAllowHarmonyObjectRestSpread));
parser->set_allow_harmony_dynamic_import(
flags.Contains(kAllowHarmonyDynamicImport));
+ parser->set_allow_harmony_import_meta(
+ flags.Contains(kAllowHarmonyImportMeta));
parser->set_allow_harmony_async_iteration(
flags.Contains(kAllowHarmonyAsyncIteration));
parser->set_allow_harmony_template_escapes(
@@ -1328,7 +1354,7 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
// Preparse the data.
i::PendingCompilationErrorHandler pending_error_handler;
if (test_preparser) {
- i::Scanner scanner(isolate->unicode_cache());
+ i::Scanner scanner(isolate->unicode_cache(), global_use_counts);
std::unique_ptr<i::Utf16CharacterStream> stream(
i::ScannerStream::For(source));
i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
@@ -3958,16 +3984,6 @@ TEST(AsmModuleFlag) {
CHECK(s->IsAsmModule() && s->AsDeclarationScope()->asm_module());
}
-namespace {
-
-int* global_use_counts = NULL;
-
-void MockUseCounterCallback(v8::Isolate* isolate,
- v8::Isolate::UseCounterFeature feature) {
- ++global_use_counts[feature];
-}
-
-} // namespace
TEST(UseAsmUseCount) {
i::Isolate* isolate = CcTest::i_isolate();
@@ -4027,6 +4043,34 @@ TEST(BothModesUseCount) {
CHECK_LT(0, use_counts[v8::Isolate::kStrictMode]);
}
+TEST(LineOrParagraphSeparatorAsLineTerminator) {
+ // Tests that both preparsing and parsing accept U+2028 LINE SEPARATOR and
+ // U+2029 PARAGRAPH SEPARATOR as LineTerminator symbols.
+ const char* context_data[][2] = {{"", ""}, {nullptr, nullptr}};
+ const char* statement_data[] = {"\x31\xE2\x80\xA8\x32", // "1<U+2028>2"
+ "\x31\xE2\x80\xA9\x32", // "1<U+2029>2"
+ nullptr};
+
+ RunParserSyncTest(context_data, statement_data, kSuccess);
+}
+
+TEST(LineOrParagraphSeparatorAsLineTerminatorUseCount) {
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::HandleScope scope(isolate);
+ LocalContext env;
+ int use_counts[v8::Isolate::kUseCounterFeatureCount] = {};
+ global_use_counts = use_counts;
+ CcTest::isolate()->SetUseCounterCallback(MockUseCounterCallback);
+ CompileRun("");
+ CHECK_EQ(0, use_counts[v8::Isolate::UseCounterFeature::
+ kLineOrParagraphSeparatorAsLineTerminator]);
+ CompileRun("// Foo\xE2\x80\xA8"); // "// Foo<U+2028>"
+ CHECK_LT(0, use_counts[v8::Isolate::UseCounterFeature::
+ kLineOrParagraphSeparatorAsLineTerminator]);
+ CompileRun("// Foo\xE2\x80\xA9"); // "// Foo<U+2029>"
+ CHECK_LT(1, use_counts[v8::Isolate::UseCounterFeature::
+ kLineOrParagraphSeparatorAsLineTerminator]);
+}
TEST(ErrorsArrowFormalParameters) {
const char* context_data[][2] = {
@@ -7873,6 +7917,9 @@ TEST(DestructuringAssignmentNegativeTests) {
"{ new.target }",
"{ x: new.target }",
"{ x: new.target = 1 }",
+ "{ import.meta }",
+ "{ x: import.meta }",
+ "{ x: import.meta = 1 }",
"[x--]",
"[--x = 1]",
"[x()]",
@@ -7880,6 +7927,8 @@ TEST(DestructuringAssignmentNegativeTests) {
"[this = 1]",
"[new.target]",
"[new.target = 1]",
+ "[import.meta]",
+ "[import.meta = 1]",
"[super]",
"[super = 1]",
"[function f() {}]",
@@ -8280,6 +8329,106 @@ TEST(NewTarget) {
RunParserSyncTest(bad_context_data, data, kError);
}
+TEST(ImportMetaSuccess) {
+ // clang-format off
+ const char* context_data[][2] = {
+ {"", ""},
+ {"'use strict';", ""},
+ {"function f() {", "}"},
+ {"'use strict'; function f() {", "}"},
+ {"var f = function() {", "}"},
+ {"'use strict'; var f = function() {", "}"},
+ {"({m: function() {", "}})"},
+ {"'use strict'; ({m: function() {", "}})"},
+ {"({m() {", "}})"},
+ {"'use strict'; ({m() {", "}})"},
+ {"({get x() {", "}})"},
+ {"'use strict'; ({get x() {", "}})"},
+ {"({set x(_) {", "}})"},
+ {"'use strict'; ({set x(_) {", "}})"},
+ {"class C {m() {", "}}"},
+ {"class C {get x() {", "}}"},
+ {"class C {set x(_) {", "}}"},
+ {NULL}
+ };
+
+ const char* data[] = {
+ "import.meta",
+ "() => { import.meta }",
+ "() => import.meta",
+ "if (1) { import.meta }",
+ "if (1) {} else { import.meta }",
+ "while (0) { import.meta }",
+ "do { import.meta } while (0)",
+ "import.meta.url",
+ "import.meta[0]",
+ "import.meta.couldBeMutable = true",
+ "import.meta()",
+ "new import.meta.MagicClass",
+ "new import.meta",
+ "t = [...import.meta]",
+ "f = {...import.meta}",
+ "delete import.meta",
+ NULL
+ };
+
+ // clang-format on
+
+ // Check that the same sources fail to parse as a module without the flags.
+ RunModuleParserSyncTest(context_data, data, kError, NULL, 0, NULL, 0, NULL, 0,
+ true, true);
+
+ static const ParserFlag flags[] = {
+ kAllowHarmonyImportMeta, kAllowHarmonyDynamicImport,
+ kAllowHarmonyObjectRestSpread,
+ };
+ // 2.1.1 Static Semantics: Early Errors
+ // ImportMeta
+ // * It is an early Syntax Error if Module is not the syntactic goal symbol.
+ RunParserSyncTest(context_data, data, kError, NULL, 0, flags,
+ arraysize(flags));
+ // The same sources must fail to parse as a script without the flags, too.
+ RunParserSyncTest(context_data, data, kError);
+
+ RunModuleParserSyncTest(context_data, data, kSuccess, NULL, 0, flags,
+ arraysize(flags));
+}
+
+TEST(ImportMetaFailure) {
+ // clang-format off
+ const char* context_data[][2] = {
+ {"var ", ""},
+ {"let ", ""},
+ {"const ", ""},
+ {"var [", "] = [1]"},
+ {"([", "] = [1])"},
+ {"({", "} = {1})"},
+ {"var {", " = 1} = 1"},
+ {"for (var ", " of [1]) {}"},
+ {NULL}
+ };
+
+ const char* data[] = {
+ "import.meta",
+ NULL
+ };
+
+ // clang-format on
+
+ static const ParserFlag flags[] = {
+ kAllowHarmonyImportMeta, kAllowHarmonyDynamicImport,
+ kAllowHarmonyObjectRestSpread,
+ };
+
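+ // import.meta is not a valid binding name or destructuring target, so all
+ // of these contexts must produce errors even with the flags enabled.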
+ RunParserSyncTest(context_data, data, kError, NULL, 0, flags,
+ arraysize(flags));
+ RunModuleParserSyncTest(context_data, data, kError, NULL, 0, flags,
+ arraysize(flags));
+
+ RunModuleParserSyncTest(context_data, data, kError, NULL, 0, NULL, 0, NULL, 0,
+ true, true);
+ RunParserSyncTest(context_data, data, kError);
+}
TEST(ConstSloppy) {
// clang-format off
@@ -10404,3 +10553,7 @@ TEST(LexicalLoopVariable) {
});
}
}
+
+} // namespace test_parsing
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-platform-linux.cc b/deps/v8/test/cctest/test-platform-linux.cc
index c358227397..d41222e2a5 100644
--- a/deps/v8/test/cctest/test-platform-linux.cc
+++ b/deps/v8/test/cctest/test-platform-linux.cc
@@ -25,32 +25,28 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Tests of the TokenLock class from lock.h
-
-#include <pthread.h>
-#include <stdlib.h>
-#include <unistd.h> // for usleep()
-
-#include "src/v8.h"
#include "src/base/platform/platform.h"
#include "test/cctest/cctest.h"
+using OS = v8::base::OS;
+
namespace v8 {
namespace internal {
-TEST(VirtualMemory) {
- v8::base::VirtualMemory* vm =
- new v8::base::VirtualMemory(1 * MB, v8::base::OS::GetRandomMmapAddr());
- CHECK(vm->IsReserved());
- void* block_addr = vm->address();
+TEST(OSReserveMemory) {
+ size_t mem_size = 0;
+ void* mem_addr = OS::ReserveAlignedRegion(1 * MB, OS::AllocateAlignment(),
+ GetRandomMmapAddr(), &mem_size);
+ CHECK_NE(0, mem_size);
+ CHECK_NOT_NULL(mem_addr);
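+ // Commit a small part of the reservation, write to it, then uncommit and
+ // release the whole region again.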
size_t block_size = 4 * KB;
- CHECK(vm->Commit(block_addr, block_size, false));
+ CHECK(OS::CommitRegion(mem_addr, block_size, false));
// Check whether we can write to memory.
- int* addr = static_cast<int*>(block_addr);
- addr[KB-1] = 2;
- CHECK(vm->Uncommit(block_addr, block_size));
- delete vm;
+ int* addr = static_cast<int*>(mem_addr);
+ addr[KB - 1] = 2;
+ CHECK(OS::UncommitRegion(mem_addr, block_size));
+ OS::ReleaseRegion(mem_addr, mem_size);
}
} // namespace internal
diff --git a/deps/v8/test/cctest/test-platform-win32.cc b/deps/v8/test/cctest/test-platform-win32.cc
index 484b187e21..d41222e2a5 100644
--- a/deps/v8/test/cctest/test-platform-win32.cc
+++ b/deps/v8/test/cctest/test-platform-win32.cc
@@ -25,29 +25,29 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Tests of the TokenLock class from lock.h
-
-#include <stdlib.h>
-
-#include "src/v8.h"
#include "src/base/platform/platform.h"
-#include "src/base/win32-headers.h"
#include "test/cctest/cctest.h"
-using namespace ::v8::internal;
+using OS = v8::base::OS;
+namespace v8 {
+namespace internal {
-TEST(VirtualMemory) {
- v8::base::VirtualMemory* vm =
- new v8::base::VirtualMemory(1 * MB, v8::base::OS::GetRandomMmapAddr());
- CHECK(vm->IsReserved());
- void* block_addr = vm->address();
+TEST(OSReserveMemory) {
+ size_t mem_size = 0;
+ void* mem_addr = OS::ReserveAlignedRegion(1 * MB, OS::AllocateAlignment(),
+ GetRandomMmapAddr(), &mem_size);
+ CHECK_NE(0, mem_size);
+ CHECK_NOT_NULL(mem_addr);
size_t block_size = 4 * KB;
- CHECK(vm->Commit(block_addr, block_size, false));
+ CHECK(OS::CommitRegion(mem_addr, block_size, false));
// Check whether we can write to memory.
- int* addr = static_cast<int*>(block_addr);
- addr[KB-1] = 2;
- CHECK(vm->Uncommit(block_addr, block_size));
- delete vm;
+ int* addr = static_cast<int*>(mem_addr);
+ addr[KB - 1] = 2;
+ CHECK(OS::UncommitRegion(mem_addr, block_size));
+ OS::ReleaseRegion(mem_addr, mem_size);
}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index 67d2893024..6a16cca906 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -36,17 +36,9 @@
#include "test/cctest/cctest.h"
#include "test/cctest/profiler-extension.h"
-using i::CodeEntry;
-using i::CodeMap;
-using i::CpuProfile;
-using i::CpuProfiler;
-using i::CpuProfilesCollection;
-using i::ProfileNode;
-using i::ProfileTree;
-using i::ProfileGenerator;
-using i::TickSample;
-using i::Vector;
-
+namespace v8 {
+namespace internal {
+namespace test_profile_generator {
TEST(ProfileNodeFindOrAddChild) {
CcTest::InitializeVM();
@@ -733,3 +725,7 @@ TEST(BailoutReason) {
CHECK(const_cast<v8::CpuProfileNode*>(current));
CHECK(!strcmp("Optimization disabled for test", current->GetBailoutReason()));
}
+
+} // namespace test_profile_generator
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index 0da513d4af..5d28a577ff 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -32,6 +32,7 @@
#include "include/v8.h"
#include "src/v8.h"
+#include "src/api.h"
#include "src/ast/ast.h"
#include "src/char-predicates-inl.h"
#include "src/objects-inl.h"
@@ -42,6 +43,8 @@
#include "src/regexp/regexp-parser.h"
#include "src/splay-tree-inl.h"
#include "src/string-stream.h"
+#include "src/unicode-inl.h"
+
#ifdef V8_INTERPRETED_REGEXP
#include "src/regexp/interpreter-irregexp.h"
#else // V8_INTERPRETED_REGEXP
@@ -91,6 +94,7 @@
namespace v8 {
namespace internal {
+namespace test_regexp {
static bool CheckParse(const char* input) {
v8::HandleScope scope(CcTest::isolate());
@@ -2033,5 +2037,6 @@ TEST(UncachedExternalString) {
ExpectString("external.substring(1).match(re)[1]", "z");
}
+} // namespace test_regexp
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-arm.cc b/deps/v8/test/cctest/test-run-wasm-relocation-arm.cc
index accbf88e51..fe01199f5f 100644
--- a/deps/v8/test/cctest/test-run-wasm-relocation-arm.cc
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-arm.cc
@@ -15,15 +15,15 @@
#include "test/cctest/compiler/c-signature.h"
#include "test/cctest/compiler/call-tester.h"
-using namespace v8::base;
-using namespace v8::internal;
-using namespace v8::internal::compiler;
+namespace v8 {
+namespace internal {
+namespace wasm {
#define __ assm.
static int32_t DummyStaticFunction(Object* result) { return 1; }
-TEST(WasmRelocationArmMemoryReference) {
+TEST(WasmRelocationArmContextReference) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
v8::internal::byte buffer[4096];
@@ -32,16 +32,16 @@ TEST(WasmRelocationArmMemoryReference) {
Assembler assm(isolate, buffer, sizeof buffer);
- __ mov(r0, Operand(imm, RelocInfo::WASM_MEMORY_REFERENCE));
+ __ mov(r0, Operand(imm, RelocInfo::WASM_CONTEXT_REFERENCE));
__ mov(pc, Operand(lr));
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- CSignature0<int32_t> csig;
- CodeRunner<int32_t> runnable(isolate, code, &csig);
+ compiler::CSignature0<int32_t> csig;
+ compiler::CodeRunner<int32_t> runnable(isolate, code, &csig);
int32_t ret_value = runnable.Call();
CHECK_EQ(ret_value, imm);
@@ -53,12 +53,12 @@ TEST(WasmRelocationArmMemoryReference) {
int offset = 1234;
// Relocating references by offset
- int mode_mask = (1 << RelocInfo::WASM_MEMORY_REFERENCE);
+ int mode_mask = (1 << RelocInfo::WASM_CONTEXT_REFERENCE);
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- DCHECK(RelocInfo::IsWasmMemoryReference(it.rinfo()->rmode()));
- it.rinfo()->update_wasm_memory_reference(
- isolate, it.rinfo()->wasm_memory_reference(),
- it.rinfo()->wasm_memory_reference() + offset, SKIP_ICACHE_FLUSH);
+ DCHECK(RelocInfo::IsWasmContextReference(it.rinfo()->rmode()));
+ it.rinfo()->set_wasm_context_reference(
+ isolate, it.rinfo()->wasm_context_reference() + offset,
+ SKIP_ICACHE_FLUSH);
}
// Call into relocated code object
@@ -71,56 +71,8 @@ TEST(WasmRelocationArmMemoryReference) {
#endif
}
-TEST(WasmRelocationArmMemorySizeReference) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- HandleScope scope(isolate);
- v8::internal::byte buffer[4096];
- DummyStaticFunction(NULL);
- int32_t size = 512;
- Label fail;
-
- Assembler assm(isolate, buffer, sizeof buffer);
-
- __ mov(r0, Operand(size, RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
- __ cmp(r0, Operand(size, RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
- __ b(ne, &fail);
- __ mov(pc, Operand(lr));
- __ bind(&fail);
- __ mov(r0, Operand(0xdeadbeef));
- __ mov(pc, Operand(lr));
-
- CodeDesc desc;
- assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
-
- CSignature0<int32_t> csig;
- CodeRunner<int32_t> runnable(isolate, code, &csig);
- int32_t ret_value = runnable.Call();
- CHECK_NE(ret_value, bit_cast<int32_t>(0xdeadbeef));
-
-#ifdef DEBUG
- OFStream os(stdout);
- code->Print(os);
- ::printf("f() = %d\n\n", ret_value);
-#endif
- size_t diff = 512;
-
- int mode_mask = (1 << RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- DCHECK(RelocInfo::IsWasmMemorySizeReference(it.rinfo()->rmode()));
- it.rinfo()->update_wasm_memory_size(
- isolate, it.rinfo()->wasm_memory_size_reference(),
- it.rinfo()->wasm_memory_size_reference() + diff, SKIP_ICACHE_FLUSH);
- }
-
- ret_value = runnable.Call();
- CHECK_NE(ret_value, bit_cast<int32_t>(0xdeadbeef));
-
-#ifdef DEBUG
- code->Print(os);
- ::printf("f() = %d\n\n", ret_value);
-#endif
-}
#undef __
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc b/deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc
index 71a069cd11..7448250ed6 100644
--- a/deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc
@@ -19,15 +19,15 @@
#include "test/cctest/compiler/c-signature.h"
#include "test/cctest/compiler/call-tester.h"
-using namespace v8::base;
-using namespace v8::internal;
-using namespace v8::internal::compiler;
+namespace v8 {
+namespace internal {
+namespace wasm {
#define __ masm.
static int64_t DummyStaticFunction(Object* result) { return 1; }
-TEST(WasmRelocationArm64MemoryReference) {
+TEST(WasmRelocationArm64ContextReference) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
v8::internal::byte buffer[4096];
@@ -37,16 +37,16 @@ TEST(WasmRelocationArm64MemoryReference) {
MacroAssembler masm(isolate, buffer, sizeof buffer,
v8::internal::CodeObjectRequired::kYes);
- __ Mov(x0, Immediate(imm, RelocInfo::WASM_MEMORY_REFERENCE));
+ __ Mov(x0, Immediate(imm, RelocInfo::WASM_CONTEXT_REFERENCE));
__ Ret();
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- CSignature0<int64_t> csig;
- CodeRunner<int64_t> runnable(isolate, code, &csig);
+ compiler::CSignature0<int64_t> csig;
+ compiler::CodeRunner<int64_t> runnable(isolate, code, &csig);
int64_t ret_value = runnable.Call();
CHECK_EQ(ret_value, imm);
@@ -58,12 +58,12 @@ TEST(WasmRelocationArm64MemoryReference) {
int offset = 1234;
// Relocating reference by offset
- int mode_mask = (1 << RelocInfo::WASM_MEMORY_REFERENCE);
+ int mode_mask = (1 << RelocInfo::WASM_CONTEXT_REFERENCE);
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- DCHECK(RelocInfo::IsWasmMemoryReference(it.rinfo()->rmode()));
- it.rinfo()->update_wasm_memory_reference(
- isolate, it.rinfo()->wasm_memory_reference(),
- it.rinfo()->wasm_memory_reference() + offset, SKIP_ICACHE_FLUSH);
+ DCHECK(RelocInfo::IsWasmContextReference(it.rinfo()->rmode()));
+ it.rinfo()->set_wasm_context_reference(
+ isolate, it.rinfo()->wasm_context_reference() + offset,
+ SKIP_ICACHE_FLUSH);
}
// Call into relocated code object
@@ -76,58 +76,8 @@ TEST(WasmRelocationArm64MemoryReference) {
#endif
}
-TEST(WasmRelocationArm64MemorySizeReference) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- HandleScope scope(isolate);
- v8::internal::byte buffer[4096];
- DummyStaticFunction(NULL);
- Immediate size = Immediate(512, RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
- Label fail;
-
- MacroAssembler masm(isolate, buffer, sizeof buffer,
- v8::internal::CodeObjectRequired::kYes);
-
- __ Mov(x0, size);
- __ Cmp(x0, size);
- __ B(ne, &fail);
- __ Ret();
- __ Bind(&fail);
- __ Mov(x0, Immediate(0xdeadbeef));
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
-
- CSignature0<int64_t> csig;
- CodeRunner<int64_t> runnable(isolate, code, &csig);
- int64_t ret_value = runnable.Call();
- CHECK_NE(ret_value, 0xdeadbeef);
-
-#ifdef DEBUG
- OFStream os(stdout);
- code->Print(os);
- ::printf("f() = %" PRIx64 "\n\n", ret_value);
-#endif
- int32_t diff = 512;
-
- int mode_mask = (1 << RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- DCHECK(RelocInfo::IsWasmMemorySizeReference(it.rinfo()->rmode()));
- it.rinfo()->update_wasm_memory_size(
- isolate, it.rinfo()->wasm_memory_size_reference(),
- it.rinfo()->wasm_memory_size_reference() + diff, SKIP_ICACHE_FLUSH);
- }
-
- ret_value = runnable.Call();
- CHECK_NE(ret_value, 0xdeadbeef);
-
-#ifdef DEBUG
- code->Print(os);
- ::printf("f() = %" PRIx64 "\n\n", ret_value);
-#endif
-}
-
#undef __
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc b/deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc
index a78efa8fca..a59dbfec8a 100644
--- a/deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc
@@ -12,18 +12,21 @@
#include "src/frame-constants.h"
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
+#include "src/objects-inl.h"
+#include "src/ostreams.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/c-signature.h"
#include "test/cctest/compiler/call-tester.h"
-using namespace v8::internal;
-using namespace v8::internal::compiler;
+namespace v8 {
+namespace internal {
+namespace wasm {
#define __ assm.
static int32_t DummyStaticFunction(Object* result) { return 1; }
-TEST(WasmRelocationIa32MemoryReference) {
+TEST(WasmRelocationIa32ContextReference) {
Isolate* isolate = CcTest::i_isolate();
Zone zone(isolate->allocator(), ZONE_NAME);
HandleScope scope(isolate);
@@ -33,18 +36,18 @@ TEST(WasmRelocationIa32MemoryReference) {
int32_t imm = 1234567;
__ mov(eax, Immediate(reinterpret_cast<Address>(imm),
- RelocInfo::WASM_MEMORY_REFERENCE));
+ RelocInfo::WASM_CONTEXT_REFERENCE));
__ nop();
__ ret(0);
- CSignature0<int32_t> csig;
+ compiler::CSignature0<int32_t> csig;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
USE(code);
- CodeRunner<int32_t> runnable(isolate, code, &csig);
+ compiler::CodeRunner<int32_t> runnable(isolate, code, &csig);
int32_t ret_value = runnable.Call();
CHECK_EQ(ret_value, imm);
@@ -59,12 +62,12 @@ TEST(WasmRelocationIa32MemoryReference) {
int offset = 1234;
// Relocating references by offset
- int mode_mask = (1 << RelocInfo::WASM_MEMORY_REFERENCE);
+ int mode_mask = (1 << RelocInfo::WASM_CONTEXT_REFERENCE);
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- DCHECK(RelocInfo::IsWasmMemoryReference(it.rinfo()->rmode()));
- it.rinfo()->update_wasm_memory_reference(
- isolate, it.rinfo()->wasm_memory_reference(),
- it.rinfo()->wasm_memory_reference() + offset, SKIP_ICACHE_FLUSH);
+ DCHECK(RelocInfo::IsWasmContextReference(it.rinfo()->rmode()));
+ it.rinfo()->set_wasm_context_reference(
+ isolate, it.rinfo()->wasm_context_reference() + offset,
+ SKIP_ICACHE_FLUSH);
}
// Check if immediate is updated correctly
@@ -79,64 +82,8 @@ TEST(WasmRelocationIa32MemoryReference) {
#endif
}
-TEST(WasmRelocationIa32MemorySizeReference) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- Zone zone(isolate->allocator(), ZONE_NAME);
- HandleScope scope(isolate);
- v8::internal::byte buffer[4096];
- Assembler assm(isolate, buffer, sizeof buffer);
- DummyStaticFunction(NULL);
- int32_t size = 80;
- Label fail;
-
- __ mov(eax, Immediate(reinterpret_cast<Address>(size),
- RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
- __ cmp(eax, Immediate(reinterpret_cast<Address>(size),
- RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
- __ j(not_equal, &fail);
- __ ret(0);
- __ bind(&fail);
- __ mov(eax, 0xdeadbeef);
- __ ret(0);
-
- CSignature0<int32_t> csig;
- CodeDesc desc;
- assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
- USE(code);
-
- CodeRunner<int32_t> runnable(isolate, code, &csig);
- int32_t ret_value = runnable.Call();
- CHECK_NE(ret_value, bit_cast<int32_t>(0xdeadbeef));
-
-#ifdef OBJECT_PRINT
- OFStream os(stdout);
- code->Print(os);
- byte* begin = code->instruction_start();
- byte* end = begin + code->instruction_size();
- disasm::Disassembler::Disassemble(stdout, begin, end);
-#endif
-
- size_t offset = 10;
-
- int mode_mask = (1 << RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- DCHECK(RelocInfo::IsWasmMemorySizeReference(it.rinfo()->rmode()));
- it.rinfo()->update_wasm_memory_size(
- isolate, it.rinfo()->wasm_memory_size_reference(),
- it.rinfo()->wasm_memory_size_reference() + offset, SKIP_ICACHE_FLUSH);
- }
-
- ret_value = runnable.Call();
- CHECK_NE(ret_value, bit_cast<int32_t>(0xdeadbeef));
-
-#ifdef OBJECT_PRINT
- code->Print(os);
- begin = code->instruction_start();
- end = begin + code->instruction_size();
- disasm::Disassembler::Disassemble(stdout, begin, end);
-#endif
-}
#undef __
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-x64.cc b/deps/v8/test/cctest/test-run-wasm-relocation-x64.cc
index fd52a80097..b886c6fde2 100644
--- a/deps/v8/test/cctest/test-run-wasm-relocation-x64.cc
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-x64.cc
@@ -25,7 +25,7 @@ namespace compiler {
static int32_t DummyStaticFunction(Object* result) { return 1; }
-TEST(WasmRelocationX64MemoryReference) {
+TEST(WasmRelocationX64ContextReference) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
v8::internal::byte buffer[4096];
@@ -33,14 +33,14 @@ TEST(WasmRelocationX64MemoryReference) {
DummyStaticFunction(NULL);
int64_t imm = 1234567;
- __ movq(rax, imm, RelocInfo::WASM_MEMORY_REFERENCE);
+ __ movq(rax, imm, RelocInfo::WASM_CONTEXT_REFERENCE);
__ nop();
__ ret(0);
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
USE(code);
CSignature0<int64_t> csig;
@@ -58,12 +58,12 @@ TEST(WasmRelocationX64MemoryReference) {
int offset = 1234;
// Relocating references by offset
- int mode_mask = (1 << RelocInfo::WASM_MEMORY_REFERENCE);
+ int mode_mask = (1 << RelocInfo::WASM_CONTEXT_REFERENCE);
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- DCHECK(RelocInfo::IsWasmMemoryReference(it.rinfo()->rmode()));
- it.rinfo()->update_wasm_memory_reference(
- isolate, it.rinfo()->wasm_memory_reference(),
- it.rinfo()->wasm_memory_reference() + offset, SKIP_ICACHE_FLUSH);
+ DCHECK(RelocInfo::IsWasmContextReference(it.rinfo()->rmode()));
+ it.rinfo()->set_wasm_context_reference(
+ isolate, it.rinfo()->wasm_context_reference() + offset,
+ SKIP_ICACHE_FLUSH);
}
// Check if immediate is updated correctly
@@ -78,62 +78,6 @@ TEST(WasmRelocationX64MemoryReference) {
#endif
}
-TEST(WasmRelocationX64WasmMemorySizeReference) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- HandleScope scope(isolate);
- v8::internal::byte buffer[4096];
- Assembler assm(isolate, buffer, sizeof buffer);
- DummyStaticFunction(NULL);
- int32_t size = 512;
- Label fail;
-
- __ movl(rax, Immediate(size, RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
- __ cmpl(rax, Immediate(size, RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
- __ j(not_equal, &fail);
- __ ret(0);
- __ bind(&fail);
- __ movl(rax, Immediate(0xdeadbeef));
- __ ret(0);
-
- CodeDesc desc;
- assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
- USE(code);
-
- CSignature0<int64_t> csig;
- CodeRunner<int64_t> runnable(isolate, code, &csig);
- int64_t ret_value = runnable.Call();
- CHECK_NE(ret_value, bit_cast<uint32_t>(0xdeadbeef));
-
-#ifdef OBJECT_PRINT
- OFStream os(stdout);
- code->Print(os);
- byte* begin = code->instruction_start();
- byte* end = begin + code->instruction_size();
- disasm::Disassembler::Disassemble(stdout, begin, end);
-#endif
- int32_t diff = 512;
-
- int mode_mask = (1 << RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- DCHECK(RelocInfo::IsWasmMemorySizeReference(it.rinfo()->rmode()));
- it.rinfo()->update_wasm_memory_size(
- isolate, it.rinfo()->wasm_memory_size_reference(),
- it.rinfo()->wasm_memory_size_reference() + diff, SKIP_ICACHE_FLUSH);
- }
-
- ret_value = runnable.Call();
- CHECK_NE(ret_value, bit_cast<uint32_t>(0xdeadbeef));
-
-#ifdef OBJECT_PRINT
- code->Print(os);
- begin = code->instruction_start();
- end = begin + code->instruction_size();
- disasm::Disassembler::Disassemble(stdout, begin, end);
-#endif
-}
#undef __
} // namespace compiler
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 481c168154..6cd9110856 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -41,6 +41,8 @@
#include "src/macro-assembler-inl.h"
#include "src/objects-inl.h"
#include "src/runtime/runtime.h"
+#include "src/snapshot/builtin-deserializer.h"
+#include "src/snapshot/builtin-serializer.h"
#include "src/snapshot/code-serializer.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/partial-deserializer.h"
@@ -52,7 +54,15 @@
#include "test/cctest/heap/heap-utils.h"
#include "test/cctest/setup-isolate-for-tests.h"
-using namespace v8::internal;
+namespace v8 {
+namespace internal {
+
+void DisableLazyDeserialization() {
+ // UNINITIALIZED tests do not set up the isolate sufficiently for lazy
+ // deserialization to work.
+ // TODO(jgruber): Fix this. It may just be enough to set the snapshot_blob.
+ FLAG_lazy_deserialization = false;
+}
void DisableAlwaysOpt() {
// Isolates prepared for serialization do not optimize. The only exception is
@@ -66,7 +76,7 @@ class TestIsolate : public Isolate {
public:
static v8::Isolate* NewInitialized(bool enable_serializer) {
i::Isolate* isolate = new TestIsolate(enable_serializer);
- isolate->setup_delegate_ = new SetupIsolateDelegateForTests();
+ isolate->setup_delegate_ = new SetupIsolateDelegateForTests(true);
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8::Isolate::Scope isolate_scope(v8_isolate);
isolate->Init(NULL);
@@ -77,14 +87,16 @@ class TestIsolate : public Isolate {
// the production Isolate class has one or the other behavior baked in.
static v8::Isolate* New(const v8::Isolate::CreateParams& params) {
i::Isolate* isolate = new TestIsolate(false);
- isolate->setup_delegate_ = new SetupIsolateDelegateForTests();
+ bool create_heap_objects = params.snapshot_blob == nullptr;
+ isolate->setup_delegate_ =
+ new SetupIsolateDelegateForTests(create_heap_objects);
return v8::IsolateNewImpl(isolate, params);
}
explicit TestIsolate(bool enable_serializer) : Isolate(enable_serializer) {
set_array_buffer_allocator(CcTest::array_buffer_allocator());
}
- void CreateSetupDelegateForTests() {
- setup_delegate_ = new SetupIsolateDelegateForTests();
+ void SetDeserializeFromSnapshot() {
+ setup_delegate_ = new SetupIsolateDelegateForTests(false);
}
};
@@ -95,7 +107,19 @@ static Vector<const byte> WritePayload(const Vector<const byte>& payload) {
return Vector<const byte>(const_cast<const byte*>(blob), length);
}
-static Vector<const byte> Serialize(v8::Isolate* isolate) {
+// A convenience struct to simplify management of the two blobs required to
+// deserialize an isolate.
+struct StartupBlobs {
+ Vector<const byte> startup;
+ Vector<const byte> builtin;
+
+ void Dispose() {
+ startup.Dispose();
+ builtin.Dispose();
+ }
+};
+
+static StartupBlobs Serialize(v8::Isolate* isolate) {
// We have to create one context. One reason for this is so that the builtins
// can be loaded from v8natives.js and their addresses can be processed. This
// will clear the pending fixups array, which would otherwise contain GC roots
@@ -112,9 +136,15 @@ static Vector<const byte> Serialize(v8::Isolate* isolate) {
StartupSerializer ser(internal_isolate,
v8::SnapshotCreator::FunctionCodeHandling::kClear);
ser.SerializeStrongReferences();
+
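+ // Builtins are serialized into a separate blob, after the strong and
+ // before the weak/deferred references.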
+ i::BuiltinSerializer builtin_serializer(internal_isolate, &ser);
+ builtin_serializer.SerializeBuiltins();
+
ser.SerializeWeakReferencesAndDeferred();
- SnapshotData snapshot_data(&ser);
- return WritePayload(snapshot_data.RawData());
+ SnapshotData startup_snapshot(&ser);
+ BuiltinSnapshotData builtin_snapshot(&builtin_serializer);
+ return {WritePayload(startup_snapshot.RawData()),
+ WritePayload(builtin_snapshot.RawData())};
}
@@ -134,22 +164,23 @@ Vector<const uint8_t> ConstructSource(Vector<const uint8_t> head,
source_length);
}
-v8::Isolate* InitializeFromBlob(Vector<const byte> blob) {
+v8::Isolate* InitializeFromBlob(StartupBlobs& blobs) {
v8::Isolate* v8_isolate = NULL;
{
- SnapshotData snapshot_data(blob);
- StartupDeserializer deserializer(&snapshot_data);
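+ // Deserializing an isolate requires both the startup and the builtin
+ // snapshot.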
+ SnapshotData startup_snapshot(blobs.startup);
+ BuiltinSnapshotData builtin_snapshot(blobs.builtin);
+ StartupDeserializer deserializer(&startup_snapshot, &builtin_snapshot);
TestIsolate* isolate = new TestIsolate(false);
v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8::Isolate::Scope isolate_scope(v8_isolate);
- isolate->CreateSetupDelegateForTests();
+ isolate->SetDeserializeFromSnapshot();
isolate->Init(&deserializer);
}
return v8_isolate;
}
-static v8::Isolate* Deserialize(Vector<const byte> blob) {
- v8::Isolate* isolate = InitializeFromBlob(blob);
+static v8::Isolate* Deserialize(StartupBlobs& blobs) {
+ v8::Isolate* isolate = InitializeFromBlob(blobs);
CHECK(isolate);
return isolate;
}
@@ -168,11 +199,13 @@ static void SanityCheck(v8::Isolate* v8_isolate) {
}
UNINITIALIZED_TEST(StartupSerializerOnce) {
+ DisableLazyDeserialization();
DisableAlwaysOpt();
v8::Isolate* isolate = TestIsolate::NewInitialized(true);
- Vector<const byte> blob = Serialize(isolate);
- isolate = Deserialize(blob);
- blob.Dispose();
+ StartupBlobs blobs = Serialize(isolate);
+ isolate->Dispose();
+ isolate = Deserialize(blobs);
+ blobs.Dispose();
{
v8::HandleScope handle_scope(isolate);
v8::Isolate::Scope isolate_scope(isolate);
@@ -234,13 +267,15 @@ UNINITIALIZED_TEST(StartupSerializerRootMapDependencies) {
}
UNINITIALIZED_TEST(StartupSerializerTwice) {
+ DisableLazyDeserialization();
DisableAlwaysOpt();
v8::Isolate* isolate = TestIsolate::NewInitialized(true);
- Vector<const byte> blob1 = Serialize(isolate);
- Vector<const byte> blob2 = Serialize(isolate);
- blob1.Dispose();
- isolate = Deserialize(blob2);
- blob2.Dispose();
+ StartupBlobs blobs1 = Serialize(isolate);
+ StartupBlobs blobs2 = Serialize(isolate);
+ isolate->Dispose();
+ blobs1.Dispose();
+ isolate = Deserialize(blobs2);
+ blobs2.Dispose();
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
@@ -254,11 +289,13 @@ UNINITIALIZED_TEST(StartupSerializerTwice) {
}
UNINITIALIZED_TEST(StartupSerializerOnceRunScript) {
+ DisableLazyDeserialization();
DisableAlwaysOpt();
v8::Isolate* isolate = TestIsolate::NewInitialized(true);
- Vector<const byte> blob = Serialize(isolate);
- isolate = Deserialize(blob);
- blob.Dispose();
+ StartupBlobs blobs = Serialize(isolate);
+ isolate->Dispose();
+ isolate = Deserialize(blobs);
+ blobs.Dispose();
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
@@ -278,13 +315,15 @@ UNINITIALIZED_TEST(StartupSerializerOnceRunScript) {
}
UNINITIALIZED_TEST(StartupSerializerTwiceRunScript) {
+ DisableLazyDeserialization();
DisableAlwaysOpt();
v8::Isolate* isolate = TestIsolate::NewInitialized(true);
- Vector<const byte> blob1 = Serialize(isolate);
- Vector<const byte> blob2 = Serialize(isolate);
- blob1.Dispose();
- isolate = Deserialize(blob2);
- blob2.Dispose();
+ StartupBlobs blobs1 = Serialize(isolate);
+ StartupBlobs blobs2 = Serialize(isolate);
+ isolate->Dispose();
+ blobs1.Dispose();
+ isolate = Deserialize(blobs2);
+ blobs2.Dispose();
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
@@ -303,6 +342,7 @@ UNINITIALIZED_TEST(StartupSerializerTwiceRunScript) {
}
static void PartiallySerializeContext(Vector<const byte>* startup_blob_out,
+ Vector<const byte>* builtin_blob_out,
Vector<const byte>* partial_blob_out) {
v8::Isolate* v8_isolate = TestIsolate::NewInitialized(true);
Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
@@ -343,26 +383,35 @@ static void PartiallySerializeContext(Vector<const byte>* startup_blob_out,
PartialSerializer partial_serializer(isolate, &startup_serializer,
v8::SerializeInternalFieldsCallback());
partial_serializer.Serialize(&raw_context, false);
+
+ i::BuiltinSerializer builtin_serializer(isolate, &startup_serializer);
+ builtin_serializer.SerializeBuiltins();
+
startup_serializer.SerializeWeakReferencesAndDeferred();
SnapshotData startup_snapshot(&startup_serializer);
+ BuiltinSnapshotData builtin_snapshot(&builtin_serializer);
SnapshotData partial_snapshot(&partial_serializer);
*partial_blob_out = WritePayload(partial_snapshot.RawData());
+ *builtin_blob_out = WritePayload(builtin_snapshot.RawData());
*startup_blob_out = WritePayload(startup_snapshot.RawData());
}
v8_isolate->Dispose();
}
UNINITIALIZED_TEST(PartialSerializerContext) {
+ DisableLazyDeserialization();
DisableAlwaysOpt();
Vector<const byte> startup_blob;
+ Vector<const byte> builtin_blob;
Vector<const byte> partial_blob;
- PartiallySerializeContext(&startup_blob, &partial_blob);
+ PartiallySerializeContext(&startup_blob, &builtin_blob, &partial_blob);
- v8::Isolate* v8_isolate = InitializeFromBlob(startup_blob);
+ StartupBlobs blobs = {startup_blob, builtin_blob};
+ v8::Isolate* v8_isolate = InitializeFromBlob(blobs);
CHECK(v8_isolate);
- startup_blob.Dispose();
+ blobs.Dispose();
{
v8::Isolate::Scope isolate_scope(v8_isolate);
@@ -398,7 +447,7 @@ UNINITIALIZED_TEST(PartialSerializerContext) {
}
static void PartiallySerializeCustomContext(
- Vector<const byte>* startup_blob_out,
+ Vector<const byte>* startup_blob_out, Vector<const byte>* builtin_blob_out,
Vector<const byte>* partial_blob_out) {
v8::Isolate* v8_isolate = TestIsolate::NewInitialized(true);
Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
@@ -459,26 +508,35 @@ static void PartiallySerializeCustomContext(
PartialSerializer partial_serializer(isolate, &startup_serializer,
v8::SerializeInternalFieldsCallback());
partial_serializer.Serialize(&raw_context, false);
+
+ i::BuiltinSerializer builtin_serializer(isolate, &startup_serializer);
+ builtin_serializer.SerializeBuiltins();
+
startup_serializer.SerializeWeakReferencesAndDeferred();
SnapshotData startup_snapshot(&startup_serializer);
+ BuiltinSnapshotData builtin_snapshot(&builtin_serializer);
SnapshotData partial_snapshot(&partial_serializer);
*partial_blob_out = WritePayload(partial_snapshot.RawData());
+ *builtin_blob_out = WritePayload(builtin_snapshot.RawData());
*startup_blob_out = WritePayload(startup_snapshot.RawData());
}
v8_isolate->Dispose();
}
UNINITIALIZED_TEST(PartialSerializerCustomContext) {
+ DisableLazyDeserialization();
DisableAlwaysOpt();
Vector<const byte> startup_blob;
+ Vector<const byte> builtin_blob;
Vector<const byte> partial_blob;
- PartiallySerializeCustomContext(&startup_blob, &partial_blob);
+ PartiallySerializeCustomContext(&startup_blob, &builtin_blob, &partial_blob);
- v8::Isolate* v8_isolate = InitializeFromBlob(startup_blob);
+ StartupBlobs blobs = {startup_blob, builtin_blob};
+ v8::Isolate* v8_isolate = InitializeFromBlob(blobs);
CHECK(v8_isolate);
- startup_blob.Dispose();
+ blobs.Dispose();
{
v8::Isolate::Scope isolate_scope(v8_isolate);
@@ -732,6 +790,7 @@ TEST(CustomSnapshotDataBlobNeuteredArrayBuffer) {
Int32Expectations expectations = {std::make_tuple("x.buffer.byteLength", 0),
std::make_tuple("x.length", 0)};
+ DisableLazyDeserialization();
DisableAlwaysOpt();
i::FLAG_allow_natives_syntax = true;
v8::StartupData blob;
@@ -1021,6 +1080,7 @@ bool IsCompiled(const char* name) {
}
TEST(SnapshotDataBlobWithWarmup) {
+ DisableLazyDeserialization();
DisableAlwaysOpt();
const char* warmup = "Math.abs(1); Math.random = 1;";
@@ -1050,6 +1110,7 @@ TEST(SnapshotDataBlobWithWarmup) {
}
TEST(CustomSnapshotDataBlobWithWarmup) {
+ DisableLazyDeserialization();
DisableAlwaysOpt();
const char* source =
"function f() { return Math.abs(1); }\n"
@@ -1143,13 +1204,13 @@ static Handle<SharedFunctionInfo> CompileScript(
Isolate* isolate, Handle<String> source, Handle<String> name,
ScriptData** cached_data, v8::ScriptCompiler::CompileOptions options) {
return Compiler::GetSharedFunctionInfoForScript(
- source, name, 0, 0, v8::ScriptOriginOptions(), Handle<Object>(),
- Handle<Context>(isolate->native_context()), NULL, cached_data, options,
- NOT_NATIVES_CODE, Handle<FixedArray>());
+ source, name, 0, 0, v8::ScriptOriginOptions(), Handle<Object>(),
+ Handle<Context>(isolate->native_context()), NULL, cached_data,
+ options, NOT_NATIVES_CODE, Handle<FixedArray>())
+ .ToHandleChecked();
}
TEST(CodeSerializerOnePlusOne) {
- FLAG_serialize_toplevel = true;
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
isolate->compilation_cache()->Disable(); // Disable same-isolate code cache.
@@ -1199,7 +1260,6 @@ TEST(CodeSerializerOnePlusOne) {
}
TEST(CodeSerializerPromotedToCompilationCache) {
- FLAG_serialize_toplevel = true;
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -1229,7 +1289,6 @@ TEST(CodeSerializerPromotedToCompilationCache) {
}
TEST(CodeSerializerInternalizedString) {
- FLAG_serialize_toplevel = true;
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
isolate->compilation_cache()->Disable(); // Disable same-isolate code cache.
@@ -1288,7 +1347,6 @@ TEST(CodeSerializerInternalizedString) {
}
TEST(CodeSerializerLargeCodeObject) {
- FLAG_serialize_toplevel = true;
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
isolate->compilation_cache()->Disable(); // Disable same-isolate code cache.
@@ -1341,7 +1399,6 @@ TEST(CodeSerializerLargeCodeObject) {
TEST(CodeSerializerLargeCodeObjectWithIncrementalMarking) {
if (FLAG_never_compact) return;
ManualGCScope manual_gc_scope;
- FLAG_serialize_toplevel = true;
FLAG_always_opt = false;
const char* filter_flag = "--turbo-filter=NOTHING";
FlagList::SetFlagsFromString(filter_flag, StrLength(filter_flag));
@@ -1418,7 +1475,6 @@ TEST(CodeSerializerLargeCodeObjectWithIncrementalMarking) {
source.Dispose();
}
TEST(CodeSerializerLargeStrings) {
- FLAG_serialize_toplevel = true;
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
Factory* f = isolate->factory();
@@ -1475,7 +1531,6 @@ TEST(CodeSerializerLargeStrings) {
}
TEST(CodeSerializerThreeBigStrings) {
- FLAG_serialize_toplevel = true;
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
Factory* f = isolate->factory();
@@ -1585,7 +1640,6 @@ class SerializerTwoByteResource : public v8::String::ExternalStringResource {
};
TEST(CodeSerializerExternalString) {
- FLAG_serialize_toplevel = true;
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
isolate->compilation_cache()->Disable(); // Disable same-isolate code cache.
@@ -1647,7 +1701,6 @@ TEST(CodeSerializerExternalString) {
}
TEST(CodeSerializerLargeExternalString) {
- FLAG_serialize_toplevel = true;
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
isolate->compilation_cache()->Disable(); // Disable same-isolate code cache.
@@ -1705,7 +1758,6 @@ TEST(CodeSerializerLargeExternalString) {
}
TEST(CodeSerializerExternalScriptName) {
- FLAG_serialize_toplevel = true;
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
isolate->compilation_cache()->Disable(); // Disable same-isolate code cache.
@@ -1764,8 +1816,8 @@ static void SerializerCodeEventListener(const v8::JitCodeEvent* event) {
}
}
-
-v8::ScriptCompiler::CachedData* ProduceCache(const char* source) {
+v8::ScriptCompiler::CachedData* ProduceCache(const char* source,
+ bool eager = false) {
v8::ScriptCompiler::CachedData* cache;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -1779,9 +1831,11 @@ v8::ScriptCompiler::CachedData* ProduceCache(const char* source) {
v8::Local<v8::String> source_str = v8_str(source);
v8::ScriptOrigin origin(v8_str("test"));
v8::ScriptCompiler::Source source(source_str, origin);
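+ // kProduceFullCodeCache eagerly compiles inner functions as well, so the
+ // resulting cache covers more than just the top-level code.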
+ v8::ScriptCompiler::CompileOptions options =
+ eager ? v8::ScriptCompiler::kProduceFullCodeCache
+ : v8::ScriptCompiler::kProduceCodeCache;
v8::Local<v8::UnboundScript> script =
- v8::ScriptCompiler::CompileUnboundScript(
- isolate1, &source, v8::ScriptCompiler::kProduceCodeCache)
+ v8::ScriptCompiler::CompileUnboundScript(isolate1, &source, options)
.ToLocalChecked();
const v8::ScriptCompiler::CachedData* data = source.GetCachedData();
CHECK(data);
@@ -1803,9 +1857,16 @@ v8::ScriptCompiler::CachedData* ProduceCache(const char* source) {
return cache;
}
-TEST(CodeSerializerIsolates) {
- FLAG_serialize_toplevel = true;
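+// Checks that every SharedFunctionInfo in the script is compiled if and only
+// if it was deserialized from the code cache.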
+void CheckDeserializedFlag(v8::Local<v8::UnboundScript> script) {
+ i::Handle<i::SharedFunctionInfo> sfi = v8::Utils::OpenHandle(*script);
+ i::Handle<i::Script> i_script(Script::cast(sfi->script()));
+ i::SharedFunctionInfo::ScriptIterator iterator(i_script);
+ while (SharedFunctionInfo* next = iterator.Next()) {
+ CHECK_EQ(next->is_compiled(), next->deserialized());
+ }
+}
+TEST(CodeSerializerIsolates) {
const char* source = "function f() { return 'abc'; }; f() + 'def'";
v8::ScriptCompiler::CachedData* cache = ProduceCache(source);
@@ -1832,6 +1893,7 @@ TEST(CodeSerializerIsolates) {
.ToLocalChecked();
}
CHECK(!cache->rejected);
+ CheckDeserializedFlag(script);
v8::Local<v8::Value> result = script->BindToCurrentContext()
->Run(isolate2->GetCurrentContext())
.ToLocalChecked();
@@ -1844,9 +1906,53 @@ TEST(CodeSerializerIsolates) {
isolate2->Dispose();
}
-TEST(CodeSerializerFlagChange) {
- FLAG_serialize_toplevel = true;
+TEST(CodeSerializerIsolatesEager) {
+ const char* source =
+ "function f() {"
+ " return function g() {"
+ " return 'abc';"
+ " }"
+ "}"
+ "f()() + 'def'";
+ v8::ScriptCompiler::CachedData* cache = ProduceCache(source, true);
+
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate2 = v8::Isolate::New(create_params);
+ isolate2->SetJitCodeEventHandler(v8::kJitCodeEventDefault,
+ SerializerCodeEventListener);
+ toplevel_test_code_event_found = false;
+ {
+ v8::Isolate::Scope iscope(isolate2);
+ v8::HandleScope scope(isolate2);
+ v8::Local<v8::Context> context = v8::Context::New(isolate2);
+ v8::Context::Scope context_scope(context);
+
+ v8::Local<v8::String> source_str = v8_str(source);
+ v8::ScriptOrigin origin(v8_str("test"));
+ v8::ScriptCompiler::Source source(source_str, origin, cache);
+ v8::Local<v8::UnboundScript> script;
+ {
+ DisallowCompilation no_compile(reinterpret_cast<Isolate*>(isolate2));
+ script = v8::ScriptCompiler::CompileUnboundScript(
+ isolate2, &source, v8::ScriptCompiler::kConsumeCodeCache)
+ .ToLocalChecked();
+ }
+ CHECK(!cache->rejected);
+ CheckDeserializedFlag(script);
+ v8::Local<v8::Value> result = script->BindToCurrentContext()
+ ->Run(isolate2->GetCurrentContext())
+ .ToLocalChecked();
+ CHECK(result->ToString(isolate2->GetCurrentContext())
+ .ToLocalChecked()
+ ->Equals(isolate2->GetCurrentContext(), v8_str("abcdef"))
+ .FromJust());
+ }
+ CHECK(toplevel_test_code_event_found);
+ isolate2->Dispose();
+}
+TEST(CodeSerializerFlagChange) {
const char* source = "function f() { return 'abc'; }; f() + 'def'";
v8::ScriptCompiler::CachedData* cache = ProduceCache(source);
@@ -1874,8 +1980,6 @@ TEST(CodeSerializerFlagChange) {
}
TEST(CodeSerializerBitFlip) {
- FLAG_serialize_toplevel = true;
-
const char* source = "function f() { return 'abc'; }; f() + 'def'";
v8::ScriptCompiler::CachedData* cache = ProduceCache(source);
@@ -1903,8 +2007,6 @@ TEST(CodeSerializerBitFlip) {
}
TEST(CodeSerializerWithHarmonyScoping) {
- FLAG_serialize_toplevel = true;
-
const char* source1 = "'use strict'; let x = 'X'";
const char* source2 = "'use strict'; let y = 'Y'";
const char* source3 = "'use strict'; x + y";
@@ -1969,6 +2071,7 @@ TEST(CodeSerializerWithHarmonyScoping) {
isolate2, &source, v8::ScriptCompiler::kConsumeCodeCache)
.ToLocalChecked();
}
+ CheckDeserializedFlag(script);
v8::Local<v8::Value> result = script->BindToCurrentContext()
->Run(isolate2->GetCurrentContext())
.ToLocalChecked();
@@ -1991,11 +2094,13 @@ TEST(Regress503552) {
Handle<String> source = isolate->factory()->NewStringFromAsciiChecked(
"function f() {} function g() {}");
ScriptData* script_data = NULL;
- Handle<SharedFunctionInfo> shared = Compiler::GetSharedFunctionInfoForScript(
- source, Handle<String>(), 0, 0, v8::ScriptOriginOptions(),
- Handle<Object>(), Handle<Context>(isolate->native_context()), NULL,
- &script_data, v8::ScriptCompiler::kProduceCodeCache, NOT_NATIVES_CODE,
- Handle<FixedArray>());
+ Handle<SharedFunctionInfo> shared =
+ Compiler::GetSharedFunctionInfoForScript(
+ source, MaybeHandle<String>(), 0, 0, v8::ScriptOriginOptions(),
+ MaybeHandle<Object>(), Handle<Context>(isolate->native_context()),
+ NULL, &script_data, v8::ScriptCompiler::kProduceCodeCache,
+ NOT_NATIVES_CODE, MaybeHandle<FixedArray>())
+ .ToHandleChecked();
delete script_data;
heap::SimulateIncrementalMarking(isolate->heap());
@@ -2004,60 +2109,6 @@ TEST(Regress503552) {
delete script_data;
}
-TEST(CodeSerializerEmbeddedObject) {
- FLAG_serialize_toplevel = true;
- LocalContext context;
- Isolate* isolate = CcTest::i_isolate();
- isolate->compilation_cache()->Disable(); // Disable same-isolate code cache.
- v8::HandleScope scope(CcTest::isolate());
-
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
- HandleScope handles(isolate);
-
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
- v8::internal::CodeObjectRequired::kYes);
- assembler.enable_serializer();
- Handle<HeapNumber> number = isolate->factory()->NewHeapNumber(0.3);
- CHECK(isolate->heap()->InNewSpace(*number));
- Handle<Code> code;
- {
- MacroAssembler* masm = &assembler;
- masm->Push(number);
- CodeDesc desc;
- masm->GetCode(isolate, &desc);
- code = isolate->factory()->NewCode(desc, Code::ComputeFlags(Code::FUNCTION),
- masm->CodeObject());
- code->set_has_reloc_info_for_serialization(true);
- }
- RelocIterator rit1(*code, RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT));
- CHECK_EQ(*number, rit1.rinfo()->target_object());
-
- Handle<String> source = isolate->factory()->empty_string();
- Handle<SharedFunctionInfo> sfi =
- isolate->factory()->NewSharedFunctionInfo(source, code, false);
- ScriptData* script_data = CodeSerializer::Serialize(isolate, sfi, source);
-
- Handle<SharedFunctionInfo> copy =
- CodeSerializer::Deserialize(isolate, script_data, source)
- .ToHandleChecked();
- RelocIterator rit2(copy->code(),
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT));
- CHECK(rit2.rinfo()->target_object()->IsHeapNumber());
- CHECK_EQ(0.3, HeapNumber::cast(rit2.rinfo()->target_object())->value());
-
- CcTest::CollectAllAvailableGarbage();
-
- RelocIterator rit3(copy->code(),
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT));
- CHECK(rit3.rinfo()->target_object()->IsHeapNumber());
- CHECK_EQ(0.3, HeapNumber::cast(rit3.rinfo()->target_object())->value());
-
- delete script_data;
-}
-
TEST(SnapshotCreatorMultipleContexts) {
DisableAlwaysOpt();
v8::StartupData blob;
@@ -2718,3 +2769,27 @@ TEST(SerializationMemoryStats) {
v8::StartupData blob = v8::V8::CreateSnapshotDataBlob();
delete[] blob.data;
}
+
+TEST(BuiltinsHaveBuiltinIdForLazyDeserialization) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ i::HandleScope scope(isolate);
+
+ CHECK(Builtins::IsLazy(Builtins::kRegExpPrototypeExec));
+ CHECK_EQ(Builtins::kRegExpPrototypeExec,
+ isolate->regexp_exec_function()
+ ->shared()
+ ->lazy_deserialization_builtin_id());
+ CHECK(Builtins::IsLazy(Builtins::kAsyncIteratorValueUnwrap));
+ CHECK_EQ(Builtins::kAsyncIteratorValueUnwrap,
+ isolate->async_iterator_value_unwrap_shared_fun()
+ ->lazy_deserialization_builtin_id());
+
+ CHECK(!Builtins::IsLazy(Builtins::kIllegal));
+ CHECK(!isolate->opaque_reference_function()
+ ->shared()
+ ->HasLazyDeserializationBuiltinId());
+}
+
+} // namespace internal
+} // namespace v8
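Aside, not part of the patch: the serializer tests above produce a code cache in one isolate and consume it in another under DisallowCompilation. A minimal embedder-side sketch of that round trip, assuming a live isolate with an entered context and eliding error handling (API names as of this V8 era):

// Produce: compile once with kProduceCodeCache; the cache attaches to the
// Source object.
v8::Local<v8::String> src =
    v8::String::NewFromUtf8(isolate, "1 + 1", v8::NewStringType::kNormal)
        .ToLocalChecked();
v8::ScriptCompiler::Source produce_src(src);
v8::ScriptCompiler::CompileUnboundScript(
    isolate, &produce_src, v8::ScriptCompiler::kProduceCodeCache)
    .ToLocalChecked();
const v8::ScriptCompiler::CachedData* cache = produce_src.GetCachedData();

// Consume: hand a copy of the cache back (possibly in a fresh isolate) and
// compile with kConsumeCodeCache; cache->rejected reports a mismatch.
v8::ScriptCompiler::Source consume_src(
    src, new v8::ScriptCompiler::CachedData(cache->data, cache->length));
v8::ScriptCompiler::CompileUnboundScript(
    isolate, &consume_src, v8::ScriptCompiler::kConsumeCodeCache)
    .ToLocalChecked();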
diff --git a/deps/v8/test/cctest/test-simulator-arm.cc b/deps/v8/test/cctest/test-simulator-arm.cc
index 6350e177d8..eabc43df31 100644
--- a/deps/v8/test/cctest/test-simulator-arm.cc
+++ b/deps/v8/test/cctest/test-simulator-arm.cc
@@ -29,19 +29,20 @@
#include "test/cctest/cctest.h"
#include "src/arm/simulator-arm.h"
+#include "src/assembler-inl.h"
#include "src/disassembler.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
+namespace v8 {
+namespace internal {
+
#if defined(USE_SIMULATOR)
#ifndef V8_TARGET_LITTLE_ENDIAN
#error Expected ARM to be little-endian
#endif
-using namespace v8::base;
-using namespace v8::internal;
-
// Define these function prototypes to match JSEntryFunction in execution.cc.
typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
typedef Object* (*F3)(void* p0, int p1, int p2, int p3, int p4);
@@ -195,8 +196,8 @@ static void TestInvalidateExclusiveAccess(
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
TestData t = initial_data;
@@ -263,8 +264,8 @@ static int ExecuteMemoryAccess(Isolate* isolate, TestData* test_data,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
return reinterpret_cast<int>(
@@ -278,30 +279,33 @@ class MemoryAccessThread : public v8::base::Thread {
test_data_(NULL),
is_finished_(false),
has_request_(false),
- did_request_(false) {}
+ did_request_(false),
+ isolate_(nullptr) {}
virtual void Run() {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- v8::Isolate* isolate = v8::Isolate::New(create_params);
- Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
- v8::Isolate::Scope scope(isolate);
-
- v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex_);
- while (!is_finished_) {
- while (!(has_request_ || is_finished_)) {
- has_request_cv_.Wait(&mutex_);
- }
+ isolate_ = v8::Isolate::New(create_params);
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate_);
+ {
+ v8::Isolate::Scope scope(isolate_);
+ v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex_);
+ while (!is_finished_) {
+ while (!(has_request_ || is_finished_)) {
+ has_request_cv_.Wait(&mutex_);
+ }
+
+ if (is_finished_) {
+ break;
+ }
- if (is_finished_) {
- break;
+ ExecuteMemoryAccess(i_isolate, test_data_, access_);
+ has_request_ = false;
+ did_request_ = true;
+ did_request_cv_.NotifyOne();
}
-
- ExecuteMemoryAccess(i_isolate, test_data_, access_);
- has_request_ = false;
- did_request_ = true;
- did_request_cv_.NotifyOne();
}
+ isolate_->Dispose();
}
void NextAndWait(TestData* test_data, MemoryAccess access) {
@@ -332,6 +336,7 @@ class MemoryAccessThread : public v8::base::Thread {
v8::base::Mutex mutex_;
v8::base::ConditionVariable has_request_cv_;
v8::base::ConditionVariable did_request_cv_;
+ v8::Isolate* isolate_;
};
TEST(simulator_invalidate_exclusive_access_threaded) {
@@ -385,3 +390,6 @@ TEST(simulator_invalidate_exclusive_access_threaded) {
#undef __
#endif // USE_SIMULATOR
+
+} // namespace internal
+} // namespace v8
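Aside, not part of the patch: the MemoryAccessThread change above exists because a v8::Isolate must not be disposed while an Isolate::Scope is still active, so the isolate moves into a member and Dispose() runs after the scoped block. The lifetime rule in isolation, as a sketch using the default array-buffer allocator:

v8::Isolate::CreateParams params;
params.array_buffer_allocator =
    v8::ArrayBuffer::Allocator::NewDefaultAllocator();
v8::Isolate* isolate = v8::Isolate::New(params);
{
  v8::Isolate::Scope scope(isolate);
  // ... all work that requires an entered isolate happens here ...
}                     // the scope must end first
isolate->Dispose();   // only valid once no Isolate::Scope is active
delete params.array_buffer_allocator;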
diff --git a/deps/v8/test/cctest/test-simulator-arm64.cc b/deps/v8/test/cctest/test-simulator-arm64.cc
index 137e2066d0..9ba216a643 100644
--- a/deps/v8/test/cctest/test-simulator-arm64.cc
+++ b/deps/v8/test/cctest/test-simulator-arm64.cc
@@ -33,15 +33,15 @@
#include "src/macro-assembler-inl.h"
#include "src/objects-inl.h"
+namespace v8 {
+namespace internal {
+
#if defined(USE_SIMULATOR)
#ifndef V8_TARGET_LITTLE_ENDIAN
#error Expected ARM to be little-endian
#endif
-using namespace v8::base;
-using namespace v8::internal;
-
#define __ masm.
struct MemoryAccess {
@@ -189,8 +189,8 @@ static void TestInvalidateExclusiveAccess(
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
TestData t = initial_data;
Simulator::CallArgument args[] = {
Simulator::CallArgument(reinterpret_cast<uintptr_t>(&t)),
@@ -259,8 +259,8 @@ static int ExecuteMemoryAccess(Isolate* isolate, TestData* test_data,
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
Simulator::CallArgument args[] = {
Simulator::CallArgument(reinterpret_cast<uintptr_t>(test_data)),
Simulator::CallArgument::End()};
@@ -275,30 +275,33 @@ class MemoryAccessThread : public v8::base::Thread {
test_data_(NULL),
is_finished_(false),
has_request_(false),
- did_request_(false) {}
+ did_request_(false),
+ isolate_(nullptr) {}
virtual void Run() {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- v8::Isolate* isolate = v8::Isolate::New(create_params);
- Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
- v8::Isolate::Scope scope(isolate);
-
- v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex_);
- while (!is_finished_) {
- while (!(has_request_ || is_finished_)) {
- has_request_cv_.Wait(&mutex_);
- }
+ isolate_ = v8::Isolate::New(create_params);
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate_);
+ {
+ v8::Isolate::Scope scope(isolate_);
+ v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex_);
+ while (!is_finished_) {
+ while (!(has_request_ || is_finished_)) {
+ has_request_cv_.Wait(&mutex_);
+ }
+
+ if (is_finished_) {
+ break;
+ }
- if (is_finished_) {
- break;
+ ExecuteMemoryAccess(i_isolate, test_data_, access_);
+ has_request_ = false;
+ did_request_ = true;
+ did_request_cv_.NotifyOne();
}
-
- ExecuteMemoryAccess(i_isolate, test_data_, access_);
- has_request_ = false;
- did_request_ = true;
- did_request_cv_.NotifyOne();
}
+ isolate_->Dispose();
}
void NextAndWait(TestData* test_data, MemoryAccess access) {
@@ -329,6 +332,7 @@ class MemoryAccessThread : public v8::base::Thread {
v8::base::Mutex mutex_;
v8::base::ConditionVariable has_request_cv_;
v8::base::ConditionVariable did_request_cv_;
+ v8::Isolate* isolate_;
};
TEST(simulator_invalidate_exclusive_access_threaded) {
@@ -382,3 +386,6 @@ TEST(simulator_invalidate_exclusive_access_threaded) {
#undef __
#endif // USE_SIMULATOR
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index e35f516947..ec6b659406 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -97,6 +97,7 @@ class MyRandomNumberGenerator {
namespace v8 {
namespace internal {
+namespace test_strings {
static const int DEEP_DEPTH = 8 * 1024;
static const int SUPER_DEEP_DEPTH = 80 * 1024;
@@ -1154,7 +1155,6 @@ TEST(CachedHashOverflow) {
TEST(SliceFromCons) {
- FLAG_string_slices = true;
CcTest::InitializeVM();
Factory* factory = CcTest::i_isolate()->factory();
v8::HandleScope scope(CcTest::isolate());
@@ -1221,7 +1221,6 @@ TEST(InternalizeExternal) {
}
TEST(SliceFromExternal) {
- FLAG_string_slices = true;
CcTest::InitializeVM();
Factory* factory = CcTest::i_isolate()->factory();
v8::HandleScope scope(CcTest::isolate());
@@ -1242,7 +1241,6 @@ TEST(SliceFromExternal) {
TEST(TrivialSlice) {
// This tests whether a slice that contains the entire parent string
// actually creates a new string (it should not).
- FLAG_string_slices = true;
CcTest::InitializeVM();
Factory* factory = CcTest::i_isolate()->factory();
v8::HandleScope scope(CcTest::isolate());
@@ -1272,7 +1270,6 @@ TEST(TrivialSlice) {
TEST(SliceFromSlice) {
// This tests whether a slice that contains the entire parent string
// actually creates a new string (it should not).
- FLAG_string_slices = true;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Value> result;
@@ -1505,27 +1502,28 @@ static void CheckCanonicalEquivalence(uint16_t c, uint16_t test) {
TEST(Latin1IgnoreCase) {
- using namespace unibrow;
- for (uint16_t c = Latin1::kMaxChar + 1; c != 0; c++) {
- uint16_t lower = ConvertLatin1<ToLowercase, false>(c);
- uint16_t upper = ConvertLatin1<ToUppercase, false>(c);
- uint16_t test = Latin1::ConvertNonLatin1ToLatin1(c);
+ for (uint16_t c = unibrow::Latin1::kMaxChar + 1; c != 0; c++) {
+ uint16_t lower = ConvertLatin1<unibrow::ToLowercase, false>(c);
+ uint16_t upper = ConvertLatin1<unibrow::ToUppercase, false>(c);
+ uint16_t test = unibrow::Latin1::ConvertNonLatin1ToLatin1(c);
// Filter out all characters whose upper is not their lower or vice versa.
if (lower == 0 && upper == 0) {
CheckCanonicalEquivalence(c, test);
continue;
}
- if (lower > Latin1::kMaxChar && upper > Latin1::kMaxChar) {
+ if (lower > unibrow::Latin1::kMaxChar &&
+ upper > unibrow::Latin1::kMaxChar) {
CheckCanonicalEquivalence(c, test);
continue;
}
if (lower == 0 && upper != 0) {
- lower = ConvertLatin1<ToLowercase, false>(upper);
+ lower = ConvertLatin1<unibrow::ToLowercase, false>(upper);
}
if (upper == 0 && lower != c) {
- upper = ConvertLatin1<ToUppercase, false>(lower);
+ upper = ConvertLatin1<unibrow::ToUppercase, false>(lower);
}
- if (lower > Latin1::kMaxChar && upper > Latin1::kMaxChar) {
+ if (lower > unibrow::Latin1::kMaxChar &&
+ upper > unibrow::Latin1::kMaxChar) {
CheckCanonicalEquivalence(c, test);
continue;
}
@@ -1668,5 +1666,6 @@ TEST(ExternalStringIndexOf) {
.FromJust());
}
+} // namespace test_strings
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-strtod.cc b/deps/v8/test/cctest/test-strtod.cc
index 622886c941..2a9bf99723 100644
--- a/deps/v8/test/cctest/test-strtod.cc
+++ b/deps/v8/test/cctest/test-strtod.cc
@@ -38,6 +38,7 @@
namespace v8 {
namespace internal {
+namespace test_strtod {
static Vector<const char> StringToVector(const char* str) {
return Vector<const char>(str, StrLength(str));
@@ -480,5 +481,6 @@ TEST(RandomStrtod) {
}
}
+} // namespace test_strtod
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-symbols.cc b/deps/v8/test/cctest/test-symbols.cc
index be37c20717..750ab3c06f 100644
--- a/deps/v8/test/cctest/test-symbols.cc
+++ b/deps/v8/test/cctest/test-symbols.cc
@@ -33,15 +33,8 @@
#include "src/factory.h"
#include "src/isolate.h"
#include "src/objects.h"
+#include "src/objects/name-inl.h"
#include "src/ostreams.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/factory.h -> src/objects-inl.h
-#include "src/objects-inl.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/feedback-vector.h ->
-// src/feedback-vector-inl.h
-#include "src/feedback-vector-inl.h"
-#include "src/v8.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index d288eb7242..0c6c0b6a0a 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -595,3 +595,29 @@ TEST(TerminateAndTryCall) {
CHECK_EQ(4, result.FromJust());
CHECK(!isolate->IsExecutionTerminating());
}
+
+class ConsoleImpl : public v8::debug::ConsoleDelegate {
+ private:
+ void Log(const v8::debug::ConsoleCallArguments& args,
+ const v8::debug::ConsoleContext&) override {
+ CompileRun("1 + 1");
+ }
+};
+
+TEST(TerminateConsole) {
+ i::FLAG_allow_natives_syntax = true;
+ v8::Isolate* isolate = CcTest::isolate();
+ ConsoleImpl console;
+ v8::debug::SetConsoleDelegate(isolate, &console);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::ObjectTemplate> global = CreateGlobalTemplate(
+ isolate, TerminateCurrentThread, DoLoopCancelTerminate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
+ v8::Context::Scope context_scope(context);
+ CHECK(!isolate->IsExecutionTerminating());
+ v8::TryCatch try_catch(isolate);
+ CHECK(!isolate->IsExecutionTerminating());
+ CHECK(CompileRun("terminate(); console.log(); fail();").IsEmpty());
+ CHECK(try_catch.HasCaught());
+ CHECK(!isolate->IsExecutionTerminating());
+}
diff --git a/deps/v8/test/cctest/test-transitions.cc b/deps/v8/test/cctest/test-transitions.cc
index 2abab39b26..0c49094ce4 100644
--- a/deps/v8/test/cctest/test-transitions.cc
+++ b/deps/v8/test/cctest/test-transitions.cc
@@ -12,8 +12,6 @@
#include "src/factory.h"
#include "src/field-type.h"
#include "src/global-handles.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/field-type.h -> src/objects-inl.h
#include "src/objects-inl.h"
#include "src/transitions.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-types.cc b/deps/v8/test/cctest/test-types.cc
index 46b7858194..782d4fd045 100644
--- a/deps/v8/test/cctest/test-types.cc
+++ b/deps/v8/test/cctest/test-types.cc
@@ -5,16 +5,10 @@
#include <vector>
#include "src/compiler/types.h"
-#include "src/factory.h"
+#include "src/factory-inl.h"
#include "src/heap/heap.h"
#include "src/isolate.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/factory.h -> src/objects-inl.h
-#include "src/objects-inl.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/feedback-vector.h ->
-// src/feedback-vector-inl.h
-#include "src/feedback-vector-inl.h"
+#include "src/objects.h"
#include "test/cctest/cctest.h"
#include "test/cctest/types-fuzz.h"
diff --git a/deps/v8/test/cctest/test-unboxed-doubles.cc b/deps/v8/test/cctest/test-unboxed-doubles.cc
index 1aaa5bfa12..42b3e355ab 100644
--- a/deps/v8/test/cctest/test-unboxed-doubles.cc
+++ b/deps/v8/test/cctest/test-unboxed-doubles.cc
@@ -24,8 +24,9 @@
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
-using namespace v8::base;
-using namespace v8::internal;
+namespace v8 {
+namespace internal {
+namespace test_unboxed_doubles {
#if V8_DOUBLE_FIELDS_UNBOXING
@@ -1597,3 +1598,7 @@ TEST(IncrementalWriteBarrierObjectShiftFieldsRight) {
// Map::ReconfigureProperty() supports that.
#endif
+
+} // namespace test_unboxed_doubles
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-usecounters.cc b/deps/v8/test/cctest/test-usecounters.cc
index 7130137b55..c300361a51 100644
--- a/deps/v8/test/cctest/test-usecounters.cc
+++ b/deps/v8/test/cctest/test-usecounters.cc
@@ -6,7 +6,9 @@
#include "test/cctest/cctest.h"
-namespace {
+namespace v8 {
+namespace internal {
+namespace test_usecounters {
int* global_use_counts = NULL;
@@ -14,7 +16,6 @@ void MockUseCounterCallback(v8::Isolate* isolate,
v8::Isolate::UseCounterFeature feature) {
++global_use_counts[feature];
}
-}
TEST(DefineGetterSetterThrowUseCount) {
i::FLAG_harmony_strict_legacy_accessor_builtins = false;
@@ -140,3 +141,7 @@ TEST(LabeledExpressionStatement) {
"bat: do { } while (false);");
CHECK_EQ(2, use_counts[v8::Isolate::kLabeledExpressionStatement]);
}
+
+} // namespace test_usecounters
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-utils-arm64.cc b/deps/v8/test/cctest/test-utils-arm64.cc
index 6c2229b1a7..5abe0e46d3 100644
--- a/deps/v8/test/cctest/test-utils-arm64.cc
+++ b/deps/v8/test/cctest/test-utils-arm64.cc
@@ -29,6 +29,7 @@
#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/utils-arm64.h"
+#include "src/base/template-utils.h"
#include "src/macro-assembler-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-utils-arm64.h"
@@ -227,7 +228,6 @@ bool EqualRegisters(const RegisterDump* a, const RegisterDump* b) {
return true;
}
-
RegList PopulateRegisterArray(Register* w, Register* x, Register* r,
int reg_size, int reg_count, RegList allowed) {
RegList list = 0;
diff --git a/deps/v8/test/cctest/test-utils-arm64.h b/deps/v8/test/cctest/test-utils-arm64.h
index a8a0b0cbf9..f629a17646 100644
--- a/deps/v8/test/cctest/test-utils-arm64.h
+++ b/deps/v8/test/cctest/test-utils-arm64.h
@@ -212,6 +212,12 @@ bool EqualNzcv(uint32_t expected, uint32_t result);
bool EqualRegisters(const RegisterDump* a, const RegisterDump* b);
+// Create an array of type {RegType}, size {Size}, filled with {NoReg}.
+template <typename RegType, size_t Size>
+std::array<RegType, Size> CreateRegisterArray() {
+ return base::make_array<Size>([](size_t) { return RegType::no_reg(); });
+}
+
// Populate the w, x and r arrays with registers from the 'allowed' mask. The
// r array will be populated with <reg_size>-sized registers,
//
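Aside, not part of the patch: a hypothetical use of the CreateRegisterArray helper added above, sized to feed PopulateRegisterArray (kNumRegs and allowed_mask are illustrative names, not from the source):

// Three NoReg-filled arrays sized at compile time, then populated from an
// allowed-register mask.
constexpr size_t kNumRegs = 8;
auto w = CreateRegisterArray<Register, kNumRegs>();
auto x = CreateRegisterArray<Register, kNumRegs>();
auto r = CreateRegisterArray<Register, kNumRegs>();
RegList list =
    PopulateRegisterArray(w.data(), x.data(), r.data(), kXRegSizeInBits,
                          kNumRegs, allowed_mask);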
diff --git a/deps/v8/test/cctest/test-version.cc b/deps/v8/test/cctest/test-version.cc
index 78bc81a7e8..301fe58c50 100644
--- a/deps/v8/test/cctest/test-version.cc
+++ b/deps/v8/test/cctest/test-version.cc
@@ -70,28 +70,6 @@ static void CheckVersion(int major, int minor, int build, int patch,
TEST(VersionString) {
-#ifdef USE_SIMULATOR
- CheckVersion(0, 0, 0, 0, false, "0.0.0 SIMULATOR", "libv8-0.0.0.so");
- CheckVersion(0, 0, 0, 0, true,
- "0.0.0 (candidate) SIMULATOR", "libv8-0.0.0-candidate.so");
- CheckVersion(1, 0, 0, 0, false, "1.0.0 SIMULATOR", "libv8-1.0.0.so");
- CheckVersion(1, 0, 0, 0, true,
- "1.0.0 (candidate) SIMULATOR", "libv8-1.0.0-candidate.so");
- CheckVersion(1, 0, 0, 1, false, "1.0.0.1 SIMULATOR", "libv8-1.0.0.1.so");
- CheckVersion(1, 0, 0, 1, true,
- "1.0.0.1 (candidate) SIMULATOR", "libv8-1.0.0.1-candidate.so");
- CheckVersion(2, 5, 10, 7, false, "2.5.10.7 SIMULATOR", "libv8-2.5.10.7.so");
- CheckVersion(2, 5, 10, 7, true,
- "2.5.10.7 (candidate) SIMULATOR", "libv8-2.5.10.7-candidate.so");
- CheckVersion(6, 0, 287, 0, "-emb.1", false, "6.0.287-emb.1 SIMULATOR",
- "libv8-6.0.287-emb.1.so");
- CheckVersion(6, 0, 287, 0, "-emb.1", true, "6.0.287-emb.1 (candidate) SIMULATOR",
- "libv8-6.0.287-emb.1-candidate.so");
- CheckVersion(6, 0, 287, 53, "-emb.1", false, "6.0.287.53-emb.1 SIMULATOR",
- "libv8-6.0.287.53-emb.1.so");
- CheckVersion(6, 0, 287, 53, "-emb.1", true, "6.0.287.53-emb.1 (candidate) SIMULATOR",
- "libv8-6.0.287.53-emb.1-candidate.so");
-#else
CheckVersion(0, 0, 0, 0, "", false, "0.0.0", "libv8-0.0.0.so");
CheckVersion(0, 0, 0, 0, "", true, "0.0.0 (candidate)",
"libv8-0.0.0-candidate.so");
@@ -112,7 +90,6 @@ TEST(VersionString) {
"libv8-6.0.287.53-emb.1.so");
CheckVersion(6, 0, 287, 53, "-emb.1", true, "6.0.287.53-emb.1 (candidate)",
"libv8-6.0.287.53-emb.1-candidate.so");
-#endif
}
} // namespace internal
diff --git a/deps/v8/test/cctest/test-weakmaps.cc b/deps/v8/test/cctest/test-weakmaps.cc
index 13a0c538d9..60f6498022 100644
--- a/deps/v8/test/cctest/test-weakmaps.cc
+++ b/deps/v8/test/cctest/test-weakmaps.cc
@@ -27,23 +27,16 @@
#include <utility>
-#include "src/v8.h"
-
#include "src/factory.h"
#include "src/global-handles.h"
#include "src/isolate.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/factory.h -> src/objects-inl.h
#include "src/objects-inl.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/feedback-vector.h ->
-// src/feedback-vector-inl.h
-#include "src/feedback-vector-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
namespace v8 {
namespace internal {
+namespace test_weakmaps {
static Isolate* GetIsolateFrom(LocalContext* context) {
return reinterpret_cast<Isolate*>((*context)->GetIsolate());
@@ -191,7 +184,7 @@ TEST(Regress2060a) {
Handle<JSObject> object = factory->NewJSObject(function, TENURED);
CHECK(!heap->InNewSpace(*object));
CHECK(!first_page->Contains(object->address()));
- int32_t hash = object->GetOrCreateHash(isolate)->value();
+ int32_t hash = key->GetOrCreateHash(isolate)->value();
JSWeakCollection::Set(weakmap, key, object, hash);
}
}
@@ -263,5 +256,6 @@ TEST(Regress399527) {
CcTest::CollectAllGarbage();
}
+} // namespace test_weakmaps
} // namespace internal
} // namespace v8
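Aside, not part of the patch: the one-line Regress2060a fix above passes the key's identity hash, not the value's, to JSWeakCollection::Set, since the hash indexes the key in the backing hash table. The invariant in sketch form (internal-API handles; `value` is an illustrative name):

// The hash handed to JSWeakCollection::Set must belong to the key.
int32_t hash = key->GetOrCreateHash(isolate)->value();
JSWeakCollection::Set(weakmap, key, value, hash);  // lookup hashes the key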
diff --git a/deps/v8/test/cctest/test-weaksets.cc b/deps/v8/test/cctest/test-weaksets.cc
index 9658ac3ed3..900515aadd 100644
--- a/deps/v8/test/cctest/test-weaksets.cc
+++ b/deps/v8/test/cctest/test-weaksets.cc
@@ -27,23 +27,16 @@
#include <utility>
-#include "src/v8.h"
-
#include "src/factory.h"
#include "src/global-handles.h"
#include "src/isolate.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/factory.h -> src/objects-inl.h
#include "src/objects-inl.h"
-// FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/feedback-vector.h ->
-// src/feedback-vector-inl.h
-#include "src/feedback-vector-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
namespace v8 {
namespace internal {
+namespace test_weaksets {
static Isolate* GetIsolateFrom(LocalContext* context) {
return reinterpret_cast<Isolate*>((*context)->GetIsolate());
@@ -244,5 +237,6 @@ TEST(WeakSet_Regress2060b) {
CcTest::CollectAllGarbage();
}
+} // namespace test_weaksets
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
index 2cd053767d..c7bb737299 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
@@ -16,6 +16,11 @@
#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-macro-gen.h"
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace test_run_wasm_64 {
+
// If the target architecture is 64-bit, enable all tests.
#if !V8_TARGET_ARCH_32_BIT || V8_TARGET_ARCH_X64
#define WASM_64 1
@@ -23,18 +28,6 @@
#define WASM_64 0
#endif
-#define CHECK_TRAP32(x) \
- CHECK_EQ(0xdeadbeef, (bit_cast<uint32_t>(x)) & 0xFFFFFFFF)
-#define CHECK_TRAP64(x) \
- CHECK_EQ(0xdeadbeefdeadbeef, (bit_cast<uint64_t>(x)) & 0xFFFFFFFFFFFFFFFF)
-#define CHECK_TRAP(x) CHECK_TRAP32(x)
-
-#define asi64(x) static_cast<int64_t>(x)
-
-#define asu64(x) static_cast<uint64_t>(x)
-
-#define B2(a, b) kExprBlock, a, b, kExprEnd
-
// Can't bridge macro land with nested macros.
#if V8_TARGET_ARCH_MIPS
#define MIPS true
@@ -95,6 +88,8 @@
FOREACH_I64_OPERATOR(DECLARE_CONST)
#undef DECLARE_CONST
+#undef FOREACH_I64_OPERATOR
+
#define REQUIRE(name) \
if (!WASM_64 && !kSupported_##name) return
@@ -274,11 +269,11 @@ WASM_EXEC_TEST(I64DivS_Trap) {
REQUIRE(I64DivS);
WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- CHECK_EQ(0, r.Call(asi64(0), asi64(100)));
- CHECK_TRAP64(r.Call(asi64(100), asi64(0)));
- CHECK_TRAP64(r.Call(asi64(-1001), asi64(0)));
- CHECK_TRAP64(r.Call(std::numeric_limits<int64_t>::min(), asi64(-1)));
- CHECK_TRAP64(r.Call(std::numeric_limits<int64_t>::min(), asi64(0)));
+ CHECK_EQ(0, r.Call(int64_t{0}, int64_t{100}));
+ CHECK_TRAP64(r.Call(int64_t{100}, int64_t{0}));
+ CHECK_TRAP64(r.Call(int64_t{-1001}, int64_t{0}));
+ CHECK_TRAP64(r.Call(std::numeric_limits<int64_t>::min(), int64_t{-1}));
+ CHECK_TRAP64(r.Call(std::numeric_limits<int64_t>::min(), int64_t{0}));
}
WASM_EXEC_TEST(I64DivS_Byzero_Const) {
@@ -315,10 +310,10 @@ WASM_EXEC_TEST(I64DivU_Trap) {
REQUIRE(I64DivU);
WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_mode);
BUILD(r, WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- CHECK_EQ(0, r.Call(asu64(0), asu64(100)));
- CHECK_TRAP64(r.Call(asu64(100), asu64(0)));
- CHECK_TRAP64(r.Call(asu64(1001), asu64(0)));
- CHECK_TRAP64(r.Call(std::numeric_limits<uint64_t>::max(), asu64(0)));
+ CHECK_EQ(0, r.Call(uint64_t{0}, uint64_t{100}));
+ CHECK_TRAP64(r.Call(uint64_t{100}, uint64_t{0}));
+ CHECK_TRAP64(r.Call(uint64_t{1001}, uint64_t{0}));
+ CHECK_TRAP64(r.Call(std::numeric_limits<uint64_t>::max(), uint64_t{0}));
}
WASM_EXEC_TEST(I64DivU_Byzero_Const) {
@@ -356,11 +351,11 @@ WASM_EXEC_TEST(I64RemS_Trap) {
REQUIRE(I64RemS);
WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_REMS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- CHECK_EQ(33, r.Call(asi64(133), asi64(100)));
- CHECK_EQ(0, r.Call(std::numeric_limits<int64_t>::min(), asi64(-1)));
- CHECK_TRAP64(r.Call(asi64(100), asi64(0)));
- CHECK_TRAP64(r.Call(asi64(-1001), asi64(0)));
- CHECK_TRAP64(r.Call(std::numeric_limits<int64_t>::min(), asi64(0)));
+ CHECK_EQ(33, r.Call(int64_t{133}, int64_t{100}));
+ CHECK_EQ(0, r.Call(std::numeric_limits<int64_t>::min(), int64_t{-1}));
+ CHECK_TRAP64(r.Call(int64_t{100}, int64_t{0}));
+ CHECK_TRAP64(r.Call(int64_t{-1001}, int64_t{0}));
+ CHECK_TRAP64(r.Call(std::numeric_limits<int64_t>::min(), int64_t{0}));
}
WASM_EXEC_TEST(I64RemU) {
@@ -382,10 +377,10 @@ WASM_EXEC_TEST(I64RemU_Trap) {
REQUIRE(I64RemU);
WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_mode);
BUILD(r, WASM_I64_REMU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- CHECK_EQ(17, r.Call(asu64(217), asu64(100)));
- CHECK_TRAP64(r.Call(asu64(100), asu64(0)));
- CHECK_TRAP64(r.Call(asu64(1001), asu64(0)));
- CHECK_TRAP64(r.Call(std::numeric_limits<uint64_t>::max(), asu64(0)));
+ CHECK_EQ(17, r.Call(uint64_t{217}, uint64_t{100}));
+ CHECK_TRAP64(r.Call(uint64_t{100}, uint64_t{0}));
+ CHECK_TRAP64(r.Call(uint64_t{1001}, uint64_t{0}));
+ CHECK_TRAP64(r.Call(std::numeric_limits<uint64_t>::max(), uint64_t{0}));
}
WASM_EXEC_TEST(I64And) {
@@ -998,6 +993,8 @@ WASM_EXEC_TEST(I64Binops) {
TEST_I64_BINOP(I64Rol, 8728493013947314237, 0xe07af243ac4d219d, 15);
}
+#undef TEST_I64_BINOP
+
#define TEST_I64_CMP(name, expected, a, b) \
do { \
if (WASM_64 || kSupported_##name) \
@@ -1017,6 +1014,8 @@ WASM_EXEC_TEST(I64Compare) {
TEST_I64_CMP(I64GeU, 0, 0x0886A0C58C7AA224, 0x5DDBE5A81FD7EE47);
}
+#undef TEST_I64_CMP
+
WASM_EXEC_TEST(I64Clz) {
REQUIRE(I64Clz);
struct {
@@ -1476,7 +1475,7 @@ WASM_EXEC_TEST(I64Ror) {
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
- int64_t expected = bits::RotateRight64(*i, *j & 0x3f);
+ int64_t expected = base::bits::RotateRight64(*i, *j & 0x3f);
CHECK_EQ(expected, r.Call(*i, *j));
}
}
@@ -1489,7 +1488,7 @@ WASM_EXEC_TEST(I64Rol) {
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
- int64_t expected = bits::RotateLeft64(*i, *j & 0x3f);
+ int64_t expected = base::bits::RotateLeft64(*i, *j & 0x3f);
CHECK_EQ(expected, r.Call(*i, *j));
}
}
@@ -1519,7 +1518,7 @@ WASM_EXEC_TEST(StoreMem_offset_oob_i64) {
CHECK_EQ(0, memcmp(&memory[0], &memory[8 + boundary], memsize));
for (uint32_t offset = boundary + 1; offset < boundary + 19; offset++) {
- CHECK_TRAP(r.Call(offset)); // out of bounds.
+ CHECK_TRAP32(r.Call(offset)); // out of bounds.
}
}
}
@@ -1668,3 +1667,28 @@ WASM_EXEC_TEST(Regress5874) {
r.Call();
}
+
+WASM_EXEC_TEST(Regression_6858) {
+ REQUIRE(I64DivS);
+ // WasmRunner with 5 params and returns, which is the maximum.
+ WasmRunner<int64_t, int64_t, int64_t, int64_t, int64_t> r(execution_mode);
+ BUILD(r, WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ int64_t dividend = 15;
+ int64_t divisor = 0;
+ int64_t filler = 34;
+ CHECK_TRAP64(r.Call(dividend, divisor, filler, filler));
+}
+
+#undef WASM_64
+#undef MIPS
+#undef REQUIRE
+#undef ADD_CODE
+
+// clang-format gets confused about these closing parentheses (wants to change
+// the first comment to "// namespace v8"). Disable it.
+// clang-format off
+} // namespace test_run_wasm_64
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+// clang-format on
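Aside, not part of the patch: the asi64/asu64 cast macros removed above are replaced by braced initialization such as int64_t{100}, which the compiler checks — a narrowing conversion inside braces is ill-formed rather than a silent truncation. A standalone illustration:

int64_t ok = int64_t{100};             // value-preserving, compiles
// int32_t bad = int32_t{5000000000};  // error: narrowing in braced init
int32_t lossy =
    static_cast<int32_t>(5000000000);  // compiles, silently truncates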
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc b/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
index 75823cdfa2..9e15c46f8d 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
@@ -15,27 +15,9 @@
#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-macro-gen.h"
-using namespace v8::base;
-using namespace v8::internal;
-using namespace v8::internal::compiler;
-using namespace v8::internal::wasm;
-
-// for even shorter tests.
-#define B2(a, b) kExprBlock, a, b, kExprEnd
-#define B1(a) kExprBlock, a, kExprEnd
-#define RET(x) x, kExprReturn, 1
-#define RET_I8(x) kExprI8Const, x, kExprReturn, 1
-
-namespace {
-uint32_t GetMatchingRelocInfoCount(Handle<Code> code, RelocInfo::Mode rmode) {
- int filter = 1 << rmode;
- uint32_t ret = 0;
- for (RelocIterator it(*code, filter); !it.done(); it.next()) {
- ++ret;
- }
- return ret;
-}
-}
+namespace v8 {
+namespace internal {
+namespace wasm {
WASM_EXEC_TEST(Int32AsmjsDivS) {
WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
@@ -213,88 +195,6 @@ WASM_EXEC_TEST(StoreMemI32_oob_asm) {
}
}
-#define FOREACH_INT_CHECKED_LOAD_OP(TEST_BODY) \
- TEST_BODY(kExprI32AsmjsLoadMem8S) \
- TEST_BODY(kExprI32AsmjsLoadMem8U) \
- TEST_BODY(kExprI32AsmjsLoadMem16S) \
- TEST_BODY(kExprI32AsmjsLoadMem16U) \
- TEST_BODY(kExprI32AsmjsLoadMem)
-
-#define FOREACH_INT_CHECKED_STORE_OP(TEST_BODY) \
- TEST_BODY(kExprI32AsmjsStoreMem8) \
- TEST_BODY(kExprI32AsmjsStoreMem16) \
- TEST_BODY(kExprI32AsmjsStoreMem)
-
-#define INT_LOAD_TEST(OP_TYPE) \
- TEST(RunWasm_AsmCheckedRelocInfo##OP_TYPE) { \
- WasmRunner<int32_t, uint32_t> r(kExecuteCompiled); \
- r.builder().ChangeOriginToAsmjs(); \
- BUILD(r, WASM_UNOP(OP_TYPE, WASM_GET_LOCAL(0))); \
- CHECK_EQ(1, GetMatchingRelocInfoCount(r.builder().GetFunctionCode(0), \
- RelocInfo::WASM_MEMORY_REFERENCE)); \
- CHECK_NE( \
- 0, GetMatchingRelocInfoCount(r.builder().GetFunctionCode(0), \
- RelocInfo::WASM_MEMORY_SIZE_REFERENCE)); \
- }
-
-FOREACH_INT_CHECKED_LOAD_OP(INT_LOAD_TEST)
-
-#define INT_STORE_TEST(OP_TYPE) \
- TEST(RunWasm_AsmCheckedRelocInfo##OP_TYPE) { \
- WasmRunner<int32_t, uint32_t, uint32_t> r(kExecuteCompiled); \
- r.builder().ChangeOriginToAsmjs(); \
- BUILD(r, WASM_BINOP(OP_TYPE, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))); \
- CHECK_EQ(1, GetMatchingRelocInfoCount(r.builder().GetFunctionCode(0), \
- RelocInfo::WASM_MEMORY_REFERENCE)); \
- CHECK_NE( \
- 0, GetMatchingRelocInfoCount(r.builder().GetFunctionCode(0), \
- RelocInfo::WASM_MEMORY_SIZE_REFERENCE)); \
- }
-
-FOREACH_INT_CHECKED_STORE_OP(INT_STORE_TEST)
-
-TEST(RunWasm_AsmCheckedLoadFloat32RelocInfo) {
- WasmRunner<float, uint32_t> r(kExecuteCompiled);
- r.builder().ChangeOriginToAsmjs();
- BUILD(r, WASM_UNOP(kExprF32AsmjsLoadMem, WASM_GET_LOCAL(0)));
-
- CHECK_EQ(1, GetMatchingRelocInfoCount(r.builder().GetFunctionCode(0),
- RelocInfo::WASM_MEMORY_REFERENCE));
- CHECK_NE(0, GetMatchingRelocInfoCount(r.builder().GetFunctionCode(0),
- RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
-}
-
-TEST(RunWasm_AsmCheckedStoreFloat32RelocInfo) {
- WasmRunner<float, uint32_t, float> r(kExecuteCompiled);
- r.builder().ChangeOriginToAsmjs();
- BUILD(r, WASM_BINOP(kExprF32AsmjsStoreMem, WASM_GET_LOCAL(0),
- WASM_GET_LOCAL(1)));
-
- CHECK_EQ(1, GetMatchingRelocInfoCount(r.builder().GetFunctionCode(0),
- RelocInfo::WASM_MEMORY_REFERENCE));
- CHECK_NE(0, GetMatchingRelocInfoCount(r.builder().GetFunctionCode(0),
- RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
-}
-
-TEST(RunWasm_AsmCheckedLoadFloat64RelocInfo) {
- WasmRunner<double, uint32_t> r(kExecuteCompiled);
- r.builder().ChangeOriginToAsmjs();
- BUILD(r, WASM_UNOP(kExprF64AsmjsLoadMem, WASM_GET_LOCAL(0)));
-
- CHECK_EQ(1, GetMatchingRelocInfoCount(r.builder().GetFunctionCode(0),
- RelocInfo::WASM_MEMORY_REFERENCE));
- CHECK_NE(0, GetMatchingRelocInfoCount(r.builder().GetFunctionCode(0),
- RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
-}
-
-TEST(RunWasm_AsmCheckedStoreFloat64RelocInfo) {
- WasmRunner<double, uint32_t, double> r(kExecuteCompiled);
- r.builder().ChangeOriginToAsmjs();
- BUILD(r, WASM_BINOP(kExprF64AsmjsStoreMem, WASM_GET_LOCAL(0),
- WASM_GET_LOCAL(1)));
-
- CHECK_EQ(1, GetMatchingRelocInfoCount(r.builder().GetFunctionCode(0),
- RelocInfo::WASM_MEMORY_REFERENCE));
- CHECK_NE(0, GetMatchingRelocInfoCount(r.builder().GetFunctionCode(0),
- RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
-}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc b/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
index 70b273aa48..cb291b8741 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
@@ -2,14 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
#include "test/common/wasm/wasm-macro-gen.h"
-#define WASM_ATOMICS_OP(op) kAtomicPrefix, static_cast<byte>(op)
-#define WASM_ATOMICS_BINOP(op, x, y) x, y, WASM_ATOMICS_OP(op)
-#define WASM_ATOMICS_TERNARY_OP(op, x, y, z) x, y, z, WASM_ATOMICS_OP(op)
+namespace v8 {
+namespace internal {
+namespace wasm {
typedef uint32_t (*Uint32BinOp)(uint32_t, uint32_t);
typedef uint16_t (*Uint16BinOp)(uint16_t, uint16_t);
@@ -55,8 +56,10 @@ void RunU32BinOp(WasmOpcode wasm_op, Uint32BinOp expected_op) {
EXPERIMENTAL_FLAG_SCOPE(threads);
WasmRunner<uint32_t, uint32_t> r(kExecuteCompiled);
uint32_t* memory = r.builder().AddMemoryElems<uint32_t>(8);
+ r.builder().SetHasSharedMemory();
- BUILD(r, WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_1(0), WASM_GET_LOCAL(0)));
+ BUILD(r, WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_1(0), WASM_GET_LOCAL(0),
+ MachineRepresentation::kWord32));
FOR_UINT32_INPUTS(i) {
uint32_t initial = *i;
@@ -69,19 +72,21 @@ void RunU32BinOp(WasmOpcode wasm_op, Uint32BinOp expected_op) {
}
}
-WASM_EXEC_TEST(I32Add) { RunU32BinOp(kExprI32AtomicAdd, Add); }
-WASM_EXEC_TEST(I32Sub) { RunU32BinOp(kExprI32AtomicSub, Sub); }
-WASM_EXEC_TEST(I32And) { RunU32BinOp(kExprI32AtomicAnd, And); }
-WASM_EXEC_TEST(I32Or) { RunU32BinOp(kExprI32AtomicOr, Or); }
-WASM_EXEC_TEST(I32Xor) { RunU32BinOp(kExprI32AtomicXor, Xor); }
-WASM_EXEC_TEST(I32Exchange) { RunU32BinOp(kExprI32AtomicExchange, Exchange); }
+TEST(I32AtomicAdd) { RunU32BinOp(kExprI32AtomicAdd, Add); }
+TEST(I32AtomicSub) { RunU32BinOp(kExprI32AtomicSub, Sub); }
+TEST(I32AtomicAnd) { RunU32BinOp(kExprI32AtomicAnd, And); }
+TEST(I32AtomicOr) { RunU32BinOp(kExprI32AtomicOr, Or); }
+TEST(I32AtomicXor) { RunU32BinOp(kExprI32AtomicXor, Xor); }
+TEST(I32AtomicExchange) { RunU32BinOp(kExprI32AtomicExchange, Exchange); }
void RunU16BinOp(WasmOpcode wasm_op, Uint16BinOp expected_op) {
EXPERIMENTAL_FLAG_SCOPE(threads);
WasmRunner<uint32_t, uint32_t> r(kExecuteCompiled);
+ r.builder().SetHasSharedMemory();
uint16_t* memory = r.builder().AddMemoryElems<uint16_t>(8);
- BUILD(r, WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_1(0), WASM_GET_LOCAL(0)));
+ BUILD(r, WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_1(0), WASM_GET_LOCAL(0),
+ MachineRepresentation::kWord16));
FOR_UINT16_INPUTS(i) {
uint16_t initial = *i;
@@ -94,21 +99,21 @@ void RunU16BinOp(WasmOpcode wasm_op, Uint16BinOp expected_op) {
}
}
-WASM_EXEC_TEST(I32Add16U) { RunU16BinOp(kExprI32AtomicAdd16U, Add); }
-WASM_EXEC_TEST(I32Sub16U) { RunU16BinOp(kExprI32AtomicSub16U, Sub); }
-WASM_EXEC_TEST(I32And16U) { RunU16BinOp(kExprI32AtomicAnd16U, And); }
-WASM_EXEC_TEST(I32Or16U) { RunU16BinOp(kExprI32AtomicOr16U, Or); }
-WASM_EXEC_TEST(I32Xor16U) { RunU16BinOp(kExprI32AtomicXor16U, Xor); }
-WASM_EXEC_TEST(I32Exchange16U) {
- RunU16BinOp(kExprI32AtomicExchange16U, Exchange);
-}
+TEST(I32AtomicAdd16U) { RunU16BinOp(kExprI32AtomicAdd16U, Add); }
+TEST(I32AtomicSub16U) { RunU16BinOp(kExprI32AtomicSub16U, Sub); }
+TEST(I32AtomicAnd16U) { RunU16BinOp(kExprI32AtomicAnd16U, And); }
+TEST(I32AtomicOr16U) { RunU16BinOp(kExprI32AtomicOr16U, Or); }
+TEST(I32AtomicXor16U) { RunU16BinOp(kExprI32AtomicXor16U, Xor); }
+TEST(I32AtomicExchange16U) { RunU16BinOp(kExprI32AtomicExchange16U, Exchange); }
void RunU8BinOp(WasmOpcode wasm_op, Uint8BinOp expected_op) {
EXPERIMENTAL_FLAG_SCOPE(threads);
WasmRunner<uint32_t, uint32_t> r(kExecuteCompiled);
+ r.builder().SetHasSharedMemory();
uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(8);
- BUILD(r, WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_1(0), WASM_GET_LOCAL(0)));
+ BUILD(r, WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_1(0), WASM_GET_LOCAL(0),
+ MachineRepresentation::kWord8));
FOR_UINT8_INPUTS(i) {
uint8_t initial = *i;
@@ -121,22 +126,21 @@ void RunU8BinOp(WasmOpcode wasm_op, Uint8BinOp expected_op) {
}
}
-WASM_EXEC_TEST(I32Add8U) { RunU8BinOp(kExprI32AtomicAdd8U, Add); }
-WASM_EXEC_TEST(I32Sub8U) { RunU8BinOp(kExprI32AtomicSub8U, Sub); }
-WASM_EXEC_TEST(I32And8U) { RunU8BinOp(kExprI32AtomicAnd8U, And); }
-WASM_EXEC_TEST(I32Or8U) { RunU8BinOp(kExprI32AtomicOr8U, Or); }
-WASM_EXEC_TEST(I32Xor8U) { RunU8BinOp(kExprI32AtomicXor8U, Xor); }
-WASM_EXEC_TEST(I32Exchange8U) {
- RunU8BinOp(kExprI32AtomicExchange8U, Exchange);
-}
+TEST(I32AtomicAdd8U) { RunU8BinOp(kExprI32AtomicAdd8U, Add); }
+TEST(I32AtomicSub8U) { RunU8BinOp(kExprI32AtomicSub8U, Sub); }
+TEST(I32AtomicAnd8U) { RunU8BinOp(kExprI32AtomicAnd8U, And); }
+TEST(I32AtomicOr8U) { RunU8BinOp(kExprI32AtomicOr8U, Or); }
+TEST(I32AtomicXor8U) { RunU8BinOp(kExprI32AtomicXor8U, Xor); }
+TEST(I32AtomicExchange8U) { RunU8BinOp(kExprI32AtomicExchange8U, Exchange); }
-WASM_EXEC_TEST(I32CompareExchange) {
+TEST(I32AtomicCompareExchange) {
EXPERIMENTAL_FLAG_SCOPE(threads);
WasmRunner<uint32_t, uint32_t, uint32_t> r(kExecuteCompiled);
+ r.builder().SetHasSharedMemory();
uint32_t* memory = r.builder().AddMemoryElems<uint32_t>(8);
- BUILD(r,
- WASM_ATOMICS_TERNARY_OP(kExprI32AtomicCompareExchange, WASM_I32V_1(0),
- WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ BUILD(r, WASM_ATOMICS_TERNARY_OP(
+ kExprI32AtomicCompareExchange, WASM_I32V_1(0), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1), MachineRepresentation::kWord32));
FOR_UINT32_INPUTS(i) {
uint32_t initial = *i;
@@ -149,13 +153,15 @@ WASM_EXEC_TEST(I32CompareExchange) {
}
}
-WASM_EXEC_TEST(I32CompareExchange16U) {
+TEST(I32AtomicCompareExchange16U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
WasmRunner<uint32_t, uint32_t, uint32_t> r(kExecuteCompiled);
+ r.builder().SetHasSharedMemory();
uint16_t* memory = r.builder().AddMemoryElems<uint16_t>(8);
BUILD(r, WASM_ATOMICS_TERNARY_OP(kExprI32AtomicCompareExchange16U,
WASM_I32V_1(0), WASM_GET_LOCAL(0),
- WASM_GET_LOCAL(1)));
+ WASM_GET_LOCAL(1),
+ MachineRepresentation::kWord16));
FOR_UINT16_INPUTS(i) {
uint16_t initial = *i;
@@ -168,13 +174,15 @@ WASM_EXEC_TEST(I32CompareExchange16U) {
}
}
-WASM_EXEC_TEST(I32CompareExchange8U) {
+TEST(I32AtomicCompareExchange8U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
WasmRunner<uint32_t, uint32_t, uint32_t> r(kExecuteCompiled);
+ r.builder().SetHasSharedMemory();
uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(8);
BUILD(r,
WASM_ATOMICS_TERNARY_OP(kExprI32AtomicCompareExchange8U, WASM_I32V_1(0),
- WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ MachineRepresentation::kWord8));
FOR_UINT8_INPUTS(i) {
uint8_t initial = *i;
@@ -186,3 +194,110 @@ WASM_EXEC_TEST(I32CompareExchange8U) {
}
}
}
+
+TEST(I32AtomicLoad) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ WasmRunner<uint32_t> r(kExecuteCompiled);
+ r.builder().SetHasSharedMemory();
+ uint32_t* memory = r.builder().AddMemoryElems<uint32_t>(8);
+ BUILD(r, WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad, WASM_ZERO,
+ MachineRepresentation::kWord32));
+
+ FOR_UINT32_INPUTS(i) {
+ uint32_t expected = *i;
+ r.builder().WriteMemory(&memory[0], expected);
+ CHECK_EQ(expected, r.Call());
+ }
+}
+
+TEST(I32AtomicLoad16U) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ WasmRunner<uint32_t> r(kExecuteCompiled);
+ r.builder().SetHasSharedMemory();
+ uint16_t* memory = r.builder().AddMemoryElems<uint16_t>(8);
+ BUILD(r, WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad16U, WASM_ZERO,
+ MachineRepresentation::kWord16));
+
+ FOR_UINT16_INPUTS(i) {
+ uint16_t expected = *i;
+ r.builder().WriteMemory(&memory[0], expected);
+ CHECK_EQ(expected, r.Call());
+ }
+}
+
+TEST(I32AtomicLoad8U) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ WasmRunner<uint32_t> r(kExecuteCompiled);
+ r.builder().SetHasSharedMemory();
+ uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(8);
+ BUILD(r, WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad8U, WASM_ZERO,
+ MachineRepresentation::kWord8));
+
+ FOR_UINT8_INPUTS(i) {
+ uint8_t expected = *i;
+ r.builder().WriteMemory(&memory[0], expected);
+ CHECK_EQ(expected, r.Call());
+ }
+}
+
+TEST(I32AtomicStoreLoad) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ WasmRunner<uint32_t, uint32_t> r(kExecuteCompiled);
+ r.builder().SetHasSharedMemory();
+ uint32_t* memory = r.builder().AddMemoryElems<uint32_t>(8);
+
+ BUILD(r,
+ WASM_ATOMICS_STORE_OP(kExprI32AtomicStore, WASM_ZERO, WASM_GET_LOCAL(0),
+ MachineRepresentation::kWord32),
+ WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad, WASM_ZERO,
+ MachineRepresentation::kWord32));
+
+ FOR_UINT32_INPUTS(i) {
+ uint32_t expected = *i;
+ CHECK_EQ(expected, r.Call(*i));
+ CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
+ }
+}
+
+TEST(I32AtomicStoreLoad16U) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ WasmRunner<uint32_t, uint32_t> r(kExecuteCompiled);
+ r.builder().SetHasSharedMemory();
+ uint16_t* memory = r.builder().AddMemoryElems<uint16_t>(8);
+
+ BUILD(
+ r,
+ WASM_ATOMICS_STORE_OP(kExprI32AtomicStore16U, WASM_ZERO,
+ WASM_GET_LOCAL(0), MachineRepresentation::kWord16),
+ WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad16U, WASM_ZERO,
+ MachineRepresentation::kWord16));
+
+ FOR_UINT16_INPUTS(i) {
+ uint16_t expected = *i;
+ CHECK_EQ(expected, r.Call(*i));
+ CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
+ }
+}
+
+TEST(I32AtomicStoreLoad8U) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ WasmRunner<uint32_t, uint32_t> r(kExecuteCompiled);
+ r.builder().SetHasSharedMemory();
+ uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(8);
+
+ BUILD(r,
+ WASM_ATOMICS_STORE_OP(kExprI32AtomicStore8U, WASM_ZERO,
+ WASM_GET_LOCAL(0), MachineRepresentation::kWord8),
+ WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad8U, WASM_ZERO,
+ MachineRepresentation::kWord8));
+
+ FOR_UINT8_INPUTS(i) {
+ uint8_t expected = *i;
+ CHECK_EQ(expected, r.Call(*i));
+ CHECK_EQ(*i, r.builder().ReadMemory(&memory[0]));
+ }
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
index 44a29b980d..10ba64c993 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
@@ -16,14 +16,10 @@
#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-macro-gen.h"
-using namespace v8::base;
-using namespace v8::internal;
-using namespace v8::internal::compiler;
-using namespace v8::internal::wasm;
-
namespace v8 {
namespace internal {
namespace wasm {
+namespace test_run_wasm_interpreter {
TEST(Run_WasmInt8Const_i) {
WasmRunner<int32_t> r(kExecuteInterpreted);
@@ -428,6 +424,7 @@ TEST(InterpreterLoadWithoutMemory) {
CHECK_TRAP32(r.Call(0));
}
+} // namespace test_run_wasm_interpreter
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
index 8bbe59b8c2..e4bb9b0822 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
@@ -14,16 +14,9 @@
#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-macro-gen.h"
-using namespace v8::base;
-using namespace v8::internal;
-using namespace v8::internal::compiler;
-using namespace v8::internal::wasm;
-
-#define BUILD(r, ...) \
- do { \
- byte code[] = {__VA_ARGS__}; \
- r.Build(code, code + arraysize(code)); \
- } while (false)
+namespace v8 {
+namespace internal {
+namespace wasm {
#define ADD_CODE(vec, ...) \
do { \
@@ -47,7 +40,7 @@ class PredictableInputValues {
};
uint32_t AddJSSelector(TestingModuleBuilder* builder, FunctionSig* sig,
- int which) {
+ int which, Handle<FixedArray> js_imports_table) {
const int kMaxParams = 11;
static const char* formals[kMaxParams] = {"",
"a",
@@ -68,7 +61,7 @@ uint32_t AddJSSelector(TestingModuleBuilder* builder, FunctionSig* sig,
SNPrintF(source, "(function(%s) { return %c; })",
formals[sig->parameter_count()], param);
- return builder->AddJsFunction(sig, source.start());
+ return builder->AddJsFunction(sig, source.start(), js_imports_table);
}
void EXPECT_CALL(double expected, Handle<JSFunction> jsfunc,
@@ -84,7 +77,7 @@ void EXPECT_CALL(double expected, Handle<JSFunction> jsfunc,
CHECK_EQ(expected, Smi::ToInt(*result));
} else {
CHECK(result->IsHeapNumber());
- CheckFloatEq(expected, HeapNumber::cast(*result)->value());
+ CHECK_FLOAT_EQ(expected, HeapNumber::cast(*result)->value());
}
}
@@ -97,8 +90,8 @@ void EXPECT_CALL(double expected, Handle<JSFunction> jsfunc, double a,
}
} // namespace
-TEST(Run_Int32Sub_jswrapped) {
- WasmRunner<int, int, int> r(kExecuteCompiled);
+WASM_EXEC_TEST(Run_Int32Sub_jswrapped) {
+ WasmRunner<int, int, int> r(execution_mode);
BUILD(r, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
Handle<JSFunction> jsfunc = r.builder().WrapCode(r.function()->func_index);
@@ -106,8 +99,8 @@ TEST(Run_Int32Sub_jswrapped) {
EXPECT_CALL(-8723487, jsfunc, -8000000, 723487);
}
-TEST(Run_Float32Div_jswrapped) {
- WasmRunner<float, float, float> r(kExecuteCompiled);
+WASM_EXEC_TEST(Run_Float32Div_jswrapped) {
+ WasmRunner<float, float, float> r(execution_mode);
BUILD(r, WASM_F32_DIV(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
Handle<JSFunction> jsfunc = r.builder().WrapCode(r.function()->func_index);
@@ -115,8 +108,8 @@ TEST(Run_Float32Div_jswrapped) {
EXPECT_CALL(64, jsfunc, -16, -0.25);
}
-TEST(Run_Float64Add_jswrapped) {
- WasmRunner<double, double, double> r(kExecuteCompiled);
+WASM_EXEC_TEST(Run_Float64Add_jswrapped) {
+ WasmRunner<double, double, double> r(execution_mode);
BUILD(r, WASM_F64_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
Handle<JSFunction> jsfunc = r.builder().WrapCode(r.function()->func_index);
@@ -124,8 +117,8 @@ TEST(Run_Float64Add_jswrapped) {
EXPECT_CALL(-5.5, jsfunc, -5.25, -0.25);
}
-TEST(Run_I32Popcount_jswrapped) {
- WasmRunner<int, int> r(kExecuteCompiled);
+WASM_EXEC_TEST(Run_I32Popcount_jswrapped) {
+ WasmRunner<int, int> r(execution_mode);
BUILD(r, WASM_I32_POPCNT(WASM_GET_LOCAL(0)));
Handle<JSFunction> jsfunc = r.builder().WrapCode(r.function()->func_index);
@@ -134,11 +127,13 @@ TEST(Run_I32Popcount_jswrapped) {
EXPECT_CALL(6, jsfunc, 0x3F, 0);
}
-TEST(Run_CallJS_Add_jswrapped) {
- WasmRunner<int, int> r(kExecuteCompiled);
+WASM_EXEC_TEST(Run_CallJS_Add_jswrapped) {
+ WasmRunner<int, int> r(execution_mode);
TestSignatures sigs;
- uint32_t js_index =
- r.builder().AddJsFunction(sigs.i_i(), "(function(a) { return a + 99; })");
+ Handle<FixedArray> js_imports_table =
+ r.main_isolate()->factory()->NewFixedArray(2 * 3 + 1, TENURED);
+ uint32_t js_index = r.builder().AddJsFunction(
+ sigs.i_i(), "(function(a) { return a + 99; })", js_imports_table);
BUILD(r, WASM_CALL_FUNCTION(js_index, WASM_GET_LOCAL(0)));
Handle<JSFunction> jsfunc = r.builder().WrapCode(r.function()->func_index);
@@ -148,7 +143,7 @@ TEST(Run_CallJS_Add_jswrapped) {
EXPECT_CALL(-666666801, jsfunc, -666666900, -1);
}
-void RunJSSelectTest(int which) {
+void RunJSSelectTest(WasmExecutionMode mode, int which) {
const int kMaxParams = 8;
PredictableInputValues inputs(0x100);
ValueType type = kWasmF64;
@@ -158,8 +153,12 @@ void RunJSSelectTest(int which) {
HandleScope scope(CcTest::InitIsolateOnce());
FunctionSig sig(1, num_params, types);
- WasmRunner<void> r(kExecuteCompiled);
- uint32_t js_index = AddJSSelector(&r.builder(), &sig, which);
+ WasmRunner<void> r(mode);
+ Handle<FixedArray> js_imports_table =
+ scope.isolate()->factory()->NewFixedArray(2 * 3 + 1, TENURED);
+ uint32_t js_index =
+ AddJSSelector(&r.builder(), &sig, which, js_imports_table);
+
WasmFunctionCompiler& t = r.NewFunction(&sig);
{
@@ -182,47 +181,47 @@ void RunJSSelectTest(int which) {
}
}
-TEST(Run_JSSelect_0) {
+WASM_EXEC_TEST(Run_JSSelect_0) {
CcTest::InitializeVM();
- RunJSSelectTest(0);
+ RunJSSelectTest(execution_mode, 0);
}
-TEST(Run_JSSelect_1) {
+WASM_EXEC_TEST(Run_JSSelect_1) {
CcTest::InitializeVM();
- RunJSSelectTest(1);
+ RunJSSelectTest(execution_mode, 1);
}
-TEST(Run_JSSelect_2) {
+WASM_EXEC_TEST(Run_JSSelect_2) {
CcTest::InitializeVM();
- RunJSSelectTest(2);
+ RunJSSelectTest(execution_mode, 2);
}
-TEST(Run_JSSelect_3) {
+WASM_EXEC_TEST(Run_JSSelect_3) {
CcTest::InitializeVM();
- RunJSSelectTest(3);
+ RunJSSelectTest(execution_mode, 3);
}
-TEST(Run_JSSelect_4) {
+WASM_EXEC_TEST(Run_JSSelect_4) {
CcTest::InitializeVM();
- RunJSSelectTest(4);
+ RunJSSelectTest(execution_mode, 4);
}
-TEST(Run_JSSelect_5) {
+WASM_EXEC_TEST(Run_JSSelect_5) {
CcTest::InitializeVM();
- RunJSSelectTest(5);
+ RunJSSelectTest(execution_mode, 5);
}
-TEST(Run_JSSelect_6) {
+WASM_EXEC_TEST(Run_JSSelect_6) {
CcTest::InitializeVM();
- RunJSSelectTest(6);
+ RunJSSelectTest(execution_mode, 6);
}
-TEST(Run_JSSelect_7) {
+WASM_EXEC_TEST(Run_JSSelect_7) {
CcTest::InitializeVM();
- RunJSSelectTest(7);
+ RunJSSelectTest(execution_mode, 7);
}
-void RunWASMSelectTest(int which) {
+void RunWASMSelectTest(WasmExecutionMode mode, int which) {
PredictableInputValues inputs(0x200);
Isolate* isolate = CcTest::InitIsolateOnce();
const int kMaxParams = 8;
@@ -232,7 +231,7 @@ void RunWASMSelectTest(int which) {
type, type, type, type};
FunctionSig sig(1, num_params, types);
- WasmRunner<void> r(kExecuteCompiled);
+ WasmRunner<void> r(mode);
WasmFunctionCompiler& t = r.NewFunction(&sig);
BUILD(t, WASM_GET_LOCAL(which));
Handle<JSFunction> jsfunc = r.builder().WrapCode(t.function_index());
@@ -253,47 +252,48 @@ void RunWASMSelectTest(int which) {
}
}
-TEST(Run_WASMSelect_0) {
+WASM_EXEC_TEST(Run_WASMSelect_0) {
CcTest::InitializeVM();
- RunWASMSelectTest(0);
+ RunWASMSelectTest(execution_mode, 0);
}
-TEST(Run_WASMSelect_1) {
+WASM_EXEC_TEST(Run_WASMSelect_1) {
CcTest::InitializeVM();
- RunWASMSelectTest(1);
+ RunWASMSelectTest(execution_mode, 1);
}
-TEST(Run_WASMSelect_2) {
+WASM_EXEC_TEST(Run_WASMSelect_2) {
CcTest::InitializeVM();
- RunWASMSelectTest(2);
+ RunWASMSelectTest(execution_mode, 2);
}
-TEST(Run_WASMSelect_3) {
+WASM_EXEC_TEST(Run_WASMSelect_3) {
CcTest::InitializeVM();
- RunWASMSelectTest(3);
+ RunWASMSelectTest(execution_mode, 3);
}
-TEST(Run_WASMSelect_4) {
+WASM_EXEC_TEST(Run_WASMSelect_4) {
CcTest::InitializeVM();
- RunWASMSelectTest(4);
+ RunWASMSelectTest(execution_mode, 4);
}
-TEST(Run_WASMSelect_5) {
+WASM_EXEC_TEST(Run_WASMSelect_5) {
CcTest::InitializeVM();
- RunWASMSelectTest(5);
+ RunWASMSelectTest(execution_mode, 5);
}
-TEST(Run_WASMSelect_6) {
+WASM_EXEC_TEST(Run_WASMSelect_6) {
CcTest::InitializeVM();
- RunWASMSelectTest(6);
+ RunWASMSelectTest(execution_mode, 6);
}
-TEST(Run_WASMSelect_7) {
+WASM_EXEC_TEST(Run_WASMSelect_7) {
CcTest::InitializeVM();
- RunWASMSelectTest(7);
+ RunWASMSelectTest(execution_mode, 7);
}
-void RunWASMSelectAlignTest(int num_args, int num_params) {
+void RunWASMSelectAlignTest(WasmExecutionMode mode, int num_args,
+ int num_params) {
PredictableInputValues inputs(0x300);
Isolate* isolate = CcTest::InitIsolateOnce();
const int kMaxParams = 10;
@@ -304,7 +304,7 @@ void RunWASMSelectAlignTest(int num_args, int num_params) {
FunctionSig sig(1, num_params, types);
for (int which = 0; which < num_params; which++) {
- WasmRunner<void> r(kExecuteCompiled);
+ WasmRunner<void> r(mode);
WasmFunctionCompiler& t = r.NewFunction(&sig);
BUILD(t, WASM_GET_LOCAL(which));
Handle<JSFunction> jsfunc = r.builder().WrapCode(t.function_index());
@@ -326,68 +326,69 @@ void RunWASMSelectAlignTest(int num_args, int num_params) {
}
}
-TEST(Run_WASMSelectAlign_0) {
+WASM_EXEC_TEST(Run_WASMSelectAlign_0) {
CcTest::InitializeVM();
- RunWASMSelectAlignTest(0, 1);
- RunWASMSelectAlignTest(0, 2);
+ RunWASMSelectAlignTest(execution_mode, 0, 1);
+ RunWASMSelectAlignTest(execution_mode, 0, 2);
}
-TEST(Run_WASMSelectAlign_1) {
+WASM_EXEC_TEST(Run_WASMSelectAlign_1) {
CcTest::InitializeVM();
- RunWASMSelectAlignTest(1, 2);
- RunWASMSelectAlignTest(1, 3);
+ RunWASMSelectAlignTest(execution_mode, 1, 2);
+ RunWASMSelectAlignTest(execution_mode, 1, 3);
}
-TEST(Run_WASMSelectAlign_2) {
+WASM_EXEC_TEST(Run_WASMSelectAlign_2) {
CcTest::InitializeVM();
- RunWASMSelectAlignTest(2, 3);
- RunWASMSelectAlignTest(2, 4);
+ RunWASMSelectAlignTest(execution_mode, 2, 3);
+ RunWASMSelectAlignTest(execution_mode, 2, 4);
}
-TEST(Run_WASMSelectAlign_3) {
+WASM_EXEC_TEST(Run_WASMSelectAlign_3) {
CcTest::InitializeVM();
- RunWASMSelectAlignTest(3, 3);
- RunWASMSelectAlignTest(3, 4);
+ RunWASMSelectAlignTest(execution_mode, 3, 3);
+ RunWASMSelectAlignTest(execution_mode, 3, 4);
}
-TEST(Run_WASMSelectAlign_4) {
+WASM_EXEC_TEST(Run_WASMSelectAlign_4) {
CcTest::InitializeVM();
- RunWASMSelectAlignTest(4, 3);
- RunWASMSelectAlignTest(4, 4);
+ RunWASMSelectAlignTest(execution_mode, 4, 3);
+ RunWASMSelectAlignTest(execution_mode, 4, 4);
}
-TEST(Run_WASMSelectAlign_7) {
+WASM_EXEC_TEST(Run_WASMSelectAlign_7) {
CcTest::InitializeVM();
- RunWASMSelectAlignTest(7, 5);
- RunWASMSelectAlignTest(7, 6);
- RunWASMSelectAlignTest(7, 7);
+ RunWASMSelectAlignTest(execution_mode, 7, 5);
+ RunWASMSelectAlignTest(execution_mode, 7, 6);
+ RunWASMSelectAlignTest(execution_mode, 7, 7);
}
-TEST(Run_WASMSelectAlign_8) {
+WASM_EXEC_TEST(Run_WASMSelectAlign_8) {
CcTest::InitializeVM();
- RunWASMSelectAlignTest(8, 5);
- RunWASMSelectAlignTest(8, 6);
- RunWASMSelectAlignTest(8, 7);
- RunWASMSelectAlignTest(8, 8);
+ RunWASMSelectAlignTest(execution_mode, 8, 5);
+ RunWASMSelectAlignTest(execution_mode, 8, 6);
+ RunWASMSelectAlignTest(execution_mode, 8, 7);
+ RunWASMSelectAlignTest(execution_mode, 8, 8);
}
-TEST(Run_WASMSelectAlign_9) {
+WASM_EXEC_TEST(Run_WASMSelectAlign_9) {
CcTest::InitializeVM();
- RunWASMSelectAlignTest(9, 6);
- RunWASMSelectAlignTest(9, 7);
- RunWASMSelectAlignTest(9, 8);
- RunWASMSelectAlignTest(9, 9);
+ RunWASMSelectAlignTest(execution_mode, 9, 6);
+ RunWASMSelectAlignTest(execution_mode, 9, 7);
+ RunWASMSelectAlignTest(execution_mode, 9, 8);
+ RunWASMSelectAlignTest(execution_mode, 9, 9);
}
-TEST(Run_WASMSelectAlign_10) {
+WASM_EXEC_TEST(Run_WASMSelectAlign_10) {
CcTest::InitializeVM();
- RunWASMSelectAlignTest(10, 7);
- RunWASMSelectAlignTest(10, 8);
- RunWASMSelectAlignTest(10, 9);
- RunWASMSelectAlignTest(10, 10);
+ RunWASMSelectAlignTest(execution_mode, 10, 7);
+ RunWASMSelectAlignTest(execution_mode, 10, 8);
+ RunWASMSelectAlignTest(execution_mode, 10, 9);
+ RunWASMSelectAlignTest(execution_mode, 10, 10);
}
-void RunJSSelectAlignTest(int num_args, int num_params) {
+void RunJSSelectAlignTest(WasmExecutionMode mode, int num_args,
+ int num_params) {
PredictableInputValues inputs(0x400);
Isolate* isolate = CcTest::InitIsolateOnce();
Factory* factory = isolate->factory();
@@ -416,8 +417,11 @@ void RunJSSelectAlignTest(int num_args, int num_params) {
// Call different select JS functions.
for (int which = 0; which < num_params; which++) {
- WasmRunner<void> r(kExecuteCompiled);
- uint32_t js_index = AddJSSelector(&r.builder(), &sig, which);
+ WasmRunner<void> r(mode);
+ Handle<FixedArray> js_imports_table =
+ factory->NewFixedArray(2 * 3 + 1, TENURED);
+ uint32_t js_index =
+ AddJSSelector(&r.builder(), &sig, which, js_imports_table);
CHECK_EQ(predicted_js_index, js_index);
WasmFunctionCompiler& t = r.NewFunction(&sig);
t.Build(&code[0], &code[end]);
@@ -443,64 +447,70 @@ void RunJSSelectAlignTest(int num_args, int num_params) {
}
}
-TEST(Run_JSSelectAlign_0) {
+WASM_EXEC_TEST(Run_JSSelectAlign_0) {
CcTest::InitializeVM();
- RunJSSelectAlignTest(0, 1);
- RunJSSelectAlignTest(0, 2);
+ RunJSSelectAlignTest(execution_mode, 0, 1);
+ RunJSSelectAlignTest(execution_mode, 0, 2);
}
-TEST(Run_JSSelectAlign_1) {
+WASM_EXEC_TEST(Run_JSSelectAlign_1) {
CcTest::InitializeVM();
- RunJSSelectAlignTest(1, 2);
- RunJSSelectAlignTest(1, 3);
+ RunJSSelectAlignTest(execution_mode, 1, 2);
+ RunJSSelectAlignTest(execution_mode, 1, 3);
}
-TEST(Run_JSSelectAlign_2) {
+WASM_EXEC_TEST(Run_JSSelectAlign_2) {
CcTest::InitializeVM();
- RunJSSelectAlignTest(2, 3);
- RunJSSelectAlignTest(2, 4);
+ RunJSSelectAlignTest(execution_mode, 2, 3);
+ RunJSSelectAlignTest(execution_mode, 2, 4);
}
-TEST(Run_JSSelectAlign_3) {
+WASM_EXEC_TEST(Run_JSSelectAlign_3) {
CcTest::InitializeVM();
- RunJSSelectAlignTest(3, 3);
- RunJSSelectAlignTest(3, 4);
+ RunJSSelectAlignTest(execution_mode, 3, 3);
+ RunJSSelectAlignTest(execution_mode, 3, 4);
}
-TEST(Run_JSSelectAlign_4) {
+WASM_EXEC_TEST(Run_JSSelectAlign_4) {
CcTest::InitializeVM();
- RunJSSelectAlignTest(4, 3);
- RunJSSelectAlignTest(4, 4);
+ RunJSSelectAlignTest(execution_mode, 4, 3);
+ RunJSSelectAlignTest(execution_mode, 4, 4);
}
-TEST(Run_JSSelectAlign_7) {
+WASM_EXEC_TEST(Run_JSSelectAlign_7) {
CcTest::InitializeVM();
- RunJSSelectAlignTest(7, 3);
- RunJSSelectAlignTest(7, 4);
- RunJSSelectAlignTest(7, 4);
- RunJSSelectAlignTest(7, 4);
+ RunJSSelectAlignTest(execution_mode, 7, 3);
+ RunJSSelectAlignTest(execution_mode, 7, 4);
+ RunJSSelectAlignTest(execution_mode, 7, 4);
+ RunJSSelectAlignTest(execution_mode, 7, 4);
}
-TEST(Run_JSSelectAlign_8) {
+WASM_EXEC_TEST(Run_JSSelectAlign_8) {
CcTest::InitializeVM();
- RunJSSelectAlignTest(8, 5);
- RunJSSelectAlignTest(8, 6);
- RunJSSelectAlignTest(8, 7);
- RunJSSelectAlignTest(8, 8);
+ RunJSSelectAlignTest(execution_mode, 8, 5);
+ RunJSSelectAlignTest(execution_mode, 8, 6);
+ RunJSSelectAlignTest(execution_mode, 8, 7);
+ RunJSSelectAlignTest(execution_mode, 8, 8);
}
-TEST(Run_JSSelectAlign_9) {
+WASM_EXEC_TEST(Run_JSSelectAlign_9) {
CcTest::InitializeVM();
- RunJSSelectAlignTest(9, 6);
- RunJSSelectAlignTest(9, 7);
- RunJSSelectAlignTest(9, 8);
- RunJSSelectAlignTest(9, 9);
+ RunJSSelectAlignTest(execution_mode, 9, 6);
+ RunJSSelectAlignTest(execution_mode, 9, 7);
+ RunJSSelectAlignTest(execution_mode, 9, 8);
+ RunJSSelectAlignTest(execution_mode, 9, 9);
}
-TEST(Run_JSSelectAlign_10) {
+WASM_EXEC_TEST(Run_JSSelectAlign_10) {
CcTest::InitializeVM();
- RunJSSelectAlignTest(10, 7);
- RunJSSelectAlignTest(10, 8);
- RunJSSelectAlignTest(10, 9);
- RunJSSelectAlignTest(10, 10);
+ RunJSSelectAlignTest(execution_mode, 10, 7);
+ RunJSSelectAlignTest(execution_mode, 10, 8);
+ RunJSSelectAlignTest(execution_mode, 10, 9);
+ RunJSSelectAlignTest(execution_mode, 10, 10);
}
+
+#undef ADD_CODE
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
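
Note on the conversion pattern above: every TEST(Run_WASMSelect*) body became a WASM_EXEC_TEST body, and each Run* helper gained a leading WasmExecutionMode parameter, so the same test logic is exercised by every execution tier instead of only kExecuteCompiled. The macro itself lives in test/cctest/wasm/wasm-run-utils.h and is not part of this diff; a minimal sketch of the shape it is assumed to have:

// Sketch only -- the exact definition is in wasm-run-utils.h.
// One cctest is generated per tier; both funnel into the shared body
// through the `execution_mode` parameter the hunks above thread around.
#define WASM_EXEC_TEST(name)                                               \
  void RunWasm_##name(WasmExecutionMode execution_mode);                   \
  TEST(RunWasmCompiled_##name) { RunWasm_##name(kExecuteCompiled); }       \
  TEST(RunWasmInterpreted_##name) { RunWasm_##name(kExecuteInterpreted); } \
  void RunWasm_##name(WasmExecutionMode execution_mode)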
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
index 268e0d5a93..72ed1f03f5 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
@@ -5,24 +5,27 @@
#include <stdlib.h>
#include <string.h>
+#include "src/api.h"
#include "src/objects-inl.h"
#include "src/snapshot/code-serializer.h"
#include "src/version.h"
+#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-opcodes.h"
#include "test/cctest/cctest.h"
+#include "test/common/wasm/flag-utils.h"
#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-macro-gen.h"
#include "test/common/wasm/wasm-module-runner.h"
-using namespace v8::base;
-using namespace v8::internal;
-using namespace v8::internal::compiler;
-using namespace v8::internal::wasm;
+namespace v8 {
+namespace internal {
+namespace wasm {
namespace {
void Cleanup(Isolate* isolate = nullptr) {
@@ -287,7 +290,7 @@ class WasmSerializationTest {
Handle<Object>(Smi::FromInt(41), current_isolate())};
int32_t result = testing::CallWasmFunctionForTesting(
current_isolate(), instance, &thrower, kFunctionName, 1, params);
- CHECK(result == 42);
+ CHECK_EQ(42, result);
}
Isolate* current_isolate() {
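
The CHECK(a == b) to CHECK_EQ(expected, actual) rewrites in this file are not purely stylistic: on failure CHECK_EQ reports both operand values, while plain CHECK only echoes the stringified condition. Roughly (the exact message format is an assumption):

// CHECK(result == 42);  // failure prints: Check failed: result == 42.
CHECK_EQ(42, result);    // failure prints: Check failed: 42 == result (42 vs. 41).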
@@ -318,6 +321,10 @@ class WasmSerializationTest {
WasmSerializationTest::BuildWireBytes(zone(), &buffer);
Isolate* serialization_isolate = CcTest::InitIsolateOnce();
+ // Isolates do not have serialization enabled by default. We must enable it
+ // here or else the assembler will not mark external references so that the
+ // serializer can handle them correctly.
+ serialization_isolate->set_serializer_enabled_for_test(true);
ErrorThrower thrower(serialization_isolate, "");
uint8_t* bytes = nullptr;
size_t bytes_size = 0;
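
Ordering matters in the hunk above: the flag has to be set before any Wasm code is compiled in this isolate, because external references are only marked at assembly time. A condensed sketch of the required sequence, using the calls from this hunk:

Isolate* isolate = CcTest::InitIsolateOnce();
// Must precede compilation, or the generated code will lack the
// external-reference bookkeeping the serializer needs.
isolate->set_serializer_enabled_for_test(true);
// ... only now build and compile the module to be serialized ...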
@@ -452,7 +459,7 @@ TEST(ModuleBuilder) {
size_t third = buffer.size() / 3;
size_t first_mark = third - 2;
size_t second_mark = buffer.size() - 2 - third;
- CHECK(0 < first_mark);
+ CHECK_LT(0, first_mark);
CHECK(first_mark < second_mark);
CHECK(second_mark < buffer.size());
Isolate* i_isolate = CcTest::InitIsolateOnce();
@@ -482,7 +489,7 @@ TEST(FailingModuleBuilder) {
size_t third = buffer.size() / 3;
size_t first_mark = third - 2;
size_t second_mark = buffer.size() - 2 - third;
- CHECK(0 < first_mark);
+ CHECK_LT(0, first_mark);
CHECK(first_mark < second_mark);
CHECK(second_mark < buffer.size());
Isolate* i_isolate = CcTest::InitIsolateOnce();
@@ -501,23 +508,6 @@ bool False(v8::Local<v8::Context> context, v8::Local<v8::String> source) {
return false;
}
-TEST(BlockWasmCodeGen) {
- v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator, ZONE_NAME);
- ZoneBuffer buffer(&zone);
- WasmSerializationTest::BuildWireBytes(&zone, &buffer);
- Isolate* isolate = CcTest::InitIsolateOnce();
- HandleScope scope(isolate);
- testing::SetupIsolateForWasmModule(isolate);
- CcTest::isolate()->SetAllowCodeGenerationFromStringsCallback(False);
-
- ErrorThrower thrower(isolate, "block codegen");
- MaybeHandle<WasmModuleObject> ret = wasm::SyncCompile(
- isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end()));
- CHECK(ret.is_null());
- CHECK(thrower.error());
-}
-
TEST(BlockWasmCodeGenAtDeserialization) {
WasmSerializationTest test;
{
@@ -557,13 +547,16 @@ TEST(TransferrableWasmModules) {
create_params.array_buffer_allocator =
from_isolate->array_buffer_allocator();
v8::Isolate* to_isolate = v8::Isolate::New(create_params);
- v8::HandleScope new_scope(to_isolate);
- v8::Local<v8::Context> deserialization_context =
- v8::Context::New(to_isolate);
- deserialization_context->Enter();
- v8::MaybeLocal<v8::WasmCompiledModule> mod =
- v8::WasmCompiledModule::FromTransferrableModule(to_isolate, store[0]);
- CHECK(!mod.IsEmpty());
+ {
+ v8::HandleScope new_scope(to_isolate);
+ v8::Local<v8::Context> deserialization_context =
+ v8::Context::New(to_isolate);
+ deserialization_context->Enter();
+ v8::MaybeLocal<v8::WasmCompiledModule> mod =
+ v8::WasmCompiledModule::FromTransferrableModule(to_isolate, store[0]);
+ CHECK(!mod.IsEmpty());
+ }
+ to_isolate->Dispose();
}
}
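
The new inner block above is a lifetime fix: the HandleScope and all Locals must be destroyed before to_isolate->Dispose() runs, since disposing an isolate with live handles is invalid. The general embedder pattern, as a sketch:

v8::Isolate* to_isolate = v8::Isolate::New(create_params);
{
  v8::HandleScope scope(to_isolate);  // handles live inside this block
  v8::Local<v8::Context> ctx = v8::Context::New(to_isolate);
  // ... deserialize and use the module ...
}                                     // handles die here
to_isolate->Dispose();                // now safe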
@@ -795,7 +788,7 @@ TEST(Run_WasmModule_GrowMemOobFixedIndex) {
Handle<Object> params[1] = {Handle<Object>(Smi::FromInt(1), isolate)};
int32_t result =
testing::RunWasmModuleForTesting(isolate, instance, 1, params);
- CHECK(result == 0xaced);
+ CHECK_EQ(0xaced, result);
}
Cleanup();
}
@@ -846,7 +839,7 @@ TEST(Run_WasmModule_GrowMemOobVariableIndex) {
Handle<Object>(Smi::FromInt((20 + i) * kPageSize - 4), isolate)};
int32_t result =
testing::RunWasmModuleForTesting(isolate, instance, 1, params);
- CHECK(result == 0xaced);
+ CHECK_EQ(0xaced, result);
}
v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
@@ -1095,18 +1088,22 @@ TEST(Run_WasmModule_Buffer_Externalized_GrowMem) {
{}, {})
.ToHandleChecked();
Handle<JSArrayBuffer> memory(instance->memory_buffer(), isolate);
+ Handle<WasmMemoryObject> mem_obj(instance->memory_object(), isolate);
void* const old_allocation_base = memory->allocation_base();
size_t const old_allocation_length = memory->allocation_length();
- // Fake the Embedder flow by creating a memory object, externalize and grow.
- Handle<WasmMemoryObject> mem_obj =
- WasmMemoryObject::New(isolate, memory, 100);
-
+ // Fake the Embedder flow by externalizing the memory object, then growing it.
v8::Utils::ToLocal(memory)->Externalize();
uint32_t result = WasmMemoryObject::Grow(isolate, mem_obj, 4);
- const bool free_memory = true;
- wasm::DetachWebAssemblyMemoryBuffer(isolate, memory, free_memory);
+ bool free_memory = !memory->has_guard_region();
+ if (!free_memory) {
+ // current_pages = initial memory size (16) + GrowWebAssemblyMemory(4)
+ const uint32_t current_pages = 20;
+ i::WasmMemoryObject::SetupNewBufferWithSameBackingStore(isolate, mem_obj,
+ current_pages);
+ }
+ wasm::DetachMemoryBuffer(isolate, memory, free_memory);
CHECK_EQ(16, result);
memory = handle(mem_obj->array_buffer());
instance->set_memory_buffer(*memory);
@@ -1122,8 +1119,12 @@ TEST(Run_WasmModule_Buffer_Externalized_GrowMem) {
isolate->array_buffer_allocator()->Free(memory->allocation_base(),
memory->allocation_length(),
allocation_mode);
- isolate->array_buffer_allocator()->Free(
- old_allocation_base, old_allocation_length, allocation_mode);
+ if (free_memory) {
+ // GrowMemory without guard pages enabled allocates an extra buffer
+ // that needs to be freed as well.
+ isolate->array_buffer_allocator()->Free(
+ old_allocation_base, old_allocation_length, allocation_mode);
+ }
memory->set_allocation_base(nullptr);
memory->set_allocation_length(0);
}
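
The two hunks above encode the same distinction: with a guard region the backing store is reserved up front and Grow happens in place (one buffer, rewrapped at the new size), whereas without one Grow allocates a fresh buffer and the pre-grow allocation must be released separately. Condensed from the hunks:

bool free_memory = !memory->has_guard_region();
if (!free_memory) {
  // In-place grow: same backing store, new buffer object at 20 pages.
  WasmMemoryObject::SetupNewBufferWithSameBackingStore(isolate, mem_obj, 20);
}
wasm::DetachMemoryBuffer(isolate, memory, free_memory);
// Later: the post-grow buffer is freed unconditionally; the pre-grow
// buffer only on the reallocating (free_memory) path.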
@@ -1143,7 +1144,7 @@ TEST(Run_WasmModule_Buffer_Externalized_GrowMemMemSize) {
WasmMemoryObject::New(isolate, buffer, 100);
v8::Utils::ToLocal(buffer)->Externalize();
int32_t result = WasmMemoryObject::Grow(isolate, mem_obj, 0);
- wasm::DetachWebAssemblyMemoryBuffer(isolate, buffer, false);
+ wasm::DetachMemoryBuffer(isolate, buffer, false);
CHECK_EQ(16, result);
isolate->array_buffer_allocator()->Free(backing_store,
@@ -1164,9 +1165,52 @@ TEST(Run_WasmModule_Buffer_Externalized_Detach) {
isolate, backing_store, 16 * WasmModule::kPageSize, backing_store,
16 * WasmModule::kPageSize, false, false);
v8::Utils::ToLocal(buffer)->Externalize();
- wasm::DetachWebAssemblyMemoryBuffer(isolate, buffer, true);
+ wasm::DetachMemoryBuffer(isolate, buffer, true);
isolate->array_buffer_allocator()->Free(backing_store,
16 * WasmModule::kPageSize);
}
Cleanup();
}
+
+TEST(AtomicOpDisassembly) {
+ {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ TestSignatures sigs;
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ builder->SetHasSharedMemory();
+ builder->SetMaxMemorySize(16);
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_i());
+ ExportAsMain(f);
+ byte code[] = {
+ WASM_ATOMICS_STORE_OP(kExprI32AtomicStore, WASM_ZERO, WASM_GET_LOCAL(0),
+ MachineRepresentation::kWord32),
+ WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad, WASM_ZERO,
+ MachineRepresentation::kWord32)};
+ EMIT_CODE_WITH_END(f, code);
+
+ HandleScope scope(isolate);
+ ZoneBuffer buffer(&zone);
+ builder->WriteTo(buffer);
+ testing::SetupIsolateForWasmModule(isolate);
+
+ ErrorThrower thrower(isolate, "Test");
+ MaybeHandle<WasmModuleObject> module_object = SyncCompile(
+ isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end()));
+
+ MaybeHandle<WasmCompiledModule> compiled_module(
+ module_object.ToHandleChecked()->compiled_module(), isolate);
+ CHECK(!compiled_module.is_null());
+ compiled_module.ToHandleChecked()->DisassembleFunction(0);
+ }
+ Cleanup();
+}
+
+#undef EMIT_CODE_WITH_END
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
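
EXPERIMENTAL_FLAG_SCOPE(threads) in the new AtomicOpDisassembly test above (and EXPERIMENTAL_FLAG_SCOPE(simd) in the SIMD file below) is an RAII guard from the newly included test/common/wasm/flag-utils.h: it flips the corresponding --experimental-wasm-* flag on for the enclosing scope and restores the old value on exit. Rough shape, as an assumption rather than the real code:

class ExperimentalFlagScope {  // hypothetical name; the real helper is a macro
 public:
  explicit ExperimentalFlagScope(bool* flag) : flag_(flag), saved_(*flag) {
    *flag_ = true;  // enable the experimental feature for this scope
  }
  ~ExperimentalFlagScope() { *flag_ = saved_; }  // restore on scope exit
 private:
  bool* flag_;
  bool saved_;
};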
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc b/deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc
index f82d39a49c..030b7a4288 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc
@@ -12,8 +12,10 @@
#include "test/cctest/wasm/wasm-run-utils.h"
#include "test/common/wasm/wasm-macro-gen.h"
-using namespace v8::internal;
-using namespace v8::internal::compiler;
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace test_run_wasm_relocation {
#define FOREACH_TYPE(TEST_BODY) \
TEST_BODY(int32_t, WASM_I32_ADD) \
@@ -22,8 +24,8 @@ using namespace v8::internal::compiler;
TEST_BODY(double, WASM_F64_ADD)
#define LOAD_SET_GLOBAL_TEST_BODY(C_TYPE, ADD) \
- TEST(WasmRelocateGlobal_##C_TYPE) { \
- WasmRunner<C_TYPE, C_TYPE> r(kExecuteCompiled); \
+ WASM_EXEC_TEST(WasmRelocateGlobal_##C_TYPE) { \
+ WasmRunner<C_TYPE, C_TYPE> r(execution_mode); \
Isolate* isolate = CcTest::i_isolate(); \
\
r.builder().AddGlobal<C_TYPE>(); \
@@ -60,3 +62,11 @@ using namespace v8::internal::compiler;
}
FOREACH_TYPE(LOAD_SET_GLOBAL_TEST_BODY)
+
+#undef FOREACH_TYPE
+#undef LOAD_SET_GLOBAL_TEST_BODY
+
+} // namespace test_run_wasm_relocation
+} // namespace wasm
+} // namespace internal
+} // namespace v8
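
The relocation tests above use an X-macro: FOREACH_TYPE applies LOAD_SET_GLOBAL_TEST_BODY once per (C type, add opcode) row, stamping out one WASM_EXEC_TEST per type, which is why trailing #undefs were added. The pattern in miniature (illustrative, not the test body):

#include <cstdint>
#include <cstdio>
#define FOREACH_TYPE(V) V(int32_t) V(float) V(double)
#define DECLARE_PRINT_SIZE(T) \
  void PrintSizeOf_##T() { std::printf("%zu\n", sizeof(T)); }
FOREACH_TYPE(DECLARE_PRINT_SIZE)  // defines PrintSizeOf_int32_t, ...
#undef DECLARE_PRINT_SIZE
#undef FOREACH_TYPE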
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
index f980b1a90e..5c5d74e747 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
@@ -8,10 +8,10 @@
#include "test/cctest/wasm/wasm-run-utils.h"
#include "test/common/wasm/wasm-macro-gen.h"
-using namespace v8::base;
-using namespace v8::internal;
-using namespace v8::internal::compiler;
-using namespace v8::internal::wasm;
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace test_run_wasm_simd {
namespace {
@@ -31,20 +31,25 @@ typedef int8_t (*Int8BinOp)(int8_t, int8_t);
typedef int (*Int8CompareOp)(int8_t, int8_t);
typedef int8_t (*Int8ShiftOp)(int8_t, int);
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-#define SIMD_LOWERING_TARGET 1
-#else
-#define SIMD_LOWERING_TARGET 0
-#endif
-
-#define WASM_SIMD_TEST(name) \
- void RunWasm_##name##_Impl(); \
- TEST(RunWasm_##name) { \
- EXPERIMENTAL_FLAG_SCOPE(simd); \
- RunWasm_##name##_Impl(); \
- } \
- void RunWasm_##name##_Impl()
+#define WASM_SIMD_TEST(name) \
+ void RunWasm_##name##_Impl(WasmExecutionMode execution_mode); \
+ TEST(RunWasm_##name##_compiled) { \
+ EXPERIMENTAL_FLAG_SCOPE(simd); \
+ RunWasm_##name##_Impl(kExecuteCompiled); \
+ } \
+ TEST(RunWasm_##name##_simd_lowered) { \
+ EXPERIMENTAL_FLAG_SCOPE(simd); \
+ RunWasm_##name##_Impl(kExecuteSimdLowered); \
+ } \
+ void RunWasm_##name##_Impl(WasmExecutionMode execution_mode)
+
+#define WASM_SIMD_COMPILED_TEST(name) \
+ void RunWasm_##name##_Impl(WasmExecutionMode execution_mode); \
+ TEST(RunWasm_##name##_compiled) { \
+ EXPERIMENTAL_FLAG_SCOPE(simd); \
+ RunWasm_##name##_Impl(kExecuteCompiled); \
+ } \
+ void RunWasm_##name##_Impl(WasmExecutionMode execution_mode)
// Generic expected value functions.
template <typename T>
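
The rewritten macros above replace the old SIMD_LOWERING_TARGET #if machinery: WASM_SIMD_TEST now stamps out two cctests per name, one through the regular compiler and one through the SIMD lowering pass, while WASM_SIMD_COMPILED_TEST keeps only the compiled variant for ops the lowering pass does not yet support. For instance, WASM_SIMD_TEST(F32x4Add) expands (modulo formatting) to:

void RunWasm_F32x4Add_Impl(WasmExecutionMode execution_mode);
TEST(RunWasm_F32x4Add_compiled) {
  EXPERIMENTAL_FLAG_SCOPE(simd);
  RunWasm_F32x4Add_Impl(kExecuteCompiled);
}
TEST(RunWasm_F32x4Add_simd_lowered) {
  EXPERIMENTAL_FLAG_SCOPE(simd);
  RunWasm_F32x4Add_Impl(kExecuteSimdLowered);
}
void RunWasm_F32x4Add_Impl(WasmExecutionMode execution_mode) { /* body */ }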
@@ -82,25 +87,6 @@ T Maximum(T a, T b) {
return a >= b ? a : b;
}
-// For float operands, Min and Max must return NaN if either operand is NaN.
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET || \
- V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-template <>
-float Minimum(float a, float b) {
- if (std::isnan(a) || std::isnan(b))
- return std::numeric_limits<float>::quiet_NaN();
- return a <= b ? a : b;
-}
-
-template <>
-float Maximum(float a, float b) {
- if (std::isnan(a) || std::isnan(b))
- return std::numeric_limits<float>::quiet_NaN();
- return a >= b ? a : b;
-}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET ||
- // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-
template <typename T>
T UnsignedMinimum(T a, T b) {
using UnsignedT = typename std::make_unsigned<T>::type;
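
The deleted specializations above made Minimum/Maximum NaN-propagating for float; later in this file the F32x4_Min and F32x4_Max tests switch to the existing JSMin/JSMax helpers from src/utils.h, which already return NaN when either operand is NaN (and additionally order -0 below +0). The NaN rule they rely on, sketched:

#include <cmath>
#include <limits>
// Sketch of the NaN contract only; not the real JSMin from src/utils.h.
template <typename T>
T JSMinSketch(T x, T y) {
  if (std::isnan(x) || std::isnan(y))
    return std::numeric_limits<T>::quiet_NaN();
  return x < y ? x : y;
}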
@@ -419,10 +405,10 @@ bool SkipFPValue(float x) {
// doesn't handle NaNs. Also skip extreme values.
bool SkipFPExpectedValue(float x) { return std::isnan(x) || SkipFPValue(x); }
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET || \
- V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
+ V8_TARGET_ARCH_MIPS64
WASM_SIMD_TEST(F32x4Splat) {
- WasmRunner<int32_t, float> r(kExecuteCompiled);
+ WasmRunner<int32_t, float> r(execution_mode);
byte lane_val = 0;
byte simd = r.AllocateLocal(kWasmS128);
BUILD(r,
@@ -436,7 +422,7 @@ WASM_SIMD_TEST(F32x4Splat) {
}
WASM_SIMD_TEST(F32x4ReplaceLane) {
- WasmRunner<int32_t, float, float> r(kExecuteCompiled);
+ WasmRunner<int32_t, float, float> r(execution_mode);
byte old_val = 0;
byte new_val = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -463,7 +449,7 @@ WASM_SIMD_TEST(F32x4ReplaceLane) {
// Tests both signed and unsigned conversion.
WASM_SIMD_TEST(F32x4ConvertI32x4) {
- WasmRunner<int32_t, int32_t, float, float> r(kExecuteCompiled);
+ WasmRunner<int32_t, int32_t, float, float> r(execution_mode);
byte a = 0;
byte expected_signed = 1;
byte expected_unsigned = 2;
@@ -484,14 +470,10 @@ WASM_SIMD_TEST(F32x4ConvertI32x4) {
static_cast<float>(static_cast<uint32_t>(*i))));
}
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET ||
- // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET || \
- V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-void RunF32x4UnOpTest(WasmOpcode simd_op, FloatUnOp expected_op,
- float error = 0.0f) {
- WasmRunner<int32_t, float, float, float> r(kExecuteCompiled);
+void RunF32x4UnOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+ FloatUnOp expected_op, float error = 0.0f) {
+ WasmRunner<int32_t, float, float, float> r(execution_mode);
byte a = 0;
byte low = 1;
byte high = 2;
@@ -510,29 +492,27 @@ void RunF32x4UnOpTest(WasmOpcode simd_op, FloatUnOp expected_op,
}
}
-WASM_SIMD_TEST(F32x4Abs) { RunF32x4UnOpTest(kExprF32x4Abs, std::abs); }
-WASM_SIMD_TEST(F32x4Neg) { RunF32x4UnOpTest(kExprF32x4Neg, Negate); }
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET ||
- // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+WASM_SIMD_TEST(F32x4Abs) {
+ RunF32x4UnOpTest(execution_mode, kExprF32x4Abs, std::abs);
+}
+WASM_SIMD_TEST(F32x4Neg) {
+ RunF32x4UnOpTest(execution_mode, kExprF32x4Neg, Negate);
+}
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64
static const float kApproxError = 0.01f;
-WASM_SIMD_TEST(F32x4RecipApprox) {
- RunF32x4UnOpTest(kExprF32x4RecipApprox, Recip, kApproxError);
+WASM_SIMD_COMPILED_TEST(F32x4RecipApprox) {
+ RunF32x4UnOpTest(execution_mode, kExprF32x4RecipApprox, Recip, kApproxError);
}
-WASM_SIMD_TEST(F32x4RecipSqrtApprox) {
- RunF32x4UnOpTest(kExprF32x4RecipSqrtApprox, RecipSqrt, kApproxError);
+WASM_SIMD_COMPILED_TEST(F32x4RecipSqrtApprox) {
+ RunF32x4UnOpTest(execution_mode, kExprF32x4RecipSqrtApprox, RecipSqrt,
+ kApproxError);
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
- // V8_TARGET_ARCH_MIPS64
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET || \
- V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-void RunF32x4BinOpTest(WasmOpcode simd_op, FloatBinOp expected_op) {
- WasmRunner<int32_t, float, float, float> r(kExecuteCompiled);
+void RunF32x4BinOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+ FloatBinOp expected_op) {
+ WasmRunner<int32_t, float, float, float> r(execution_mode);
byte a = 0;
byte b = 1;
byte expected = 2;
@@ -555,18 +535,25 @@ void RunF32x4BinOpTest(WasmOpcode simd_op, FloatBinOp expected_op) {
}
}
-WASM_SIMD_TEST(F32x4Add) { RunF32x4BinOpTest(kExprF32x4Add, Add); }
-WASM_SIMD_TEST(F32x4Sub) { RunF32x4BinOpTest(kExprF32x4Sub, Sub); }
-WASM_SIMD_TEST(F32x4Mul) { RunF32x4BinOpTest(kExprF32x4Mul, Mul); }
-WASM_SIMD_TEST(F32x4_Min) { RunF32x4BinOpTest(kExprF32x4Min, Minimum); }
-WASM_SIMD_TEST(F32x4_Max) { RunF32x4BinOpTest(kExprF32x4Max, Maximum); }
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET ||
- // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+WASM_SIMD_TEST(F32x4Add) {
+ RunF32x4BinOpTest(execution_mode, kExprF32x4Add, Add);
+}
+WASM_SIMD_TEST(F32x4Sub) {
+ RunF32x4BinOpTest(execution_mode, kExprF32x4Sub, Sub);
+}
+WASM_SIMD_TEST(F32x4Mul) {
+ RunF32x4BinOpTest(execution_mode, kExprF32x4Mul, Mul);
+}
+WASM_SIMD_TEST(F32x4_Min) {
+ RunF32x4BinOpTest(execution_mode, kExprF32x4Min, JSMin);
+}
+WASM_SIMD_TEST(F32x4_Max) {
+ RunF32x4BinOpTest(execution_mode, kExprF32x4Max, JSMax);
+}
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET || \
- V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-void RunF32x4CompareOpTest(WasmOpcode simd_op, FloatCompareOp expected_op) {
- WasmRunner<int32_t, float, float, int32_t> r(kExecuteCompiled);
+void RunF32x4CompareOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+ FloatCompareOp expected_op) {
+ WasmRunner<int32_t, float, float, int32_t> r(execution_mode);
byte a = 0;
byte b = 1;
byte expected = 2;
@@ -589,19 +576,31 @@ void RunF32x4CompareOpTest(WasmOpcode simd_op, FloatCompareOp expected_op) {
}
}
-WASM_SIMD_TEST(F32x4Eq) { RunF32x4CompareOpTest(kExprF32x4Eq, Equal); }
+WASM_SIMD_TEST(F32x4Eq) {
+ RunF32x4CompareOpTest(execution_mode, kExprF32x4Eq, Equal);
+}
-WASM_SIMD_TEST(F32x4Ne) { RunF32x4CompareOpTest(kExprF32x4Ne, NotEqual); }
+WASM_SIMD_TEST(F32x4Ne) {
+ RunF32x4CompareOpTest(execution_mode, kExprF32x4Ne, NotEqual);
+}
-WASM_SIMD_TEST(F32x4Gt) { RunF32x4CompareOpTest(kExprF32x4Gt, Greater); }
+WASM_SIMD_TEST(F32x4Gt) {
+ RunF32x4CompareOpTest(execution_mode, kExprF32x4Gt, Greater);
+}
-WASM_SIMD_TEST(F32x4Ge) { RunF32x4CompareOpTest(kExprF32x4Ge, GreaterEqual); }
+WASM_SIMD_TEST(F32x4Ge) {
+ RunF32x4CompareOpTest(execution_mode, kExprF32x4Ge, GreaterEqual);
+}
-WASM_SIMD_TEST(F32x4Lt) { RunF32x4CompareOpTest(kExprF32x4Lt, Less); }
+WASM_SIMD_TEST(F32x4Lt) {
+ RunF32x4CompareOpTest(execution_mode, kExprF32x4Lt, Less);
+}
-WASM_SIMD_TEST(F32x4Le) { RunF32x4CompareOpTest(kExprF32x4Le, LessEqual); }
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET ||
- // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+WASM_SIMD_TEST(F32x4Le) {
+ RunF32x4CompareOpTest(execution_mode, kExprF32x4Le, LessEqual);
+}
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
+ // V8_TARGET_ARCH_MIPS64
WASM_SIMD_TEST(I32x4Splat) {
// Store SIMD value in a local variable, use extract lane to check lane values
@@ -614,7 +613,7 @@ WASM_SIMD_TEST(I32x4Splat) {
// return 0
//
// return 1
- WasmRunner<int32_t, int32_t> r(kExecuteCompiled);
+ WasmRunner<int32_t, int32_t> r(execution_mode);
byte lane_val = 0;
byte simd = r.AllocateLocal(kWasmS128);
BUILD(r,
@@ -625,7 +624,7 @@ WASM_SIMD_TEST(I32x4Splat) {
}
WASM_SIMD_TEST(I32x4ReplaceLane) {
- WasmRunner<int32_t, int32_t, int32_t> r(kExecuteCompiled);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
byte old_val = 0;
byte new_val = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -651,7 +650,7 @@ WASM_SIMD_TEST(I32x4ReplaceLane) {
}
WASM_SIMD_TEST(I16x8Splat) {
- WasmRunner<int32_t, int32_t> r(kExecuteCompiled);
+ WasmRunner<int32_t, int32_t> r(execution_mode);
byte lane_val = 0;
byte simd = r.AllocateLocal(kWasmS128);
BUILD(r,
@@ -662,7 +661,7 @@ WASM_SIMD_TEST(I16x8Splat) {
}
WASM_SIMD_TEST(I16x8ReplaceLane) {
- WasmRunner<int32_t, int32_t, int32_t> r(kExecuteCompiled);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
byte old_val = 0;
byte new_val = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -711,7 +710,7 @@ WASM_SIMD_TEST(I16x8ReplaceLane) {
}
WASM_SIMD_TEST(I8x16Splat) {
- WasmRunner<int32_t, int32_t> r(kExecuteCompiled);
+ WasmRunner<int32_t, int32_t> r(execution_mode);
byte lane_val = 0;
byte simd = r.AllocateLocal(kWasmS128);
BUILD(r,
@@ -722,7 +721,7 @@ WASM_SIMD_TEST(I8x16Splat) {
}
WASM_SIMD_TEST(I8x16ReplaceLane) {
- WasmRunner<int32_t, int32_t, int32_t> r(kExecuteCompiled);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
byte old_val = 0;
byte new_val = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -825,8 +824,8 @@ WASM_SIMD_TEST(I8x16ReplaceLane) {
CHECK_EQ(1, r.Call(1, 2));
}
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET || \
- V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
+ V8_TARGET_ARCH_MIPS64
// Determines if conversion from float to int will be valid.
bool CanRoundToZeroAndConvert(double val, bool unsigned_integer) {
const double max_uint = static_cast<double>(0xffffffffu);
@@ -869,7 +868,7 @@ int32_t ConvertToInt(double val, bool unsigned_integer) {
// Tests both signed and unsigned conversion.
WASM_SIMD_TEST(I32x4ConvertF32x4) {
- WasmRunner<int32_t, float, int32_t, int32_t> r(kExecuteCompiled);
+ WasmRunner<int32_t, float, int32_t, int32_t> r(execution_mode);
byte a = 0;
byte expected_signed = 1;
byte expected_unsigned = 2;
@@ -891,14 +890,10 @@ WASM_SIMD_TEST(I32x4ConvertF32x4) {
CHECK_EQ(1, r.Call(*i, signed_value, unsigned_value));
}
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET ||
- // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64
// Tests both signed and unsigned conversion from I16x8 (unpacking).
-WASM_SIMD_TEST(I32x4ConvertI16x8) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteCompiled);
+WASM_SIMD_COMPILED_TEST(I32x4ConvertI16x8) {
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode);
byte a = 0;
byte unpacked_signed = 1;
byte unpacked_unsigned = 2;
@@ -923,8 +918,9 @@ WASM_SIMD_TEST(I32x4ConvertI16x8) {
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64
-void RunI32x4UnOpTest(WasmOpcode simd_op, Int32UnOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t> r(kExecuteCompiled);
+void RunI32x4UnOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+ Int32UnOp expected_op) {
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
byte a = 0;
byte expected = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -935,16 +931,17 @@ void RunI32x4UnOpTest(WasmOpcode simd_op, Int32UnOp expected_op) {
FOR_INT32_INPUTS(i) { CHECK_EQ(1, r.Call(*i, expected_op(*i))); }
}
-WASM_SIMD_TEST(I32x4Neg) { RunI32x4UnOpTest(kExprI32x4Neg, Negate); }
+WASM_SIMD_TEST(I32x4Neg) {
+ RunI32x4UnOpTest(execution_mode, kExprI32x4Neg, Negate);
+}
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET || \
- V8_TARGET_ARCH_X64
-WASM_SIMD_TEST(S128Not) { RunI32x4UnOpTest(kExprS128Not, Not); }
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET
- // V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64
+WASM_SIMD_TEST(S128Not) { RunI32x4UnOpTest(execution_mode, kExprS128Not, Not); }
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64
-void RunI32x4BinOpTest(WasmOpcode simd_op, Int32BinOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteCompiled);
+void RunI32x4BinOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+ Int32BinOp expected_op) {
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode);
byte a = 0;
byte b = 1;
byte expected = 2;
@@ -961,36 +958,51 @@ void RunI32x4BinOpTest(WasmOpcode simd_op, Int32BinOp expected_op) {
}
}
-WASM_SIMD_TEST(I32x4Add) { RunI32x4BinOpTest(kExprI32x4Add, Add); }
+WASM_SIMD_TEST(I32x4Add) {
+ RunI32x4BinOpTest(execution_mode, kExprI32x4Add, Add);
+}
-WASM_SIMD_TEST(I32x4Sub) { RunI32x4BinOpTest(kExprI32x4Sub, Sub); }
+WASM_SIMD_TEST(I32x4Sub) {
+ RunI32x4BinOpTest(execution_mode, kExprI32x4Sub, Sub);
+}
-WASM_SIMD_TEST(I32x4Mul) { RunI32x4BinOpTest(kExprI32x4Mul, Mul); }
+WASM_SIMD_TEST(I32x4Mul) {
+ RunI32x4BinOpTest(execution_mode, kExprI32x4Mul, Mul);
+}
-WASM_SIMD_TEST(I32x4MinS) { RunI32x4BinOpTest(kExprI32x4MinS, Minimum); }
+WASM_SIMD_TEST(I32x4MinS) {
+ RunI32x4BinOpTest(execution_mode, kExprI32x4MinS, Minimum);
+}
-WASM_SIMD_TEST(I32x4MaxS) { RunI32x4BinOpTest(kExprI32x4MaxS, Maximum); }
+WASM_SIMD_TEST(I32x4MaxS) {
+ RunI32x4BinOpTest(execution_mode, kExprI32x4MaxS, Maximum);
+}
WASM_SIMD_TEST(I32x4MinU) {
- RunI32x4BinOpTest(kExprI32x4MinU, UnsignedMinimum);
+ RunI32x4BinOpTest(execution_mode, kExprI32x4MinU, UnsignedMinimum);
}
WASM_SIMD_TEST(I32x4MaxU) {
- RunI32x4BinOpTest(kExprI32x4MaxU, UnsignedMaximum);
+ RunI32x4BinOpTest(execution_mode, kExprI32x4MaxU, UnsignedMaximum);
}
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || \
- SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-WASM_SIMD_TEST(S128And) { RunI32x4BinOpTest(kExprS128And, And); }
+ V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+WASM_SIMD_TEST(S128And) {
+ RunI32x4BinOpTest(execution_mode, kExprS128And, And);
+}
-WASM_SIMD_TEST(S128Or) { RunI32x4BinOpTest(kExprS128Or, Or); }
+WASM_SIMD_TEST(S128Or) { RunI32x4BinOpTest(execution_mode, kExprS128Or, Or); }
-WASM_SIMD_TEST(S128Xor) { RunI32x4BinOpTest(kExprS128Xor, Xor); }
+WASM_SIMD_TEST(S128Xor) {
+ RunI32x4BinOpTest(execution_mode, kExprS128Xor, Xor);
+}
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 ||
- // SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+ // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-void RunI32x4CompareOpTest(WasmOpcode simd_op, Int32CompareOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteCompiled);
+void RunI32x4CompareOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+ Int32CompareOp expected_op) {
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode);
byte a = 0;
byte b = 1;
byte expected = 2;
@@ -1007,35 +1019,49 @@ void RunI32x4CompareOpTest(WasmOpcode simd_op, Int32CompareOp expected_op) {
}
}
-WASM_SIMD_TEST(I32x4Eq) { RunI32x4CompareOpTest(kExprI32x4Eq, Equal); }
+WASM_SIMD_TEST(I32x4Eq) {
+ RunI32x4CompareOpTest(execution_mode, kExprI32x4Eq, Equal);
+}
-WASM_SIMD_TEST(I32x4Ne) { RunI32x4CompareOpTest(kExprI32x4Ne, NotEqual); }
+WASM_SIMD_TEST(I32x4Ne) {
+ RunI32x4CompareOpTest(execution_mode, kExprI32x4Ne, NotEqual);
+}
-WASM_SIMD_TEST(I32x4LtS) { RunI32x4CompareOpTest(kExprI32x4LtS, Less); }
+WASM_SIMD_TEST(I32x4LtS) {
+ RunI32x4CompareOpTest(execution_mode, kExprI32x4LtS, Less);
+}
-WASM_SIMD_TEST(I32x4LeS) { RunI32x4CompareOpTest(kExprI32x4LeS, LessEqual); }
+WASM_SIMD_TEST(I32x4LeS) {
+ RunI32x4CompareOpTest(execution_mode, kExprI32x4LeS, LessEqual);
+}
-WASM_SIMD_TEST(I32x4GtS) { RunI32x4CompareOpTest(kExprI32x4GtS, Greater); }
+WASM_SIMD_TEST(I32x4GtS) {
+ RunI32x4CompareOpTest(execution_mode, kExprI32x4GtS, Greater);
+}
-WASM_SIMD_TEST(I32x4GeS) { RunI32x4CompareOpTest(kExprI32x4GeS, GreaterEqual); }
+WASM_SIMD_TEST(I32x4GeS) {
+ RunI32x4CompareOpTest(execution_mode, kExprI32x4GeS, GreaterEqual);
+}
-WASM_SIMD_TEST(I32x4LtU) { RunI32x4CompareOpTest(kExprI32x4LtU, UnsignedLess); }
+WASM_SIMD_TEST(I32x4LtU) {
+ RunI32x4CompareOpTest(execution_mode, kExprI32x4LtU, UnsignedLess);
+}
WASM_SIMD_TEST(I32x4LeU) {
- RunI32x4CompareOpTest(kExprI32x4LeU, UnsignedLessEqual);
+ RunI32x4CompareOpTest(execution_mode, kExprI32x4LeU, UnsignedLessEqual);
}
WASM_SIMD_TEST(I32x4GtU) {
- RunI32x4CompareOpTest(kExprI32x4GtU, UnsignedGreater);
+ RunI32x4CompareOpTest(execution_mode, kExprI32x4GtU, UnsignedGreater);
}
WASM_SIMD_TEST(I32x4GeU) {
- RunI32x4CompareOpTest(kExprI32x4GeU, UnsignedGreaterEqual);
+ RunI32x4CompareOpTest(execution_mode, kExprI32x4GeU, UnsignedGreaterEqual);
}
-void RunI32x4ShiftOpTest(WasmOpcode simd_op, Int32ShiftOp expected_op,
- int shift) {
- WasmRunner<int32_t, int32_t, int32_t> r(kExecuteCompiled);
+void RunI32x4ShiftOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+ Int32ShiftOp expected_op, int shift) {
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
byte a = 0;
byte expected = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -1048,22 +1074,22 @@ void RunI32x4ShiftOpTest(WasmOpcode simd_op, Int32ShiftOp expected_op,
}
WASM_SIMD_TEST(I32x4Shl) {
- RunI32x4ShiftOpTest(kExprI32x4Shl, LogicalShiftLeft, 1);
+ RunI32x4ShiftOpTest(execution_mode, kExprI32x4Shl, LogicalShiftLeft, 1);
}
WASM_SIMD_TEST(I32x4ShrS) {
- RunI32x4ShiftOpTest(kExprI32x4ShrS, ArithmeticShiftRight, 1);
+ RunI32x4ShiftOpTest(execution_mode, kExprI32x4ShrS, ArithmeticShiftRight, 1);
}
WASM_SIMD_TEST(I32x4ShrU) {
- RunI32x4ShiftOpTest(kExprI32x4ShrU, LogicalShiftRight, 1);
+ RunI32x4ShiftOpTest(execution_mode, kExprI32x4ShrU, LogicalShiftRight, 1);
}
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64
// Tests both signed and unsigned conversion from I8x16 (unpacking).
-WASM_SIMD_TEST(I16x8ConvertI8x16) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteCompiled);
+WASM_SIMD_COMPILED_TEST(I16x8ConvertI8x16) {
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode);
byte a = 0;
byte unpacked_signed = 1;
byte unpacked_unsigned = 2;
@@ -1087,10 +1113,11 @@ WASM_SIMD_TEST(I16x8ConvertI8x16) {
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64
-#if SIMD_LOWERING_TARGET || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_X64
-void RunI16x8UnOpTest(WasmOpcode simd_op, Int16UnOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t> r(kExecuteCompiled);
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
+ V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_X64
+void RunI16x8UnOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+ Int16UnOp expected_op) {
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
byte a = 0;
byte expected = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -1101,15 +1128,17 @@ void RunI16x8UnOpTest(WasmOpcode simd_op, Int16UnOp expected_op) {
FOR_INT16_INPUTS(i) { CHECK_EQ(1, r.Call(*i, expected_op(*i))); }
}
-WASM_SIMD_TEST(I16x8Neg) { RunI16x8UnOpTest(kExprI16x8Neg, Negate); }
-#endif // SIMD_LOWERING_TARGET || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 ||
- // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_X64
+WASM_SIMD_TEST(I16x8Neg) {
+ RunI16x8UnOpTest(execution_mode, kExprI16x8Neg, Negate);
+}
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
+ // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_X64
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64
// Tests both signed and unsigned conversion from I32x4 (packing).
-WASM_SIMD_TEST(I16x8ConvertI32x4) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteCompiled);
+WASM_SIMD_COMPILED_TEST(I16x8ConvertI32x4) {
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode);
byte a = 0;
byte packed_signed = 1;
byte packed_unsigned = 2;
@@ -1138,9 +1167,10 @@ WASM_SIMD_TEST(I16x8ConvertI32x4) {
// V8_TARGET_ARCH_MIPS64
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || \
- SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-void RunI16x8BinOpTest(WasmOpcode simd_op, Int16BinOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteCompiled);
+ V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+void RunI16x8BinOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+ Int16BinOp expected_op) {
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode);
byte a = 0;
byte b = 1;
byte expected = 2;
@@ -1157,42 +1187,55 @@ void RunI16x8BinOpTest(WasmOpcode simd_op, Int16BinOp expected_op) {
}
}
-WASM_SIMD_TEST(I16x8Add) { RunI16x8BinOpTest(kExprI16x8Add, Add); }
+WASM_SIMD_TEST(I16x8Add) {
+ RunI16x8BinOpTest(execution_mode, kExprI16x8Add, Add);
+}
WASM_SIMD_TEST(I16x8AddSaturateS) {
- RunI16x8BinOpTest(kExprI16x8AddSaturateS, AddSaturate);
+ RunI16x8BinOpTest(execution_mode, kExprI16x8AddSaturateS, AddSaturate);
}
-WASM_SIMD_TEST(I16x8Sub) { RunI16x8BinOpTest(kExprI16x8Sub, Sub); }
+WASM_SIMD_TEST(I16x8Sub) {
+ RunI16x8BinOpTest(execution_mode, kExprI16x8Sub, Sub);
+}
WASM_SIMD_TEST(I16x8SubSaturateS) {
- RunI16x8BinOpTest(kExprI16x8SubSaturateS, SubSaturate);
+ RunI16x8BinOpTest(execution_mode, kExprI16x8SubSaturateS, SubSaturate);
}
-WASM_SIMD_TEST(I16x8Mul) { RunI16x8BinOpTest(kExprI16x8Mul, Mul); }
+WASM_SIMD_TEST(I16x8Mul) {
+ RunI16x8BinOpTest(execution_mode, kExprI16x8Mul, Mul);
+}
-WASM_SIMD_TEST(I16x8MinS) { RunI16x8BinOpTest(kExprI16x8MinS, Minimum); }
+WASM_SIMD_TEST(I16x8MinS) {
+ RunI16x8BinOpTest(execution_mode, kExprI16x8MinS, Minimum);
+}
-WASM_SIMD_TEST(I16x8MaxS) { RunI16x8BinOpTest(kExprI16x8MaxS, Maximum); }
+WASM_SIMD_TEST(I16x8MaxS) {
+ RunI16x8BinOpTest(execution_mode, kExprI16x8MaxS, Maximum);
+}
WASM_SIMD_TEST(I16x8AddSaturateU) {
- RunI16x8BinOpTest(kExprI16x8AddSaturateU, UnsignedAddSaturate);
+ RunI16x8BinOpTest(execution_mode, kExprI16x8AddSaturateU,
+ UnsignedAddSaturate);
}
WASM_SIMD_TEST(I16x8SubSaturateU) {
- RunI16x8BinOpTest(kExprI16x8SubSaturateU, UnsignedSubSaturate);
+ RunI16x8BinOpTest(execution_mode, kExprI16x8SubSaturateU,
+ UnsignedSubSaturate);
}
WASM_SIMD_TEST(I16x8MinU) {
- RunI16x8BinOpTest(kExprI16x8MinU, UnsignedMinimum);
+ RunI16x8BinOpTest(execution_mode, kExprI16x8MinU, UnsignedMinimum);
}
WASM_SIMD_TEST(I16x8MaxU) {
- RunI16x8BinOpTest(kExprI16x8MaxU, UnsignedMaximum);
+ RunI16x8BinOpTest(execution_mode, kExprI16x8MaxU, UnsignedMaximum);
}
-void RunI16x8CompareOpTest(WasmOpcode simd_op, Int16CompareOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteCompiled);
+void RunI16x8CompareOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+ Int16CompareOp expected_op) {
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode);
byte a = 0;
byte b = 1;
byte expected = 2;
@@ -1209,35 +1252,49 @@ void RunI16x8CompareOpTest(WasmOpcode simd_op, Int16CompareOp expected_op) {
}
}
-WASM_SIMD_TEST(I16x8Eq) { RunI16x8CompareOpTest(kExprI16x8Eq, Equal); }
+WASM_SIMD_TEST(I16x8Eq) {
+ RunI16x8CompareOpTest(execution_mode, kExprI16x8Eq, Equal);
+}
-WASM_SIMD_TEST(I16x8Ne) { RunI16x8CompareOpTest(kExprI16x8Ne, NotEqual); }
+WASM_SIMD_TEST(I16x8Ne) {
+ RunI16x8CompareOpTest(execution_mode, kExprI16x8Ne, NotEqual);
+}
-WASM_SIMD_TEST(I16x8LtS) { RunI16x8CompareOpTest(kExprI16x8LtS, Less); }
+WASM_SIMD_TEST(I16x8LtS) {
+ RunI16x8CompareOpTest(execution_mode, kExprI16x8LtS, Less);
+}
-WASM_SIMD_TEST(I16x8LeS) { RunI16x8CompareOpTest(kExprI16x8LeS, LessEqual); }
+WASM_SIMD_TEST(I16x8LeS) {
+ RunI16x8CompareOpTest(execution_mode, kExprI16x8LeS, LessEqual);
+}
-WASM_SIMD_TEST(I16x8GtS) { RunI16x8CompareOpTest(kExprI16x8GtS, Greater); }
+WASM_SIMD_TEST(I16x8GtS) {
+ RunI16x8CompareOpTest(execution_mode, kExprI16x8GtS, Greater);
+}
-WASM_SIMD_TEST(I16x8GeS) { RunI16x8CompareOpTest(kExprI16x8GeS, GreaterEqual); }
+WASM_SIMD_TEST(I16x8GeS) {
+ RunI16x8CompareOpTest(execution_mode, kExprI16x8GeS, GreaterEqual);
+}
WASM_SIMD_TEST(I16x8GtU) {
- RunI16x8CompareOpTest(kExprI16x8GtU, UnsignedGreater);
+ RunI16x8CompareOpTest(execution_mode, kExprI16x8GtU, UnsignedGreater);
}
WASM_SIMD_TEST(I16x8GeU) {
- RunI16x8CompareOpTest(kExprI16x8GeU, UnsignedGreaterEqual);
+ RunI16x8CompareOpTest(execution_mode, kExprI16x8GeU, UnsignedGreaterEqual);
}
-WASM_SIMD_TEST(I16x8LtU) { RunI16x8CompareOpTest(kExprI16x8LtU, UnsignedLess); }
+WASM_SIMD_TEST(I16x8LtU) {
+ RunI16x8CompareOpTest(execution_mode, kExprI16x8LtU, UnsignedLess);
+}
WASM_SIMD_TEST(I16x8LeU) {
- RunI16x8CompareOpTest(kExprI16x8LeU, UnsignedLessEqual);
+ RunI16x8CompareOpTest(execution_mode, kExprI16x8LeU, UnsignedLessEqual);
}
-void RunI16x8ShiftOpTest(WasmOpcode simd_op, Int16ShiftOp expected_op,
- int shift) {
- WasmRunner<int32_t, int32_t, int32_t> r(kExecuteCompiled);
+void RunI16x8ShiftOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+ Int16ShiftOp expected_op, int shift) {
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
byte a = 0;
byte expected = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -1250,19 +1307,20 @@ void RunI16x8ShiftOpTest(WasmOpcode simd_op, Int16ShiftOp expected_op,
}
WASM_SIMD_TEST(I16x8Shl) {
- RunI16x8ShiftOpTest(kExprI16x8Shl, LogicalShiftLeft, 1);
+ RunI16x8ShiftOpTest(execution_mode, kExprI16x8Shl, LogicalShiftLeft, 1);
}
WASM_SIMD_TEST(I16x8ShrS) {
- RunI16x8ShiftOpTest(kExprI16x8ShrS, ArithmeticShiftRight, 1);
+ RunI16x8ShiftOpTest(execution_mode, kExprI16x8ShrS, ArithmeticShiftRight, 1);
}
WASM_SIMD_TEST(I16x8ShrU) {
- RunI16x8ShiftOpTest(kExprI16x8ShrU, LogicalShiftRight, 1);
+ RunI16x8ShiftOpTest(execution_mode, kExprI16x8ShrU, LogicalShiftRight, 1);
}
-void RunI8x16UnOpTest(WasmOpcode simd_op, Int8UnOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t> r(kExecuteCompiled);
+void RunI8x16UnOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+ Int8UnOp expected_op) {
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
byte a = 0;
byte expected = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -1273,15 +1331,17 @@ void RunI8x16UnOpTest(WasmOpcode simd_op, Int8UnOp expected_op) {
FOR_INT8_INPUTS(i) { CHECK_EQ(1, r.Call(*i, expected_op(*i))); }
}
-WASM_SIMD_TEST(I8x16Neg) { RunI8x16UnOpTest(kExprI8x16Neg, Negate); }
+WASM_SIMD_TEST(I8x16Neg) {
+ RunI8x16UnOpTest(execution_mode, kExprI8x16Neg, Negate);
+}
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 ||
- // SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+ // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64
// Tests both signed and unsigned conversion from I16x8 (packing).
-WASM_SIMD_TEST(I8x16ConvertI16x8) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteCompiled);
+WASM_SIMD_COMPILED_TEST(I8x16ConvertI16x8) {
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode);
byte a = 0;
byte packed_signed = 1;
byte packed_unsigned = 2;
@@ -1310,9 +1370,10 @@ WASM_SIMD_TEST(I8x16ConvertI16x8) {
// V8_TARGET_ARCH_MIPS64
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || \
- SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-void RunI8x16BinOpTest(WasmOpcode simd_op, Int8BinOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteCompiled);
+ V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+void RunI8x16BinOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+ Int8BinOp expected_op) {
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode);
byte a = 0;
byte b = 1;
byte expected = 2;
@@ -1329,40 +1390,51 @@ void RunI8x16BinOpTest(WasmOpcode simd_op, Int8BinOp expected_op) {
}
}
-WASM_SIMD_TEST(I8x16Add) { RunI8x16BinOpTest(kExprI8x16Add, Add); }
+WASM_SIMD_TEST(I8x16Add) {
+ RunI8x16BinOpTest(execution_mode, kExprI8x16Add, Add);
+}
WASM_SIMD_TEST(I8x16AddSaturateS) {
- RunI8x16BinOpTest(kExprI8x16AddSaturateS, AddSaturate);
+ RunI8x16BinOpTest(execution_mode, kExprI8x16AddSaturateS, AddSaturate);
}
-WASM_SIMD_TEST(I8x16Sub) { RunI8x16BinOpTest(kExprI8x16Sub, Sub); }
+WASM_SIMD_TEST(I8x16Sub) {
+ RunI8x16BinOpTest(execution_mode, kExprI8x16Sub, Sub);
+}
WASM_SIMD_TEST(I8x16SubSaturateS) {
- RunI8x16BinOpTest(kExprI8x16SubSaturateS, SubSaturate);
+ RunI8x16BinOpTest(execution_mode, kExprI8x16SubSaturateS, SubSaturate);
}
-WASM_SIMD_TEST(I8x16MinS) { RunI8x16BinOpTest(kExprI8x16MinS, Minimum); }
+WASM_SIMD_TEST(I8x16MinS) {
+ RunI8x16BinOpTest(execution_mode, kExprI8x16MinS, Minimum);
+}
-WASM_SIMD_TEST(I8x16MaxS) { RunI8x16BinOpTest(kExprI8x16MaxS, Maximum); }
+WASM_SIMD_TEST(I8x16MaxS) {
+ RunI8x16BinOpTest(execution_mode, kExprI8x16MaxS, Maximum);
+}
WASM_SIMD_TEST(I8x16AddSaturateU) {
- RunI8x16BinOpTest(kExprI8x16AddSaturateU, UnsignedAddSaturate);
+ RunI8x16BinOpTest(execution_mode, kExprI8x16AddSaturateU,
+ UnsignedAddSaturate);
}
WASM_SIMD_TEST(I8x16SubSaturateU) {
- RunI8x16BinOpTest(kExprI8x16SubSaturateU, UnsignedSubSaturate);
+ RunI8x16BinOpTest(execution_mode, kExprI8x16SubSaturateU,
+ UnsignedSubSaturate);
}
WASM_SIMD_TEST(I8x16MinU) {
- RunI8x16BinOpTest(kExprI8x16MinU, UnsignedMinimum);
+ RunI8x16BinOpTest(execution_mode, kExprI8x16MinU, UnsignedMinimum);
}
WASM_SIMD_TEST(I8x16MaxU) {
- RunI8x16BinOpTest(kExprI8x16MaxU, UnsignedMaximum);
+ RunI8x16BinOpTest(execution_mode, kExprI8x16MaxU, UnsignedMaximum);
}
-void RunI8x16CompareOpTest(WasmOpcode simd_op, Int8CompareOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteCompiled);
+void RunI8x16CompareOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+ Int8CompareOp expected_op) {
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode);
byte a = 0;
byte b = 1;
byte expected = 2;
@@ -1379,43 +1451,59 @@ void RunI8x16CompareOpTest(WasmOpcode simd_op, Int8CompareOp expected_op) {
}
}
-WASM_SIMD_TEST(I8x16Eq) { RunI8x16CompareOpTest(kExprI8x16Eq, Equal); }
+WASM_SIMD_TEST(I8x16Eq) {
+ RunI8x16CompareOpTest(execution_mode, kExprI8x16Eq, Equal);
+}
-WASM_SIMD_TEST(I8x16Ne) { RunI8x16CompareOpTest(kExprI8x16Ne, NotEqual); }
+WASM_SIMD_TEST(I8x16Ne) {
+ RunI8x16CompareOpTest(execution_mode, kExprI8x16Ne, NotEqual);
+}
-WASM_SIMD_TEST(I8x16GtS) { RunI8x16CompareOpTest(kExprI8x16GtS, Greater); }
+WASM_SIMD_TEST(I8x16GtS) {
+ RunI8x16CompareOpTest(execution_mode, kExprI8x16GtS, Greater);
+}
-WASM_SIMD_TEST(I8x16GeS) { RunI8x16CompareOpTest(kExprI8x16GeS, GreaterEqual); }
+WASM_SIMD_TEST(I8x16GeS) {
+ RunI8x16CompareOpTest(execution_mode, kExprI8x16GeS, GreaterEqual);
+}
-WASM_SIMD_TEST(I8x16LtS) { RunI8x16CompareOpTest(kExprI8x16LtS, Less); }
+WASM_SIMD_TEST(I8x16LtS) {
+ RunI8x16CompareOpTest(execution_mode, kExprI8x16LtS, Less);
+}
-WASM_SIMD_TEST(I8x16LeS) { RunI8x16CompareOpTest(kExprI8x16LeS, LessEqual); }
+WASM_SIMD_TEST(I8x16LeS) {
+ RunI8x16CompareOpTest(execution_mode, kExprI8x16LeS, LessEqual);
+}
WASM_SIMD_TEST(I8x16GtU) {
- RunI8x16CompareOpTest(kExprI8x16GtU, UnsignedGreater);
+ RunI8x16CompareOpTest(execution_mode, kExprI8x16GtU, UnsignedGreater);
}
WASM_SIMD_TEST(I8x16GeU) {
- RunI8x16CompareOpTest(kExprI8x16GeU, UnsignedGreaterEqual);
+ RunI8x16CompareOpTest(execution_mode, kExprI8x16GeU, UnsignedGreaterEqual);
}
-WASM_SIMD_TEST(I8x16LtU) { RunI8x16CompareOpTest(kExprI8x16LtU, UnsignedLess); }
+WASM_SIMD_TEST(I8x16LtU) {
+ RunI8x16CompareOpTest(execution_mode, kExprI8x16LtU, UnsignedLess);
+}
WASM_SIMD_TEST(I8x16LeU) {
- RunI8x16CompareOpTest(kExprI8x16LeU, UnsignedLessEqual);
+ RunI8x16CompareOpTest(execution_mode, kExprI8x16LeU, UnsignedLessEqual);
}
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 ||
- // SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET || \
- V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-WASM_SIMD_TEST(I8x16Mul) { RunI8x16BinOpTest(kExprI8x16Mul, Mul); }
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 ||
// V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-void RunI8x16ShiftOpTest(WasmOpcode simd_op, Int8ShiftOp expected_op,
- int shift) {
- WasmRunner<int32_t, int32_t, int32_t> r(kExecuteCompiled);
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
+ V8_TARGET_ARCH_MIPS64
+WASM_SIMD_TEST(I8x16Mul) {
+ RunI8x16BinOpTest(execution_mode, kExprI8x16Mul, Mul);
+}
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
+ // V8_TARGET_ARCH_MIPS64
+
+void RunI8x16ShiftOpTest(WasmExecutionMode execution_mode, WasmOpcode simd_op,
+ Int8ShiftOp expected_op, int shift) {
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
byte a = 0;
byte expected = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -1428,20 +1516,20 @@ void RunI8x16ShiftOpTest(WasmOpcode simd_op, Int8ShiftOp expected_op,
}
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64 || SIMD_LOWERING_TARGET
+ V8_TARGET_ARCH_MIPS64
WASM_SIMD_TEST(I8x16Shl) {
- RunI8x16ShiftOpTest(kExprI8x16Shl, LogicalShiftLeft, 1);
+ RunI8x16ShiftOpTest(execution_mode, kExprI8x16Shl, LogicalShiftLeft, 1);
}
WASM_SIMD_TEST(I8x16ShrS) {
- RunI8x16ShiftOpTest(kExprI8x16ShrS, ArithmeticShiftRight, 1);
+ RunI8x16ShiftOpTest(execution_mode, kExprI8x16ShrS, ArithmeticShiftRight, 1);
}
WASM_SIMD_TEST(I8x16ShrU) {
- RunI8x16ShiftOpTest(kExprI8x16ShrU, LogicalShiftRight, 1);
+ RunI8x16ShiftOpTest(execution_mode, kExprI8x16ShrU, LogicalShiftRight, 1);
}
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
- // V8_TARGET_ARCH_MIPS64 || SIMD_LOWERING_TARGET
+ // V8_TARGET_ARCH_MIPS64
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || \
V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
@@ -1449,8 +1537,8 @@ WASM_SIMD_TEST(I8x16ShrU) {
// rest false, and comparing for non-equality with zero to convert to a boolean
// vector.
#define WASM_SIMD_SELECT_TEST(format) \
- WASM_SIMD_TEST(S##format##Select) { \
- WasmRunner<int32_t, int32_t, int32_t> r(kExecuteCompiled); \
+ WASM_SIMD_COMPILED_TEST(S##format##Select) { \
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_mode); \
byte val1 = 0; \
byte val2 = 1; \
byte src1 = r.AllocateLocal(kWasmS128); \
@@ -1489,8 +1577,8 @@ WASM_SIMD_SELECT_TEST(8x16)
// Test Select by making a mask where the 0th and 3rd lanes are non-zero and the
// rest 0. The mask is not the result of a comparison op.
#define WASM_SIMD_NON_CANONICAL_SELECT_TEST(format) \
- WASM_SIMD_TEST(S##format##NonCanonicalSelect) { \
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteCompiled); \
+ WASM_SIMD_COMPILED_TEST(S##format##NonCanonicalSelect) { \
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode); \
byte val1 = 0; \
byte val2 = 1; \
byte combined = 2; \
@@ -1522,17 +1610,13 @@ WASM_SIMD_SELECT_TEST(8x16)
WASM_SIMD_NON_CANONICAL_SELECT_TEST(32x4)
WASM_SIMD_NON_CANONICAL_SELECT_TEST(16x8)
WASM_SIMD_NON_CANONICAL_SELECT_TEST(8x16)
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 ||
- // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || \
- V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
// Test binary ops with two lane test patterns, all lanes distinct.
template <typename T>
void RunBinaryLaneOpTest(
- WasmOpcode simd_op,
+ WasmExecutionMode execution_mode, WasmOpcode simd_op,
const std::array<T, kSimd128Size / sizeof(T)>& expected) {
- WasmRunner<int32_t> r(kExecuteCompiled);
+ WasmRunner<int32_t> r(execution_mode);
// Set up two test patterns as globals, e.g. [0, 1, 2, 3] and [4, 5, 6, 7].
T* src0 = r.builder().AddGlobal<T>(kWasmS128);
T* src1 = r.builder().AddGlobal<T>(kWasmS128);
@@ -1559,17 +1643,14 @@ void RunBinaryLaneOpTest(
CHECK_EQ(src0[i], expected[i]);
}
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_MIPS ||
- // V8_TARGET_ARCH_MIPS64
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || \
- V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-WASM_SIMD_TEST(I32x4AddHoriz) {
- RunBinaryLaneOpTest<int32_t>(kExprI32x4AddHoriz, {{1, 5, 9, 13}});
+WASM_SIMD_COMPILED_TEST(I32x4AddHoriz) {
+ RunBinaryLaneOpTest<int32_t>(execution_mode, kExprI32x4AddHoriz,
+ {{1, 5, 9, 13}});
}
-WASM_SIMD_TEST(I16x8AddHoriz) {
- RunBinaryLaneOpTest<int16_t>(kExprI16x8AddHoriz,
+WASM_SIMD_COMPILED_TEST(I16x8AddHoriz) {
+ RunBinaryLaneOpTest<int16_t>(execution_mode, kExprI16x8AddHoriz,
{{1, 5, 9, 13, 17, 21, 25, 29}});
}
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 ||
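
The shuffle tests in the next hunk drive kExprS8x16Shuffle with 16-entry lane tables; indices 0-15 select a byte from the first input and 16-31 from the second, which is how e.g. {16, 17, 18, 19, ...} duplicates the second operand's first word. Reference semantics, as a sketch:

#include <cstdint>
// result[i] takes lhs bytes for idx < 16, rhs bytes for idx >= 16.
void S8x16ShuffleRef(const uint8_t* lhs, const uint8_t* rhs,
                     const uint8_t* idx, uint8_t* result) {
  for (int i = 0; i < 16; ++i) {
    result[i] = idx[i] < 16 ? lhs[idx[i]] : rhs[idx[i] - 16];
  }
}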
@@ -1577,251 +1658,270 @@ WASM_SIMD_TEST(I16x8AddHoriz) {
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64
-WASM_SIMD_TEST(F32x4AddHoriz) {
- RunBinaryLaneOpTest<float>(kExprF32x4AddHoriz, {{1.0f, 5.0f, 9.0f, 13.0f}});
+WASM_SIMD_COMPILED_TEST(F32x4AddHoriz) {
+ RunBinaryLaneOpTest<float>(execution_mode, kExprF32x4AddHoriz,
+ {{1.0f, 5.0f, 9.0f, 13.0f}});
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
- // V8_TARGET_ARCH_MIPS64
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64
// Test some regular shuffles that may have special handling on some targets.
// Test normal and unary versions (where the second operand isn't used).
-WASM_SIMD_TEST(S32x4Dup) {
+WASM_SIMD_COMPILED_TEST(S32x4Dup) {
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{16, 17, 18, 19, 16, 17, 18, 19, 16, 17, 18, 19, 16, 17, 18, 19}});
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle, {{4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7}});
+ execution_mode, kExprS8x16Shuffle,
+ {{4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7}});
}
-WASM_SIMD_TEST(S32x4ZipLeft) {
+WASM_SIMD_COMPILED_TEST(S32x4ZipLeft) {
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23}});
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle, {{0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 4, 5, 6, 7}});
+ execution_mode, kExprS8x16Shuffle,
+ {{0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 4, 5, 6, 7}});
}
-WASM_SIMD_TEST(S32x4ZipRight) {
+WASM_SIMD_COMPILED_TEST(S32x4ZipRight) {
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31}});
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{8, 9, 10, 11, 8, 9, 10, 11, 12, 13, 14, 15, 12, 13, 14, 15}});
}
-WASM_SIMD_TEST(S32x4UnzipLeft) {
+WASM_SIMD_COMPILED_TEST(S32x4UnzipLeft) {
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27}});
- RunBinaryLaneOpTest<int8_t>(kExprS8x16Shuffle, {{0, 1, 2, 3, 8, 9, 10, 11, 0,
- 1, 2, 3, 8, 9, 10, 11}});
+ RunBinaryLaneOpTest<int8_t>(
+ execution_mode, kExprS8x16Shuffle,
+ {{0, 1, 2, 3, 8, 9, 10, 11, 0, 1, 2, 3, 8, 9, 10, 11}});
}
-WASM_SIMD_TEST(S32x4UnzipRight) {
+WASM_SIMD_COMPILED_TEST(S32x4UnzipRight) {
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31}});
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{4, 5, 6, 7, 12, 13, 14, 15, 4, 5, 6, 7, 12, 13, 14, 15}});
}
-WASM_SIMD_TEST(S32x4TransposeLeft) {
+WASM_SIMD_COMPILED_TEST(S32x4TransposeLeft) {
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27}});
- RunBinaryLaneOpTest<int8_t>(kExprS8x16Shuffle, {{0, 1, 2, 3, 0, 1, 2, 3, 8, 9,
- 10, 11, 8, 9, 10, 11}});
+ RunBinaryLaneOpTest<int8_t>(
+ execution_mode, kExprS8x16Shuffle,
+ {{0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 8, 9, 10, 11}});
}
-WASM_SIMD_TEST(S32x4TransposeRight) {
+WASM_SIMD_COMPILED_TEST(S32x4TransposeRight) {
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31}});
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{4, 5, 6, 7, 4, 5, 6, 7, 12, 13, 14, 15, 12, 13, 14, 15}});
}
// Reverses are only unary.
-WASM_SIMD_TEST(S32x2Reverse) {
- RunBinaryLaneOpTest<int8_t>(kExprS8x16Shuffle, {{4, 5, 6, 7, 0, 1, 2, 3, 12,
- 13, 14, 15, 8, 9, 10, 11}});
+WASM_SIMD_COMPILED_TEST(S32x2Reverse) {
+ RunBinaryLaneOpTest<int8_t>(
+ execution_mode, kExprS8x16Shuffle,
+ {{4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}});
}
// Test irregular shuffle.
-WASM_SIMD_TEST(S32x4Irregular) {
+WASM_SIMD_COMPILED_TEST(S32x4Irregular) {
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{0, 1, 2, 3, 16, 17, 18, 19, 16, 17, 18, 19, 20, 21, 22, 23}});
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle, {{0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7}});
+ execution_mode, kExprS8x16Shuffle,
+ {{0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7}});
}
-WASM_SIMD_TEST(S16x8Dup) {
+WASM_SIMD_COMPILED_TEST(S16x8Dup) {
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{18, 19, 18, 19, 18, 19, 18, 19, 18, 19, 18, 19, 18, 19, 18, 19}});
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle, {{6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7}});
+ execution_mode, kExprS8x16Shuffle,
+ {{6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7}});
}
-WASM_SIMD_TEST(S16x8ZipLeft) {
+WASM_SIMD_COMPILED_TEST(S16x8ZipLeft) {
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23}});
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle, {{0, 1, 0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7}});
+ execution_mode, kExprS8x16Shuffle,
+ {{0, 1, 0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7}});
}
-WASM_SIMD_TEST(S16x8ZipRight) {
+WASM_SIMD_COMPILED_TEST(S16x8ZipRight) {
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31}});
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{8, 9, 8, 9, 10, 11, 10, 11, 12, 13, 12, 13, 14, 15, 14, 15}});
}
-WASM_SIMD_TEST(S16x8UnzipLeft) {
+WASM_SIMD_COMPILED_TEST(S16x8UnzipLeft) {
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29}});
- RunBinaryLaneOpTest<int8_t>(kExprS8x16Shuffle, {{0, 1, 4, 5, 8, 9, 12, 13, 0,
- 1, 4, 5, 8, 9, 12, 13}});
+ RunBinaryLaneOpTest<int8_t>(
+ execution_mode, kExprS8x16Shuffle,
+ {{0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 4, 5, 8, 9, 12, 13}});
}
-WASM_SIMD_TEST(S16x8UnzipRight) {
+WASM_SIMD_COMPILED_TEST(S16x8UnzipRight) {
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31}});
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15}});
}
-WASM_SIMD_TEST(S16x8TransposeLeft) {
+WASM_SIMD_COMPILED_TEST(S16x8TransposeLeft) {
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29}});
- RunBinaryLaneOpTest<int8_t>(kExprS8x16Shuffle, {{0, 1, 0, 1, 4, 5, 4, 5, 8, 9,
- 8, 9, 12, 13, 12, 13}});
+ RunBinaryLaneOpTest<int8_t>(
+ execution_mode, kExprS8x16Shuffle,
+ {{0, 1, 0, 1, 4, 5, 4, 5, 8, 9, 8, 9, 12, 13, 12, 13}});
}
-WASM_SIMD_TEST(S16x8TransposeRight) {
+WASM_SIMD_COMPILED_TEST(S16x8TransposeRight) {
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31}});
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{2, 3, 2, 3, 6, 7, 6, 7, 10, 11, 10, 11, 14, 15, 14, 15}});
}
-WASM_SIMD_TEST(S16x4Reverse) {
- RunBinaryLaneOpTest<int8_t>(kExprS8x16Shuffle, {{6, 7, 4, 5, 2, 3, 0, 1, 14,
- 15, 12, 13, 10, 11, 8, 9}});
+WASM_SIMD_COMPILED_TEST(S16x4Reverse) {
+ RunBinaryLaneOpTest<int8_t>(
+ execution_mode, kExprS8x16Shuffle,
+ {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9}});
}
-WASM_SIMD_TEST(S16x2Reverse) {
- RunBinaryLaneOpTest<int8_t>(kExprS8x16Shuffle, {{2, 3, 0, 1, 6, 7, 4, 5, 10,
- 11, 8, 9, 14, 15, 12, 13}});
+WASM_SIMD_COMPILED_TEST(S16x2Reverse) {
+ RunBinaryLaneOpTest<int8_t>(
+ execution_mode, kExprS8x16Shuffle,
+ {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}});
}
-WASM_SIMD_TEST(S16x8Irregular) {
+WASM_SIMD_COMPILED_TEST(S16x8Irregular) {
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{0, 1, 16, 17, 16, 17, 0, 1, 4, 5, 20, 21, 6, 7, 22, 23}});
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle, {{0, 1, 0, 1, 0, 1, 0, 1, 4, 5, 4, 5, 6, 7, 6, 7}});
+ execution_mode, kExprS8x16Shuffle,
+ {{0, 1, 0, 1, 0, 1, 0, 1, 4, 5, 4, 5, 6, 7, 6, 7}});
}
-WASM_SIMD_TEST(S8x16Dup) {
+WASM_SIMD_COMPILED_TEST(S8x16Dup) {
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19}});
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle, {{7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7}});
+ execution_mode, kExprS8x16Shuffle,
+ {{7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7}});
}
-WASM_SIMD_TEST(S8x16ZipLeft) {
+WASM_SIMD_COMPILED_TEST(S8x16ZipLeft) {
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23}});
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle, {{0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7}});
+ execution_mode, kExprS8x16Shuffle,
+ {{0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7}});
}
-WASM_SIMD_TEST(S8x16ZipRight) {
+WASM_SIMD_COMPILED_TEST(S8x16ZipRight) {
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31}});
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15}});
}
-WASM_SIMD_TEST(S8x16UnzipLeft) {
+WASM_SIMD_COMPILED_TEST(S8x16UnzipLeft) {
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30}});
- RunBinaryLaneOpTest<int8_t>(kExprS8x16Shuffle, {{0, 2, 4, 6, 8, 10, 12, 14, 0,
- 2, 4, 6, 8, 10, 12, 14}});
+ RunBinaryLaneOpTest<int8_t>(
+ execution_mode, kExprS8x16Shuffle,
+ {{0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14}});
}
-WASM_SIMD_TEST(S8x16UnzipRight) {
+WASM_SIMD_COMPILED_TEST(S8x16UnzipRight) {
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31}});
- RunBinaryLaneOpTest<int8_t>(kExprS8x16Shuffle, {{1, 3, 5, 7, 9, 11, 13, 15, 1,
- 3, 5, 7, 9, 11, 13, 15}});
+ RunBinaryLaneOpTest<int8_t>(
+ execution_mode, kExprS8x16Shuffle,
+ {{1, 3, 5, 7, 9, 11, 13, 15, 1, 3, 5, 7, 9, 11, 13, 15}});
}
-WASM_SIMD_TEST(S8x16TransposeLeft) {
+WASM_SIMD_COMPILED_TEST(S8x16TransposeLeft) {
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30}});
- RunBinaryLaneOpTest<int8_t>(kExprS8x16Shuffle, {{0, 0, 2, 2, 4, 4, 6, 6, 8, 8,
- 10, 10, 12, 12, 14, 14}});
+ RunBinaryLaneOpTest<int8_t>(
+ execution_mode, kExprS8x16Shuffle,
+ {{0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14}});
}
-WASM_SIMD_TEST(S8x16TransposeRight) {
+WASM_SIMD_COMPILED_TEST(S8x16TransposeRight) {
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31}});
- RunBinaryLaneOpTest<int8_t>(kExprS8x16Shuffle, {{1, 1, 3, 3, 5, 5, 7, 7, 9, 9,
- 11, 11, 13, 13, 15, 15}});
+ RunBinaryLaneOpTest<int8_t>(
+ execution_mode, kExprS8x16Shuffle,
+ {{1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15}});
}
-WASM_SIMD_TEST(S8x8Reverse) {
- RunBinaryLaneOpTest<int8_t>(kExprS8x16Shuffle, {{7, 6, 5, 4, 3, 2, 1, 0, 15,
- 14, 13, 12, 11, 10, 9, 8}});
+WASM_SIMD_COMPILED_TEST(S8x8Reverse) {
+ RunBinaryLaneOpTest<int8_t>(
+ execution_mode, kExprS8x16Shuffle,
+ {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}});
}
-WASM_SIMD_TEST(S8x4Reverse) {
- RunBinaryLaneOpTest<int8_t>(kExprS8x16Shuffle, {{3, 2, 1, 0, 7, 6, 5, 4, 11,
- 10, 9, 8, 15, 14, 13, 12}});
+WASM_SIMD_COMPILED_TEST(S8x4Reverse) {
+ RunBinaryLaneOpTest<int8_t>(
+ execution_mode, kExprS8x16Shuffle,
+ {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}});
}
-WASM_SIMD_TEST(S8x2Reverse) {
- RunBinaryLaneOpTest<int8_t>(kExprS8x16Shuffle, {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8,
- 11, 10, 13, 12, 15, 14}});
+WASM_SIMD_COMPILED_TEST(S8x2Reverse) {
+ RunBinaryLaneOpTest<int8_t>(
+ execution_mode, kExprS8x16Shuffle,
+ {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}});
}
-WASM_SIMD_TEST(S8x16Irregular) {
+WASM_SIMD_COMPILED_TEST(S8x16Irregular) {
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle,
+ execution_mode, kExprS8x16Shuffle,
{{0, 16, 0, 16, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23}});
RunBinaryLaneOpTest<int8_t>(
- kExprS8x16Shuffle, {{0, 0, 0, 0, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7}});
+ execution_mode, kExprS8x16Shuffle,
+ {{0, 0, 0, 0, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7}});
}
// Test shuffles that concatenate the two vectors.
-void RunConcatOpTest() {}
-WASM_SIMD_TEST(S8x16Concat) {
+WASM_SIMD_COMPILED_TEST(S8x16Concat) {
static const int kLanes = 16;
std::array<uint8_t, kLanes> expected;
for (int bias = 1; bias < kLanes; bias++) {
@@ -1834,20 +1934,16 @@ WASM_SIMD_TEST(S8x16Concat) {
for (int j = 0; j < bias; j++) {
expected[i++] = j + kLanes;
}
- RunBinaryLaneOpTest(kExprS8x16Shuffle, expected);
+ RunBinaryLaneOpTest(execution_mode, kExprS8x16Shuffle, expected);
}
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
- // V8_TARGET_ARCH_MIPS64
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64
// Boolean unary operations are 'AllTrue' and 'AnyTrue', which return an integer
// result. Use relational ops on numeric vectors to create the boolean vector
// test inputs. Test inputs with all true, all false, one true, and one false.
#define WASM_SIMD_BOOL_REDUCTION_TEST(format, lanes) \
- WASM_SIMD_TEST(ReductionTest##lanes) { \
- WasmRunner<int32_t> r(kExecuteCompiled); \
+ WASM_SIMD_COMPILED_TEST(ReductionTest##lanes) { \
+ WasmRunner<int32_t> r(execution_mode); \
byte zero = r.AllocateLocal(kWasmS128); \
byte one_one = r.AllocateLocal(kWasmS128); \
byte reduced = r.AllocateLocal(kWasmI32); \
@@ -1919,13 +2015,8 @@ WASM_SIMD_BOOL_REDUCTION_TEST(32x4, 4)
WASM_SIMD_BOOL_REDUCTION_TEST(16x8, 8)
WASM_SIMD_BOOL_REDUCTION_TEST(8x16, 16)
-#endif // !V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
- // V8_TARGET_ARCH_MIPS64
-
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET || \
- V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
WASM_SIMD_TEST(SimdI32x4ExtractWithF32x4) {
- WasmRunner<int32_t> r(kExecuteCompiled);
+ WasmRunner<int32_t> r(execution_mode);
BUILD(r, WASM_IF_ELSE_I(
WASM_I32_EQ(WASM_SIMD_I32x4_EXTRACT_LANE(
0, WASM_SIMD_F32x4_SPLAT(WASM_F32(30.5))),
@@ -1935,7 +2026,7 @@ WASM_SIMD_TEST(SimdI32x4ExtractWithF32x4) {
}
WASM_SIMD_TEST(SimdF32x4ExtractWithI32x4) {
- WasmRunner<int32_t> r(kExecuteCompiled);
+ WasmRunner<int32_t> r(execution_mode);
BUILD(r,
WASM_IF_ELSE_I(WASM_F32_EQ(WASM_SIMD_F32x4_EXTRACT_LANE(
0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(15))),
@@ -1949,7 +2040,7 @@ WASM_SIMD_TEST(SimdF32x4AddWithI32x4) {
// representable as a float.
const int kOne = 0x3f800000;
const int kTwo = 0x40000000;
- WasmRunner<int32_t> r(kExecuteCompiled);
+ WasmRunner<int32_t> r(execution_mode);
BUILD(r,
WASM_IF_ELSE_I(
WASM_F32_EQ(
@@ -1964,7 +2055,7 @@ WASM_SIMD_TEST(SimdF32x4AddWithI32x4) {
}
WASM_SIMD_TEST(SimdI32x4AddWithF32x4) {
- WasmRunner<int32_t> r(kExecuteCompiled);
+ WasmRunner<int32_t> r(execution_mode);
BUILD(r,
WASM_IF_ELSE_I(
WASM_I32_EQ(
@@ -1977,13 +2068,13 @@ WASM_SIMD_TEST(SimdI32x4AddWithF32x4) {
WASM_I32V(1), WASM_I32V(0)));
CHECK_EQ(1, r.Call());
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET ||
- // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
+ // V8_TARGET_ARCH_MIPS64
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET || \
- V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || \
+ V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
WASM_SIMD_TEST(SimdI32x4Local) {
- WasmRunner<int32_t> r(kExecuteCompiled);
+ WasmRunner<int32_t> r(execution_mode);
r.AllocateLocal(kWasmS128);
BUILD(r, WASM_SET_LOCAL(0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(31))),
@@ -1992,7 +2083,7 @@ WASM_SIMD_TEST(SimdI32x4Local) {
}
WASM_SIMD_TEST(SimdI32x4SplatFromExtract) {
- WasmRunner<int32_t> r(kExecuteCompiled);
+ WasmRunner<int32_t> r(execution_mode);
r.AllocateLocal(kWasmI32);
r.AllocateLocal(kWasmS128);
BUILD(r, WASM_SET_LOCAL(0, WASM_SIMD_I32x4_EXTRACT_LANE(
@@ -2003,7 +2094,7 @@ WASM_SIMD_TEST(SimdI32x4SplatFromExtract) {
}
WASM_SIMD_TEST(SimdI32x4For) {
- WasmRunner<int32_t> r(kExecuteCompiled);
+ WasmRunner<int32_t> r(execution_mode);
r.AllocateLocal(kWasmI32);
r.AllocateLocal(kWasmS128);
BUILD(r,
@@ -2035,13 +2126,13 @@ WASM_SIMD_TEST(SimdI32x4For) {
WASM_GET_LOCAL(0));
CHECK_EQ(1, r.Call());
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET ||
- // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 ||
+ // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET || \
- V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
+ V8_TARGET_ARCH_MIPS64
WASM_SIMD_TEST(SimdF32x4For) {
- WasmRunner<int32_t> r(kExecuteCompiled);
+ WasmRunner<int32_t> r(execution_mode);
r.AllocateLocal(kWasmI32);
r.AllocateLocal(kWasmS128);
BUILD(r, WASM_SET_LOCAL(1, WASM_SIMD_F32x4_SPLAT(WASM_F32(21.25))),
@@ -2063,11 +2154,11 @@ WASM_SIMD_TEST(SimdF32x4For) {
WASM_GET_LOCAL(0));
CHECK_EQ(1, r.Call());
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET ||
- // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
+ // V8_TARGET_ARCH_MIPS64
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET || \
- V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || \
+ V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
template <typename T, int numLanes = 4>
void SetVectorByLanes(T* v, const std::array<T, numLanes>& arr) {
@@ -2095,7 +2186,7 @@ const T& GetScalar(T* v, int lane) {
}
WASM_SIMD_TEST(SimdI32x4GetGlobal) {
- WasmRunner<int32_t, int32_t> r(kExecuteCompiled);
+ WasmRunner<int32_t, int32_t> r(execution_mode);
int32_t* global = r.builder().AddGlobal<int32_t>(kWasmS128);
SetVectorByLanes(global, {{0, 1, 2, 3}});
r.AllocateLocal(kWasmI32);
@@ -2118,7 +2209,7 @@ WASM_SIMD_TEST(SimdI32x4GetGlobal) {
}
WASM_SIMD_TEST(SimdI32x4SetGlobal) {
- WasmRunner<int32_t, int32_t> r(kExecuteCompiled);
+ WasmRunner<int32_t, int32_t> r(execution_mode);
int32_t* global = r.builder().AddGlobal<int32_t>(kWasmS128);
BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(23))),
WASM_SET_GLOBAL(0, WASM_SIMD_I32x4_REPLACE_LANE(1, WASM_GET_GLOBAL(0),
@@ -2134,13 +2225,13 @@ WASM_SIMD_TEST(SimdI32x4SetGlobal) {
CHECK_EQ(GetScalar(global, 2), 45);
CHECK_EQ(GetScalar(global, 3), 56);
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET ||
- // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 ||
+ // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET || \
- V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
+ V8_TARGET_ARCH_MIPS64
WASM_SIMD_TEST(SimdF32x4GetGlobal) {
- WasmRunner<int32_t, int32_t> r(kExecuteCompiled);
+ WasmRunner<int32_t, int32_t> r(execution_mode);
float* global = r.builder().AddGlobal<float>(kWasmS128);
SetVectorByLanes<float>(global, {{0.0, 1.5, 2.25, 3.5}});
r.AllocateLocal(kWasmI32);
@@ -2163,7 +2254,7 @@ WASM_SIMD_TEST(SimdF32x4GetGlobal) {
}
WASM_SIMD_TEST(SimdF32x4SetGlobal) {
- WasmRunner<int32_t, int32_t> r(kExecuteCompiled);
+ WasmRunner<int32_t, int32_t> r(execution_mode);
float* global = r.builder().AddGlobal<float>(kWasmS128);
BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_SPLAT(WASM_F32(13.5))),
WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_REPLACE_LANE(1, WASM_GET_GLOBAL(0),
@@ -2179,13 +2270,13 @@ WASM_SIMD_TEST(SimdF32x4SetGlobal) {
CHECK_EQ(GetScalar(global, 2), 32.25f);
CHECK_EQ(GetScalar(global, 3), 65.0f);
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET ||
- // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
+ // V8_TARGET_ARCH_MIPS64
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET || \
- V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-WASM_SIMD_TEST(SimdLoadStoreLoad) {
- WasmRunner<int32_t> r(kExecuteCompiled);
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || \
+ V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+WASM_SIMD_COMPILED_TEST(SimdLoadStoreLoad) {
+ WasmRunner<int32_t> r(execution_mode);
int32_t* memory = r.builder().AddMemoryElems<int32_t>(4);
BUILD(r, WASM_SIMD_STORE_MEM(WASM_ZERO, WASM_SIMD_LOAD_MEM(WASM_ZERO)),
@@ -2197,5 +2288,51 @@ WASM_SIMD_TEST(SimdLoadStoreLoad) {
CHECK_EQ(expected, r.Call());
}
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET ||
- // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 ||
+ // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+
+#undef WASM_SIMD_TEST
+#undef WASM_SIMD_COMPILED_TEST
+#undef WASM_SIMD_CHECK_LANE
+#undef WASM_SIMD_CHECK4
+#undef WASM_SIMD_CHECK_SPLAT4
+#undef WASM_SIMD_CHECK8
+#undef WASM_SIMD_CHECK_SPLAT8
+#undef WASM_SIMD_CHECK16
+#undef WASM_SIMD_CHECK_SPLAT16
+#undef WASM_SIMD_CHECK_F32_LANE
+#undef WASM_SIMD_CHECK_F32x4
+#undef WASM_SIMD_CHECK_SPLAT_F32x4
+#undef WASM_SIMD_CHECK_F32_LANE_ESTIMATE
+#undef WASM_SIMD_CHECK_SPLAT_F32x4_ESTIMATE
+#undef TO_BYTE
+#undef WASM_SIMD_OP
+#undef WASM_SIMD_SPLAT
+#undef WASM_SIMD_UNOP
+#undef WASM_SIMD_BINOP
+#undef WASM_SIMD_SHIFT_OP
+#undef WASM_SIMD_CONCAT_OP
+#undef WASM_SIMD_SELECT
+#undef WASM_SIMD_F32x4_SPLAT
+#undef WASM_SIMD_F32x4_EXTRACT_LANE
+#undef WASM_SIMD_F32x4_REPLACE_LANE
+#undef WASM_SIMD_I32x4_SPLAT
+#undef WASM_SIMD_I32x4_EXTRACT_LANE
+#undef WASM_SIMD_I32x4_REPLACE_LANE
+#undef WASM_SIMD_I16x8_SPLAT
+#undef WASM_SIMD_I16x8_EXTRACT_LANE
+#undef WASM_SIMD_I16x8_REPLACE_LANE
+#undef WASM_SIMD_I8x16_SPLAT
+#undef WASM_SIMD_I8x16_EXTRACT_LANE
+#undef WASM_SIMD_I8x16_REPLACE_LANE
+#undef WASM_SIMD_S8x16_SHUFFLE_OP
+#undef WASM_SIMD_LOAD_MEM
+#undef WASM_SIMD_STORE_MEM
+#undef WASM_SIMD_SELECT_TEST
+#undef WASM_SIMD_NON_CANONICAL_SELECT_TEST
+#undef WASM_SIMD_BOOL_REDUCTION_TEST
+
+} // namespace test_run_wasm_simd
+} // namespace wasm
+} // namespace internal
+} // namespace v8
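Note on the conversions above: every WASM_SIMD_TEST that became a WASM_SIMD_COMPILED_TEST now receives an implicit execution_mode parameter, which the bodies thread into RunBinaryLaneOpTest and WasmRunner. The macro definitions sit earlier in this file and are outside this diff; as a rough sketch (the exact expansion is an assumption, not taken from the patch), the compiled-only wrapper plausibly looks like:

    // Hypothetical sketch of WASM_SIMD_COMPILED_TEST: declare the test body
    // with an execution-mode parameter, then register a cctest that always
    // runs it in compiled (TurboFan) mode, never in the interpreter.
    #define WASM_SIMD_COMPILED_TEST(name)                      \
      void RunWasm_##name(WasmExecutionMode execution_mode);   \
      TEST(RunWasm_##name##_compiled) {                        \
        RunWasm_##name(kExecuteCompiled);                      \
      }                                                        \
      void RunWasm_##name(WasmExecutionMode execution_mode)

That is why each converted body can use execution_mode without declaring it.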
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm.cc b/deps/v8/test/cctest/wasm/test-run-wasm.cc
index 53bfc06674..3b27c78f60 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm.cc
@@ -15,15 +15,14 @@
#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-macro-gen.h"
-using namespace v8::base;
-using namespace v8::internal;
-using namespace v8::internal::compiler;
-using namespace v8::internal::wasm;
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace test_run_wasm {
// for even shorter tests.
#define B1(a) WASM_BLOCK(a)
#define B2(a, b) WASM_BLOCK(a, b)
-#define B3(a, b, c) WASM_BLOCK(a, b, c)
#define RET(x) x, kExprReturn
#define RET_I8(x) WASM_I32V_2(x), kExprReturn
@@ -650,7 +649,6 @@ WASM_EXEC_TEST(IfElse_P) {
CHECK_EQ(expected, r.Call(*i));
}
}
-#define EMPTY
WASM_EXEC_TEST(If_empty1) {
WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_mode);
@@ -1941,18 +1939,21 @@ static void TestBuildGraphForSimpleExpression(WasmOpcode opcode) {
Zone zone(isolate->allocator(), ZONE_NAME);
HandleScope scope(isolate);
// Enable all optional operators.
- CommonOperatorBuilder common(&zone);
- MachineOperatorBuilder machine(&zone, MachineType::PointerRepresentation(),
- MachineOperatorBuilder::kAllOptionalOps);
- Graph graph(&zone);
- JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
+ compiler::CommonOperatorBuilder common(&zone);
+ compiler::MachineOperatorBuilder machine(
+ &zone, MachineType::PointerRepresentation(),
+ compiler::MachineOperatorBuilder::kAllOptionalOps);
+ compiler::Graph graph(&zone);
+ compiler::JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr,
+ &machine);
FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (sig->parameter_count() == 1) {
byte code[] = {WASM_NO_LOCALS, kExprGetLocal, 0, static_cast<byte>(opcode),
WASM_END};
TestBuildingGraph(&zone, &jsgraph, nullptr, sig, nullptr, code,
- code + arraysize(code));
+ code + arraysize(code),
+ compiler::kNoRuntimeExceptionSupport);
} else {
CHECK_EQ(2, sig->parameter_count());
byte code[] = {WASM_NO_LOCALS,
@@ -1963,7 +1964,8 @@ static void TestBuildGraphForSimpleExpression(WasmOpcode opcode) {
static_cast<byte>(opcode),
WASM_END};
TestBuildingGraph(&zone, &jsgraph, nullptr, sig, nullptr, code,
- code + arraysize(code));
+ code + arraysize(code),
+ compiler::kNoRuntimeExceptionSupport);
}
}
@@ -2381,7 +2383,7 @@ WASM_EXEC_TEST(MixedCall_2) { Run_WasmMixedCall_N(execution_mode, 2); }
WASM_EXEC_TEST(MixedCall_3) { Run_WasmMixedCall_N(execution_mode, 3); }
WASM_EXEC_TEST(AddCall) {
- WasmRunner<int32_t, int32_t> r(kExecuteCompiled);
+ WasmRunner<int32_t, int32_t> r(execution_mode);
WasmFunctionCompiler& t1 = r.NewFunction<int32_t, int32_t, int32_t>();
BUILD(t1, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
@@ -3037,6 +3039,47 @@ WASM_EXEC_TEST(BranchOverUnreachableCode) {
CHECK_EQ(18, r.Call());
}
+WASM_EXEC_TEST(BranchOverUnreachableCodeInLoop0) {
+ WasmRunner<int32_t> r(execution_mode);
+ BUILD(r,
+ WASM_BLOCK_I(
+ // Start a loop which breaks in the middle (hence unreachable code
+ // afterwards) and continue execution after this loop.
+ // This should validate even though there is no value on the stack
+ // at the end of the loop.
+ WASM_LOOP_I(WASM_BRV(1, WASM_I32V_1(17)))),
+ // Add one to the 17 returned from the block.
+ WASM_ONE, kExprI32Add);
+ CHECK_EQ(18, r.Call());
+}
+
+WASM_EXEC_TEST(BranchOverUnreachableCodeInLoop1) {
+ WasmRunner<int32_t> r(execution_mode);
+ BUILD(r,
+ WASM_BLOCK_I(
+ // Start a loop which breaks in the middle (hence unreachable code
+ // afterwards) and continue execution after this loop.
+ // Even though unreachable, the loop leaves one value on the stack.
+ WASM_LOOP_I(WASM_BRV(1, WASM_I32V_1(17)), WASM_ONE)),
+ // Add one to the 17 returned from the block.
+ WASM_ONE, kExprI32Add);
+ CHECK_EQ(18, r.Call());
+}
+
+WASM_EXEC_TEST(BranchOverUnreachableCodeInLoop2) {
+ WasmRunner<int32_t> r(execution_mode);
+ BUILD(r,
+ WASM_BLOCK_I(
+ // Start a loop which breaks in the middle (hence unreachable code
+ // afterwards) and continue execution after this loop.
+ // The unreachable code is allowed to pop non-existing values off
+ // the stack and push back the result.
+ WASM_LOOP_I(WASM_BRV(1, WASM_I32V_1(17)), kExprI32Add)),
+ // Add one to the 17 returned from the block.
+ WASM_ONE, kExprI32Add);
+ CHECK_EQ(18, r.Call());
+}
+
WASM_EXEC_TEST(BlockInsideUnreachable) {
WasmRunner<int32_t> r(execution_mode);
BUILD(r, WASM_RETURN1(WASM_I32V_1(17)), WASM_BLOCK(WASM_BR(0)));
@@ -3050,3 +3093,14 @@ WASM_EXEC_TEST(IfInsideUnreachable) {
WASM_IF_ELSE_I(WASM_ONE, WASM_BRV(0, WASM_ONE), WASM_RETURN1(WASM_ONE)));
CHECK_EQ(17, r.Call());
}
+
+#undef B1
+#undef B2
+#undef RET
+#undef RET_I8
+#undef ADD_CODE
+
+} // namespace test_run_wasm
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
new file mode 100644
index 0000000000..0e541efbbd
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
@@ -0,0 +1,820 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/api.h"
+#include "src/objects-inl.h"
+#include "src/v8.h"
+#include "src/vector.h"
+
+#include "src/wasm/compilation-manager.h"
+#include "src/wasm/module-decoder.h"
+#include "src/wasm/streaming-decoder.h"
+#include "src/wasm/wasm-module-builder.h"
+#include "src/wasm/wasm-module.h"
+
+#include "test/cctest/cctest.h"
+
+#include "test/common/wasm/test-signatures.h"
+#include "test/common/wasm/wasm-macro-gen.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class MockPlatform final : public TestPlatform {
+ public:
+ MockPlatform() : old_platform_(i::V8::GetCurrentPlatform()) {
+ // Now that it's completely constructed, make this the current platform.
+ i::V8::SetPlatformForTesting(this);
+ }
+ virtual ~MockPlatform() {
+ // Delete all remaining tasks in the queue.
+ while (!tasks_.empty()) {
+ Task* task = tasks_.back();
+ tasks_.pop_back();
+ delete task;
+ }
+ i::V8::SetPlatformForTesting(old_platform_);
+ }
+
+ void CallOnForegroundThread(v8::Isolate* isolate, Task* task) override {
+ tasks_.push_back(task);
+ }
+
+ void CallOnBackgroundThread(v8::Task* task,
+ ExpectedRuntime expected_runtime) override {
+ tasks_.push_back(task);
+ }
+
+ bool IdleTasksEnabled(v8::Isolate* isolate) override { return false; }
+
+ void ExecuteTasks() {
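+    // Tasks are popped from the back of the vector, so they run in LIFO
+    // order; these tests only require that all pending work eventually runs.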
+ while (!tasks_.empty()) {
+ Task* task = tasks_.back();
+ tasks_.pop_back();
+ task->Run();
+ delete task;
+ }
+ }
+
+ private:
+ // We do not execute tasks concurrently, so we only need one list of tasks.
+ std::vector<Task*> tasks_;
+ v8::Platform* old_platform_;
+};
+
+namespace {
+
+class StreamTester {
+ public:
+ StreamTester() : zone_(&allocator_, "StreamTester") {
+ v8::Isolate* isolate = CcTest::isolate();
+ i::Isolate* i_isolate = CcTest::i_isolate();
+
+ // Create the promise for the streaming compilation.
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ v8::Local<Promise::Resolver> resolver;
+ CHECK(Promise::Resolver::New(context).ToLocal(&resolver));
+ CHECK(!i_isolate->has_scheduled_exception());
+ promise_ = resolver->GetPromise();
+
+ i::Handle<i::JSPromise> i_promise = v8::Utils::OpenHandle(*promise_);
+
+ stream_ = i_isolate->wasm_compilation_manager()->StartStreamingCompilation(
+ i_isolate, v8::Utils::OpenHandle(*context), i_promise);
+ }
+
+ std::shared_ptr<StreamingDecoder> stream() { return stream_; }
+
+ // Run all compiler tasks, both foreground and background tasks.
+ void RunCompilerTasks() {
+ static_cast<MockPlatform*>(i::V8::GetCurrentPlatform())->ExecuteTasks();
+ }
+
+ bool IsPromiseFulfilled() {
+ return promise_->State() == v8::Promise::kFulfilled;
+ }
+
+ bool IsPromiseRejected() {
+ return promise_->State() == v8::Promise::kRejected;
+ }
+
+ bool IsPromisePending() { return promise_->State() == v8::Promise::kPending; }
+
+ void OnBytesReceived(const uint8_t* start, size_t length) {
+ stream_->OnBytesReceived(Vector<const uint8_t>(start, length));
+ }
+
+ void FinishStream() { stream_->Finish(); }
+
+ Zone* zone() { return &zone_; }
+
+ private:
+ AccountingAllocator allocator_;
+ Zone zone_;
+ v8::Local<v8::Promise> promise_;
+ std::shared_ptr<StreamingDecoder> stream_;
+};
+} // namespace
+
+#define STREAM_TEST(name) \
+ void RunStream_##name(); \
+ TEST(name) { \
+ MockPlatform platform; \
+ CcTest::InitializeVM(); \
+ v8::HandleScope handle_scope(CcTest::isolate()); \
+ i::HandleScope internal_scope(CcTest::i_isolate()); \
+ RunStream_##name(); \
+ } \
+ void RunStream_##name()
+
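For orientation, a test written as STREAM_TEST(Foo) { ... } expands, per the macro above, to:

    void RunStream_Foo();
    TEST(Foo) {
      MockPlatform platform;
      CcTest::InitializeVM();
      v8::HandleScope handle_scope(CcTest::isolate());
      i::HandleScope internal_scope(CcTest::i_isolate());
      RunStream_Foo();
    }
    void RunStream_Foo() { /* test body */ }

so each streaming test runs with the MockPlatform installed for its whole duration.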
+// Create a valid module with 3 functions.
+ZoneBuffer GetValidModuleBytes(Zone* zone) {
+ ZoneBuffer buffer(zone);
+ TestSignatures sigs;
+ WasmModuleBuilder builder(zone);
+ {
+ WasmFunctionBuilder* f = builder.AddFunction(sigs.i_iii());
+ uint8_t code[] = {kExprGetLocal, 0, kExprEnd};
+ f->EmitCode(code, arraysize(code));
+ }
+ {
+ WasmFunctionBuilder* f = builder.AddFunction(sigs.i_iii());
+ uint8_t code[] = {kExprGetLocal, 1, kExprEnd};
+ f->EmitCode(code, arraysize(code));
+ }
+ {
+ WasmFunctionBuilder* f = builder.AddFunction(sigs.i_iii());
+ uint8_t code[] = {kExprGetLocal, 2, kExprEnd};
+ f->EmitCode(code, arraysize(code));
+ }
+ builder.WriteTo(buffer);
+ return buffer;
+}
+
+// Test that all bytes arrive before doing any compilation. FinishStream is
+// called immediately.
+STREAM_TEST(TestAllBytesArriveImmediatelyStreamFinishesFirst) {
+ StreamTester tester;
+ ZoneBuffer buffer = GetValidModuleBytes(tester.zone());
+
+ tester.OnBytesReceived(buffer.begin(), buffer.end() - buffer.begin());
+ tester.FinishStream();
+
+ tester.RunCompilerTasks();
+
+ CHECK(tester.IsPromiseFulfilled());
+}
+
+// Test that all bytes arrive before doing any compilation. FinishStream is
+// called after the compilation is done.
+STREAM_TEST(TestAllBytesArriveAOTCompilerFinishesFirst) {
+ StreamTester tester;
+ ZoneBuffer buffer = GetValidModuleBytes(tester.zone());
+
+ tester.OnBytesReceived(buffer.begin(), buffer.end() - buffer.begin());
+
+ tester.RunCompilerTasks();
+ tester.FinishStream();
+ tester.RunCompilerTasks();
+
+ CHECK(tester.IsPromiseFulfilled());
+}
+
+size_t GetFunctionOffset(i::Isolate* isolate, const uint8_t* buffer,
+ size_t size, size_t index) {
+ ModuleResult result = SyncDecodeWasmModule(isolate, buffer, buffer + size,
+ false, ModuleOrigin::kWasmOrigin);
+ CHECK(result.ok());
+ std::unique_ptr<WasmModule> module = std::move(result.val);
+  const WasmFunction* func = &module->functions[index];
+ return func->code.offset();
+}
+
+// Test that some functions arrive at the beginning, while the rest arrive
+// after some functions have already been compiled.
+STREAM_TEST(TestCutAfterOneFunctionStreamFinishesFirst) {
+ i::Isolate* isolate = CcTest::i_isolate();
+ StreamTester tester;
+ ZoneBuffer buffer = GetValidModuleBytes(tester.zone());
+
+ size_t offset = GetFunctionOffset(isolate, buffer.begin(), buffer.size(), 1);
+ tester.OnBytesReceived(buffer.begin(), offset);
+ tester.RunCompilerTasks();
+ CHECK(tester.IsPromisePending());
+ tester.OnBytesReceived(buffer.begin() + offset, buffer.size() - offset);
+ tester.FinishStream();
+ tester.RunCompilerTasks();
+ CHECK(tester.IsPromiseFulfilled());
+}
+
+// Test that some functions arrive at the beginning, while the rest arrive
+// after some functions have already been compiled. Call FinishStream after
+// the compilation is done.
+STREAM_TEST(TestCutAfterOneFunctionCompilerFinishesFirst) {
+ i::Isolate* isolate = CcTest::i_isolate();
+ StreamTester tester;
+ ZoneBuffer buffer = GetValidModuleBytes(tester.zone());
+
+ size_t offset = GetFunctionOffset(isolate, buffer.begin(), buffer.size(), 1);
+ tester.OnBytesReceived(buffer.begin(), offset);
+ tester.RunCompilerTasks();
+ CHECK(tester.IsPromisePending());
+ tester.OnBytesReceived(buffer.begin() + offset, buffer.size() - offset);
+ tester.RunCompilerTasks();
+ tester.FinishStream();
+ tester.RunCompilerTasks();
+ CHECK(tester.IsPromiseFulfilled());
+}
+
+// Create a module with an invalid global section.
+ZoneBuffer GetModuleWithInvalidSection(Zone* zone) {
+ ZoneBuffer buffer(zone);
+ TestSignatures sigs;
+ WasmModuleBuilder builder(zone);
+ // Add an invalid global to the module. The decoder will fail there.
+ builder.AddGlobal(kWasmStmt, false, true,
+ WasmInitExpr(WasmInitExpr::kGlobalIndex, 12));
+ {
+ WasmFunctionBuilder* f = builder.AddFunction(sigs.i_iii());
+ uint8_t code[] = {kExprGetLocal, 0, kExprEnd};
+ f->EmitCode(code, arraysize(code));
+ }
+ {
+ WasmFunctionBuilder* f = builder.AddFunction(sigs.i_iii());
+ uint8_t code[] = {kExprGetLocal, 1, kExprEnd};
+ f->EmitCode(code, arraysize(code));
+ }
+ {
+ WasmFunctionBuilder* f = builder.AddFunction(sigs.i_iii());
+ uint8_t code[] = {kExprGetLocal, 2, kExprEnd};
+ f->EmitCode(code, arraysize(code));
+ }
+ builder.WriteTo(buffer);
+ return buffer;
+}
+
+// Test an error in a section, found by the ModuleDecoder.
+STREAM_TEST(TestErrorInSectionStreamFinishesFirst) {
+ StreamTester tester;
+ ZoneBuffer buffer = GetModuleWithInvalidSection(tester.zone());
+
+ tester.OnBytesReceived(buffer.begin(), buffer.end() - buffer.begin());
+ tester.FinishStream();
+
+ tester.RunCompilerTasks();
+
+ CHECK(tester.IsPromiseRejected());
+}
+
+STREAM_TEST(TestErrorInSectionCompilerFinishesFirst) {
+ StreamTester tester;
+ ZoneBuffer buffer = GetModuleWithInvalidSection(tester.zone());
+
+ tester.OnBytesReceived(buffer.begin(), buffer.end() - buffer.begin());
+ tester.RunCompilerTasks();
+ tester.FinishStream();
+ tester.RunCompilerTasks();
+
+ CHECK(tester.IsPromiseRejected());
+}
+
+STREAM_TEST(TestErrorInSectionWithCuts) {
+ StreamTester tester;
+ ZoneBuffer buffer = GetModuleWithInvalidSection(tester.zone());
+
+ const uint8_t* current = buffer.begin();
+ size_t remaining = buffer.end() - buffer.begin();
+ while (current < buffer.end()) {
+ size_t size = std::min(remaining, size_t{10});
+ tester.OnBytesReceived(current, size);
+ tester.RunCompilerTasks();
+ current += 10;
+ remaining -= size;
+ }
+ tester.FinishStream();
+ tester.RunCompilerTasks();
+
+ CHECK(tester.IsPromiseRejected());
+}
+
+ZoneBuffer GetModuleWithInvalidSectionSize(Zone* zone) {
+ // We get a valid module and overwrite the size of the first section with an
+ // invalid value.
+ ZoneBuffer buffer = GetValidModuleBytes(zone);
+ // 9 == 4 (wasm magic) + 4 (version) + 1 (section code)
+ uint8_t* section_size_address = const_cast<uint8_t*>(buffer.begin()) + 9;
+  // The five bytes {0x80, 0x80, 0x80, 0x80, 0x0f} decode to 0xF0000000,
+  // which is an invalid section size in LEB encoding.
+ section_size_address[0] = 0x80;
+ section_size_address[1] = 0x80;
+ section_size_address[2] = 0x80;
+ section_size_address[3] = 0x80;
+ section_size_address[4] = 0x0f;
+ return buffer;
+}
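To see why the streaming decoder has to reject those five bytes, here is a minimal unsigned-LEB128 decoder sketch (an illustration under the usual LEB128 rules, not code from this patch):

    #include <cassert>
    #include <cstdint>

    // Each byte contributes its low 7 bits; a set high bit means "continue".
    uint32_t DecodeU32Leb(const uint8_t* p) {
      uint32_t result = 0;
      for (int shift = 0;; shift += 7) {
        uint8_t b = *p++;
        result |= static_cast<uint32_t>(b & 0x7f) << shift;
        if ((b & 0x80) == 0) return result;
      }
    }

    int main() {
      const uint8_t bytes[] = {0x80, 0x80, 0x80, 0x80, 0x0f};
      // Only the last byte carries payload bits: 0x0f << 28 == 0xF0000000.
      assert(DecodeU32Leb(bytes) == 0xF0000000u);
    }

A section claiming 0xF0000000 bytes can never fit in a valid module, so decoding fails as soon as this size field is read.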
+
+STREAM_TEST(TestErrorInSectionSizeStreamFinishesFirst) {
+ StreamTester tester;
+ ZoneBuffer buffer = GetModuleWithInvalidSectionSize(tester.zone());
+ tester.OnBytesReceived(buffer.begin(), buffer.end() - buffer.begin());
+ tester.FinishStream();
+ tester.RunCompilerTasks();
+
+ CHECK(tester.IsPromiseRejected());
+}
+
+STREAM_TEST(TestErrorInSectionSizeCompilerFinishesFirst) {
+ StreamTester tester;
+ ZoneBuffer buffer = GetModuleWithInvalidSectionSize(tester.zone());
+ tester.OnBytesReceived(buffer.begin(), buffer.end() - buffer.begin());
+ tester.RunCompilerTasks();
+ tester.FinishStream();
+ tester.RunCompilerTasks();
+
+ CHECK(tester.IsPromiseRejected());
+}
+
+STREAM_TEST(TestErrorInSectionSizeWithCuts) {
+ StreamTester tester;
+ ZoneBuffer buffer = GetModuleWithInvalidSectionSize(tester.zone());
+ const uint8_t* current = buffer.begin();
+ size_t remaining = buffer.end() - buffer.begin();
+ while (current < buffer.end()) {
+ size_t size = std::min(remaining, size_t{10});
+ tester.OnBytesReceived(current, size);
+ tester.RunCompilerTasks();
+ current += 10;
+ remaining -= size;
+ }
+ tester.RunCompilerTasks();
+ tester.FinishStream();
+ tester.RunCompilerTasks();
+
+ CHECK(tester.IsPromiseRejected());
+}
+
+// Test an error in the code section, found by the ModuleDecoder. The error is
+// a function count in the code section which differs from the function count
+// declared in the function section.
+STREAM_TEST(TestErrorInCodeSectionDetectedByModuleDecoder) {
+ StreamTester tester;
+
+ uint8_t code[] = {
+ U32V_1(4), // body size
+ U32V_1(0), // locals count
+ kExprGetLocal, 0, kExprEnd // body
+ };
+
+ const uint8_t bytes[] = {
+ WASM_MODULE_HEADER, // module header
+ kTypeSectionCode, // section code
+ U32V_1(1 + SIZEOF_SIG_ENTRY_x_x), // section size
+ U32V_1(1), // type count
+ SIG_ENTRY_x_x(kLocalI32, kLocalI32), // signature entry
+ kFunctionSectionCode, // section code
+ U32V_1(1 + 3), // section size
+ U32V_1(3), // functions count
+ 0, // signature index
+ 0, // signature index
+ 0, // signature index
+ kCodeSectionCode, // section code
+ U32V_1(1 + arraysize(code) * 2), // section size
+ U32V_1(2), // !!! invalid function count !!!
+ };
+
+ tester.OnBytesReceived(bytes, arraysize(bytes));
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.FinishStream();
+
+ tester.RunCompilerTasks();
+
+ CHECK(tester.IsPromiseRejected());
+}
+
+// Test an error in the code section, found by the StreamingDecoder. The error
+// is an invalid function body size, so that there are not enough bytes in the
+// code section for the function body.
+STREAM_TEST(TestErrorInCodeSectionDetectedByStreamingDecoder) {
+ StreamTester tester;
+
+ uint8_t code[] = {
+ U32V_1(26), // !!! invalid body size !!!
+ U32V_1(0), // locals count
+ kExprGetLocal, 0, kExprEnd // body
+ };
+
+ const uint8_t bytes[] = {
+ WASM_MODULE_HEADER, // module header
+ kTypeSectionCode, // section code
+ U32V_1(1 + SIZEOF_SIG_ENTRY_x_x), // section size
+ U32V_1(1), // type count
+ SIG_ENTRY_x_x(kLocalI32, kLocalI32), // signature entry
+ kFunctionSectionCode, // section code
+ U32V_1(1 + 3), // section size
+ U32V_1(3), // functions count
+ 0, // signature index
+ 0, // signature index
+ 0, // signature index
+ kCodeSectionCode, // section code
+ U32V_1(1 + arraysize(code) * 3), // section size
+ U32V_1(3), // functions count
+ };
+
+ tester.OnBytesReceived(bytes, arraysize(bytes));
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.FinishStream();
+
+ tester.RunCompilerTasks();
+
+ CHECK(tester.IsPromiseRejected());
+}
+
+// Test an error in the code section, found by the Compiler. The error is an
+// invalid return type.
+STREAM_TEST(TestErrorInCodeSectionDetectedByCompiler) {
+ StreamTester tester;
+
+ uint8_t code[] = {
+      U32V_1(4),                  // body size
+ U32V_1(0), // locals count
+ kExprGetLocal, 0, kExprEnd // body
+ };
+
+ uint8_t invalid_code[] = {
+      U32V_1(4),                  // body size
+      U32V_1(0),                  // locals count
+      kExprI64Const, 0, kExprEnd  // body (returns i64: the invalid type)
+ };
+
+ const uint8_t bytes[] = {
+ WASM_MODULE_HEADER, // module header
+ kTypeSectionCode, // section code
+ U32V_1(1 + SIZEOF_SIG_ENTRY_x_x), // section size
+ U32V_1(1), // type count
+ SIG_ENTRY_x_x(kLocalI32, kLocalI32), // signature entry
+ kFunctionSectionCode, // section code
+ U32V_1(1 + 3), // section size
+ U32V_1(3), // functions count
+ 0, // signature index
+ 0, // signature index
+ 0, // signature index
+ kCodeSectionCode, // section code
+ U32V_1(1 + arraysize(code) * 2 +
+ arraysize(invalid_code)), // section size
+ U32V_1(3), // functions count
+ };
+
+ tester.OnBytesReceived(bytes, arraysize(bytes));
+ tester.RunCompilerTasks();
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.RunCompilerTasks();
+ tester.OnBytesReceived(invalid_code, arraysize(invalid_code));
+ tester.RunCompilerTasks();
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.RunCompilerTasks();
+ tester.FinishStream();
+ tester.RunCompilerTasks();
+
+ CHECK(tester.IsPromiseRejected());
+}
+
+// Test Abort before any bytes arrive.
+STREAM_TEST(TestAbortImmediately) {
+ StreamTester tester;
+ tester.stream()->Abort();
+ tester.RunCompilerTasks();
+}
+
+// Test Abort within a section.
+STREAM_TEST(TestAbortWithinSection1) {
+ StreamTester tester;
+ const uint8_t bytes[] = {
+ WASM_MODULE_HEADER, // module header
+ kTypeSectionCode, // section code
+ U32V_1(1 + SIZEOF_SIG_ENTRY_x_x), // section size
+ U32V_1(1) // type count
+ // Type section is not yet complete.
+ };
+ tester.OnBytesReceived(bytes, arraysize(bytes));
+ tester.RunCompilerTasks();
+ tester.stream()->Abort();
+ tester.RunCompilerTasks();
+}
+
+// Test Abort within a section.
+STREAM_TEST(TestAbortWithinSection2) {
+ StreamTester tester;
+ const uint8_t bytes[] = {
+ WASM_MODULE_HEADER, // module header
+ kTypeSectionCode, // section code
+ U32V_1(1 + SIZEOF_SIG_ENTRY_x_x), // section size
+ U32V_1(1), // type count
+ SIG_ENTRY_x_x(kLocalI32, kLocalI32), // signature entry
+ kFunctionSectionCode, // section code
+ U32V_1(1 + 3), // section size
+ U32V_1(3), // functions count
+ // Function section is not yet complete.
+ };
+ tester.OnBytesReceived(bytes, arraysize(bytes));
+ tester.RunCompilerTasks();
+ tester.stream()->Abort();
+ tester.RunCompilerTasks();
+}
+
+// Test Abort after a complete section (only the type section was received).
+STREAM_TEST(TestAbortAfterSection) {
+ StreamTester tester;
+ const uint8_t bytes[] = {
+ WASM_MODULE_HEADER, // module header
+ kTypeSectionCode, // section code
+ U32V_1(1 + SIZEOF_SIG_ENTRY_x_x), // section size
+ U32V_1(1), // type count
+ SIG_ENTRY_x_x(kLocalI32, kLocalI32), // signature entry
+ };
+ tester.OnBytesReceived(bytes, arraysize(bytes));
+ tester.RunCompilerTasks();
+ tester.stream()->Abort();
+ tester.RunCompilerTasks();
+}
+
+// Test Abort after the function count in the code section. The compiler tasks
+// execute before the abort.
+STREAM_TEST(TestAbortAfterFunctionsCount1) {
+ StreamTester tester;
+ const uint8_t bytes[] = {
+ WASM_MODULE_HEADER, // module header
+ kTypeSectionCode, // section code
+ U32V_1(1 + SIZEOF_SIG_ENTRY_x_x), // section size
+ U32V_1(1), // type count
+ SIG_ENTRY_x_x(kLocalI32, kLocalI32), // signature entry
+ kFunctionSectionCode, // section code
+ U32V_1(1 + 3), // section size
+ U32V_1(3), // functions count
+ 0, // signature index
+ 0, // signature index
+ 0, // signature index
+ kCodeSectionCode, // section code
+ U32V_1(20), // section size
+ U32V_1(3), // functions count
+ };
+ tester.OnBytesReceived(bytes, arraysize(bytes));
+ tester.RunCompilerTasks();
+ tester.stream()->Abort();
+ tester.RunCompilerTasks();
+}
+
+// Test Abort after the function count in the code section. The compiler tasks
+// do not execute before the abort.
+STREAM_TEST(TestAbortAfterFunctionsCount2) {
+ StreamTester tester;
+ const uint8_t bytes[] = {
+ WASM_MODULE_HEADER, // module header
+ kTypeSectionCode, // section code
+ U32V_1(1 + SIZEOF_SIG_ENTRY_x_x), // section size
+ U32V_1(1), // type count
+ SIG_ENTRY_x_x(kLocalI32, kLocalI32), // signature entry
+ kFunctionSectionCode, // section code
+ U32V_1(1 + 3), // section size
+ U32V_1(3), // functions count
+ 0, // signature index
+ 0, // signature index
+ 0, // signature index
+ kCodeSectionCode, // section code
+ U32V_1(20), // section size
+ U32V_1(3), // functions count
+ };
+ tester.OnBytesReceived(bytes, arraysize(bytes));
+ tester.stream()->Abort();
+ tester.RunCompilerTasks();
+}
+
+// Test Abort after some functions got compiled. The compiler tasks execute
+// before the abort.
+STREAM_TEST(TestAbortAfterFunctionGotCompiled1) {
+ StreamTester tester;
+
+ uint8_t code[] = {
+      U32V_1(4),                  // body size
+ U32V_1(0), // locals count
+ kExprGetLocal, 0, kExprEnd // body
+ };
+
+ const uint8_t bytes[] = {
+ WASM_MODULE_HEADER, // module header
+ kTypeSectionCode, // section code
+ U32V_1(1 + SIZEOF_SIG_ENTRY_x_x), // section size
+ U32V_1(1), // type count
+ SIG_ENTRY_x_x(kLocalI32, kLocalI32), // signature entry
+ kFunctionSectionCode, // section code
+ U32V_1(1 + 3), // section size
+ U32V_1(3), // functions count
+ 0, // signature index
+ 0, // signature index
+ 0, // signature index
+ kCodeSectionCode, // section code
+ U32V_1(20), // section size
+ U32V_1(3), // functions count
+ };
+ tester.OnBytesReceived(bytes, arraysize(bytes));
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.RunCompilerTasks();
+ tester.stream()->Abort();
+ tester.RunCompilerTasks();
+}
+
+// Test Abort after some functions got compiled. The compiler tasks do not
+// execute before the abort.
+STREAM_TEST(TestAbortAfterFunctionGotCompiled2) {
+ StreamTester tester;
+
+ uint8_t code[] = {
+      U32V_1(4),                  // body size
+ U32V_1(0), // locals count
+ kExprGetLocal, 0, kExprEnd // body
+ };
+
+ const uint8_t bytes[] = {
+ WASM_MODULE_HEADER, // module header
+ kTypeSectionCode, // section code
+ U32V_1(1 + SIZEOF_SIG_ENTRY_x_x), // section size
+ U32V_1(1), // type count
+ SIG_ENTRY_x_x(kLocalI32, kLocalI32), // signature entry
+ kFunctionSectionCode, // section code
+ U32V_1(1 + 3), // section size
+ U32V_1(3), // functions count
+ 0, // signature index
+ 0, // signature index
+ 0, // signature index
+ kCodeSectionCode, // section code
+ U32V_1(20), // section size
+ U32V_1(3), // functions count
+ };
+ tester.OnBytesReceived(bytes, arraysize(bytes));
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.stream()->Abort();
+ tester.RunCompilerTasks();
+}
+
+// Test Abort after all functions got compiled.
+STREAM_TEST(TestAbortAfterCodeSection1) {
+ StreamTester tester;
+
+ uint8_t code[] = {
+ U32V_1(4), // body size
+ U32V_1(0), // locals count
+ kExprGetLocal, 0, kExprEnd // body
+ };
+
+ const uint8_t bytes[] = {
+ WASM_MODULE_HEADER, // module header
+ kTypeSectionCode, // section code
+ U32V_1(1 + SIZEOF_SIG_ENTRY_x_x), // section size
+ U32V_1(1), // type count
+ SIG_ENTRY_x_x(kLocalI32, kLocalI32), // signature entry
+ kFunctionSectionCode, // section code
+ U32V_1(1 + 3), // section size
+ U32V_1(3), // functions count
+ 0, // signature index
+ 0, // signature index
+ 0, // signature index
+ kCodeSectionCode, // section code
+ U32V_1(1 + arraysize(code) * 3), // section size
+ U32V_1(3), // functions count
+ };
+
+ tester.OnBytesReceived(bytes, arraysize(bytes));
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.RunCompilerTasks();
+ tester.stream()->Abort();
+ tester.RunCompilerTasks();
+}
+
+// Test Abort after all functions got compiled.
+STREAM_TEST(TestAbortAfterCodeSection2) {
+ StreamTester tester;
+
+ uint8_t code[] = {
+ U32V_1(4), // body size
+ U32V_1(0), // locals count
+ kExprGetLocal, 0, kExprEnd // body
+ };
+
+ const uint8_t bytes[] = {
+ WASM_MODULE_HEADER, // module header
+ kTypeSectionCode, // section code
+ U32V_1(1 + SIZEOF_SIG_ENTRY_x_x), // section size
+ U32V_1(1), // type count
+ SIG_ENTRY_x_x(kLocalI32, kLocalI32), // signature entry
+ kFunctionSectionCode, // section code
+ U32V_1(1 + 3), // section size
+ U32V_1(3), // functions count
+ 0, // signature index
+ 0, // signature index
+ 0, // signature index
+ kCodeSectionCode, // section code
+ U32V_1(1 + arraysize(code) * 3), // section size
+ U32V_1(3), // functions count
+ };
+
+ tester.OnBytesReceived(bytes, arraysize(bytes));
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.stream()->Abort();
+ tester.RunCompilerTasks();
+}
+
+STREAM_TEST(TestAbortAfterCompilationError1) {
+ StreamTester tester;
+
+ uint8_t code[] = {
+      U32V_1(4),                  // body size
+ U32V_1(0), // locals count
+ kExprGetLocal, 0, kExprEnd // body
+ };
+
+ uint8_t invalid_code[] = {
+      U32V_1(4),                  // body size
+      U32V_1(0),                  // locals count
+      kExprI64Const, 0, kExprEnd  // body (returns i64: the invalid type)
+ };
+
+ const uint8_t bytes[] = {
+ WASM_MODULE_HEADER, // module header
+ kTypeSectionCode, // section code
+ U32V_1(1 + SIZEOF_SIG_ENTRY_x_x), // section size
+ U32V_1(1), // type count
+ SIG_ENTRY_x_x(kLocalI32, kLocalI32), // signature entry
+ kFunctionSectionCode, // section code
+ U32V_1(1 + 3), // section size
+ U32V_1(3), // functions count
+ 0, // signature index
+ 0, // signature index
+ 0, // signature index
+ kCodeSectionCode, // section code
+ U32V_1(1 + arraysize(code) * 2 +
+ arraysize(invalid_code)), // section size
+ U32V_1(3), // functions count
+ };
+
+ tester.OnBytesReceived(bytes, arraysize(bytes));
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.OnBytesReceived(invalid_code, arraysize(invalid_code));
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.RunCompilerTasks();
+ tester.stream()->Abort();
+ tester.RunCompilerTasks();
+}
+
+STREAM_TEST(TestAbortAfterCompilationError2) {
+ StreamTester tester;
+
+ uint8_t code[] = {
+      U32V_1(4),                  // body size
+ U32V_1(0), // locals count
+ kExprGetLocal, 0, kExprEnd // body
+ };
+
+ uint8_t invalid_code[] = {
+      U32V_1(4),                  // body size
+      U32V_1(0),                  // locals count
+      kExprI64Const, 0, kExprEnd  // body (returns i64: the invalid type)
+ };
+
+ const uint8_t bytes[] = {
+ WASM_MODULE_HEADER, // module header
+ kTypeSectionCode, // section code
+ U32V_1(1 + SIZEOF_SIG_ENTRY_x_x), // section size
+ U32V_1(1), // type count
+ SIG_ENTRY_x_x(kLocalI32, kLocalI32), // signature entry
+ kFunctionSectionCode, // section code
+ U32V_1(1 + 3), // section size
+ U32V_1(3), // functions count
+ 0, // signature index
+ 0, // signature index
+ 0, // signature index
+ kCodeSectionCode, // section code
+ U32V_1(1 + arraysize(code) * 2 +
+ arraysize(invalid_code)), // section size
+ U32V_1(3), // functions count
+ };
+
+ tester.OnBytesReceived(bytes, arraysize(bytes));
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.OnBytesReceived(invalid_code, arraysize(invalid_code));
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.stream()->Abort();
+ tester.RunCompilerTasks();
+}
+
+#undef STREAM_TEST
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
index 67909c5264..492ec4670e 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
@@ -7,7 +7,7 @@
#include "src/frames-inl.h"
#include "src/property-descriptor.h"
#include "src/utils.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
@@ -15,9 +15,9 @@
#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-macro-gen.h"
-using namespace v8::internal;
-using namespace v8::internal::wasm;
-namespace debug = v8::debug;
+namespace v8 {
+namespace internal {
+namespace wasm {
namespace {
@@ -422,3 +422,7 @@ TEST(WasmGetLocalsAndStack) {
Handle<Object> args[]{handle(Smi::FromInt(7), isolate)};
CHECK(!Execution::Call(isolate, main_fun_wrapper, global, 1, args).is_null());
}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc b/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
index ca85e5fcd4..4f7b66ead4 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
@@ -12,9 +12,9 @@
#include "test/cctest/wasm/wasm-run-utils.h"
#include "test/common/wasm/wasm-macro-gen.h"
-using namespace v8::internal;
-using namespace v8::internal::wasm;
-namespace debug = v8::debug;
+namespace v8 {
+namespace internal {
+namespace wasm {
/**
* We test the interface from Wasm compiled code to the Wasm interpreter by
@@ -257,3 +257,7 @@ TEST(TestArgumentPassing_AllTypes) {
CheckCall(i32, i64, f32, f64);
}
}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-wasm-stack.cc b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
index b5d771b2c8..2a489b58b6 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-stack.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/api.h"
#include "src/assembler-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
@@ -9,10 +10,10 @@
#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-macro-gen.h"
-using namespace v8::base;
-using namespace v8::internal;
-using namespace v8::internal::compiler;
-using namespace v8::internal::wasm;
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace test_wasm_stack {
using v8::Local;
using v8::Utils;
@@ -47,8 +48,8 @@ void PrintStackTrace(v8::Isolate* isolate, v8::Local<v8::StackTrace> stack) {
struct ExceptionInfo {
const char* func_name;
- int line_nr;
- int column;
+ int line_nr; // 1-based
+ int column; // 1-based
};
template <int N>
@@ -70,11 +71,33 @@ void CheckExceptionInfos(v8::internal::Isolate* i_isolate, Handle<Object> exc,
v8::Local<v8::StackFrame> frame = stack->GetFrame(frameNr);
v8::String::Utf8Value funName(v8_isolate, frame->GetFunctionName());
CHECK_CSTREQ(excInfos[frameNr].func_name, *funName);
+ // Line and column are 1-based in v8::StackFrame, just as in ExceptionInfo.
CHECK_EQ(excInfos[frameNr].line_nr, frame->GetLineNumber());
CHECK_EQ(excInfos[frameNr].column, frame->GetColumn());
}
+
+ CheckComputeLocation(i_isolate, exc, excInfos[0]);
+}
+
+void CheckComputeLocation(v8::internal::Isolate* i_isolate, Handle<Object> exc,
+ const ExceptionInfo& topLocation) {
+ MessageLocation loc;
+ CHECK(i_isolate->ComputeLocationFromStackTrace(&loc, exc));
+ printf("loc start: %d, end: %d\n", loc.start_pos(), loc.end_pos());
+ Handle<JSMessageObject> message = i_isolate->CreateMessage(exc, nullptr);
+ printf("msg start: %d, end: %d, line: %d, col: %d\n",
+ message->start_position(), message->end_position(),
+ message->GetLineNumber(), message->GetColumnNumber());
+ CHECK_EQ(loc.start_pos(), message->start_position());
+ CHECK_EQ(loc.end_pos(), message->end_position());
+ // In the message, the line is 1-based, but the column is 0-based.
+ CHECK_EQ(topLocation.line_nr, message->GetLineNumber());
+ CHECK_LE(1, topLocation.column);
+ CHECK_EQ(topLocation.column - 1, message->GetColumnNumber());
}
+#undef CHECK_CSTREQ
+
} // namespace
// Call from JS to wasm to JS and throw an Error from JS.
@@ -82,9 +105,12 @@ TEST(CollectDetailedWasmStack_ExplicitThrowFromJs) {
WasmRunner<void> r(kExecuteCompiled);
TestSignatures sigs;
+ Handle<FixedArray> js_imports_table =
+ r.main_isolate()->factory()->NewFixedArray(2 * 3 + 1, TENURED);
uint32_t js_throwing_index = r.builder().AddJsFunction(
sigs.v_v(),
- "(function js() {\n function a() {\n throw new Error(); };\n a(); })");
+ "(function js() {\n function a() {\n throw new Error(); };\n a(); })",
+ js_imports_table);
// Add a nop such that we don't always get position 1.
BUILD(r, WASM_NOP, WASM_CALL_FUNCTION0(js_throwing_index));
@@ -125,40 +151,55 @@ TEST(CollectDetailedWasmStack_ExplicitThrowFromJs) {
// Trigger a trap in wasm, stack should be JS -> wasm -> wasm.
TEST(CollectDetailedWasmStack_WasmError) {
- TestSignatures sigs;
- // Create a WasmRunner with stack checks and traps enabled.
- WasmRunner<int> r(kExecuteCompiled, "main", true);
-
- BUILD(r, WASM_UNREACHABLE);
- uint32_t wasm_index_1 = r.function()->func_index;
-
- WasmFunctionCompiler& f2 = r.NewFunction<int>("call_main");
- BUILD(f2, WASM_CALL_FUNCTION0(0));
- uint32_t wasm_index_2 = f2.function_index();
-
- Handle<JSFunction> js_wasm_wrapper = r.builder().WrapCode(wasm_index_2);
-
- Handle<JSFunction> js_trampoline = Handle<JSFunction>::cast(
- v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
- CompileRun("(function callFn(fn) { fn(); })"))));
-
- Isolate* isolate = js_wasm_wrapper->GetIsolate();
- isolate->SetCaptureStackTraceForUncaughtExceptions(true, 10,
- v8::StackTrace::kOverview);
- Handle<Object> global(isolate->context()->global_object(), isolate);
- MaybeHandle<Object> maybe_exc;
- Handle<Object> args[] = {js_wasm_wrapper};
- MaybeHandle<Object> maybe_return_obj =
- Execution::TryCall(isolate, js_trampoline, global, 1, args,
- Execution::MessageHandling::kReport, &maybe_exc);
- CHECK(maybe_return_obj.is_null());
-
- // Line and column are 1-based, so add 1 for the expected wasm output.
- ExceptionInfo expected_exceptions[] = {
- {"main", static_cast<int>(wasm_index_1) + 1, 2}, // -
- {"call_main", static_cast<int>(wasm_index_2) + 1, 2}, // -
- {"callFn", 1, 24} //-
- };
- CheckExceptionInfos(isolate, maybe_exc.ToHandleChecked(),
- expected_exceptions);
+ for (int pos_shift = 0; pos_shift < 3; ++pos_shift) {
+ // Test a position with 1, 2 or 3 bytes needed to represent it.
+ int unreachable_pos = 1 << (8 * pos_shift);
+ TestSignatures sigs;
+ // Create a WasmRunner with stack checks and traps enabled.
+ WasmRunner<int> r(kExecuteCompiled, "main",
+ compiler::kRuntimeExceptionSupport);
+
+ std::vector<byte> code(unreachable_pos + 1, kExprNop);
+ code[unreachable_pos] = kExprUnreachable;
+ r.Build(code.data(), code.data() + code.size());
+
+ uint32_t wasm_index_1 = r.function()->func_index;
+
+ WasmFunctionCompiler& f2 = r.NewFunction<int>("call_main");
+ BUILD(f2, WASM_CALL_FUNCTION0(0));
+ uint32_t wasm_index_2 = f2.function_index();
+
+ Handle<JSFunction> js_wasm_wrapper = r.builder().WrapCode(wasm_index_2);
+
+ Handle<JSFunction> js_trampoline = Handle<JSFunction>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
+ CompileRun("(function callFn(fn) { fn(); })"))));
+
+ Isolate* isolate = js_wasm_wrapper->GetIsolate();
+ isolate->SetCaptureStackTraceForUncaughtExceptions(
+ true, 10, v8::StackTrace::kOverview);
+ Handle<Object> global(isolate->context()->global_object(), isolate);
+ MaybeHandle<Object> maybe_exc;
+ Handle<Object> args[] = {js_wasm_wrapper};
+ MaybeHandle<Object> maybe_return_obj =
+ Execution::TryCall(isolate, js_trampoline, global, 1, args,
+ Execution::MessageHandling::kReport, &maybe_exc);
+ CHECK(maybe_return_obj.is_null());
+ Handle<Object> exception = maybe_exc.ToHandleChecked();
+
+ static constexpr int kMainLocalsLength = 1;
+ // Line and column are 1-based, so add 1 for the expected wasm output.
+ const int expected_main_pos = unreachable_pos + kMainLocalsLength + 1;
+ ExceptionInfo expected_exceptions[] = {
+ {"main", static_cast<int>(wasm_index_1) + 1, expected_main_pos}, // -
+ {"call_main", static_cast<int>(wasm_index_2) + 1, 2}, // -
+ {"callFn", 1, 24} //-
+ };
+ CheckExceptionInfos(isolate, exception, expected_exceptions);
+ }
}
+
+} // namespace test_wasm_stack
+} // namespace wasm
+} // namespace internal
+} // namespace v8
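The loop above picks unreachable_pos values of 1, 256 and 65536, so the recorded trap offset needs one, two or three LEB128 bytes. A self-contained sketch of that varint encoding (standard LEB128, not V8-specific code):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Encode an unsigned value as LEB128, the varint format wasm uses for
    // code offsets: 7 payload bits per byte, high bit set on all but the
    // last byte.
    std::vector<uint8_t> EncodeLeb128(uint32_t value) {
      std::vector<uint8_t> out;
      do {
        uint8_t b = value & 0x7f;
        value >>= 7;
        if (value != 0) b |= 0x80;  // continuation bit
        out.push_back(b);
      } while (value != 0);
      return out;
    }

    int main() {
      for (int shift = 0; shift < 3; ++shift) {
        uint32_t pos = uint32_t{1} << (8 * shift);  // 1, 256, 65536
        std::printf("pos %u -> %zu byte(s)\n", pos, EncodeLeb128(pos).size());
      }
      return 0;  // prints 1, 2 and 3 byte(s) respectively
    }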
diff --git a/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc b/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
index 7e9e595a81..0926ab0754 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/api.h"
#include "src/assembler-inl.h"
#include "src/trap-handler/trap-handler.h"
#include "test/cctest/cctest.h"
@@ -10,10 +11,10 @@
#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-macro-gen.h"
-using namespace v8::base;
-using namespace v8::internal;
-using namespace v8::internal::compiler;
-using namespace v8::internal::wasm;
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace test_wasm_trap_position {
using v8::Local;
using v8::Utils;
@@ -61,12 +62,15 @@ void CheckExceptionInfos(v8::internal::Isolate* i_isolate, Handle<Object> exc,
}
}
+#undef CHECK_CSTREQ
+
} // namespace
// Trigger a trap for executing unreachable.
-TEST(Unreachable) {
+WASM_EXEC_TEST(Unreachable) {
// Create a WasmRunner with stack checks and traps enabled.
- WasmRunner<void> r(kExecuteCompiled, "main", true);
+ WasmRunner<void> r(execution_mode, "main",
+ compiler::kRuntimeExceptionSupport);
TestSignatures sigs;
BUILD(r, WASM_UNREACHABLE);
@@ -99,8 +103,9 @@ TEST(Unreachable) {
}
// Trigger a trap for loading from out-of-bounds.
-TEST(IllegalLoad) {
- WasmRunner<void> r(kExecuteCompiled, "main", true);
+WASM_EXEC_TEST(IllegalLoad) {
+ WasmRunner<void> r(execution_mode, "main",
+ compiler::kRuntimeExceptionSupport);
TestSignatures sigs;
r.builder().AddMemory(0L);
@@ -141,3 +146,8 @@ TEST(IllegalLoad) {
CheckExceptionInfos(isolate, maybe_exc.ToHandleChecked(),
expected_exceptions);
}
+
+} // namespace test_wasm_trap_position
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.cc b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
new file mode 100644
index 0000000000..1b674ab60c
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
@@ -0,0 +1,522 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/wasm/wasm-run-utils.h"
+
+#include "src/api.h"
+#include "src/assembler-inl.h"
+#include "src/wasm/wasm-memory.h"
+#include "src/wasm/wasm-objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+TestingModuleBuilder::TestingModuleBuilder(
+ Zone* zone, WasmExecutionMode mode,
+ compiler::RuntimeExceptionSupport exception_support)
+ : test_module_ptr_(&test_module_),
+ isolate_(CcTest::InitIsolateOnce()),
+ global_offset(0),
+ mem_start_(nullptr),
+ mem_size_(0),
+ interpreter_(nullptr),
+ runtime_exception_support_(exception_support),
+ lower_simd_(mode == kExecuteSimdLowered) {
+ WasmJs::Install(isolate_, true);
+ test_module_.globals_size = kMaxGlobalsSize;
+ memset(globals_data_, 0, sizeof(globals_data_));
+ instance_object_ = InitInstanceObject();
+ if (mode == kExecuteInterpreted) {
+ interpreter_ = WasmDebugInfo::SetupForTesting(instance_object_);
+ }
+}
+
+byte* TestingModuleBuilder::AddMemory(uint32_t size) {
+ CHECK(!test_module_.has_memory);
+ CHECK_NULL(mem_start_);
+ CHECK_EQ(0, mem_size_);
+ DCHECK(!instance_object_->has_memory_buffer());
+ DCHECK(!instance_object_->has_memory_object());
+ test_module_.has_memory = true;
+ const bool enable_guard_regions =
+ trap_handler::UseTrapHandler() && test_module_.is_wasm();
+ uint32_t alloc_size =
+ enable_guard_regions ? RoundUp(size, base::OS::CommitPageSize()) : size;
+ Handle<JSArrayBuffer> new_buffer =
+ wasm::NewArrayBuffer(isolate_, alloc_size, enable_guard_regions);
+ CHECK(!new_buffer.is_null());
+ instance_object_->set_memory_buffer(*new_buffer);
+ mem_start_ = reinterpret_cast<byte*>(new_buffer->backing_store());
+ mem_size_ = size;
+ CHECK(size == 0 || mem_start_);
+ memset(mem_start_, 0, size);
+
+ if (interpreter_) {
+ interpreter_->UpdateMemory(mem_start_, mem_size_);
+ }
+ // Create the WasmMemoryObject.
+ Handle<WasmMemoryObject> memory_object = WasmMemoryObject::New(
+ isolate_, new_buffer,
+ (test_module_.maximum_pages != 0) ? test_module_.maximum_pages : -1);
+ instance_object_->set_memory_object(*memory_object);
+ WasmMemoryObject::AddInstance(isolate_, memory_object, instance_object_);
+ // TODO(wasm): Delete the following two lines once test-run-wasm uses a
+ // multiple of kPageSize as its memory size. For now, their only effect is
+ // to shrink the memory for testing purposes.
+ instance_object_->wasm_context()->mem_start = mem_start_;
+ instance_object_->wasm_context()->mem_size = mem_size_;
+ return mem_start_;
+}
+
+uint32_t TestingModuleBuilder::AddFunction(FunctionSig* sig, Handle<Code> code,
+ const char* name) {
+ if (test_module_.functions.size() == 0) {
+ // TODO(titzer): Reserve space here to keep the underlying WasmFunction
+ // structs from moving.
+ test_module_.functions.reserve(kMaxFunctions);
+ }
+ uint32_t index = static_cast<uint32_t>(test_module_.functions.size());
+ test_module_.functions.push_back(
+ {sig, index, 0, {0, 0}, {0, 0}, false, false});
+ if (name) {
+ Vector<const byte> name_vec = Vector<const byte>::cast(CStrVector(name));
+ test_module_.functions.back().name = {
+ AddBytes(name_vec), static_cast<uint32_t>(name_vec.length())};
+ }
+ function_code_.push_back(code);
+ if (interpreter_) {
+ interpreter_->AddFunctionForTesting(&test_module_.functions.back());
+ }
+ DCHECK_LT(index, kMaxFunctions); // limited for testing.
+ return index;
+}
+
+uint32_t TestingModuleBuilder::AddJsFunction(
+ FunctionSig* sig, const char* source, Handle<FixedArray> js_imports_table) {
+ Handle<JSFunction> jsfunc = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
+ *v8::Local<v8::Function>::Cast(CompileRun(source))));
+ uint32_t index = AddFunction(sig, Handle<Code>::null(), nullptr);
+ js_imports_table->set(0, *isolate_->native_context());
+ Handle<Code> code = compiler::CompileWasmToJSWrapper(
+ isolate_, jsfunc, sig, index, test_module_.origin(), js_imports_table);
+ function_code_[index] = code;
+ return index;
+}
+
+Handle<JSFunction> TestingModuleBuilder::WrapCode(uint32_t index) {
+ // Wrap the code so it can be called as a JS function.
+ Handle<Code> code = function_code_[index];
+ byte* context_address =
+ test_module_.has_memory
+ ? reinterpret_cast<byte*>(instance_object_->wasm_context())
+ : nullptr;
+ Handle<Code> ret_code = compiler::CompileJSToWasmWrapper(
+ isolate_, &test_module_, code, index, context_address);
+ Handle<JSFunction> ret = WasmExportedFunction::New(
+ isolate_, instance_object(), MaybeHandle<String>(),
+ static_cast<int>(index),
+ static_cast<int>(test_module_.functions[index].sig->parameter_count()),
+ ret_code);
+
+ // Add weak reference to exported functions.
+ Handle<WasmCompiledModule> compiled_module(
+ instance_object()->compiled_module(), isolate_);
+ Handle<FixedArray> old_arr = compiled_module->weak_exported_functions();
+ Handle<FixedArray> new_arr =
+ isolate_->factory()->NewFixedArray(old_arr->length() + 1);
+ old_arr->CopyTo(0, *new_arr, 0, old_arr->length());
+ Handle<WeakCell> weak_fn = isolate_->factory()->NewWeakCell(ret);
+ new_arr->set(old_arr->length(), *weak_fn);
+ compiled_module->set_weak_exported_functions(new_arr);
+
+ return ret;
+}
+
+void TestingModuleBuilder::AddIndirectFunctionTable(uint16_t* function_indexes,
+ uint32_t table_size) {
+ test_module_.function_tables.emplace_back();
+ WasmIndirectFunctionTable& table = test_module_.function_tables.back();
+ table.initial_size = table_size;
+ table.maximum_size = table_size;
+ table.has_maximum_size = true;
+ for (uint32_t i = 0; i < table_size; ++i) {
+ table.values.push_back(function_indexes[i]);
+ table.map.FindOrInsert(test_module_.functions[function_indexes[i]].sig);
+ }
+
+ function_tables_.push_back(
+ isolate_->global_handles()
+ ->Create(*isolate_->factory()->NewFixedArray(table_size))
+ .address());
+ signature_tables_.push_back(
+ isolate_->global_handles()
+ ->Create(*isolate_->factory()->NewFixedArray(table_size))
+ .address());
+}
+
+void TestingModuleBuilder::PopulateIndirectFunctionTable() {
+ if (interpret()) return;
+ // Initialize the fixed arrays in instance->function_tables.
+ for (uint32_t i = 0; i < function_tables_.size(); i++) {
+ WasmIndirectFunctionTable& table = test_module_.function_tables[i];
+ Handle<FixedArray> function_table(
+ reinterpret_cast<FixedArray**>(function_tables_[i]));
+ Handle<FixedArray> signature_table(
+ reinterpret_cast<FixedArray**>(signature_tables_[i]));
+ int table_size = static_cast<int>(table.values.size());
+ for (int j = 0; j < table_size; j++) {
+ WasmFunction& function = test_module_.functions[table.values[j]];
+ signature_table->set(j, Smi::FromInt(table.map.Find(function.sig)));
+ function_table->set(j, *function_code_[function.func_index]);
+ }
+ }
+}
+
+uint32_t TestingModuleBuilder::AddBytes(Vector<const byte> bytes) {
+ Handle<SeqOneByteString> old_bytes(
+ instance_object_->compiled_module()->module_bytes(), isolate_);
+ uint32_t old_size = static_cast<uint32_t>(old_bytes->length());
+ // Avoid placing strings at offset 0; that offset might be interpreted as
+ // "not set", e.g. for function names.
+ uint32_t bytes_offset = old_size ? old_size : 1;
+ ScopedVector<byte> new_bytes(bytes_offset + bytes.length());
+ memcpy(new_bytes.start(), old_bytes->GetChars(), old_size);
+ memcpy(new_bytes.start() + bytes_offset, bytes.start(), bytes.length());
+ Handle<SeqOneByteString> new_bytes_str = Handle<SeqOneByteString>::cast(
+ isolate_->factory()->NewStringFromOneByte(new_bytes).ToHandleChecked());
+ instance_object_->compiled_module()->shared()->set_module_bytes(
+ *new_bytes_str);
+ return bytes_offset;
+}
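AddBytes reserves offset 0 as a "not set" sentinel by starting the first insertion at offset 1. The same pattern over a plain byte vector (a generic sketch, not the V8 types):

    #include <cstdint>
    #include <vector>

    // Append bytes to a growing buffer and return their offset. Offset 0 is
    // reserved to mean "not set", so an empty buffer is padded to size 1
    // before the first insertion.
    uint32_t AppendBytes(std::vector<uint8_t>* buffer, const uint8_t* data,
                         size_t length) {
      if (buffer->empty()) buffer->push_back(0);  // burn the sentinel offset
      uint32_t offset = static_cast<uint32_t>(buffer->size());
      buffer->insert(buffer->end(), data, data + length);
      return offset;
    }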
+
+compiler::ModuleEnv TestingModuleBuilder::CreateModuleEnv() {
+ std::vector<SignatureMap*> signature_maps;
+ for (size_t i = 0; i < test_module_.function_tables.size(); i++) {
+ auto& function_table = test_module_.function_tables[i];
+ signature_maps.push_back(&function_table.map);
+ }
+ return {
+ &test_module_,
+ function_tables_,
+ signature_tables_,
+ signature_maps,
+ function_code_,
+ Handle<Code>::null(),
+ reinterpret_cast<uintptr_t>(globals_data_),
+ };
+}
+
+const WasmGlobal* TestingModuleBuilder::AddGlobal(ValueType type) {
+ byte size = WasmOpcodes::MemSize(WasmOpcodes::MachineTypeFor(type));
+ global_offset = (global_offset + size - 1) & ~(size - 1); // align
+ test_module_.globals.push_back(
+ {type, true, WasmInitExpr(), global_offset, false, false});
+ global_offset += size;
+ // Limit the number of globals.
+ CHECK_LT(global_offset, kMaxGlobalsSize);
+ return &test_module_.globals.back();
+}
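The masking expression in AddGlobal rounds the running offset up to the global's natural alignment. The same trick in isolation, with a couple of worked values (size must be a power of two for the mask to be valid):

    #include <cassert>
    #include <cstdint>

    // Round offset up to the next multiple of size (size a power of two):
    // adding size - 1 overshoots into the next block, and masking the low
    // bits snaps back down to the block boundary.
    uint32_t AlignUp(uint32_t offset, uint32_t size) {
      return (offset + size - 1) & ~(size - 1);
    }

    int main() {
      assert(AlignUp(5, 4) == 8);
      assert(AlignUp(8, 4) == 8);
      assert(AlignUp(13, 8) == 16);
      return 0;
    }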
+
+Handle<WasmInstanceObject> TestingModuleBuilder::InitInstanceObject() {
+ Handle<SeqOneByteString> empty_string = Handle<SeqOneByteString>::cast(
+ isolate_->factory()->NewStringFromOneByte({}).ToHandleChecked());
+ // The lifetime of the wasm module is tied to this object's, and we cannot
+ // rely on the mechanics of Managed<T>.
+ Handle<Foreign> module_wrapper = isolate_->factory()->NewForeign(
+ reinterpret_cast<Address>(&test_module_ptr_));
+ Handle<Script> script =
+ isolate_->factory()->NewScript(isolate_->factory()->empty_string());
+ script->set_type(Script::TYPE_WASM);
+ Handle<WasmSharedModuleData> shared_module_data =
+ WasmSharedModuleData::New(isolate_, module_wrapper, empty_string, script,
+ Handle<ByteArray>::null());
+ Handle<FixedArray> code_table = isolate_->factory()->NewFixedArray(0);
+ Handle<FixedArray> export_wrappers = isolate_->factory()->NewFixedArray(0);
+ Handle<WasmCompiledModule> compiled_module = WasmCompiledModule::New(
+ isolate_, shared_module_data, code_table, export_wrappers,
+ function_tables_, signature_tables_);
+ // This method is called when the TestingModuleBuilder is initialized. We
+ // don't have a memory yet, so we don't create one here; the interpreter is
+ // updated once a memory is added. We do have globals, though.
+ WasmCompiledModule::recreate_globals_start(
+ compiled_module, isolate_->factory(),
+ reinterpret_cast<size_t>(globals_data_));
+ Handle<FixedArray> weak_exported = isolate_->factory()->NewFixedArray(0);
+ compiled_module->set_weak_exported_functions(weak_exported);
+ DCHECK(WasmCompiledModule::IsWasmCompiledModule(*compiled_module));
+ script->set_wasm_compiled_module(*compiled_module);
+ return WasmInstanceObject::New(isolate_, compiled_module);
+}
+
+void TestBuildingGraph(
+ Zone* zone, compiler::JSGraph* jsgraph, compiler::ModuleEnv* module,
+ FunctionSig* sig, compiler::SourcePositionTable* source_position_table,
+ const byte* start, const byte* end,
+ compiler::RuntimeExceptionSupport runtime_exception_support) {
+ compiler::WasmGraphBuilder builder(
+ module, zone, jsgraph, CEntryStub(jsgraph->isolate(), 1).GetCode(), sig,
+ source_position_table, runtime_exception_support);
+
+ DecodeResult result =
+ BuildTFGraph(zone->allocator(), &builder, sig, start, end);
+ if (result.failed()) {
+ if (!FLAG_trace_wasm_decoder) {
+ // Retry the compilation with the tracing flag on, to help in debugging.
+ FLAG_trace_wasm_decoder = true;
+ result = BuildTFGraph(zone->allocator(), &builder, sig, start, end);
+ }
+
+ uint32_t pc = result.error_offset();
+ std::ostringstream str;
+ str << "Verification failed; pc = +" << pc
+ << ", msg = " << result.error_msg().c_str();
+ FATAL(str.str().c_str());
+ }
+ builder.LowerInt64();
+ if (!CpuFeatures::SupportsWasmSimd128()) {
+ builder.SimdScalarLoweringForTesting();
+ }
+}
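The failure path above reruns the decoder with FLAG_trace_wasm_decoder enabled so the verbose log covers exactly the failing input. The same retry-with-tracing shape as a generic sketch (a hypothetical helper, not a V8 API):

    // Run a step once quietly; if it fails and tracing was off, enable
    // tracing and run it again so the failure is logged verbosely, then
    // report the failure.
    template <typename Fn>
    bool RunWithTraceOnFailure(bool* trace_flag, Fn step) {
      if (step()) return true;
      if (!*trace_flag) {
        *trace_flag = true;
        step();  // second attempt, now with tracing output
      }
      return false;
    }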
+
+WasmFunctionWrapper::WasmFunctionWrapper(Zone* zone, int num_params)
+ : GraphAndBuilders(zone),
+ inner_code_node_(nullptr),
+ context_address_(nullptr),
+ signature_(nullptr) {
+ // One additional parameter for the pointer to the return value memory.
+ Signature<MachineType>::Builder sig_builder(zone, 1, num_params + 1);
+
+ sig_builder.AddReturn(MachineType::Int32());
+ for (int i = 0; i < num_params + 1; i++) {
+ sig_builder.AddParam(MachineType::Pointer());
+ }
+ signature_ = sig_builder.Build();
+}
+
+void WasmFunctionWrapper::Init(CallDescriptor* descriptor,
+ MachineType return_type,
+ Vector<MachineType> param_types) {
+ DCHECK_NOT_NULL(descriptor);
+ DCHECK_EQ(signature_->parameter_count(), param_types.length() + 1);
+
+ // Create the TF graph for the wrapper.
+
+ // Function, context_address, effect, and control.
+ Node** parameters = zone()->NewArray<Node*>(param_types.length() + 4);
+ graph()->SetStart(graph()->NewNode(common()->Start(7)));
+ Node* effect = graph()->start();
+ int parameter_count = 0;
+
+ // Dummy node that gets replaced in SetInnerCode.
+ inner_code_node_ = graph()->NewNode(common()->Int32Constant(0));
+ parameters[parameter_count++] = inner_code_node_;
+
+ // Dummy node that gets replaced in SetContextAddress.
+ context_address_ = graph()->NewNode(IntPtrConstant(0));
+ parameters[parameter_count++] = context_address_;
+
+ int param_idx = 0;
+ for (MachineType t : param_types) {
+ DCHECK_NE(MachineType::None(), t);
+ parameters[parameter_count] = graph()->NewNode(
+ machine()->Load(t),
+ graph()->NewNode(common()->Parameter(param_idx++), graph()->start()),
+ graph()->NewNode(common()->Int32Constant(0)), effect, graph()->start());
+ effect = parameters[parameter_count++];
+ }
+
+ parameters[parameter_count++] = effect;
+ parameters[parameter_count++] = graph()->start();
+ Node* call =
+ graph()->NewNode(common()->Call(descriptor), parameter_count, parameters);
+
+ if (!return_type.IsNone()) {
+ effect = graph()->NewNode(
+ machine()->Store(compiler::StoreRepresentation(
+ return_type.representation(), WriteBarrierKind::kNoWriteBarrier)),
+ graph()->NewNode(common()->Parameter(param_types.length()),
+ graph()->start()),
+ graph()->NewNode(common()->Int32Constant(0)), call, effect,
+ graph()->start());
+ }
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* r = graph()->NewNode(
+ common()->Return(), zero,
+ graph()->NewNode(common()->Int32Constant(WASM_WRAPPER_RETURN_VALUE)),
+ effect, graph()->start());
+ graph()->SetEnd(graph()->NewNode(common()->End(1), r));
+}
+
+Handle<Code> WasmFunctionWrapper::GetWrapperCode() {
+ if (code_.is_null()) {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+
+ CallDescriptor* descriptor =
+ compiler::Linkage::GetSimplifiedCDescriptor(zone(), signature_, true);
+
+ if (kPointerSize == 4) {
+ size_t num_params = signature_->parameter_count();
+ // One additional parameter for the pointer to the return value.
+ Signature<MachineRepresentation>::Builder rep_builder(zone(), 1,
+ num_params + 1);
+
+ rep_builder.AddReturn(MachineRepresentation::kWord32);
+ for (size_t i = 0; i < num_params + 1; i++) {
+ rep_builder.AddParam(MachineRepresentation::kWord32);
+ }
+ compiler::Int64Lowering r(graph(), machine(), common(), zone(),
+ rep_builder.Build());
+ r.LowerGraph();
+ }
+
+ CompilationInfo info(ArrayVector("testing"), isolate, graph()->zone(),
+ Code::STUB);
+ code_ = compiler::Pipeline::GenerateCodeForTesting(&info, descriptor,
+ graph(), nullptr);
+ CHECK(!code_.is_null());
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_opt_code) {
+ OFStream os(stdout);
+ code_->Disassemble("wasm wrapper", os);
+ }
+#endif
+ }
+
+ return code_;
+}
+
+void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
+ size_t locals_size = local_decls.Size();
+ size_t total_size = end - start + locals_size + 1;
+ byte* buffer = static_cast<byte*>(zone()->New(total_size));
+ // Prepend the local decls to the code.
+ local_decls.Emit(buffer);
+ // Emit the code.
+ memcpy(buffer + locals_size, start, end - start);
+ // Append an extra end opcode.
+ buffer[total_size - 1] = kExprEnd;
+
+ start = buffer;
+ end = buffer + total_size;
+
+ CHECK_GE(kMaxInt, end - start);
+ int len = static_cast<int>(end - start);
+ function_->code = {builder_->AddBytes(Vector<const byte>(start, len)),
+ static_cast<uint32_t>(len)};
+
+ if (interpreter_) {
+ // Add the code to the interpreter.
+ interpreter_->SetFunctionCodeForTesting(function_, start, end);
+ }
+
+ Handle<WasmCompiledModule> compiled_module(
+ builder_->instance_object()->compiled_module(), isolate());
+ Handle<SeqOneByteString> wire_bytes(compiled_module->module_bytes(),
+ isolate());
+
+ compiler::ModuleEnv module_env = builder_->CreateModuleEnv();
+ ErrorThrower thrower(isolate(), "WasmFunctionCompiler::Build");
+ ScopedVector<uint8_t> func_wire_bytes(function_->code.length());
+ memcpy(func_wire_bytes.start(),
+ wire_bytes->GetChars() + function_->code.offset(),
+ func_wire_bytes.length());
+ ScopedVector<char> func_name(function_->name.length());
+ memcpy(func_name.start(), wire_bytes->GetChars() + function_->name.offset(),
+ func_name.length());
+
+ FunctionBody func_body{function_->sig, function_->code.offset(),
+ func_wire_bytes.start(), func_wire_bytes.end()};
+ compiler::WasmCompilationUnit unit(
+ isolate(), &module_env, func_body, func_name, function_->func_index,
+ CEntryStub(isolate(), 1).GetCode(), isolate()->counters(),
+ builder_->runtime_exception_support(), builder_->lower_simd());
+ unit.ExecuteCompilation();
+ Handle<Code> code = unit.FinishCompilation(&thrower).ToHandleChecked();
+ CHECK(!thrower.error());
+
+ // Manually add the deoptimization info that would otherwise be added
+ // during instantiation. Deopt data holds <WeakCell<wasm_instance>,
+ // func_index>.
+ DCHECK_EQ(0, code->deoptimization_data()->length());
+ Handle<FixedArray> deopt_data =
+ isolate()->factory()->NewFixedArray(2, TENURED);
+ Handle<Object> weak_instance =
+ isolate()->factory()->NewWeakCell(builder_->instance_object());
+ deopt_data->set(0, *weak_instance);
+ deopt_data->set(1, Smi::FromInt(static_cast<int>(function_index())));
+ code->set_deoptimization_data(*deopt_data);
+
+ // Record the compiled code in the testing module builder.
+ builder_->SetFunctionCode(function_index(), code);
+
+ // Add to code table.
+ Handle<FixedArray> code_table = compiled_module->code_table();
+ if (static_cast<int>(function_index()) >= code_table->length()) {
+ Handle<FixedArray> new_arr = isolate()->factory()->NewFixedArray(
+ static_cast<int>(function_index()) + 1);
+ code_table->CopyTo(0, *new_arr, 0, code_table->length());
+ code_table = new_arr;
+ compiled_module->ReplaceCodeTableForTesting(code_table);
+ }
+ DCHECK(code_table->get(static_cast<int>(function_index()))
+ ->IsUndefined(isolate()));
+ code_table->set(static_cast<int>(function_index()), *code);
+ if (trap_handler::UseTrapHandler()) {
+ UnpackAndRegisterProtectedInstructions(isolate(), code_table);
+ }
+}
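Build lays the function body out as the encoded local declarations, then the raw code bytes, then a single trailing end opcode. The layout in isolation (0x0b is the wasm end opcode):

    #include <cstdint>
    #include <vector>

    constexpr uint8_t kEndOpcode = 0x0b;  // wasm 'end'

    // Assemble a wasm function body: [local decls][code bytes][end].
    std::vector<uint8_t> AssembleBody(const std::vector<uint8_t>& local_decls,
                                      const uint8_t* start,
                                      const uint8_t* end) {
      std::vector<uint8_t> body(local_decls);
      body.insert(body.end(), start, end);
      body.push_back(kEndOpcode);
      return body;
    }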
+
+WasmFunctionCompiler::WasmFunctionCompiler(Zone* zone, FunctionSig* sig,
+ TestingModuleBuilder* builder,
+ const char* name)
+ : GraphAndBuilders(zone),
+ jsgraph(builder->isolate(), this->graph(), this->common(), nullptr,
+ nullptr, this->machine()),
+ sig(sig),
+ descriptor_(nullptr),
+ builder_(builder),
+ local_decls(zone, sig),
+ source_position_table_(this->graph()),
+ interpreter_(builder->interpreter()) {
+ // Get a new function from the testing module.
+ int index = builder->AddFunction(sig, Handle<Code>::null(), name);
+ function_ = builder_->GetFunctionAt(index);
+}
+
+WasmFunctionCompiler::~WasmFunctionCompiler() {
+ if (trap_handler::UseTrapHandler() &&
+ !builder_->GetFunctionCode(function_index()).is_null()) {
+ const int handler_index = builder_->GetFunctionCode(function_index())
+ ->trap_handler_index()
+ ->value();
+ trap_handler::ReleaseHandlerData(handler_index);
+ }
+}
+
+FunctionSig* WasmRunnerBase::CreateSig(MachineType return_type,
+ Vector<MachineType> param_types) {
+ int return_count = return_type.IsNone() ? 0 : 1;
+ int param_count = param_types.length();
+
+ // Allocate storage array in zone.
+ ValueType* sig_types = zone_.NewArray<ValueType>(return_count + param_count);
+
+ // Convert machine types to local types, and check that there are no
+ // MachineType::None()'s in the parameters.
+ int idx = 0;
+ if (return_count) sig_types[idx++] = WasmOpcodes::ValueTypeFor(return_type);
+ for (MachineType param : param_types) {
+ CHECK_NE(MachineType::None(), param);
+ sig_types[idx++] = WasmOpcodes::ValueTypeFor(param);
+ }
+ return new (&zone_) FunctionSig(return_count, param_count, sig_types);
+}
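CreateSig packs returns and parameters into one zone-allocated array, returns first. A minimal standalone version of that flat layout (sketch types, not V8's FunctionSig):

    #include <cstddef>
    #include <vector>

    enum class ValType { kI32, kI64, kF32, kF64 };

    // Flat signature: all return types, then all parameter types, in one
    // contiguous array, mirroring the layout CreateSig builds.
    struct FlatSig {
      size_t return_count;
      size_t param_count;
      std::vector<ValType> types;  // size == return_count + param_count
      ValType GetReturn(size_t i) const { return types[i]; }
      ValType GetParam(size_t i) const { return types[return_count + i]; }
    };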
+
+// static
+bool WasmRunnerBase::trap_happened;
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.h b/deps/v8/test/cctest/wasm/wasm-run-utils.h
index 1b98d10ec3..562e3b12ce 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.h
@@ -29,6 +29,7 @@
#include "src/wasm/wasm-interpreter.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/zone/accounting-allocator.h"
@@ -39,9 +40,22 @@
#include "test/cctest/compiler/graph-builder-tester.h"
#include "test/common/wasm/flag-utils.h"
-static const uint32_t kMaxFunctions = 10;
+namespace v8 {
+namespace internal {
+namespace wasm {
-enum WasmExecutionMode { kExecuteInterpreted, kExecuteCompiled };
+constexpr uint32_t kMaxFunctions = 10;
+constexpr uint32_t kMaxGlobalsSize = 128;
+
+enum WasmExecutionMode {
+ kExecuteInterpreted,
+ kExecuteCompiled,
+ kExecuteSimdLowered
+};
+
+using compiler::CallDescriptor;
+using compiler::MachineTypeForC;
+using compiler::Node;
// TODO(titzer): check traps more robustly in tests.
// Currently, in tests, we just return 0xdeadbeef from the function in which
@@ -61,72 +75,18 @@ enum WasmExecutionMode { kExecuteInterpreted, kExecuteCompiled };
r.Build(code, code + arraysize(code)); \
} while (false)
-namespace {
-using namespace v8::base;
-using namespace v8::internal;
-using namespace v8::internal::compiler;
-using namespace v8::internal::wasm;
-
-const uint32_t kMaxGlobalsSize = 128;
-
// A buildable ModuleEnv. Globals are pre-set, however, memory and code may be
// progressively added by a test. In turn, we piecemeal update the runtime
// objects, i.e. {WasmInstanceObject}, {WasmCompiledModule} and, if necessary,
// the interpreter.
class TestingModuleBuilder {
public:
- explicit TestingModuleBuilder(Zone* zone,
- WasmExecutionMode mode = kExecuteCompiled)
- : test_module_ptr_(&test_module_),
- isolate_(CcTest::InitIsolateOnce()),
- global_offset(0),
- mem_start_(nullptr),
- mem_size_(0),
- interpreter_(nullptr) {
- WasmJs::Install(isolate_);
- test_module_.globals_size = kMaxGlobalsSize;
- memset(globals_data_, 0, sizeof(globals_data_));
- instance_object_ = InitInstanceObject();
- if (mode == kExecuteInterpreted) {
- interpreter_ = WasmDebugInfo::SetupForTesting(instance_object_);
- }
- }
+ TestingModuleBuilder(Zone*, WasmExecutionMode,
+ compiler::RuntimeExceptionSupport);
void ChangeOriginToAsmjs() { test_module_.set_origin(kAsmJsOrigin); }
- byte* AddMemory(uint32_t size) {
- CHECK(!test_module_.has_memory);
- CHECK_NULL(mem_start_);
- CHECK_EQ(0, mem_size_);
- DCHECK(!instance_object_->has_memory_buffer());
- test_module_.has_memory = true;
- bool enable_guard_regions = EnableGuardRegions() && test_module_.is_wasm();
- uint32_t alloc_size =
- enable_guard_regions ? RoundUp(size, OS::CommitPageSize()) : size;
- Handle<JSArrayBuffer> new_buffer =
- wasm::NewArrayBuffer(isolate_, alloc_size, enable_guard_regions);
- CHECK(!new_buffer.is_null());
- instance_object_->set_memory_buffer(*new_buffer);
- mem_start_ = reinterpret_cast<byte*>(new_buffer->backing_store());
- mem_size_ = size;
- CHECK(size == 0 || mem_start_);
- memset(mem_start_, 0, size);
- Handle<WasmCompiledModule> compiled_module =
- handle(instance_object_->compiled_module());
- Factory* factory = CcTest::i_isolate()->factory();
- // It's not really necessary we recreate the Number objects,
- // if we happened to have one, but this is a reasonable inefficiencly,
- // given this is test.
- WasmCompiledModule::recreate_embedded_mem_size(compiled_module, factory,
- mem_size_);
- WasmCompiledModule::recreate_embedded_mem_start(
- compiled_module, factory, reinterpret_cast<size_t>(mem_start_));
-
- if (interpreter_) {
- interpreter_->UpdateMemory(mem_start_, mem_size_);
- }
- return mem_start_;
- }
+ byte* AddMemory(uint32_t size);
size_t CodeTableLength() const { return function_code_.size(); }
@@ -146,7 +106,7 @@ class TestingModuleBuilder {
byte AddSignature(FunctionSig* sig) {
test_module_.signatures.push_back(sig);
size_t size = test_module_.signatures.size();
- CHECK(size < 127);
+ CHECK_GT(127, size);
return static_cast<byte>(size - 1);
}
@@ -200,126 +160,30 @@ class TestingModuleBuilder {
void SetMaxMemPages(uint32_t maximum_pages) {
test_module_.maximum_pages = maximum_pages;
- }
-
- uint32_t AddFunction(FunctionSig* sig, Handle<Code> code, const char* name) {
- if (test_module_.functions.size() == 0) {
- // TODO(titzer): Reserving space here to avoid the underlying WasmFunction
- // structs from moving.
- test_module_.functions.reserve(kMaxFunctions);
- }
- uint32_t index = static_cast<uint32_t>(test_module_.functions.size());
- test_module_.functions.push_back(
- {sig, index, 0, {0, 0}, {0, 0}, false, false});
- if (name) {
- Vector<const byte> name_vec = Vector<const byte>::cast(CStrVector(name));
- test_module_.functions.back().name = {
- AddBytes(name_vec), static_cast<uint32_t>(name_vec.length())};
+ if (instance_object()->has_memory_object()) {
+ instance_object()->memory_object()->set_maximum_pages(maximum_pages);
}
- function_code_.push_back(code);
- if (interpreter_) {
- interpreter_->AddFunctionForTesting(&test_module_.functions.back());
- }
- DCHECK_LT(index, kMaxFunctions); // limited for testing.
- return index;
}
- uint32_t AddJsFunction(FunctionSig* sig, const char* source) {
- Handle<JSFunction> jsfunc = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
- *v8::Local<v8::Function>::Cast(CompileRun(source))));
- uint32_t index = AddFunction(sig, Handle<Code>::null(), nullptr);
- Handle<Code> code = CompileWasmToJSWrapper(
- isolate_, jsfunc, sig, index, Handle<String>::null(),
- Handle<String>::null(), test_module_.origin());
- function_code_[index] = code;
- return index;
- }
+ void SetHasSharedMemory() { test_module_.has_shared_memory = true; }
- Handle<JSFunction> WrapCode(uint32_t index) {
- // Wrap the code so it can be called as a JS function.
- Handle<Code> code = function_code_[index];
- Handle<Code> ret_code =
- compiler::CompileJSToWasmWrapper(isolate_, &test_module_, code, index);
- Handle<JSFunction> ret = WasmExportedFunction::New(
- isolate_, instance_object(), MaybeHandle<String>(),
- static_cast<int>(index),
- static_cast<int>(test_module_.functions[index].sig->parameter_count()),
- ret_code);
-
- // Add weak reference to exported functions.
- Handle<WasmCompiledModule> compiled_module(
- instance_object()->compiled_module(), isolate_);
- Handle<FixedArray> old_arr = compiled_module->weak_exported_functions();
- Handle<FixedArray> new_arr =
- isolate_->factory()->NewFixedArray(old_arr->length() + 1);
- old_arr->CopyTo(0, *new_arr, 0, old_arr->length());
- Handle<WeakCell> weak_fn = isolate_->factory()->NewWeakCell(ret);
- new_arr->set(old_arr->length(), *weak_fn);
- compiled_module->set_weak_exported_functions(new_arr);
-
- return ret;
- }
+ uint32_t AddFunction(FunctionSig* sig, Handle<Code> code, const char* name);
+
+ uint32_t AddJsFunction(FunctionSig* sig, const char* source,
+ Handle<FixedArray> js_imports_table);
+
+ Handle<JSFunction> WrapCode(uint32_t index);
void SetFunctionCode(uint32_t index, Handle<Code> code) {
function_code_[index] = code;
}
void AddIndirectFunctionTable(uint16_t* function_indexes,
- uint32_t table_size) {
- test_module_.function_tables.emplace_back();
- WasmIndirectFunctionTable& table = test_module_.function_tables.back();
- table.initial_size = table_size;
- table.maximum_size = table_size;
- table.has_maximum_size = true;
- for (uint32_t i = 0; i < table_size; ++i) {
- table.values.push_back(function_indexes[i]);
- table.map.FindOrInsert(test_module_.functions[function_indexes[i]].sig);
- }
-
- function_tables_.push_back(
- isolate_->global_handles()
- ->Create(*isolate_->factory()->NewFixedArray(table_size))
- .address());
- signature_tables_.push_back(
- isolate_->global_handles()
- ->Create(*isolate_->factory()->NewFixedArray(table_size))
- .address());
- }
+ uint32_t table_size);
- void PopulateIndirectFunctionTable() {
- if (interpret()) return;
- // Initialize the fixed arrays in instance->function_tables.
- for (uint32_t i = 0; i < function_tables_.size(); i++) {
- WasmIndirectFunctionTable& table = test_module_.function_tables[i];
- Handle<FixedArray> function_table(
- reinterpret_cast<FixedArray**>(function_tables_[i]));
- Handle<FixedArray> signature_table(
- reinterpret_cast<FixedArray**>(signature_tables_[i]));
- int table_size = static_cast<int>(table.values.size());
- for (int j = 0; j < table_size; j++) {
- WasmFunction& function = test_module_.functions[table.values[j]];
- signature_table->set(j, Smi::FromInt(table.map.Find(function.sig)));
- function_table->set(j, *function_code_[function.func_index]);
- }
- }
- }
+ void PopulateIndirectFunctionTable();
- uint32_t AddBytes(Vector<const byte> bytes) {
- Handle<SeqOneByteString> old_bytes(
- instance_object_->compiled_module()->module_bytes(), isolate_);
- uint32_t old_size = static_cast<uint32_t>(old_bytes->length());
- // Avoid placing strings at offset 0, this might be interpreted as "not
- // set", e.g. for function names.
- uint32_t bytes_offset = old_size ? old_size : 1;
- ScopedVector<byte> new_bytes(bytes_offset + bytes.length());
- memcpy(new_bytes.start(), old_bytes->GetChars(), old_size);
- memcpy(new_bytes.start() + bytes_offset, bytes.start(), bytes.length());
- Handle<SeqOneByteString> new_bytes_str = Handle<SeqOneByteString>::cast(
- isolate_->factory()->NewStringFromOneByte(new_bytes).ToHandleChecked());
- instance_object_->compiled_module()->shared()->set_module_bytes(
- *new_bytes_str);
- return bytes_offset;
- }
+ uint32_t AddBytes(Vector<const byte> bytes);
WasmFunction* GetFunctionAt(int index) {
return &test_module_.functions[index];
@@ -327,6 +191,7 @@ class TestingModuleBuilder {
WasmInterpreter* interpreter() { return interpreter_; }
bool interpret() { return interpreter_ != nullptr; }
+ bool lower_simd() { return lower_simd_; }
Isolate* isolate() { return isolate_; }
Handle<WasmInstanceObject> instance_object() { return instance_object_; }
Handle<Code> GetFunctionCode(int index) { return function_code_[index]; }
@@ -335,23 +200,10 @@ class TestingModuleBuilder {
}
Address globals_start() { return reinterpret_cast<Address>(globals_data_); }
- compiler::ModuleEnv CreateModuleEnv() {
- std::vector<SignatureMap*> signature_maps;
- for (size_t i = 0; i < test_module_.function_tables.size(); i++) {
- auto& function_table = test_module_.function_tables[i];
- signature_maps.push_back(&function_table.map);
- }
- return {
- &test_module_,
- function_tables_,
- signature_tables_,
- signature_maps,
- function_code_,
- Handle<Code>::null(),
- reinterpret_cast<uintptr_t>(mem_start_),
- mem_size_,
- reinterpret_cast<uintptr_t>(globals_data_),
- };
+ compiler::ModuleEnv CreateModuleEnv();
+
+ compiler::RuntimeExceptionSupport runtime_exception_support() const {
+ return runtime_exception_support_;
}
private:
@@ -367,143 +219,26 @@ class TestingModuleBuilder {
V8_ALIGNED(8) byte globals_data_[kMaxGlobalsSize];
WasmInterpreter* interpreter_;
Handle<WasmInstanceObject> instance_object_;
+ compiler::RuntimeExceptionSupport runtime_exception_support_;
+ bool lower_simd_;
- const WasmGlobal* AddGlobal(ValueType type) {
- byte size = WasmOpcodes::MemSize(WasmOpcodes::MachineTypeFor(type));
- global_offset = (global_offset + size - 1) & ~(size - 1); // align
- test_module_.globals.push_back(
- {type, true, WasmInitExpr(), global_offset, false, false});
- global_offset += size;
- // limit number of globals.
- CHECK_LT(global_offset, kMaxGlobalsSize);
- return &test_module_.globals.back();
- }
+ const WasmGlobal* AddGlobal(ValueType type);
- Handle<WasmInstanceObject> InitInstanceObject() {
- Handle<SeqOneByteString> empty_string = Handle<SeqOneByteString>::cast(
- isolate_->factory()->NewStringFromOneByte({}).ToHandleChecked());
- // The lifetime of the wasm module is tied to this object's, and we cannot
- // rely on the mechanics of Managed<T>.
- Handle<Foreign> module_wrapper = isolate_->factory()->NewForeign(
- reinterpret_cast<Address>(&test_module_ptr_));
- Handle<Script> script =
- isolate_->factory()->NewScript(isolate_->factory()->empty_string());
- script->set_type(Script::TYPE_WASM);
- Handle<WasmSharedModuleData> shared_module_data =
- WasmSharedModuleData::New(isolate_, module_wrapper, empty_string,
- script, Handle<ByteArray>::null());
- Handle<FixedArray> code_table = isolate_->factory()->NewFixedArray(0);
- Handle<FixedArray> export_wrappers = isolate_->factory()->NewFixedArray(0);
- Handle<WasmCompiledModule> compiled_module = WasmCompiledModule::New(
- isolate_, shared_module_data, code_table, export_wrappers,
- function_tables_, signature_tables_);
- // This method is called when we initialize TestEnvironment. We don't
- // have a memory yet, so we won't create it here. We'll update the
- // interpreter when we get a memory. We do have globals, though.
- WasmCompiledModule::recreate_globals_start(
- compiled_module, isolate_->factory(),
- reinterpret_cast<size_t>(globals_data_));
- Handle<FixedArray> weak_exported = isolate_->factory()->NewFixedArray(0);
- compiled_module->set_weak_exported_functions(weak_exported);
- DCHECK(WasmCompiledModule::IsWasmCompiledModule(*compiled_module));
- return WasmInstanceObject::New(isolate_, compiled_module);
- }
+ Handle<WasmInstanceObject> InitInstanceObject();
};
-inline void TestBuildingGraph(Zone* zone, JSGraph* jsgraph, ModuleEnv* module,
- FunctionSig* sig,
- SourcePositionTable* source_position_table,
- const byte* start, const byte* end,
- bool runtime_exception_support = false) {
- compiler::WasmGraphBuilder builder(
- module, zone, jsgraph, CEntryStub(jsgraph->isolate(), 1).GetCode(), sig,
- source_position_table);
- builder.SetRuntimeExceptionSupport(runtime_exception_support);
-
- DecodeResult result =
- BuildTFGraph(zone->allocator(), &builder, sig, start, end);
- if (result.failed()) {
- if (!FLAG_trace_wasm_decoder) {
- // Retry the compilation with the tracing flag on, to help in debugging.
- FLAG_trace_wasm_decoder = true;
- result = BuildTFGraph(zone->allocator(), &builder, sig, start, end);
- }
+void TestBuildingGraph(
+ Zone* zone, compiler::JSGraph* jsgraph, compiler::ModuleEnv* module,
+ FunctionSig* sig, compiler::SourcePositionTable* source_position_table,
+ const byte* start, const byte* end,
+ compiler::RuntimeExceptionSupport runtime_exception_support);
- uint32_t pc = result.error_offset();
- std::ostringstream str;
- str << "Verification failed; pc = +" << pc
- << ", msg = " << result.error_msg().c_str();
- FATAL(str.str().c_str());
- }
- builder.LowerInt64();
- if (!CpuFeatures::SupportsWasmSimd128()) {
- builder.SimdScalarLoweringForTesting();
- }
-}
-
-class WasmFunctionWrapper : private GraphAndBuilders {
+class WasmFunctionWrapper : private compiler::GraphAndBuilders {
public:
- explicit WasmFunctionWrapper(Zone* zone, int num_params)
- : GraphAndBuilders(zone), inner_code_node_(nullptr), signature_(nullptr) {
- // One additional parameter for the pointer to the return value memory.
- Signature<MachineType>::Builder sig_builder(zone, 1, num_params + 1);
-
- sig_builder.AddReturn(MachineType::Int32());
- for (int i = 0; i < num_params + 1; i++) {
- sig_builder.AddParam(MachineType::Pointer());
- }
- signature_ = sig_builder.Build();
- }
+ WasmFunctionWrapper(Zone* zone, int num_params);
void Init(CallDescriptor* descriptor, MachineType return_type,
- Vector<MachineType> param_types) {
- DCHECK_NOT_NULL(descriptor);
- DCHECK_EQ(signature_->parameter_count(), param_types.length() + 1);
-
- // Create the TF graph for the wrapper.
-
- // Function, effect, and control.
- Node** parameters = zone()->NewArray<Node*>(param_types.length() + 3);
- graph()->SetStart(graph()->NewNode(common()->Start(6)));
- Node* effect = graph()->start();
- int parameter_count = 0;
-
- // Dummy node which gets replaced in SetInnerCode.
- inner_code_node_ = graph()->NewNode(common()->Int32Constant(0));
- parameters[parameter_count++] = inner_code_node_;
-
- int param_idx = 0;
- for (MachineType t : param_types) {
- DCHECK_NE(MachineType::None(), t);
- parameters[parameter_count] = graph()->NewNode(
- machine()->Load(t),
- graph()->NewNode(common()->Parameter(param_idx++), graph()->start()),
- graph()->NewNode(common()->Int32Constant(0)), effect,
- graph()->start());
- effect = parameters[parameter_count++];
- }
-
- parameters[parameter_count++] = effect;
- parameters[parameter_count++] = graph()->start();
- Node* call = graph()->NewNode(common()->Call(descriptor), parameter_count,
- parameters);
-
- if (!return_type.IsNone()) {
- effect = graph()->NewNode(
- machine()->Store(StoreRepresentation(
- return_type.representation(), WriteBarrierKind::kNoWriteBarrier)),
- graph()->NewNode(common()->Parameter(param_types.length()),
- graph()->start()),
- graph()->NewNode(common()->Int32Constant(0)), call, effect,
- graph()->start());
- }
- Node* zero = graph()->NewNode(common()->Int32Constant(0));
- Node* r = graph()->NewNode(
- common()->Return(), zero,
- graph()->NewNode(common()->Int32Constant(WASM_WRAPPER_RETURN_VALUE)),
- effect, graph()->start());
- graph()->SetEnd(graph()->NewNode(common()->End(1), r));
- }
+ Vector<MachineType> param_types);
template <typename ReturnType, typename... ParamTypes>
void Init(CallDescriptor* descriptor) {
@@ -515,52 +250,28 @@ class WasmFunctionWrapper : private GraphAndBuilders {
}
void SetInnerCode(Handle<Code> code_handle) {
- NodeProperties::ChangeOp(inner_code_node_,
- common()->HeapConstant(code_handle));
+ compiler::NodeProperties::ChangeOp(inner_code_node_,
+ common()->HeapConstant(code_handle));
}
- Handle<Code> GetWrapperCode() {
- if (code_.is_null()) {
- Isolate* isolate = CcTest::InitIsolateOnce();
-
- CallDescriptor* descriptor =
- Linkage::GetSimplifiedCDescriptor(zone(), signature_, true);
-
- if (kPointerSize == 4) {
- size_t num_params = signature_->parameter_count();
- // One additional parameter for the pointer of the return value.
- Signature<MachineRepresentation>::Builder rep_builder(zone(), 1,
- num_params + 1);
-
- rep_builder.AddReturn(MachineRepresentation::kWord32);
- for (size_t i = 0; i < num_params + 1; i++) {
- rep_builder.AddParam(MachineRepresentation::kWord32);
- }
- Int64Lowering r(graph(), machine(), common(), zone(),
- rep_builder.Build());
- r.LowerGraph();
- }
-
- CompilationInfo info(ArrayVector("testing"), isolate, graph()->zone(),
- Code::ComputeFlags(Code::STUB));
- code_ =
- Pipeline::GenerateCodeForTesting(&info, descriptor, graph(), nullptr);
- CHECK(!code_.is_null());
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_opt_code) {
- OFStream os(stdout);
- code_->Disassemble("wasm wrapper", os);
- }
-#endif
- }
+ const compiler::Operator* IntPtrConstant(intptr_t value) {
+ return machine()->Is32()
+ ? common()->Int32Constant(static_cast<int32_t>(value))
+ : common()->Int64Constant(static_cast<int64_t>(value));
+ }
- return code_;
+ void SetContextAddress(Address value) {
+ compiler::NodeProperties::ChangeOp(
+ context_address_, IntPtrConstant(reinterpret_cast<uintptr_t>(value)));
}
+ Handle<Code> GetWrapperCode();
+
Signature<MachineType>* signature() const { return signature_; }
private:
Node* inner_code_node_;
+ Node* context_address_;
Handle<Code> code_;
Signature<MachineType>* signature_;
};
@@ -568,13 +279,11 @@ class WasmFunctionWrapper : private GraphAndBuilders {
// A helper for compiling wasm functions for testing.
// It contains the internal state for compilation (i.e. TurboFan graph) and
// interpretation (by adding to the interpreter manually).
-class WasmFunctionCompiler : private GraphAndBuilders {
+class WasmFunctionCompiler : public compiler::GraphAndBuilders {
public:
+ ~WasmFunctionCompiler();
+
Isolate* isolate() { return builder_->isolate(); }
- Graph* graph() const { return main_graph_; }
- Zone* zone() const { return graph()->zone(); }
- CommonOperatorBuilder* common() { return &main_common_; }
- MachineOperatorBuilder* machine() { return &main_machine_; }
CallDescriptor* descriptor() {
if (descriptor_ == nullptr) {
descriptor_ = compiler::GetWasmCallDescriptor(zone(), sig);
@@ -583,56 +292,7 @@ class WasmFunctionCompiler : private GraphAndBuilders {
}
uint32_t function_index() { return function_->func_index; }
- void Build(const byte* start, const byte* end) {
- size_t locals_size = local_decls.Size();
- size_t total_size = end - start + locals_size + 1;
- byte* buffer = static_cast<byte*>(zone()->New(total_size));
- // Prepend the local decls to the code.
- local_decls.Emit(buffer);
- // Emit the code.
- memcpy(buffer + locals_size, start, end - start);
- // Append an extra end opcode.
- buffer[total_size - 1] = kExprEnd;
-
- start = buffer;
- end = buffer + total_size;
-
- CHECK_GE(kMaxInt, end - start);
- int len = static_cast<int>(end - start);
- function_->code = {builder_->AddBytes(Vector<const byte>(start, len)),
- static_cast<uint32_t>(len)};
-
- if (interpreter_) {
- // Add the code to the interpreter.
- interpreter_->SetFunctionCodeForTesting(function_, start, end);
- }
-
- // Build the TurboFan graph.
- compiler::ModuleEnv module_env = builder_->CreateModuleEnv();
- TestBuildingGraph(zone(), &jsgraph, &module_env, sig,
- &source_position_table_, start, end,
- runtime_exception_support_);
- Handle<Code> code = Compile();
- builder_->SetFunctionCode(function_index(), code);
-
- // Add to code table.
- Handle<WasmCompiledModule> compiled_module(
- builder_->instance_object()->compiled_module(), isolate());
- Handle<FixedArray> code_table = compiled_module->code_table();
- if (static_cast<int>(function_index()) >= code_table->length()) {
- Handle<FixedArray> new_arr = isolate()->factory()->NewFixedArray(
- static_cast<int>(function_index()) + 1);
- code_table->CopyTo(0, *new_arr, 0, code_table->length());
- code_table = new_arr;
- compiled_module->ReplaceCodeTableForTesting(code_table);
- }
- DCHECK(code_table->get(static_cast<int>(function_index()))
- ->IsUndefined(isolate()));
- code_table->set(static_cast<int>(function_index()), *code);
- if (trap_handler::UseTrapHandler()) {
- UnpackAndRegisterProtectedInstructions(isolate(), code_table);
- }
- }
+ void Build(const byte* start, const byte* end);
byte AllocateLocal(ValueType type) {
uint32_t index = local_decls.AddLocals(1, type);
@@ -646,87 +306,29 @@ class WasmFunctionCompiler : private GraphAndBuilders {
private:
friend class WasmRunnerBase;
- explicit WasmFunctionCompiler(Zone* zone, FunctionSig* sig,
- TestingModuleBuilder* builder, const char* name,
- bool runtime_exception_support)
- : GraphAndBuilders(zone),
- jsgraph(builder->isolate(), this->graph(), this->common(), nullptr,
- nullptr, this->machine()),
- sig(sig),
- descriptor_(nullptr),
- builder_(builder),
- local_decls(zone, sig),
- source_position_table_(this->graph()),
- interpreter_(builder->interpreter()),
- runtime_exception_support_(runtime_exception_support) {
- // Get a new function from the testing module.
- int index = builder->AddFunction(sig, Handle<Code>::null(), name);
- function_ = builder_->GetFunctionAt(index);
- }
+ WasmFunctionCompiler(Zone* zone, FunctionSig* sig,
+ TestingModuleBuilder* builder, const char* name);
- Handle<Code> Compile() {
- CallDescriptor* desc = descriptor();
- if (kPointerSize == 4) {
- desc = compiler::GetI32WasmCallDescriptor(this->zone(), desc);
- }
- EmbeddedVector<char, 16> comp_name;
- int comp_name_len = SNPrintF(comp_name, "wasm#%u", this->function_index());
- comp_name.Truncate(comp_name_len);
- CompilationInfo info(comp_name, this->isolate(), this->zone(),
- Code::ComputeFlags(Code::WASM_FUNCTION));
- std::unique_ptr<CompilationJob> job(Pipeline::NewWasmCompilationJob(
- &info, &jsgraph, desc, &source_position_table_, nullptr,
- ModuleOrigin::kAsmJsOrigin));
- if (job->ExecuteJob() != CompilationJob::SUCCEEDED ||
- job->FinalizeJob() != CompilationJob::SUCCEEDED)
- return Handle<Code>::null();
-
- Handle<Code> code = info.code();
-
- // Deopt data holds <WeakCell<wasm_instance>, func_index>.
- DCHECK(code->deoptimization_data() == nullptr ||
- code->deoptimization_data()->length() == 0);
- Handle<FixedArray> deopt_data =
- isolate()->factory()->NewFixedArray(2, TENURED);
- Handle<Object> weak_instance =
- isolate()->factory()->NewWeakCell(builder_->instance_object());
- deopt_data->set(0, *weak_instance);
- deopt_data->set(1, Smi::FromInt(static_cast<int>(function_index())));
- code->set_deoptimization_data(*deopt_data);
-
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_opt_code) {
- OFStream os(stdout);
- code->Disassemble("wasm code", os);
- }
-#endif
-
- return code;
- }
-
- JSGraph jsgraph;
+ compiler::JSGraph jsgraph;
FunctionSig* sig;
// The call descriptor is initialized when the function is compiled.
CallDescriptor* descriptor_;
TestingModuleBuilder* builder_;
- Vector<const char> debug_name_;
WasmFunction* function_;
LocalDeclEncoder local_decls;
- SourcePositionTable source_position_table_;
+ compiler::SourcePositionTable source_position_table_;
WasmInterpreter* interpreter_;
- bool runtime_exception_support_ = false;
};
// A helper class to build a module around Wasm bytecode, generate machine
// code, and run that code.
class WasmRunnerBase : public HandleAndZoneScope {
public:
- explicit WasmRunnerBase(WasmExecutionMode execution_mode, int num_params,
- bool runtime_exception_support)
+ WasmRunnerBase(WasmExecutionMode execution_mode, int num_params,
+ compiler::RuntimeExceptionSupport runtime_exception_support)
: zone_(&allocator_, ZONE_NAME),
- builder_(&zone_, execution_mode),
- wrapper_(&zone_, num_params),
- runtime_exception_support_(runtime_exception_support) {}
+ builder_(&zone_, execution_mode, runtime_exception_support),
+ wrapper_(&zone_, num_params) {}
// Builds a graph from the given Wasm code and generates the machine
// code and call wrapper for that graph. This method must not be called
@@ -749,8 +351,8 @@ class WasmRunnerBase : public HandleAndZoneScope {
// Returns the index of the previously built function.
WasmFunctionCompiler& NewFunction(FunctionSig* sig,
const char* name = nullptr) {
- functions_.emplace_back(new WasmFunctionCompiler(
- &zone_, sig, &builder_, name, runtime_exception_support_));
+ functions_.emplace_back(
+ new WasmFunctionCompiler(&zone_, sig, &builder_, name));
return *functions_.back();
}
@@ -781,24 +383,7 @@ class WasmRunnerBase : public HandleAndZoneScope {
private:
FunctionSig* CreateSig(MachineType return_type,
- Vector<MachineType> param_types) {
- int return_count = return_type.IsNone() ? 0 : 1;
- int param_count = param_types.length();
-
- // Allocate storage array in zone.
- ValueType* sig_types =
- zone_.NewArray<ValueType>(return_count + param_count);
-
- // Convert machine types to local types, and check that there are no
- // MachineType::None()'s in the parameters.
- int idx = 0;
- if (return_count) sig_types[idx++] = WasmOpcodes::ValueTypeFor(return_type);
- for (MachineType param : param_types) {
- CHECK_NE(MachineType::None(), param);
- sig_types[idx++] = WasmOpcodes::ValueTypeFor(param);
- }
- return new (&zone_) FunctionSig(return_count, param_count, sig_types);
- }
+ Vector<MachineType> param_types);
protected:
v8::internal::AccountingAllocator allocator_;
@@ -808,7 +393,6 @@ class WasmRunnerBase : public HandleAndZoneScope {
WasmFunctionWrapper wrapper_;
bool compiled_ = false;
bool possible_nondeterminism_ = false;
- bool runtime_exception_support_ = false;
public:
// This field has to be static. Otherwise, gcc complains about the use in
@@ -819,9 +403,10 @@ class WasmRunnerBase : public HandleAndZoneScope {
template <typename ReturnType, typename... ParamTypes>
class WasmRunner : public WasmRunnerBase {
public:
- explicit WasmRunner(WasmExecutionMode execution_mode,
- const char* main_fn_name = "main",
- bool runtime_exception_support = false)
+ WasmRunner(WasmExecutionMode execution_mode,
+ const char* main_fn_name = "main",
+ compiler::RuntimeExceptionSupport runtime_exception_support =
+ compiler::kNoRuntimeExceptionSupport)
: WasmRunnerBase(execution_mode, sizeof...(ParamTypes),
runtime_exception_support) {
NewFunction<ReturnType, ParamTypes...>(main_fn_name);
@@ -843,8 +428,13 @@ class WasmRunner : public WasmRunnerBase {
set_trap_callback_for_testing(trap_callback);
wrapper_.SetInnerCode(builder_.GetFunctionCode(0));
- CodeRunner<int32_t> runner(CcTest::InitIsolateOnce(),
- wrapper_.GetWrapperCode(), wrapper_.signature());
+ if (builder().instance_object()->has_memory_object()) {
+ wrapper_.SetContextAddress(reinterpret_cast<Address>(
+ builder().instance_object()->wasm_context()));
+ }
+ compiler::CodeRunner<int32_t> runner(CcTest::InitIsolateOnce(),
+ wrapper_.GetWrapperCode(),
+ wrapper_.signature());
int32_t result = runner.Call(static_cast<void*>(&p)...,
static_cast<void*>(&return_value));
CHECK_EQ(WASM_WRAPPER_RETURN_VALUE, result);
@@ -875,9 +465,6 @@ class WasmRunner : public WasmRunnerBase {
}
};
-// Declare static variable.
-bool WasmRunnerBase::trap_happened;
-
// A macro to define tests that run in different engine configurations.
#define WASM_EXEC_TEST(name) \
void RunWasm_##name(WasmExecutionMode execution_mode); \
@@ -901,6 +488,8 @@ bool WasmRunnerBase::trap_happened;
} \
void RunWasm_##name(WasmExecutionMode execution_mode)
-} // namespace
+} // namespace wasm
+} // namespace internal
+} // namespace v8
#endif
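Taken together, the WASM_EXEC_TEST macro and WasmRunner keep a typical cctest very small. A usage sketch in the style of the existing tests (the test name is illustrative; the macros and Call come from the utilities above):

    // Build an i32 add-one function and call it through the generated
    // wrapper in each engine configuration.
    WASM_EXEC_TEST(AddOneSketch) {
      WasmRunner<int32_t, int32_t> r(execution_mode);
      BUILD(r, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_I32V_1(1)));
      CHECK_EQ(12, r.Call(11));
    }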
diff --git a/deps/v8/test/common/wasm/flag-utils.h b/deps/v8/test/common/wasm/flag-utils.h
index 9bc5981dcd..b9675a2130 100644
--- a/deps/v8/test/common/wasm/flag-utils.h
+++ b/deps/v8/test/common/wasm/flag-utils.h
@@ -21,8 +21,10 @@ class FlagScope {
T previous_value_;
};
-#define EXPERIMENTAL_FLAG_SCOPE(flag) \
- FlagScope<bool> __scope_##__LINE__(&FLAG_experimental_wasm_##flag, true)
+#define FLAG_SCOPE(flag) \
+ FlagScope<bool> __scope_##flag##__LINE__(&FLAG_##flag, true)
+
+#define EXPERIMENTAL_FLAG_SCOPE(flag) FLAG_SCOPE(experimental_wasm_##flag)
} // namespace internal
} // namespace v8
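One preprocessor detail worth noting: in FLAG_SCOPE above, __LINE__ sits directly next to ##, and token pasting suppresses macro expansion, so the variable is literally named __scope_<flag>__LINE__. That is still unique per flag, but not per line; if per-line uniqueness were wanted, the usual fix is a two-level concatenation (a sketch, not the V8 code):

    #define FLAG_CONCAT_IMPL(a, b) a##b
    #define FLAG_CONCAT(a, b) FLAG_CONCAT_IMPL(a, b)
    // Expands __LINE__ before pasting, yielding e.g. __scope_foo_42.
    #define FLAG_SCOPE_UNIQUE(flag)                                  \
      FlagScope<bool> FLAG_CONCAT(__scope_##flag##_, __LINE__)(      \
          &FLAG_##flag, true)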
diff --git a/deps/v8/test/common/wasm/wasm-macro-gen.h b/deps/v8/test/common/wasm/wasm-macro-gen.h
index 0586d075a6..2d15bbc815 100644
--- a/deps/v8/test/common/wasm/wasm-macro-gen.h
+++ b/deps/v8/test/common/wasm/wasm-macro-gen.h
@@ -583,4 +583,21 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
#define WASM_BR_TABLEV(val, key, count, ...) \
val, key, kExprBrTable, U32V_1(count), __VA_ARGS__
+//------------------------------------------------------------------------------
+// Atomic Operations.
+//------------------------------------------------------------------------------
+#define WASM_ATOMICS_OP(op) kAtomicPrefix, static_cast<byte>(op)
+#define WASM_ATOMICS_BINOP(op, x, y, representation) \
+ x, y, WASM_ATOMICS_OP(op), \
+ static_cast<byte>(ElementSizeLog2Of(representation)), ZERO_OFFSET
+#define WASM_ATOMICS_TERNARY_OP(op, x, y, z, representation) \
+ x, y, z, WASM_ATOMICS_OP(op), \
+ static_cast<byte>(ElementSizeLog2Of(representation)), ZERO_OFFSET
+#define WASM_ATOMICS_LOAD_OP(op, x, representation) \
+ x, WASM_ATOMICS_OP(op), \
+ static_cast<byte>(ElementSizeLog2Of(representation)), ZERO_OFFSET
+#define WASM_ATOMICS_STORE_OP(op, x, y, representation) \
+ x, y, WASM_ATOMICS_OP(op), \
+ static_cast<byte>(ElementSizeLog2Of(representation)), ZERO_OFFSET
+
#endif // V8_WASM_MACRO_GEN_H_
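Each atomics macro flattens into the byte sequence the wasm decoder expects: the operand expressions first, then the atomic prefix byte and opcode, then an alignment immediate (log2 of the access size) and a memory offset. An illustrative expansion, with the opcode name treated as an assumption:

    // Hypothetical expansion sketch for a 32-bit atomic binop:
    //   WASM_ATOMICS_BINOP(kExprI32AtomicAdd, x, y,
    //                      MachineRepresentation::kWord32)
    // flattens to the bytes
    //   x, y, kAtomicPrefix, kExprI32AtomicAdd, 2 /* log2(4 bytes) */, 0
    // where 2 is ElementSizeLog2Of(kWord32) and the trailing 0 is
    // ZERO_OFFSET.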
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.cc b/deps/v8/test/common/wasm/wasm-module-runner.cc
index 70beed5fda..d2f5e68fef 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.cc
+++ b/deps/v8/test/common/wasm/wasm-module-runner.cc
@@ -9,6 +9,7 @@
#include "src/objects-inl.h"
#include "src/objects.h"
#include "src/property-descriptor.h"
+#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-interpreter.h"
#include "src/wasm/wasm-js.h"
@@ -42,6 +43,59 @@ std::unique_ptr<WasmModule> DecodeWasmModuleForTesting(
return std::move(decoding_result.val);
}
+bool InterpretWasmModuleForTesting(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ const char* name, size_t argc,
+ WasmValue* args) {
+ MaybeHandle<WasmExportedFunction> maybe_function =
+ GetExportedFunction(isolate, instance, name);
+ Handle<WasmExportedFunction> function;
+ if (!maybe_function.ToHandle(&function)) {
+ return false;
+ }
+ int function_index = function->function_index();
+ FunctionSig* signature = instance->module()->functions[function_index].sig;
+ size_t param_count = signature->parameter_count();
+ std::unique_ptr<WasmValue[]> arguments(new WasmValue[param_count]);
+
+ // memcpy takes a byte count; copy argc (at most param_count) WasmValues.
+ memcpy(arguments.get(), args,
+ std::min(param_count, argc) * sizeof(WasmValue));
+
+ // Fill the parameters up with default values.
+ for (size_t i = argc; i < param_count; ++i) {
+ switch (signature->GetParam(i)) {
+ case MachineRepresentation::kWord32:
+ arguments[i] = WasmValue(int32_t{0});
+ break;
+ case MachineRepresentation::kWord64:
+ arguments[i] = WasmValue(int64_t{0});
+ break;
+ case MachineRepresentation::kFloat32:
+ arguments[i] = WasmValue(0.0f);
+ break;
+ case MachineRepresentation::kFloat64:
+ arguments[i] = WasmValue(0.0);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ // Don't execute more than 16k steps.
+ constexpr int kMaxNumSteps = 16 * 1024;
+
+ Zone zone(isolate->allocator(), ZONE_NAME);
+
+ WasmInterpreter* interpreter = WasmDebugInfo::SetupForTesting(instance);
+ WasmInterpreter::HeapObjectsScope heap_objects_scope(interpreter, instance);
+ WasmInterpreter::Thread* thread = interpreter->GetThread(0);
+ thread->Reset();
+ thread->InitFrame(&instance->module()->functions[function_index],
+ arguments.get());
+ WasmInterpreter::State interpreter_result = thread->Run(kMaxNumSteps);
+
+ return interpreter_result != WasmInterpreter::PAUSED;
+}
+
int32_t RunWasmModuleForTesting(Isolate* isolate, Handle<JSObject> instance,
int argc, Handle<Object> argv[]) {
ErrorThrower thrower(isolate, "RunWasmModule");
@@ -111,9 +165,9 @@ int32_t InterpretWasmModule(Isolate* isolate,
}
}
-int32_t CallWasmFunctionForTesting(Isolate* isolate, Handle<JSObject> instance,
- ErrorThrower* thrower, const char* name,
- int argc, Handle<Object> argv[]) {
+MaybeHandle<WasmExportedFunction> GetExportedFunction(Isolate* isolate,
+ Handle<JSObject> instance,
+ const char* name) {
Handle<JSObject> exports_object;
Handle<Name> exports = isolate->factory()->InternalizeUtf8String("exports");
exports_object = Handle<JSObject>::cast(
@@ -123,9 +177,21 @@ int32_t CallWasmFunctionForTesting(Isolate* isolate, Handle<JSObject> instance,
PropertyDescriptor desc;
Maybe<bool> property_found = JSReceiver::GetOwnPropertyDescriptor(
isolate, exports_object, main_name, &desc);
- if (!property_found.FromMaybe(false)) return -1;
+ if (!property_found.FromMaybe(false)) return {};
+ if (!desc.value()->IsJSFunction()) return {};
- Handle<JSFunction> main_export = Handle<JSFunction>::cast(desc.value());
+ return Handle<WasmExportedFunction>::cast(desc.value());
+}
+
+int32_t CallWasmFunctionForTesting(Isolate* isolate, Handle<JSObject> instance,
+ ErrorThrower* thrower, const char* name,
+ int argc, Handle<Object> argv[]) {
+ MaybeHandle<WasmExportedFunction> maybe_export =
+ GetExportedFunction(isolate, instance, name);
+ Handle<WasmExportedFunction> main_export;
+ if (!maybe_export.ToHandle(&main_export)) {
+ return -1;
+ }
// Call the JS function.
Handle<Object> undefined = isolate->factory()->undefined_value();
@@ -152,7 +218,7 @@ int32_t CallWasmFunctionForTesting(Isolate* isolate, Handle<JSObject> instance,
}
void SetupIsolateForWasmModule(Isolate* isolate) {
- WasmJs::Install(isolate);
+ WasmJs::Install(isolate, true);
}
} // namespace testing
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.h b/deps/v8/test/common/wasm/wasm-module-runner.h
index cd3e5a1af9..4fa2ca67c1 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.h
+++ b/deps/v8/test/common/wasm/wasm-module-runner.h
@@ -27,6 +27,12 @@ std::unique_ptr<WasmModule> DecodeWasmModuleForTesting(
Isolate* isolate, ErrorThrower* thrower, const byte* module_start,
const byte* module_end, ModuleOrigin origin, bool verify_functions = false);
+// Returns a MaybeHandle to the JsToWasm wrapper of the wasm function exported
+// with the given name by the provided instance.
+MaybeHandle<WasmExportedFunction> GetExportedFunction(Isolate* isolate,
+ Handle<JSObject> instance,
+ const char* name);
+
// Call an exported wasm function by name. Returns -1 if the export does not
// exist or throws an error. Errors are cleared from the isolate before
// returning.
@@ -34,6 +40,15 @@ int32_t CallWasmFunctionForTesting(Isolate* isolate, Handle<JSObject> instance,
ErrorThrower* thrower, const char* name,
int argc, Handle<Object> argv[]);
+// Interprets an exported wasm function by name. Returns false if it was not
+// possible to execute the function (e.g. because it does not exist), or if the
+// interpretation does not finish after kMaxNumSteps. Otherwise returns true.
+// The arguments array is extended with default values if necessary.
+bool InterpretWasmModuleForTesting(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ const char* name, size_t argc,
+ WasmValue* args);
+
// Decode, verify, and run the function labeled "main" in the
// given encoded module. The module should have no imports.
int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
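The padding logic in InterpretWasmModuleForTesting copies the caller's values and then fills any remaining parameters with a typed zero chosen by the parameter's machine representation. The sketch below restates that logic in self-contained form, with simplified stand-ins for WasmValue and MachineRepresentation; note that it copies whole WasmValue objects rather than treating the argument count as a byte count.

    #include <algorithm>
    #include <cstdint>
    #include <memory>
    #include <vector>

    enum class Rep { kWord32, kWord64, kFloat32, kFloat64 };

    struct WasmValue {  // Simplified tagged value.
      Rep rep = Rep::kWord32;
      union { int32_t i32; int64_t i64; float f32; double f64; };
      WasmValue() : i32(0) {}
      explicit WasmValue(int32_t v) : rep(Rep::kWord32), i32(v) {}
      explicit WasmValue(int64_t v) : rep(Rep::kWord64), i64(v) {}
      explicit WasmValue(float v) : rep(Rep::kFloat32), f32(v) {}
      explicit WasmValue(double v) : rep(Rep::kFloat64), f64(v) {}
    };

    std::unique_ptr<WasmValue[]> PadArguments(const std::vector<Rep>& params,
                                              const WasmValue* args,
                                              size_t argc) {
      auto arguments = std::make_unique<WasmValue[]>(params.size());
      // Copy whole WasmValue objects, not argc raw bytes.
      std::copy(args, args + std::min(params.size(), argc), arguments.get());
      for (size_t i = argc; i < params.size(); ++i) {
        switch (params[i]) {  // Fill the rest with default values.
          case Rep::kWord32:  arguments[i] = WasmValue(int32_t{0}); break;
          case Rep::kWord64:  arguments[i] = WasmValue(int64_t{0}); break;
          case Rep::kFloat32: arguments[i] = WasmValue(0.0f); break;
          case Rep::kFloat64: arguments[i] = WasmValue(0.0); break;
        }
      }
      return arguments;
    }

    int main() {
      WasmValue one(int32_t{1});
      auto padded = PadArguments({Rep::kWord32, Rep::kFloat64}, &one, 1);
      return padded[1].f64 == 0.0 ? 0 : 1;
    }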
diff --git a/deps/v8/test/debugger/debug/debug-break-microtask.js b/deps/v8/test/debugger/debug/debug-break-microtask.js
new file mode 100644
index 0000000000..4a244b051d
--- /dev/null
+++ b/deps/v8/test/debugger/debug/debug-break-microtask.js
@@ -0,0 +1,52 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Debug = debug.Debug
+var exception = null;
+var log = [];
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ var line = exec_state.frame(0).sourceLineText();
+ log.push(line);
+ if (!/STOP/.test(line)) {
+ exec_state.prepareStep(Debug.StepAction.StepIn);
+ }
+ }
+ } catch (e) {
+ exception = e;
+ }
+};
+
+Debug.setListener(listener);
+
+function f() {
+ print(1);
+}
+
+Promise.resolve().then(f).then(
+function() {
+ return 2;
+}
+).then(
+function() {
+ throw new Error();
+}
+).catch(
+function() {
+ print(3);
+} // STOP
+);
+
+setTimeout(function() {
+ Debug.setListener(null);
+ assertNull(exception);
+ var expectation =
+ [" print(1);","}"," return 2;"," return 2;",
+ " throw new Error();"," print(3);","} // STOP"];
+  assertEquals(expectation, log);
+});
+
+Debug.setBreakPoint(f, 1);
diff --git a/deps/v8/test/debugger/debug/debug-compile-optimized.js b/deps/v8/test/debugger/debug/debug-compile-optimized.js
index c25bdfd4c0..33f199ac51 100644
--- a/deps/v8/test/debugger/debug/debug-compile-optimized.js
+++ b/deps/v8/test/debugger/debug/debug-compile-optimized.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --opt
+// Flags: --opt --no-always-opt
Debug = debug.Debug;
@@ -15,4 +15,17 @@ f();
f();
assertOptimized(f);
+var bp = Debug.setBreakPoint(f);
+assertUnoptimized(f);
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
+assertUnoptimized(f);
+
+Debug.clearBreakPoint(bp);
+%OptimizeFunctionOnNextCall(f);
+f();
+assertOptimized(f);
+
Debug.setListener(null);
diff --git a/deps/v8/test/debugger/debug/debug-evaluate-no-side-effect-ops.js b/deps/v8/test/debugger/debug/debug-evaluate-no-side-effect-ops.js
index e56645d05b..b7e49dc88f 100644
--- a/deps/v8/test/debugger/debug/debug-evaluate-no-side-effect-ops.js
+++ b/deps/v8/test/debugger/debug/debug-evaluate-no-side-effect-ops.js
@@ -57,6 +57,8 @@ function listener(event, exec_state, event_data, data) {
success(true, `T||F`);
success(false, `T?F:T`);
success(false, `!T`);
+ success(1, `+one`);
+ success(-1, `-one`);
success(-2, `~one`);
success(4, `one << two`);
success(1, `two >> one`);
diff --git a/deps/v8/test/debugger/debug/debug-liveedit-inline.js b/deps/v8/test/debugger/debug/debug-liveedit-inline.js
new file mode 100644
index 0000000000..4d20991fbc
--- /dev/null
+++ b/deps/v8/test/debugger/debug/debug-liveedit-inline.js
@@ -0,0 +1,30 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --enable-inspector
+
+Debug = debug.Debug
+
+eval("var something1 = 25; "
+ + " function ChooseAnimal() { return 'Cat'; } "
+ + " ChooseAnimal.Helper = function() { return 'Help!'; }");
+
+function foo() { return ChooseAnimal() }
+
+assertEquals("Cat", foo());
+ %OptimizeFunctionOnNextCall(foo);
+
+foo();
+
+var script = Debug.findScript(ChooseAnimal);
+
+var orig_animal = "Cat";
+var patch_pos = script.source.indexOf(orig_animal);
+var new_animal_patch = "Cap' + 'y' + 'bara";
+
+var change_log = new Array();
+
+Debug.LiveEdit.TestApi.ApplySingleChunkPatch(script, patch_pos, orig_animal.length, new_animal_patch, change_log);
+
+assertEquals("Capybara", foo());
diff --git a/deps/v8/test/debugger/debug/debug-step-microtask.js b/deps/v8/test/debugger/debug/debug-step-microtask.js
new file mode 100644
index 0000000000..258f235c58
--- /dev/null
+++ b/deps/v8/test/debugger/debug/debug-step-microtask.js
@@ -0,0 +1,52 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Debug = debug.Debug
+var exception = null;
+var log = [];
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ var line = exec_state.frame(0).sourceLineText();
+ log.push(line);
+ if (!/STOP/.test(line)) {
+ exec_state.prepareStep(Debug.StepAction.StepIn);
+ }
+ }
+ } catch (e) {
+ exception = e;
+ }
+};
+
+Debug.setListener(listener);
+
+Promise.resolve().then(
+function() {
+ print(1);
+}
+).then(
+function() {
+ return 2;
+}
+).then(
+function() {
+ throw new Error();
+}
+).catch(
+function() {
+ print(3);
+} // STOP
+);
+
+setTimeout(function() {
+ Debug.setListener(null);
+ assertNull(exception);
+ var expectation =
+ ["debugger;","debugger;"," print(1);","}"," return 2;"," return 2;",
+ " throw new Error();"," print(3);","} // STOP"];
+  assertEquals(expectation, log);
+});
+
+debugger;
diff --git a/deps/v8/test/debugger/debug/es8/promise-finally.js b/deps/v8/test/debugger/debug/es8/promise-finally.js
new file mode 100644
index 0000000000..2598ae0d82
--- /dev/null
+++ b/deps/v8/test/debugger/debug/es8/promise-finally.js
@@ -0,0 +1,46 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-promise-finally
+
+Debug = debug.Debug
+
+var exception = null;
+var step = 0;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Exception) return;
+ try {
+ var line = exec_state.frame(0).sourceLineText();
+ var match = /Exception/.exec(line);
+ assertNotNull(match);
+ step++;
+ } catch (e) {
+ exception = e;
+ }
+}
+
+// Caught throw, events on any exception.
+Debug.setListener(listener);
+Debug.setBreakOnException();
+
+var thenable = {
+ get then() {
+ throw new Error('err'); // Exception
+ }
+};
+
+var caughtException = null;
+
+Promise.resolve()
+ .finally(() => thenable)
+ .catch(e => caughtException = e);
+
+%RunMicrotasks();
+
+Debug.setListener(null);
+Debug.clearBreakOnException();
+assertNull(exception);
+assertNotNull(caughtException);
+assertEquals(1, step);
diff --git a/deps/v8/test/debugger/debug/regress/regress-crbug-222893.js b/deps/v8/test/debugger/debug/regress/regress-crbug-222893.js
index a0fc6d56fb..b7838bb8ea 100644
--- a/deps/v8/test/debugger/debug/regress/regress-crbug-222893.js
+++ b/deps/v8/test/debugger/debug/regress/regress-crbug-222893.js
@@ -31,12 +31,12 @@ Debug = debug.Debug
var error = null;
var array = ["a", "b", "c"];
+var result = null;
function listener(event, exec_state, event_data, data) {
try {
if (event == Debug.DebugEvent.Break) {
- assertArrayEquals(array,
- exec_state.frame(0).evaluate('arguments').value());
+ result = exec_state.frame(0).evaluate('arguments').value();
}
} catch (e) {
error = e;
@@ -51,13 +51,35 @@ function f(a, b) {
debugger; // Arguments object is already materialized.
}
+result = null;
f.apply(this, array);
+assertArrayEquals(array, result);
+result = null;
f("a", "b", "c");
+assertArrayEquals(array, result);
assertNull(error);
function g(a, b) {
debugger; // Arguments object is not yet materialized.
}
+
+result = null;
g.apply(this, array);
+assertArrayEquals(array, result);
+result = null;
g("a", "b", "c");
+assertArrayEquals(array, result);
+assertNull(error);
+
+function h(a, b) {
+ var arguments = undefined;
+ debugger; // Arguments already used as local variable.
+}
+
+result = null;
+h.apply(this, array);
+assertEquals(undefined, result);
+result = null;
+h("a", "b", "c");
+assertEquals(undefined, result);
assertNull(error);
diff --git a/deps/v8/test/debugger/debugger.status b/deps/v8/test/debugger/debugger.status
index bbb4507bdf..98a95ebc02 100644
--- a/deps/v8/test/debugger/debugger.status
+++ b/deps/v8/test/debugger/debugger.status
@@ -44,6 +44,12 @@
}], # variant == stress
##############################################################################
+['variant == stress_incremental_marking', {
+ # BUG(chromium:772010).
+ 'debug/debug-*': [PASS, ['system == windows', SKIP]],
+}], # variant == stress_incremental_marking
+
+##############################################################################
['gc_stress == True', {
# Skip tests not suitable for GC stress.
# Tests taking too long
diff --git a/deps/v8/test/fuzzer/README.md b/deps/v8/test/fuzzer/README.md
index ed0ce1fa2a..5f10d47ec4 100644
--- a/deps/v8/test/fuzzer/README.md
+++ b/deps/v8/test/fuzzer/README.md
@@ -79,7 +79,7 @@ new fuzzer to cluster fuzz.
for more information.
2. Compile the fuzzer in chromium (for different configurations see:
- https://chromium.googlesource.com/chromium/src/+/master/testing/libfuzzer/reproducing.md):
+ https://chromium.googlesource.com/chromium/src/+/master/testing/libfuzzer/reference.md):
* `gn gen out/libfuzzer '--args=use_libfuzzer=true is_asan=true is_debug=false enable_nacl=false'`
diff --git a/deps/v8/test/fuzzer/fuzzer-support.cc b/deps/v8/test/fuzzer/fuzzer-support.cc
index 2ca35800f8..f800f49b8f 100644
--- a/deps/v8/test/fuzzer/fuzzer-support.cc
+++ b/deps/v8/test/fuzzer/fuzzer-support.cc
@@ -53,8 +53,9 @@ FuzzerSupport::FuzzerSupport(int* argc, char*** argv) {
FuzzerSupport::~FuzzerSupport() {
{
v8::Isolate::Scope isolate_scope(isolate_);
- while (PumpMessageLoop()) /* empty */
- ;
+ while (PumpMessageLoop()) {
+ // empty
+ }
v8::HandleScope handle_scope(isolate_);
context_.Reset();
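The braced loop body is purely cosmetic; the destructor still drains the platform message loop before the context and isolate are torn down, so tasks queued by the last fuzz run cannot fire against freed state. A short sketch of the same drain pattern against the public libplatform API, which FuzzerSupport's own PumpMessageLoop presumably wraps:

    #include "include/libplatform/libplatform.h"
    #include "include/v8.h"

    // Run queued foreground tasks until the queue is empty. PumpMessageLoop
    // returns true as long as it found and executed a pending task.
    void DrainMessageLoop(v8::Platform* platform, v8::Isolate* isolate) {
      while (v8::platform::PumpMessageLoop(platform, isolate)) {
        // Empty: pumping the loop is the whole body.
      }
    }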
diff --git a/deps/v8/test/fuzzer/fuzzer-support.h b/deps/v8/test/fuzzer/fuzzer-support.h
index 60c870ca38..e72dcc3613 100644
--- a/deps/v8/test/fuzzer/fuzzer-support.h
+++ b/deps/v8/test/fuzzer/fuzzer-support.h
@@ -34,6 +34,6 @@ class FuzzerSupport {
v8::Global<v8::Context> context_;
};
-} // namespace
+} // namespace v8_fuzzer
#endif // TEST_FUZZER_FUZZER_SUPPORT_H_
diff --git a/deps/v8/test/fuzzer/fuzzer.cc b/deps/v8/test/fuzzer/fuzzer.cc
index cb4a287d70..96c381e0b6 100644
--- a/deps/v8/test/fuzzer/fuzzer.cc
+++ b/deps/v8/test/fuzzer/fuzzer.cc
@@ -29,13 +29,13 @@ int main(int argc, char* argv[]) {
}
fseek(input, 0, SEEK_END);
- long size = ftell(input);
+ size_t size = ftell(input);
fseek(input, 0, SEEK_SET);
uint8_t* data = reinterpret_cast<uint8_t*>(malloc(size));
if (!data) {
fclose(input);
- fprintf(stderr, "Failed to allocate %ld bytes\n", size);
+ fprintf(stderr, "Failed to allocate %zu bytes\n", size);
return 1;
}
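Switching size to size_t fixes the format-string mismatch, but ftell still returns long and reports failure as -1, which the implicit conversion would turn into a huge size_t. A hedged sketch of the same read-whole-file sequence with the error checked before widening:

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    uint8_t* ReadAll(FILE* input, size_t* out_size) {
      if (fseek(input, 0, SEEK_END) != 0) return nullptr;
      long tell = ftell(input);      // May be -1 on failure.
      if (tell < 0) return nullptr;  // Check before widening to size_t.
      size_t size = static_cast<size_t>(tell);
      if (fseek(input, 0, SEEK_SET) != 0) return nullptr;
      uint8_t* data = static_cast<uint8_t*>(malloc(size));
      if (!data) return nullptr;
      if (fread(data, 1, size, input) != size) {
        free(data);
        return nullptr;
      }
      *out_size = size;
      return data;
    }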
diff --git a/deps/v8/test/fuzzer/fuzzer.gyp b/deps/v8/test/fuzzer/fuzzer.gyp
index 863f90ae02..3d76018d55 100644
--- a/deps/v8/test/fuzzer/fuzzer.gyp
+++ b/deps/v8/test/fuzzer/fuzzer.gyp
@@ -116,6 +116,8 @@
'wasm.cc',
'../common/wasm/wasm-module-runner.cc',
'../common/wasm/wasm-module-runner.h',
+ 'wasm-fuzzer-common.cc',
+ 'wasm-fuzzer-common.h',
],
},
{
@@ -145,6 +147,8 @@
'wasm-async.cc',
'../common/wasm/wasm-module-runner.cc',
'../common/wasm/wasm-module-runner.h',
+ 'wasm-fuzzer-common.cc',
+ 'wasm-fuzzer-common.h',
],
},
{
diff --git a/deps/v8/test/fuzzer/parser.cc b/deps/v8/test/fuzzer/parser.cc
index 76666e85be..9321207c23 100644
--- a/deps/v8/test/fuzzer/parser.cc
+++ b/deps/v8/test/fuzzer/parser.cc
@@ -6,6 +6,9 @@
#include <stddef.h>
#include <stdint.h>
+#include <cctype>
+#include <list>
+
#include "include/v8.h"
#include "src/objects-inl.h"
#include "src/objects.h"
@@ -14,9 +17,6 @@
#include "src/parsing/preparser.h"
#include "test/fuzzer/fuzzer-support.h"
-#include <cctype>
-#include <list>
-
bool IsValidInput(const uint8_t* data, size_t size) {
std::list<char> parentheses;
const char* ptr = reinterpret_cast<const char*>(data);
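The surrounding IsValidInput uses a std::list<char> as a stack to prefilter fuzzer inputs before handing them to the parser. The sketch below shows the general shape of such a balanced-bracket prefilter; it is an assumption about the intent, not the upstream predicate, and uses a vector-as-stack instead of std::list.

    #include <cstdint>
    #include <cstring>
    #include <vector>

    bool HasBalancedBrackets(const uint8_t* data, size_t size) {
      std::vector<char> stack;  // Open brackets seen so far.
      for (size_t i = 0; i < size; ++i) {
        char c = static_cast<char>(data[i]);
        if (c == '(' || c == '[' || c == '{') {
          stack.push_back(c);
        } else if (c == ')' || c == ']' || c == '}') {
          if (stack.empty()) return false;  // Closer without opener.
          char open = stack.back();
          stack.pop_back();
          if ((c == ')' && open != '(') || (c == ']' && open != '[') ||
              (c == '}' && open != '{')) {
            return false;  // Mismatched pair.
          }
        }
      }
      return stack.empty();  // Every opener must be closed.
    }

    int main() {
      const char* good = "({[]})";
      const char* bad = "([)]";
      return HasBalancedBrackets(reinterpret_cast<const uint8_t*>(good),
                                 strlen(good)) &&
                     !HasBalancedBrackets(reinterpret_cast<const uint8_t*>(bad),
                                          strlen(bad))
                 ? 0
                 : 1;
    }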
diff --git a/deps/v8/test/fuzzer/wasm-async.cc b/deps/v8/test/fuzzer/wasm-async.cc
index b408d9c454..13b15a9d70 100644
--- a/deps/v8/test/fuzzer/wasm-async.cc
+++ b/deps/v8/test/fuzzer/wasm-async.cc
@@ -7,31 +7,26 @@
#include <stdint.h>
#include "include/v8.h"
+#include "src/api.h"
#include "src/factory.h"
#include "src/isolate-inl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects.h"
+#include "src/wasm/module-compiler.h"
#include "src/wasm/wasm-api.h"
#include "src/wasm/wasm-module.h"
#include "test/common/wasm/flag-utils.h"
#include "test/common/wasm/wasm-module-runner.h"
#include "test/fuzzer/fuzzer-support.h"
+#include "test/fuzzer/wasm-fuzzer-common.h"
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wheader-hygiene"
-#endif
-
-using namespace v8::internal;
-using namespace v8::internal::wasm;
-using namespace v8::internal::wasm::testing;
+namespace v8 {
+namespace internal {
+class WasmModuleObject;
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic pop
-#endif
+namespace wasm {
+namespace fuzzer {
#define ASSIGN(type, var, expr) \
v8::Local<type> var; \
@@ -44,12 +39,6 @@ using namespace v8::internal::wasm::testing;
} \
} while (false)
-namespace v8 {
-namespace internal {
-class WasmModuleObject;
-}
-}
-
namespace {
// We need this helper function because we cannot use
// Handle<WasmModuleObject>::cast here. To use this function we would have to
@@ -59,44 +48,33 @@ Handle<WasmModuleObject> ToWasmModuleObjectUnchecked(Handle<Object> that) {
}
}
-void InstantiateCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
+void InstantiateCallback(const FunctionCallbackInfo<Value>& args) {
DCHECK_GE(args.Length(), 1);
v8::Isolate* isolate = args.GetIsolate();
- v8::MicrotasksScope does_not_run_microtasks(
+ MicrotasksScope does_not_run_microtasks(
isolate, v8::MicrotasksScope::kDoNotRunMicrotasks);
v8::HandleScope scope(isolate);
- v8::Local<v8::Value> module = args[0];
+ Local<v8::Value> module = args[0];
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- ScheduledErrorThrower thrower(i_isolate, "WebAssembly Instantiation");
-
- i::Handle<i::WasmModuleObject> module_obj = ToWasmModuleObjectUnchecked(
- v8::Utils::OpenHandle(v8::Object::Cast(*module)));
- i::MaybeHandle<WasmInstanceObject> maybe_instance =
- i::wasm::SyncInstantiate(i_isolate, &thrower, module_obj,
- Handle<JSReceiver>::null(), // imports
- MaybeHandle<JSArrayBuffer>()); // memory
- Handle<WasmInstanceObject> instance;
- if (!maybe_instance.ToHandle(&instance)) {
- return;
- }
- RunWasmModuleForTesting(i_isolate, instance, 0, nullptr);
+ Handle<WasmModuleObject> module_obj =
+ ToWasmModuleObjectUnchecked(Utils::OpenHandle(v8::Object::Cast(*module)));
+ InterpretAndExecuteModule(i_isolate, module_obj);
}
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
- v8::internal::FlagScope<bool> turn_on_async_compile(
+ FlagScope<bool> turn_on_async_compile(
&v8::internal::FLAG_wasm_async_compilation, true);
- v8::internal::FlagScope<uint32_t> max_mem_flag_scope(
- &v8::internal::FLAG_wasm_max_mem_pages, 32);
- v8::internal::FlagScope<uint32_t> max_table_size_scope(
+ FlagScope<uint32_t> max_mem_flag_scope(&v8::internal::FLAG_wasm_max_mem_pages,
+ 32);
+ FlagScope<uint32_t> max_table_size_scope(
&v8::internal::FLAG_wasm_max_table_size, 100);
v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
v8::Isolate* isolate = support->GetIsolate();
- v8::internal::Isolate* i_isolate =
- reinterpret_cast<v8::internal::Isolate*>(isolate);
+ i::Isolate* i_isolate = reinterpret_cast<v8::internal::Isolate*>(isolate);
// Clear any pending exceptions from a prior run.
if (i_isolate->has_pending_exception()) {
@@ -105,30 +83,37 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
- HandleScope internal_scope(i_isolate);
+ i::HandleScope internal_scope(i_isolate);
v8::Context::Scope context_scope(support->GetContext());
- v8::TryCatch try_catch(isolate);
- v8::internal::wasm::testing::SetupIsolateForWasmModule(i_isolate);
+ TryCatch try_catch(isolate);
+ testing::SetupIsolateForWasmModule(i_isolate);
// Get the promise for async compilation.
- ASSIGN(v8::Promise::Resolver, resolver,
- v8::Promise::Resolver::New(support->GetContext()));
- v8::Local<v8::Promise> promise = resolver->GetPromise();
+ ASSIGN(Promise::Resolver, resolver,
+ Promise::Resolver::New(support->GetContext()));
+ Local<Promise> promise = resolver->GetPromise();
- AsyncCompile(i_isolate, v8::Utils::OpenHandle(*promise),
+ AsyncCompile(i_isolate, Utils::OpenHandle(*promise),
ModuleWireBytes(data, data + size));
- ASSIGN(v8::Function, instantiate_impl,
- v8::Function::New(support->GetContext(), &InstantiateCallback,
- v8::Undefined(isolate)));
+ ASSIGN(Function, instantiate_impl,
+ Function::New(support->GetContext(), &InstantiateCallback,
+ Undefined(isolate)));
- ASSIGN(v8::Promise, result,
+ ASSIGN(Promise, result,
promise->Then(support->GetContext(), instantiate_impl));
// Wait for the promise to resolve.
- while (result->State() == v8::Promise::kPending) {
- support->PumpMessageLoop(v8::platform::MessageLoopBehavior::kWaitForWork);
+ while (result->State() == Promise::kPending) {
+ support->PumpMessageLoop(platform::MessageLoopBehavior::kWaitForWork);
isolate->RunMicrotasks();
}
return 0;
}
+
+#undef ASSIGN
+
+} // namespace fuzzer
+} // namespace wasm
+} // namespace internal
+} // namespace v8
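The tail of this fuzzer is a wait loop: pumping the platform message loop delivers the background compile-finished task, and running microtasks then executes the promise reactions, including the chained InstantiateCallback. Extracted as a sketch below; FuzzerSupport comes from test/fuzzer/fuzzer-support.h, and the helper name is mine.

    #include "include/libplatform/libplatform.h"
    #include "include/v8.h"
    #include "test/fuzzer/fuzzer-support.h"

    // Block until the async compilation promise settles: waiting for
    // background work delivers the compile-finished task, and RunMicrotasks
    // then runs the promise reactions.
    void WaitForPromise(v8::Isolate* isolate, v8::Local<v8::Promise> promise,
                        v8_fuzzer::FuzzerSupport* support) {
      while (promise->State() == v8::Promise::kPending) {
        support->PumpMessageLoop(
            v8::platform::MessageLoopBehavior::kWaitForWork);
        isolate->RunMicrotasks();
      }
    }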
diff --git a/deps/v8/test/fuzzer/wasm-call.cc b/deps/v8/test/fuzzer/wasm-call.cc
index 24a0841a09..5c85502381 100644
--- a/deps/v8/test/fuzzer/wasm-call.cc
+++ b/deps/v8/test/fuzzer/wasm-call.cc
@@ -18,23 +18,13 @@
#include "test/fuzzer/fuzzer-support.h"
#include "test/fuzzer/wasm-fuzzer-common.h"
-#define MAX_NUM_FUNCTIONS 3
-#define MAX_NUM_PARAMS 3
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace fuzzer {
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wheader-hygiene"
-#endif
-
-using namespace v8::internal;
-using namespace v8::internal::wasm;
-using namespace v8::internal::wasm::fuzzer;
-
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic pop
-#endif
+static constexpr uint32_t kMaxNumFunctions = 3;
+static constexpr uint32_t kMaxNumParams = 3;
class WasmCallFuzzer : public WasmExecutionFuzzer {
template <typename V>
@@ -43,17 +33,16 @@ class WasmCallFuzzer : public WasmExecutionFuzzer {
// that a value of type V can be read without problems.
*ok &= (*size > sizeof(V));
if (!(*ok)) return 0;
- V result = v8::internal::ReadLittleEndianValue<V>(*data);
+ V result = ReadLittleEndianValue<V>(*data);
*data += sizeof(V);
*size -= sizeof(V);
return result;
}
- static void add_argument(
- v8::internal::Isolate* isolate, ValueType type,
- WasmValue* interpreter_args,
- v8::internal::Handle<v8::internal::Object>* compiler_args, int* argc,
- const uint8_t** data, size_t* size, bool* ok) {
+ static void add_argument(Isolate* isolate, ValueType type,
+ WasmValue* interpreter_args,
+ Handle<Object>* compiler_args, int* argc,
+ const uint8_t** data, size_t* size, bool* ok) {
if (!(*ok)) return;
switch (type) {
case kWasmF32: {
@@ -82,14 +71,14 @@ class WasmCallFuzzer : public WasmExecutionFuzzer {
(*argc)++;
}
- virtual bool GenerateModule(
+ bool GenerateModule(
Isolate* isolate, Zone* zone, const uint8_t* data, size_t size,
ZoneBuffer& buffer, int32_t& num_args,
std::unique_ptr<WasmValue[]>& interpreter_args,
std::unique_ptr<Handle<Object>[]>& compiler_args) override {
bool ok = true;
uint8_t num_functions =
- (read_value<uint8_t>(&data, &size, &ok) % MAX_NUM_FUNCTIONS) + 1;
+ (read_value<uint8_t>(&data, &size, &ok) % kMaxNumFunctions) + 1;
ValueType types[] = {kWasmF32, kWasmF64, kWasmI32, kWasmI64};
@@ -99,7 +88,7 @@ class WasmCallFuzzer : public WasmExecutionFuzzer {
WasmModuleBuilder builder(zone);
for (int fun = 0; fun < num_functions; fun++) {
size_t num_params = static_cast<size_t>(
- (read_value<uint8_t>(&data, &size, &ok) % MAX_NUM_PARAMS) + 1);
+ (read_value<uint8_t>(&data, &size, &ok) % kMaxNumParams) + 1);
FunctionSig::Builder sig_builder(zone, 1, num_params);
sig_builder.AddReturn(kWasmI32);
for (size_t param = 0; param < num_params; param++) {
@@ -112,8 +101,7 @@ class WasmCallFuzzer : public WasmExecutionFuzzer {
compiler_args.get(), &num_args, &data, &size, &ok);
}
}
- v8::internal::wasm::WasmFunctionBuilder* f =
- builder.AddFunction(sig_builder.Build());
+ WasmFunctionBuilder* f = builder.AddFunction(sig_builder.Build());
uint32_t code_size = static_cast<uint32_t>(size / num_functions);
f->EmitCode(data, code_size);
uint8_t end_opcode = kExprEnd;
@@ -121,7 +109,7 @@ class WasmCallFuzzer : public WasmExecutionFuzzer {
data += code_size;
size -= code_size;
if (fun == 0) {
- builder.AddExport(v8::internal::CStrVector("main"), f);
+ builder.AddExport(CStrVector("main"), f);
}
}
@@ -139,3 +127,8 @@ class WasmCallFuzzer : public WasmExecutionFuzzer {
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
return WasmCallFuzzer().FuzzWasmModule(data, size);
}
+
+} // namespace fuzzer
+} // namespace wasm
+} // namespace internal
+} // namespace v8
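read_value above consumes sizeof(V) bytes of fuzzer input as a little-endian value, tracking exhaustion through an ok flag instead of exceptions. A self-contained sketch of the same pattern, with memcpy standing in for ReadLittleEndianValue on a little-endian host:

    #include <cstdint>
    #include <cstring>

    template <typename V>
    V ReadValue(const uint8_t** data, size_t* size, bool* ok) {
      // Require strictly more bytes than sizeof(V), mirroring the check above.
      *ok &= (*size > sizeof(V));
      if (!*ok) return V{};
      V result;
      std::memcpy(&result, *data, sizeof(V));  // Little-endian host assumed.
      *data += sizeof(V);
      *size -= sizeof(V);
      return result;
    }

    int main() {
      const uint8_t bytes[] = {0x2a, 0x00, 0x00, 0x00, 0x00};
      const uint8_t* p = bytes;
      size_t size = sizeof(bytes);
      bool ok = true;
      return ReadValue<uint32_t>(&p, &size, &ok) == 42 && ok ? 0 : 1;
    }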
diff --git a/deps/v8/test/fuzzer/wasm-code.cc b/deps/v8/test/fuzzer/wasm-code.cc
index d74e26c944..54a3da5768 100644
--- a/deps/v8/test/fuzzer/wasm-code.cc
+++ b/deps/v8/test/fuzzer/wasm-code.cc
@@ -13,23 +13,13 @@
#include "test/common/wasm/test-signatures.h"
#include "test/fuzzer/wasm-fuzzer-common.h"
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wheader-hygiene"
-#endif
-
-using namespace v8::internal;
-using namespace v8::internal::wasm;
-using namespace v8::internal::wasm::fuzzer;
-
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic pop
-#endif
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace fuzzer {
class WasmCodeFuzzer : public WasmExecutionFuzzer {
- virtual bool GenerateModule(
+ bool GenerateModule(
Isolate* isolate, Zone* zone, const uint8_t* data, size_t size,
ZoneBuffer& buffer, int32_t& num_args,
std::unique_ptr<WasmValue[]>& interpreter_args,
@@ -58,3 +48,8 @@ class WasmCodeFuzzer : public WasmExecutionFuzzer {
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
return WasmCodeFuzzer().FuzzWasmModule(data, size);
}
+
+} // namespace fuzzer
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/fuzzer/wasm-compile.cc b/deps/v8/test/fuzzer/wasm-compile.cc
index 8d73cf852e..5b59a63e97 100644
--- a/deps/v8/test/fuzzer/wasm-compile.cc
+++ b/deps/v8/test/fuzzer/wasm-compile.cc
@@ -23,20 +23,10 @@
typedef uint8_t byte;
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wheader-hygiene"
-#endif
-
-using namespace v8::internal;
-using namespace v8::internal::wasm;
-using namespace v8::internal::wasm::fuzzer;
-
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic pop
-#endif
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace fuzzer {
namespace {
@@ -123,7 +113,7 @@ class WasmGenerator {
}
public:
- WasmGenerator(v8::internal::wasm::WasmFunctionBuilder* fn) : builder_(fn) {}
+ explicit WasmGenerator(WasmFunctionBuilder* fn) : builder_(fn) {}
void Generate(ValueType type, DataRange data);
@@ -138,7 +128,7 @@ class WasmGenerator {
}
private:
- v8::internal::wasm::WasmFunctionBuilder* builder_;
+ WasmFunctionBuilder* builder_;
std::vector<ValueType> blocks_;
};
@@ -307,10 +297,10 @@ void WasmGenerator::Generate(ValueType type, DataRange data) {
UNREACHABLE();
}
}
-}
+} // namespace
class WasmCompileFuzzer : public WasmExecutionFuzzer {
- virtual bool GenerateModule(
+ bool GenerateModule(
Isolate* isolate, Zone* zone, const uint8_t* data, size_t size,
ZoneBuffer& buffer, int32_t& num_args,
std::unique_ptr<WasmValue[]>& interpreter_args,
@@ -319,15 +309,14 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
WasmModuleBuilder builder(zone);
- v8::internal::wasm::WasmFunctionBuilder* f =
- builder.AddFunction(sigs.i_iii());
+ WasmFunctionBuilder* f = builder.AddFunction(sigs.i_iii());
WasmGenerator gen(f);
gen.Generate<kWasmI32>(DataRange(data, static_cast<uint32_t>(size)));
uint8_t end_opcode = kExprEnd;
f->EmitCode(&end_opcode, 1);
- builder.AddExport(v8::internal::CStrVector("main"), f);
+ builder.AddExport(CStrVector("main"), f);
builder.SetMaxMemorySize(32);
builder.WriteTo(buffer);
@@ -346,3 +335,8 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
return WasmCompileFuzzer().FuzzWasmModule(data, size);
}
+
+} // namespace fuzzer
+} // namespace wasm
+} // namespace internal
+} // namespace v8
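WasmCompileFuzzer drives a WasmGenerator that consumes a DataRange view of the fuzzer input to choose each production of a generated function body. The toy sketch below shows that generation style on a throwaway expression grammar; DataRange's real interface and the emitted wasm opcodes differ upstream.

    #include <cstdint>
    #include <cstdio>
    #include <string>

    class DataRange {
     public:
      DataRange(const uint8_t* data, uint32_t size)
          : data_(data), size_(size) {}
      uint8_t NextByte() {
        if (size_ == 0) return 0;  // Out of entropy: fall back to a leaf.
        --size_;
        return *data_++;
      }

     private:
      const uint8_t* data_;
      uint32_t size_;
    };

    // Generates a toy i32 expression; recursion is bounded because every
    // recursive production consumes a byte and exhaustion forces a leaf.
    std::string GenerateExpr(DataRange* range) {
      switch (range->NextByte() % 3) {
        case 0:
          return "(i32.const 1)";
        case 1:
          return "(i32.add " + GenerateExpr(range) + " " +
                 GenerateExpr(range) + ")";
        default:
          return "(i32.eqz " + GenerateExpr(range) + ")";
      }
    }

    int main() {
      const uint8_t input[] = {1, 0, 2, 0};
      DataRange range(input, sizeof(input));
      std::printf("%s\n", GenerateExpr(&range).c_str());
    }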
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
index 8d83653a4d..f02d2b957e 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
@@ -7,6 +7,8 @@
#include "include/v8.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
+#include "src/wasm/module-compiler.h"
+#include "src/wasm/wasm-api.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
#include "src/zone/accounting-allocator.h"
@@ -14,32 +16,20 @@
#include "test/common/wasm/wasm-module-runner.h"
#include "test/fuzzer/fuzzer-support.h"
-#define WASM_CODE_FUZZER_HASH_SEED 83
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace fuzzer {
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wheader-hygiene"
-#endif
+static constexpr uint32_t kWasmCodeFuzzerHashSeed = 83;
-using namespace v8::internal;
-using namespace v8::internal::wasm;
-using namespace v8::internal::wasm::fuzzer;
+static constexpr const char* kNameString = "name";
+static constexpr size_t kNameStringLength = 4;
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic pop
-#endif
-
-static const char* kNameString = "name";
-static const size_t kNameStringLength = 4;
-
-int v8::internal::wasm::fuzzer::FuzzWasmSection(SectionCode section,
- const uint8_t* data,
- size_t size) {
+int FuzzWasmSection(SectionCode section, const uint8_t* data, size_t size) {
v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
v8::Isolate* isolate = support->GetIsolate();
- v8::internal::Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+ i::Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
// Clear any pending exceptions from a prior run.
if (i_isolate->has_pending_exception()) {
@@ -78,8 +68,31 @@ int v8::internal::wasm::fuzzer::FuzzWasmSection(SectionCode section,
return 0;
}
-int WasmExecutionFuzzer::FuzzWasmModule(
+void InterpretAndExecuteModule(i::Isolate* isolate,
+ Handle<WasmModuleObject> module_object) {
+ ScheduledErrorThrower thrower(isolate, "WebAssembly Instantiation");
+ // Try to instantiate and interpret the module_object.
+ MaybeHandle<WasmInstanceObject> maybe_instance =
+ SyncInstantiate(isolate, &thrower, module_object,
+ Handle<JSReceiver>::null(), // imports
+ MaybeHandle<JSArrayBuffer>()); // memory
+ Handle<WasmInstanceObject> instance;
+ if (!maybe_instance.ToHandle(&instance)) return;
+ if (!testing::InterpretWasmModuleForTesting(isolate, instance, "main", 0,
+ nullptr)) {
+ return;
+ }
+
+ // Instantiate and execute the module_object.
+ maybe_instance = SyncInstantiate(isolate, &thrower, module_object,
+ Handle<JSReceiver>::null(), // imports
+ MaybeHandle<JSArrayBuffer>()); // memory
+ if (!maybe_instance.ToHandle(&instance)) return;
+ testing::RunWasmModuleForTesting(isolate, instance, 0, nullptr);
+}
+
+int WasmExecutionFuzzer::FuzzWasmModule(
const uint8_t* data, size_t size) {
// Save the flag so that we can change it and restore it later.
bool generate_test = FLAG_wasm_code_fuzzer_gen_test;
@@ -104,7 +117,7 @@ int WasmExecutionFuzzer::FuzzWasmModule(
}
v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
v8::Isolate* isolate = support->GetIsolate();
- Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+ i::Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
// Clear any pending exceptions from a prior run.
if (i_isolate->has_pending_exception()) {
@@ -129,7 +142,7 @@ int WasmExecutionFuzzer::FuzzWasmModule(
return 0;
}
- v8::internal::wasm::testing::SetupIsolateForWasmModule(i_isolate);
+ testing::SetupIsolateForWasmModule(i_isolate);
ErrorThrower interpreter_thrower(i_isolate, "Interpreter");
ModuleWireBytes wire_bytes(buffer.begin(), buffer.end());
@@ -155,11 +168,11 @@ int WasmExecutionFuzzer::FuzzWasmModule(
os << "})();" << std::endl;
}
- bool validates = wasm::SyncValidate(i_isolate, wire_bytes);
+ bool validates = SyncValidate(i_isolate, wire_bytes);
if (compiles != validates) {
uint32_t hash = StringHasher::HashSequentialString(
- data, static_cast<int>(size), WASM_CODE_FUZZER_HASH_SEED);
+ data, static_cast<int>(size), kWasmCodeFuzzerHashSeed);
V8_Fatal(__FILE__, __LINE__,
"compiles != validates (%d vs %d); WasmCodeFuzzerHash=%x",
compiles, validates, hash);
@@ -215,8 +228,13 @@ int WasmExecutionFuzzer::FuzzWasmModule(
if (result_interpreted != result_compiled) {
V8_Fatal(__FILE__, __LINE__, "WasmCodeFuzzerHash=%x",
StringHasher::HashSequentialString(data, static_cast<int>(size),
- WASM_CODE_FUZZER_HASH_SEED));
+ kWasmCodeFuzzerHashSeed));
}
}
return 0;
}
+
+} // namespace fuzzer
+} // namespace wasm
+} // namespace internal
+} // namespace v8
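InterpretAndExecuteModule is differential testing in miniature: run the module in the interpreter first, and only if it finishes within the step budget instantiate again and run compiled code. The sketch below shows the comparison skeleton with stand-in callables; upstream compares the interpreter and compiled results in FuzzWasmModule and calls V8_Fatal with a hash of the input on divergence.

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <functional>

    struct RunResult {
      bool finished;  // False if the step budget ran out (interpreter only).
      int32_t value;
    };

    void DifferentialRun(const std::function<RunResult()>& interpret,
                         const std::function<RunResult()>& execute) {
      RunResult interpreted = interpret();
      if (!interpreted.finished) return;  // No reference result to compare.
      RunResult compiled = execute();
      if (interpreted.value != compiled.value) {
        std::fprintf(stderr, "interpreter %d != compiled %d\n",
                     interpreted.value, compiled.value);
        std::abort();  // Surface the divergence to the fuzzer as a crash.
      }
    }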
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.h b/deps/v8/test/fuzzer/wasm-fuzzer-common.h
index 6aa36e1c8c..8830d716b8 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.h
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.h
@@ -17,8 +17,14 @@ namespace internal {
namespace wasm {
namespace fuzzer {
-int FuzzWasmSection(v8::internal::wasm::SectionCode section,
- const uint8_t* data, size_t size);
+int FuzzWasmSection(SectionCode section, const uint8_t* data, size_t size);
+
+// First instantiates and interprets the "main" function within module_object
+// if possible. If the interpretation finishes within kMaxNumSteps steps,
+// module_object is instantiated again and the compiled "main" function is
+// executed.
+void InterpretAndExecuteModule(Isolate* isolate,
+ Handle<WasmModuleObject> module_object);
class WasmExecutionFuzzer {
public:
diff --git a/deps/v8/test/fuzzer/wasm.cc b/deps/v8/test/fuzzer/wasm.cc
index 547dd44c1d..567e68b40a 100644
--- a/deps/v8/test/fuzzer/wasm.cc
+++ b/deps/v8/test/fuzzer/wasm.cc
@@ -12,20 +12,22 @@
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects.h"
+#include "src/wasm/module-compiler.h"
#include "src/wasm/wasm-module.h"
#include "test/common/wasm/flag-utils.h"
#include "test/common/wasm/wasm-module-runner.h"
#include "test/fuzzer/fuzzer-support.h"
+#include "test/fuzzer/wasm-fuzzer-common.h"
+
+namespace i = v8::internal;
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
- v8::internal::FlagScope<uint32_t> max_mem_flag_scope(
- &v8::internal::FLAG_wasm_max_mem_pages, 32);
- v8::internal::FlagScope<uint32_t> max_table_size_scope(
- &v8::internal::FLAG_wasm_max_table_size, 100);
+ i::FlagScope<uint32_t> max_mem_flag_scope(&i::FLAG_wasm_max_mem_pages, 32);
+ i::FlagScope<uint32_t> max_table_size_scope(&i::FLAG_wasm_max_table_size,
+ 100);
v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
v8::Isolate* isolate = support->GetIsolate();
- v8::internal::Isolate* i_isolate =
- reinterpret_cast<v8::internal::Isolate*>(isolate);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
// Clear any pending exceptions from a prior run.
if (i_isolate->has_pending_exception()) {
@@ -36,8 +38,15 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
v8::HandleScope handle_scope(isolate);
v8::Context::Scope context_scope(support->GetContext());
v8::TryCatch try_catch(isolate);
- v8::internal::wasm::testing::SetupIsolateForWasmModule(i_isolate);
- v8::internal::wasm::testing::CompileAndRunWasmModule(i_isolate, data,
- data + size);
+ i::wasm::testing::SetupIsolateForWasmModule(i_isolate);
+
+ i::HandleScope scope(i_isolate);
+ i::wasm::ErrorThrower thrower(i_isolate, "wasm fuzzer");
+ i::MaybeHandle<i::WasmModuleObject> maybe_object = SyncCompile(
+ i_isolate, &thrower, i::wasm::ModuleWireBytes(data, data + size));
+ i::Handle<i::WasmModuleObject> module_object;
+ if (maybe_object.ToHandle(&module_object)) {
+ i::wasm::fuzzer::InterpretAndExecuteModule(i_isolate, module_object);
+ }
return 0;
}
diff --git a/deps/v8/test/fuzzer/wasm_async/regression-761784.wasm b/deps/v8/test/fuzzer/wasm_async/regression-761784.wasm
new file mode 100644
index 0000000000..29cde166e1
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm_async/regression-761784.wasm
Binary files differ
diff --git a/deps/v8/test/inspector/cpu-profiler/coverage.js b/deps/v8/test/inspector/cpu-profiler/coverage.js
index 0fde457688..dbad54b6d6 100644
--- a/deps/v8/test/inspector/cpu-profiler/coverage.js
+++ b/deps/v8/test/inspector/cpu-profiler/coverage.js
@@ -226,7 +226,7 @@ InspectorTest.runTestSuite([
{
// Enabling the debugger holds onto script objects even though its
// functions can be garbage collected. We would get empty ScriptCoverage
- // entires unless we remove them.
+ // entries unless we remove them.
Protocol.Debugger.enable()
.then(Protocol.Runtime.enable)
.then(() => Protocol.Runtime.compileScript({ expression: source, sourceURL: arguments.callee.name, persistScript: true }))
diff --git a/deps/v8/test/inspector/debugger/breakpoints-expected.txt b/deps/v8/test/inspector/debugger/breakpoints-expected.txt
new file mode 100644
index 0000000000..9d0bae5666
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/breakpoints-expected.txt
@@ -0,0 +1,66 @@
+Checks breakpoints.
+
+Running test: testRemoveBreakpoint
+Debugger.removeBreakpoint when agent is disabled:
+{
+ error : {
+ code : -32000
+ message : Debugger agent is not enabled
+ }
+ id : <messageId>
+}
+Remove breakpoint with invalid breakpoint id:
+{
+ id : <messageId>
+ result : {
+ }
+}
+{
+ id : <messageId>
+ result : {
+ }
+}
+
+Running test: testSetBreakpointByUrl
+Adding conditional (arg === 1) breakpoint
+evaluating foo1(0):
+ not paused
+evaluating foo1(1):
+ hit expected breakpoint
+
+Evaluating another script with the same url
+evaluating foo2(0):
+ not paused
+evaluating foo2(1):
+ hit expected breakpoint
+
+Removing breakpoint
+evaluating foo1(1):
+ not paused
+evaluating foo2(1):
+ not paused
+
+Adding breakpoint back
+evaluating foo1(0):
+ not paused
+evaluating foo1(1):
+ hit expected breakpoint
+
+Disabling debugger agent
+evaluating foo1(1):
+ not paused
+evaluating foo2(1):
+ not paused
+
+Enabling debugger agent
+evaluating foo1(1):
+ not paused
+evaluating foo2(1):
+ not paused
+
+Running test: testSetBreakpointInScriptsWithDifferentOffsets
+Adding breakpoint
+evaluating foo1(0):
+ hit expected breakpoint
+evaluating foo2(0):
+ not paused
diff --git a/deps/v8/test/inspector/debugger/breakpoints.js b/deps/v8/test/inspector/debugger/breakpoints.js
new file mode 100644
index 0000000000..ce9ab47665
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/breakpoints.js
@@ -0,0 +1,117 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks breakpoints.');
+
+session.setupScriptMap();
+InspectorTest.runAsyncTestSuite([
+ async function testRemoveBreakpoint() {
+ InspectorTest.log('Debugger.removeBreakpoint when agent is disabled:');
+ InspectorTest.logMessage(await Protocol.Debugger.removeBreakpoint({
+ breakpointId: '1:test.js:0:0'
+ }));
+ Protocol.Debugger.enable();
+    InspectorTest.log('Remove breakpoint with invalid breakpoint id:');
+ InspectorTest.logMessage(await Protocol.Debugger.removeBreakpoint({
+ breakpointId: ''
+ }));
+ InspectorTest.logMessage(await Protocol.Debugger.removeBreakpoint({
+ breakpointId: ':::'
+ }));
+ await Protocol.Debugger.disable();
+ },
+
+ async function testSetBreakpointByUrl() {
+ await Protocol.Debugger.enable();
+ Protocol.Runtime.evaluate({expression: `
+function foo1(arg) {
+ return arg;
+}
+//# sourceURL=testSetBreakpointByUrl.js`});
+ InspectorTest.log('Adding conditional (arg === 1) breakpoint');
+ let {result:{breakpointId}} = await Protocol.Debugger.setBreakpointByUrl({
+ lineNumber: 2,
+ url: 'testSetBreakpointByUrl.js',
+ columnNumber: 2,
+ condition: 'arg === 1'
+ });
+ await evaluate('foo1(0)');
+ await evaluate('foo1(1)', breakpointId);
+
+    InspectorTest.log('\nEvaluating another script with the same url');
+ Protocol.Runtime.evaluate({expression: `
+function foo2(arg) {
+ return arg;
+}
+//# sourceURL=testSetBreakpointByUrl.js`});
+ await evaluate('foo2(0)');
+ await evaluate('foo2(1)', breakpointId);
+
+ InspectorTest.log('\nRemoving breakpoint');
+ await Protocol.Debugger.removeBreakpoint({breakpointId});
+ await evaluate('foo1(1)');
+ await evaluate('foo2(1)');
+
+ InspectorTest.log('\nAdding breakpoint back');
+ ({result:{breakpointId}} = await Protocol.Debugger.setBreakpointByUrl({
+ lineNumber: 2,
+ url: 'testSetBreakpointByUrl.js',
+ columnNumber: 2,
+ condition: 'arg === 1'
+ }));
+ await evaluate('foo1(0)');
+ await evaluate('foo1(1)', breakpointId);
+
+ InspectorTest.log('\nDisabling debugger agent');
+ await Protocol.Debugger.disable();
+ await evaluate('foo1(1)');
+ await evaluate('foo2(1)');
+
+ InspectorTest.log('\nEnabling debugger agent');
+ await Protocol.Debugger.enable();
+ await evaluate('foo1(1)');
+ await evaluate('foo2(1)');
+ },
+
+ async function testSetBreakpointInScriptsWithDifferentOffsets() {
+ await Protocol.Debugger.enable();
+ InspectorTest.log('Adding breakpoint');
+ let {result:{breakpointId}} = await Protocol.Debugger.setBreakpointByUrl({
+ lineNumber: 2,
+ url: 'test2.js',
+ columnNumber: 2,
+ });
+ contextGroup.addScript(`
+function foo1(arg) {
+ return arg;
+}
+//# sourceURL=test2.js`);
+ contextGroup.addScript(`
+function foo2(arg) {
+ return arg;
+}
+//# sourceURL=test2.js`, 5);
+    await evaluate('foo1(0)', breakpointId);
+    await evaluate('foo2(0)');
+  }
+]);
+
+async function evaluate(expression, expectedBreakpoint) {
+ InspectorTest.log('evaluating ' + expression + ':');
+ let paused = Protocol.Debugger.oncePaused();
+ let evaluate = Protocol.Runtime.evaluate({expression});
+ let result = await Promise.race([paused, evaluate]);
+ if (result.method === 'Debugger.paused') {
+ if (result.params.hitBreakpoints) {
+ if (result.params.hitBreakpoints.find(b => b === expectedBreakpoint)) {
+        InspectorTest.log('  hit expected breakpoint');
+ } else {
+ InspectorTest.log(' hit unexpected breakpoint');
+ }
+ }
+ await Protocol.Debugger.resume();
+ } else {
+ InspectorTest.log(' not paused');
+ }
+}
diff --git a/deps/v8/test/inspector/debugger/call-frame-url-expected.txt b/deps/v8/test/inspector/debugger/call-frame-url-expected.txt
new file mode 100644
index 0000000000..b27b40dd3d
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/call-frame-url-expected.txt
@@ -0,0 +1,15 @@
+Tests url in Debugger.CallFrame.
+[
+ [0] : {
+ url :
+ }
+ [1] : {
+ url : source-url.js
+ }
+ [2] : {
+ url : test.js
+ }
+ [3] : {
+ url : expr.js
+ }
+]
diff --git a/deps/v8/test/inspector/debugger/call-frame-url.js b/deps/v8/test/inspector/debugger/call-frame-url.js
new file mode 100644
index 0000000000..af8e908ca5
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/call-frame-url.js
@@ -0,0 +1,20 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} =
+ InspectorTest.start('Tests url in Debugger.CallFrame.');
+
+contextGroup.addScript(`
+eval('function foo1() { debugger; }');
+eval('function foo2() { foo1() } //# sourceURL=source-url.js');
+function foo3() { foo2(); }
+`, 0, 0, 'test.js');
+
+(async function test() {
+ Protocol.Debugger.enable();
+ Protocol.Runtime.evaluate({expression: 'foo3()//# sourceURL=expr.js'});
+ let {params:{callFrames}} = await Protocol.Debugger.oncePaused();
+ InspectorTest.logMessage(callFrames.map(frame => ({url: frame.url})));
+ InspectorTest.completeTest();
+})();
diff --git a/deps/v8/test/inspector/debugger/caught-uncaught-exceptions-expected.txt b/deps/v8/test/inspector/debugger/caught-uncaught-exceptions-expected.txt
index 039b8bd912..9c40b1c556 100644
--- a/deps/v8/test/inspector/debugger/caught-uncaught-exceptions-expected.txt
+++ b/deps/v8/test/inspector/debugger/caught-uncaught-exceptions-expected.txt
@@ -8,4 +8,6 @@ uncaught: false
paused in promiseUncaught
uncaught: true
paused in throwInMapConstructor
+uncaught: true
+paused in throwInAsyncIterator
uncaught: true
\ No newline at end of file
diff --git a/deps/v8/test/inspector/debugger/caught-uncaught-exceptions.js b/deps/v8/test/inspector/debugger/caught-uncaught-exceptions.js
index 8789943a89..b7c4bd4ab7 100644
--- a/deps/v8/test/inspector/debugger/caught-uncaught-exceptions.js
+++ b/deps/v8/test/inspector/debugger/caught-uncaught-exceptions.js
@@ -16,6 +16,10 @@ contextGroup.addScript(
new Promise(function promiseUncaught() { throw new Error(); });
}
function throwInMapConstructor() { new Map('a'); }
+ function throwInAsyncIterator() {
+ let it = (async function*() {})();
+ it.next.call({});
+ }
function schedule(f) { setTimeout(f, 0); }
`);
@@ -37,4 +41,6 @@ Protocol.Runtime.evaluate({ "expression": "schedule(throwCaught);" })
{ "expression": "schedule(throwInPromiseUncaught);"}))
.then(() => Protocol.Runtime.evaluate(
{ "expression": "schedule(throwInMapConstructor);"}))
+ .then(() => Protocol.Runtime.evaluate(
+ { "expression": "schedule(throwInAsyncIterator);"}))
.then(() => InspectorTest.completeTest());
diff --git a/deps/v8/test/inspector/debugger/provisional-breakpoint-for-anonymous-script-expected.txt b/deps/v8/test/inspector/debugger/provisional-breakpoint-for-anonymous-script-expected.txt
new file mode 100644
index 0000000000..43fdca6d05
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/provisional-breakpoint-for-anonymous-script-expected.txt
@@ -0,0 +1,11 @@
+Checks provisional breakpoints by hash in anonymous scripts
+
+Running test: testNextScriptParsed
+function foo(){#}
+
+
+Running test: testPreviousScriptParsed
+var list = list ? list.concat(foo) : [foo]; function foo(){#}
+
+var list = list ? list.concat(foo) : [foo]; function foo(){#}
+
diff --git a/deps/v8/test/inspector/debugger/provisional-breakpoint-for-anonymous-script.js b/deps/v8/test/inspector/debugger/provisional-breakpoint-for-anonymous-script.js
new file mode 100644
index 0000000000..7d97753d59
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/provisional-breakpoint-for-anonymous-script.js
@@ -0,0 +1,69 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} =
+ InspectorTest.start('Checks provisional breakpoints by hash in anonymous scripts');
+session.setupScriptMap();
+
+InspectorTest.runAsyncTestSuite([
+ async function testNextScriptParsed() {
+ await Protocol.Debugger.enable();
+ // set breakpoint in anonymous script..
+ Protocol.Runtime.evaluate({expression: 'function foo(){}'});
+ let {params:{hash}} = await Protocol.Debugger.onceScriptParsed();
+ let {result:{breakpointId}} = await Protocol.Debugger.setBreakpointByUrl({
+ scriptHash: hash,
+ lineNumber: 0,
+ columnNumber: 15
+ });
+ // evaluate the same anonymous script again..
+ Protocol.Runtime.evaluate({expression: 'function foo(){}'});
+ // run function and check Debugger.paused event..
+ let evaluation = Protocol.Runtime.evaluate({expression: 'foo()'});
+ let result = await Promise.race([evaluation, Protocol.Debugger.oncePaused()]);
+ if (result.method !== 'Debugger.paused') {
+ InspectorTest.log('FAIL: breakpoint was ignored');
+ } else {
+ await session.logSourceLocation(result.params.callFrames[0].location);
+ }
+ // remove breakpoint and run again..
+ await Protocol.Debugger.removeBreakpoint({breakpointId});
+ evaluation = Protocol.Runtime.evaluate({expression: 'foo()'});
+ result = await Promise.race([evaluation, Protocol.Debugger.oncePaused()]);
+ if (result.method === 'Debugger.paused') {
+ InspectorTest.log('FAIL: breakpoint was not removed');
+ }
+ await Protocol.Debugger.disable();
+ },
+ async function testPreviousScriptParsed() {
+ await Protocol.Debugger.enable();
+ // run script and store function to global list..
+ await Protocol.Runtime.evaluate({expression: 'var list = list ? list.concat(foo) : [foo]; function foo(){}'});
+ // run same script again..
+ Protocol.Runtime.evaluate({expression: 'var list = list ? list.concat(foo) : [foo]; function foo(){}'});
+ let {params:{hash}} = await Protocol.Debugger.onceScriptParsed();
+ // set breakpoint by hash of latest script..
+ let {result:{breakpointId}} = await Protocol.Debugger.setBreakpointByUrl({
+ scriptHash: hash,
+ lineNumber: 0,
+ columnNumber: 49
+ });
+ // call each function in global list and wait for Debugger.paused events..
+ let evaluation = Protocol.Runtime.evaluate({expression: 'list.forEach(x => x())'});
+ let result = await Promise.race([evaluation, Protocol.Debugger.oncePaused()]);
+ while (result.method === 'Debugger.paused') {
+ await session.logSourceLocation(result.params.callFrames[0].location);
+ Protocol.Debugger.resume();
+ result = await Promise.race([evaluation, Protocol.Debugger.oncePaused()]);
+ }
+ // remove breakpoint and call functions again..
+ await Protocol.Debugger.removeBreakpoint({breakpointId});
+ evaluation = Protocol.Runtime.evaluate({expression: 'foo()'});
+ result = await Promise.race([evaluation, Protocol.Debugger.oncePaused()]);
+ if (result.method === 'Debugger.paused') {
+ InspectorTest.log('FAIL: breakpoint was not removed');
+ }
+ await Protocol.Debugger.disable();
+ }
+]);
diff --git a/deps/v8/test/inspector/debugger/restore-breakpoint.js b/deps/v8/test/inspector/debugger/restore-breakpoint.js
index e0d2b84766..020143f6d1 100644
--- a/deps/v8/test/inspector/debugger/restore-breakpoint.js
+++ b/deps/v8/test/inspector/debugger/restore-breakpoint.js
@@ -5,6 +5,7 @@
let {session, contextGroup, Protocol} = InspectorTest.start('Checks that debugger agent uses source content to restore breakpoints.');
Protocol.Debugger.enable();
+var finishedTests = 0;
InspectorTest.runTestSuite([
function testSameSource(next) {
var source = 'function foo() {\nboo();\n}';
@@ -43,27 +44,28 @@ InspectorTest.runTestSuite([
}
]);
-var finishedTests = 0;
async function test(source, newSource, location, next) {
- var firstBreakpoint = true;
- Protocol.Debugger.onBreakpointResolved(message => {
- var lineNumber = message.params.location.lineNumber;
- var columnNumber = message.params.location.columnNumber;
- var currentSource = firstBreakpoint ? source : newSource;
- var lines = currentSource.split('\n');
+ function dumpSourceWithBreakpoint(source, location) {
+ var lineNumber = location.lineNumber;
+ var columnNumber = location.columnNumber;
+ var lines = source.split('\n');
lines = lines.map(line => line.length > 80 ? line.substring(0, 77) + '...' : line);
lines[lineNumber] = lines[lineNumber].slice(0, columnNumber) + '#' + lines[lineNumber].slice(columnNumber);
InspectorTest.log(lines.join('\n'));
- firstBreakpoint = false;
- });
+ }
+
+ Protocol.Debugger.onBreakpointResolved(message => {
+ dumpSourceWithBreakpoint(newSource, message.params.location);
+ })
var sourceURL = `test${++finishedTests}.js`;
- await Protocol.Debugger.setBreakpointByUrl({
+ await Protocol.Runtime.evaluate({ expression: `${source}\n//# sourceURL=${sourceURL}` });
+ let {result:{locations}} = await Protocol.Debugger.setBreakpointByUrl({
url: sourceURL,
lineNumber: location.lineNumber,
columnNumber: location.columnNumber
});
- await Protocol.Runtime.evaluate({ expression: `${source}\n//# sourceURL=${sourceURL}` });
+ dumpSourceWithBreakpoint(source, locations[0]);
await Protocol.Runtime.evaluate({ expression: `${newSource}\n//# sourceURL=${sourceURL}` });
next();
}
diff --git a/deps/v8/test/inspector/debugger/suspended-generator-scopes-expected.txt b/deps/v8/test/inspector/debugger/suspended-generator-scopes-expected.txt
index a59bc3281c..57dc2aa6ee 100644
--- a/deps/v8/test/inspector/debugger/suspended-generator-scopes-expected.txt
+++ b/deps/v8/test/inspector/debugger/suspended-generator-scopes-expected.txt
@@ -1,65 +1,55 @@
Tests that suspended generators produce scopes
Running test: testScopesPaused
-{
- id : <messageId>
- result : {
- result : [
- [0] : {
- configurable : true
- enumerable : true
- isOwn : true
- name : b
- value : {
- description : 42
- type : number
- value : 42
- }
- writable : true
- }
- [1] : {
- configurable : true
- enumerable : true
- isOwn : true
- name : a
- value : {
- description : 420
- type : number
- value : 420
- }
- writable : true
- }
- ]
+[
+ [0] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : b
+ value : {
+ description : 42
+ type : number
+ value : 42
+ }
+ writable : true
}
-}
+ [1] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : a
+ value : {
+ description : 420
+ type : number
+ value : 420
+ }
+ writable : true
+ }
+]
Running test: testScopesNonPaused
-{
- id : <messageId>
- result : {
- result : [
- [0] : {
- configurable : true
- enumerable : true
- isOwn : true
- name : b
- value : {
- type : undefined
- }
- writable : true
- }
- [1] : {
- configurable : true
- enumerable : true
- isOwn : true
- name : a
- value : {
- description : 430
- type : number
- value : 430
- }
- writable : true
- }
- ]
+[
+ [0] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : b
+ value : {
+ type : undefined
+ }
+ writable : true
+ }
+ [1] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : a
+ value : {
+ description : 430
+ type : number
+ value : 430
+ }
+ writable : true
}
-}
+]
diff --git a/deps/v8/test/inspector/debugger/suspended-generator-scopes.js b/deps/v8/test/inspector/debugger/suspended-generator-scopes.js
index f7d389072a..55a1fd57ca 100644
--- a/deps/v8/test/inspector/debugger/suspended-generator-scopes.js
+++ b/deps/v8/test/inspector/debugger/suspended-generator-scopes.js
@@ -4,78 +4,66 @@
let {session, contextGroup, Protocol} = InspectorTest.start('Tests that suspended generators produce scopes');
-contextGroup.addScript(
-`function *gen(a) {
+contextGroup.addScript(`
+function *gen(a) {
var b = 42;
yield a;
return b;
}
-function testSuspendedGenerator()
-{
+
+function testSuspendedGenerator() {
var g = gen(420);
g.next();
-
debugger;
return g;
}`);
-Protocol.Debugger.enable().then(testSuite);
-
-function dumpInnermostScope(msg) {
- var scopes = msg.result.result;
- var inner_scope = scopes[0].value;
- return Protocol.Runtime.getProperties({ objectId : inner_scope.objectId })
- .then(InspectorTest.logMessage);
-}
-
-function dumpGeneratorScopes(msg)
-{
- var props = msg.result.internalProperties;
- var promises = props
- .filter(prop => prop.name == "[[Scopes]]")
- .map(prop => prop.value.objectId)
- .map(scopesId => Protocol.Runtime.getProperties({ objectId : scopesId })
- .then(dumpInnermostScope));
- return Promise.all(promises);
-}
-
-function fetchGeneratorProperties(objectId) {
- return Protocol.Runtime.getProperties({ objectId : objectId });
-}
+InspectorTest.runAsyncTestSuite([
+ async function testScopesPaused() {
+ Protocol.Debugger.enable();
+ Protocol.Runtime.evaluate({expression: 'testSuspendedGenerator()'});
+ let {params:{callFrames:[callFrame]}} = await Protocol.Debugger.oncePaused();
+ // Current local scope.
+ let localScope = callFrame.scopeChain.find(scope => scope.type === 'local');
+ let variables = (await Protocol.Runtime.getProperties({
+ objectId: localScope.object.objectId
+ })).result.result;
+ let genObjectId =
+ variables.find(variable => variable.name === 'g').value.objectId;
+ let {result:{internalProperties}} = await Protocol.Runtime.getProperties({
+ objectId: genObjectId
+ });
+ // Generator [[Scopes]].
+ let scopes = internalProperties.find(prop => prop.name === '[[Scopes]]');
+ let {result:{result}} = await Protocol.Runtime.getProperties({
+ objectId: scopes.value.objectId
+ });
+ // Locals from generator.
+ let scope = result.find(scope => scope.value.description === 'Local (gen)');
+ ({result:{result}} = await Protocol.Runtime.getProperties({
+ objectId: scope.value.objectId
+ }));
+ InspectorTest.logMessage(result);
+ await Protocol.Debugger.disable();
+ },
-function extractGeneratorObjectFromScope(scopeId) {
- return Protocol.Runtime.getProperties({ objectId : scopeId })
- .then(msg => {
- var generatorObjectId = msg.result.result[0].value.objectId;
- return fetchGeneratorProperties(generatorObjectId);
- });
-}
-
-function dumpGeneratorScopesOnPause(msg) {
- var scopeChain = msg.params.callFrames[0].scopeChain;
- var promises = scopeChain
- .filter(scope => scope.type === "local")
- .map(scope => scope.object.objectId)
- .map(scopeId => extractGeneratorObjectFromScope(scopeId)
- .then(dumpGeneratorScopes));
- return Promise.all(promises).then(Protocol.Debugger.resume);
-}
-
-function testSuite() {
- InspectorTest.runTestSuite([
-
- function testScopesPaused(next) {
- Protocol.Debugger.oncePaused()
- .then(dumpGeneratorScopesOnPause)
- .then(next);
- Protocol.Runtime.evaluate({ expression : "testSuspendedGenerator()" });
- },
-
- function testScopesNonPaused(next) {
- Protocol.Runtime.evaluate({ expression : "gen(430)"})
- .then(msg => fetchGeneratorProperties(msg.result.result.objectId))
- .then(dumpGeneratorScopes)
- .then(next);
- },
- ]);
-}
+ async function testScopesNonPaused() {
+ let {result:{result:{objectId}}} = await Protocol.Runtime.evaluate({
+ expression: 'gen(430)'
+ });
+ let {result:{internalProperties}} = await Protocol.Runtime.getProperties({
+ objectId
+ });
+ // Generator [[Scopes]].
+ let scopes = internalProperties.find(prop => prop.name === '[[Scopes]]');
+ let {result:{result}} = await Protocol.Runtime.getProperties({
+ objectId: scopes.value.objectId
+ });
+ // Locals from generator.
+ let scope = result.find(scope => scope.value.description === 'Local (gen)');
+ ({result:{result}} = await Protocol.Runtime.getProperties({
+ objectId: scope.value.objectId
+ }));
+ InspectorTest.logMessage(result);
+ }
+]);
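
Editor's note: the rewrite above replaces the promise-chain helpers with a single async walk: evaluate to get the generator's remote object, read its internal [[Scopes]] property, then read the variables of the 'Local (gen)' scope. A minimal sketch of that walk, assuming the same inspector-test Protocol helper and the gen() from the injected script:

(async function dumpGeneratorLocals() {
  // A suspended generator is just a remote object with internal properties.
  let {result: {result: {objectId}}} =
      await Protocol.Runtime.evaluate({expression: 'gen(430)'});
  let {result: {internalProperties}} =
      await Protocol.Runtime.getProperties({objectId});
  // [[Scopes]] is a synthetic array of scope remote objects.
  let scopes = internalProperties.find(prop => prop.name === '[[Scopes]]');
  let {result: {result: scopeList}} =
      await Protocol.Runtime.getProperties({objectId: scopes.value.objectId});
  // The variables of one scope are that scope object's properties.
  let local = scopeList.find(s => s.value.description === 'Local (gen)');
  let {result: {result: variables}} =
      await Protocol.Runtime.getProperties({objectId: local.value.objectId});
  InspectorTest.logMessage(variables);  // [{name: 'b', ...}, {name: 'a', ...}]
})();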
diff --git a/deps/v8/test/inspector/inspector.isolate b/deps/v8/test/inspector/inspector.isolate
index afce94b2e5..38a96c57c6 100644
--- a/deps/v8/test/inspector/inspector.isolate
+++ b/deps/v8/test/inspector/inspector.isolate
@@ -15,6 +15,7 @@
'./runtime/',
'./sessions/',
'./testcfg.py',
+ './type-profiler/',
'../../src/inspector/injected-script-source.js',
'<(PRODUCT_DIR)/inspector-test<(EXECUTABLE_SUFFIX)',
],
diff --git a/deps/v8/test/inspector/isolate-data.cc b/deps/v8/test/inspector/isolate-data.cc
index 4c1d81670f..8362dd4488 100644
--- a/deps/v8/test/inspector/isolate-data.cc
+++ b/deps/v8/test/inspector/isolate-data.cc
@@ -359,7 +359,7 @@ void IsolateData::SetCurrentTimeMS(double time) {
double IsolateData::currentTimeMS() {
if (current_time_set_) return current_time_;
- return v8::base::OS::TimeCurrentMillis();
+ return v8::internal::V8::GetCurrentPlatform()->CurrentClockTimeMillis();
}
void IsolateData::SetMemoryInfo(v8::Local<v8::Value> memory_info) {
diff --git a/deps/v8/test/inspector/protocol-test.js b/deps/v8/test/inspector/protocol-test.js
index 87910cc3e4..4ae96614dc 100644
--- a/deps/v8/test/inspector/protocol-test.js
+++ b/deps/v8/test/inspector/protocol-test.js
@@ -45,8 +45,8 @@ InspectorTest.logMessage = function(originalMessage) {
for (var key in object) {
if (nonStableFields.has(key))
object[key] = `<${key}>`;
- else if (typeof object[key] === "string" && object[key].match(/\d+:\d+:\d+:debug/))
- object[key] = object[key].replace(/\d+/, '<scriptId>');
+ else if (typeof object[key] === "string" && object[key].match(/\d+:\d+:\d+:\d+/))
+ object[key] = object[key].substring(0, object[key].lastIndexOf(':')) + ":<scriptId>";
else if (typeof object[key] === "object")
objects.push(object[key]);
}
@@ -277,6 +277,25 @@ InspectorTest.Session = class {
}
}
+ async logTypeProfile(typeProfile, source) {
+ let entries = typeProfile.entries;
+
+ // Sort in reverse order so we can replace entries without invalidating
+ // the other offsets.
+ entries = entries.sort((a, b) => b.offset - a.offset);
+
+ for (let entry of entries) {
+ source = source.slice(0, entry.offset) + typeAnnotation(entry.types) +
+ source.slice(entry.offset);
+ }
+ InspectorTest.log(source);
+ return typeProfile;
+
+ function typeAnnotation(types) {
+ return `/*${types.map(t => t.name).join(', ')}*/`;
+ }
+ }
+
logAsyncStackTrace(asyncStackTrace) {
while (asyncStackTrace) {
if (asyncStackTrace.promiseCreationFrame) {
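
Editor's note: the logTypeProfile helper added above splices /*type*/ annotations into the source at byte offsets, applying them from the highest offset down so each splice leaves all still-pending offsets valid. The same technique as a standalone sketch (annotate is a hypothetical name; entries mirrors the shape of Profiler.TypeProfileEntry):

function annotate(source, entries) {
  // Highest offset first: inserting at a later position never shifts
  // the earlier positions we still have to process.
  let sorted = entries.slice().sort((a, b) => b.offset - a.offset);
  for (let {offset, types} of sorted) {
    let note = `/*${types.map(t => t.name).join(', ')}*/`;
    source = source.slice(0, offset) + note + source.slice(offset);
  }
  return source;
}

// annotate('function f(a) {}', [{offset: 12, types: [{name: 'number'}]}])
// -> 'function f(a/*number*/) {}'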
diff --git a/deps/v8/test/inspector/runtime/call-function-on-async-expected.txt b/deps/v8/test/inspector/runtime/call-function-on-async-expected.txt
index ed6342996a..0cbf6f1c08 100644
--- a/deps/v8/test/inspector/runtime/call-function-on-async-expected.txt
+++ b/deps/v8/test/inspector/runtime/call-function-on-async-expected.txt
@@ -68,6 +68,24 @@ Running test: testExceptionInFunctionExpression
exceptionId : <exceptionId>
lineNumber : 0
scriptId : <scriptId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 21
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 35
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
text : Uncaught
}
result : {
@@ -154,3 +172,24 @@ Running test: testFunctionReturnRejectedPromise
}
}
}
+
+Running test: testEvaluateOnExecutionContext
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 70
+ type : number
+ value : 70
+ }
+ }
+}
+
+Running test: testPassingBothObjectIdAndExecutionContextId
+{
+ error : {
+ code : -32000
+ message : ObjectId must not be specified together with executionContextId
+ }
+ id : <messageId>
+}
diff --git a/deps/v8/test/inspector/runtime/call-function-on-async.js b/deps/v8/test/inspector/runtime/call-function-on-async.js
index a08b0777a6..277a01c468 100644
--- a/deps/v8/test/inspector/runtime/call-function-on-async.js
+++ b/deps/v8/test/inspector/runtime/call-function-on-async.js
@@ -7,13 +7,22 @@ let callFunctionOn = Protocol.Runtime.callFunctionOn.bind(Protocol.Runtime);
let remoteObject1;
let remoteObject2;
+let executionContextId;
-InspectorTest.runAsyncTestSuite([
+Protocol.Runtime.enable();
+Protocol.Runtime.onExecutionContextCreated(messageObject => {
+ executionContextId = messageObject.params.context.id;
+ InspectorTest.runAsyncTestSuite(testSuite);
+});
+
+let testSuite = [
async function prepareTestSuite() {
let result = await Protocol.Runtime.evaluate({ expression: '({a : 1})' });
remoteObject1 = result.result.result;
result = await Protocol.Runtime.evaluate({ expression: '({a : 2})' });
remoteObject2 = result.result.result;
+
+ await Protocol.Runtime.evaluate({ expression: 'globalObjectProperty = 42;' });
},
async function testArguments() {
@@ -102,8 +111,31 @@ InspectorTest.runAsyncTestSuite([
generatePreview: false,
awaitPromise: true
}));
- }
-]);
+ },
+
+ async function testEvaluateOnExecutionContext() {
+ InspectorTest.logMessage(await callFunctionOn({
+ executionContextId,
+ functionDeclaration: '(function(arg) { return this.globalObjectProperty + arg; })',
+ arguments: prepareArguments([ 28 ]),
+ returnByValue: true,
+ generatePreview: false,
+ awaitPromise: false
+ }));
+ },
+
+ async function testPassingBothObjectIdAndExecutionContextId() {
+ InspectorTest.logMessage(await callFunctionOn({
+ executionContextId,
+ objectId: remoteObject1.objectId,
+ functionDeclaration: '(function() { return 42; })',
+ arguments: prepareArguments([]),
+ returnByValue: true,
+ generatePreview: false,
+ awaitPromise: false
+ }));
+ },
+];
function prepareArguments(args) {
return args.map(arg => {
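
Editor's note: the two new tests above exercise Runtime.callFunctionOn with an executionContextId target, where `this` is that context's global object, and the error path when both targets are supplied. A hedged sketch of the two call shapes (callFunctionOn, executionContextId, and remoteObject1 are captured exactly as in the test):

(async () => {
  // Context target: `this` binds to the context's global object.
  InspectorTest.logMessage(await callFunctionOn({
    executionContextId,
    functionDeclaration: '(function() { return this.globalObjectProperty; })',
    returnByValue: true
  }));

  // Supplying both targets is rejected with
  // "ObjectId must not be specified together with executionContextId".
  let {error} = await callFunctionOn({
    executionContextId,
    objectId: remoteObject1.objectId,
    functionDeclaration: '(function() { return 42; })'
  });
  InspectorTest.log(error.message);
})();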
diff --git a/deps/v8/test/inspector/runtime/command-line-api-expected.txt b/deps/v8/test/inspector/runtime/command-line-api-expected.txt
index f655e7b783..11e60d9535 100644
--- a/deps/v8/test/inspector/runtime/command-line-api-expected.txt
+++ b/deps/v8/test/inspector/runtime/command-line-api-expected.txt
@@ -255,12 +255,12 @@ Running test: testDebug
foo (:0:16)
(anonymous) (:0:0)
[
- [0] : <scriptId>:0:12:debug
+ [0] : 5:0:12:<scriptId>
]
foo (:0:16)
(anonymous) (:0:0)
[
- [0] : <scriptId>:0:12:debug
+ [0] : 5:0:12:<scriptId>
]
Running test: testMonitor
diff --git a/deps/v8/test/inspector/runtime/console-methods-expected.txt b/deps/v8/test/inspector/runtime/console-methods-expected.txt
index 100789a773..81c3c76813 100644
--- a/deps/v8/test/inspector/runtime/console-methods-expected.txt
+++ b/deps/v8/test/inspector/runtime/console-methods-expected.txt
@@ -317,6 +317,85 @@ Checks console methods
params : {
args : [
[0] : {
+ className : Array
+ description : Array(2)
+ objectId : <objectId>
+ preview : {
+ description : Array(2)
+ overflow : false
+ properties : [
+ [0] : {
+ name : 0
+ subtype : array
+ type : object
+ valuePreview : {
+ description : Array(2)
+ overflow : false
+ properties : [
+ [0] : {
+ name : 1
+ type : number
+ value : 2
+ }
+ ]
+ subtype : array
+ type : object
+ }
+ }
+ [1] : {
+ name : 1
+ subtype : array
+ type : object
+ valuePreview : {
+ description : Array(2)
+ overflow : false
+ properties : [
+ [0] : {
+ name : 1
+ type : number
+ value : 4
+ }
+ ]
+ subtype : array
+ type : object
+ }
+ }
+ ]
+ subtype : array
+ type : object
+ }
+ subtype : array
+ type : object
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 10
+ functionName : testFunction
+ lineNumber : 17
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [1] : {
+ columnNumber : 0
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ timestamp : <timestamp>
+ type : table
+ }
+}
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
type : string
value : trace
}
@@ -327,7 +406,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 17
+ lineNumber : 18
scriptId : <scriptId>
url : test.js
}
@@ -359,7 +438,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 18
+ lineNumber : 19
scriptId : <scriptId>
url : test.js
}
@@ -391,7 +470,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 19
+ lineNumber : 20
scriptId : <scriptId>
url : test.js
}
@@ -423,7 +502,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 20
+ lineNumber : 21
scriptId : <scriptId>
url : test.js
}
@@ -455,7 +534,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 21
+ lineNumber : 22
scriptId : <scriptId>
url : test.js
}
@@ -487,7 +566,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 22
+ lineNumber : 23
scriptId : <scriptId>
url : test.js
}
@@ -519,7 +598,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 23
+ lineNumber : 24
scriptId : <scriptId>
url : test.js
}
@@ -551,7 +630,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 24
+ lineNumber : 25
scriptId : <scriptId>
url : test.js
}
@@ -583,14 +662,14 @@ Checks console methods
[0] : {
columnNumber : 12
functionName : foo
- lineNumber : 26
+ lineNumber : 27
scriptId : <scriptId>
url : test.js
}
[1] : {
columnNumber : 2
functionName : testFunction
- lineNumber : 28
+ lineNumber : 29
scriptId : <scriptId>
url : test.js
}
@@ -622,14 +701,14 @@ Checks console methods
[0] : {
columnNumber : 12
functionName : foo
- lineNumber : 26
+ lineNumber : 27
scriptId : <scriptId>
url : test.js
}
[1] : {
columnNumber : 2
functionName : testFunction
- lineNumber : 29
+ lineNumber : 30
scriptId : <scriptId>
url : test.js
}
diff --git a/deps/v8/test/inspector/runtime/console-methods.js b/deps/v8/test/inspector/runtime/console-methods.js
index 38ab5bd83f..c9e0e1d89b 100644
--- a/deps/v8/test/inspector/runtime/console-methods.js
+++ b/deps/v8/test/inspector/runtime/console-methods.js
@@ -4,6 +4,7 @@
let {session, contextGroup, Protocol} = InspectorTest.start('Checks console methods');
+contextGroup.setupInjectedScriptEnvironment();
contextGroup.addScript(`
function testFunction() {
console.debug('debug');
@@ -14,6 +15,7 @@ function testFunction() {
console.dir('dir');
console.dirxml('dirxml');
console.table([[1,2],[3,4]]);
+ console.table([[1,2],[3,4]], [1,2]);
console.trace('trace');
console.trace();
console.group();
diff --git a/deps/v8/test/inspector/runtime/es6-module-expected.txt b/deps/v8/test/inspector/runtime/es6-module-expected.txt
index cbe63fe718..646fd018ea 100644
--- a/deps/v8/test/inspector/runtime/es6-module-expected.txt
+++ b/deps/v8/test/inspector/runtime/es6-module-expected.txt
@@ -117,6 +117,7 @@ console.log(239)
this : {
type : undefined
}
+ url : module3
}
]
hitBreakpoints : [
diff --git a/deps/v8/test/inspector/runtime/function-scopes-expected.txt b/deps/v8/test/inspector/runtime/function-scopes-expected.txt
new file mode 100644
index 0000000000..0503e31b82
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/function-scopes-expected.txt
@@ -0,0 +1,53 @@
+Checks [[Scopes]] for functions
+Catch:
+{
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : a
+ value : {
+ description : 1
+ type : number
+ value : 1
+ }
+ writable : true
+}
+With block:
+{
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : b
+ value : {
+ description : 2
+ type : number
+ value : 2
+ }
+ writable : true
+}
+Closure (closure):
+{
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : c
+ value : {
+ description : 3
+ type : number
+ value : 3
+ }
+ writable : true
+}
+Global:
+{
+ configurable : false
+ enumerable : true
+ isOwn : true
+ name : e
+ value : {
+ description : 5
+ type : number
+ value : 5
+ }
+ writable : true
+}
diff --git a/deps/v8/test/inspector/runtime/function-scopes.js b/deps/v8/test/inspector/runtime/function-scopes.js
new file mode 100644
index 0000000000..bda069bd9a
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/function-scopes.js
@@ -0,0 +1,56 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks [[Scopes]] for functions');
+
+contextGroup.addScript(`
+var f;
+try {
+ throw 1;
+} catch (a) {
+ with({b:2}) {
+ function closure() {
+ var c = 3;
+ function foo() {
+ var d = 4;
+ return a + b + c + d;
+ }
+ return foo;
+ }
+ f = closure;
+ }
+}
+var e = 5;
+//# sourceURL=test.js`);
+
+(async function test() {
+ let {result:{result:{objectId}}} = await Protocol.Runtime.evaluate({
+ expression: 'f()'
+ });
+ let {result:{internalProperties}} = await Protocol.Runtime.getProperties({
+ objectId
+ });
+ let scopes = internalProperties.find(prop => prop.name === '[[Scopes]]');
+ let {result:{result}} = await Protocol.Runtime.getProperties({
+ objectId: scopes.value.objectId
+ });
+ await Promise.all(result.map(async scope => {
+ scope.variables = (await Protocol.Runtime.getProperties({
+ objectId: scope.value.objectId
+ })).result.result;
+ }));
+ let catchScope = result.find(scope => scope.value.description === 'Catch');
+ InspectorTest.log('Catch:');
+ InspectorTest.logMessage(catchScope.variables.find(variable => variable.name === 'a'));
+ InspectorTest.log('With block:');
+ let withScope = result.find(scope => scope.value.description === 'With Block');
+ InspectorTest.logMessage(withScope.variables.find(variable => variable.name === 'b'));
+ InspectorTest.log('Closure (closure):');
+ let closureScope = result.find(scope => scope.value.description === 'Closure (closure)');
+ InspectorTest.logMessage(closureScope.variables.find(variable => variable.name === 'c'));
+ InspectorTest.log('Global:');
+ let globalScope = result.find(scope => scope.value.description === 'Global');
+ InspectorTest.logMessage(globalScope.variables.find(variable => variable.name === 'e'));
+ InspectorTest.completeTest();
+})();
diff --git a/deps/v8/test/inspector/runtime/internal-properties-expected.txt b/deps/v8/test/inspector/runtime/internal-properties-expected.txt
index 10c2f83b7e..a44c50b673 100644
--- a/deps/v8/test/inspector/runtime/internal-properties-expected.txt
+++ b/deps/v8/test/inspector/runtime/internal-properties-expected.txt
@@ -387,6 +387,16 @@ expression: gen2
}
}
}
+ [4] : {
+ name : [[Scopes]]
+ value : {
+ className : Array
+ description : Scopes[2]
+ objectId : <objectId>
+ subtype : internal#scopeList
+ type : object
+ }
+ }
]
}
}
@@ -433,6 +443,16 @@ expression: gen2.next();gen2
}
}
}
+ [4] : {
+ name : [[Scopes]]
+ value : {
+ className : Array
+ description : Scopes[2]
+ objectId : <objectId>
+ subtype : internal#scopeList
+ type : object
+ }
+ }
]
}
}
diff --git a/deps/v8/test/inspector/sessions/debugger-stepping-and-breakpoints-expected.txt b/deps/v8/test/inspector/sessions/debugger-stepping-and-breakpoints-expected.txt
index 2b7cc167e2..b81fde9f35 100644
--- a/deps/v8/test/inspector/sessions/debugger-stepping-and-breakpoints-expected.txt
+++ b/deps/v8/test/inspector/sessions/debugger-stepping-and-breakpoints-expected.txt
@@ -20,12 +20,12 @@ Setting breakpoints in 2
Evaluating common breakpoint in 1
Paused in 1:
reason: other
- hit breakpoints: test.js:11:0
+ hit breakpoints: 1:11:0:test.js
location: foo@11
data: null
Paused in 2:
reason: other
- hit breakpoints: test.js:11:0
+ hit breakpoints: 1:11:0:test.js
location: foo@11
data: null
Resuming in 1
@@ -48,7 +48,7 @@ Resumed in 2
Evaluating exclusive breakpoint in 1
Paused in 1:
reason: other
- hit breakpoints: test.js:14:0
+ hit breakpoints: 1:14:0:test.js
location: baz@14
data: null
Paused in 2:
@@ -62,12 +62,12 @@ Resumed in 2
Evaluating common breakpoint in 2
Paused in 1:
reason: other
- hit breakpoints: test.js:11:0
+ hit breakpoints: 1:11:0:test.js
location: foo@11
data: null
Paused in 2:
reason: other
- hit breakpoints: test.js:11:0
+ hit breakpoints: 1:11:0:test.js
location: foo@11
data: null
Resuming in 2
@@ -90,7 +90,7 @@ Resumed in 2
Evaluating exclusive breakpoint in 2
Paused in 1:
reason: other
- hit breakpoints: test.js:14:0
+ hit breakpoints: 1:14:0:test.js
location: baz@14
data: null
Paused in 2:
@@ -197,7 +197,7 @@ Skipping pauses in 1
Evaluating common breakpoint in 1
Paused in 2:
reason: other
- hit breakpoints: test.js:11:0
+ hit breakpoints: 1:11:0:test.js
location: foo@11
data: null
Resuming in 2
@@ -210,7 +210,7 @@ Deactivating breakpoints in 1
Evaluating common breakpoint in 1
Paused in 2:
reason: other
- hit breakpoints: test.js:11:0
+ hit breakpoints: 1:11:0:test.js
location: foo@11
data: null
Resuming in 2
diff --git a/deps/v8/test/inspector/task-runner.cc b/deps/v8/test/inspector/task-runner.cc
index 4128a86cff..921317d0c0 100644
--- a/deps/v8/test/inspector/task-runner.cc
+++ b/deps/v8/test/inspector/task-runner.cc
@@ -78,7 +78,7 @@ void TaskRunner::RunMessageLoop(bool only_protocol) {
}
void TaskRunner::QuitMessageLoop() {
- DCHECK(nested_loop_count_ > 0);
+ DCHECK_LT(0, nested_loop_count_);
--nested_loop_count_;
}
diff --git a/deps/v8/test/inspector/type-profiler/type-profile-disable-expected.txt b/deps/v8/test/inspector/type-profiler/type-profile-disable-expected.txt
new file mode 100644
index 0000000000..b70acbac0e
--- /dev/null
+++ b/deps/v8/test/inspector/type-profiler/type-profile-disable-expected.txt
@@ -0,0 +1,9 @@
+Turn Profiler.startTypeProfile on and off.
+
+function g(/*Object*/a, /*Array*/b, /*null*/c) {
+ return 'bye';
+/*string*/};
+g({}, [], null);
+
+[
+] \ No newline at end of file
diff --git a/deps/v8/test/inspector/type-profiler/type-profile-disable.js b/deps/v8/test/inspector/type-profiler/type-profile-disable.js
new file mode 100644
index 0000000000..38a3c6fa9b
--- /dev/null
+++ b/deps/v8/test/inspector/type-profiler/type-profile-disable.js
@@ -0,0 +1,47 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --type-profile
+
+const source =
+ `
+function g(a, b, c) {
+ return 'bye';
+};
+g({}, [], null);
+`;
+
+let {session, contextGroup, Protocol} = InspectorTest.start("Turn " +
+ "Profiler.startTypeProfile on and off.");
+
+(async function testTypeProfile() {
+ Protocol.Runtime.enable();
+ let {result: {scriptId}} = await Protocol.Runtime.compileScript({
+ expression: source,
+ sourceURL: arguments.callee.name, persistScript: true
+ });
+ await Protocol.Profiler.enable();
+ // Start, run, take.
+ await Protocol.Profiler.startTypeProfile();
+ Protocol.Runtime.runScript({scriptId});
+
+ let typeProfiles = await Protocol.Profiler.takeTypeProfile();
+ session.logTypeProfile(typeProfiles.result.result[0],
+ source);
+
+ // This should delete all data.
+ Protocol.Profiler.stopTypeProfile();
+
+ await Protocol.Profiler.startTypeProfile();
+ typeProfiles = await Protocol.Profiler.takeTypeProfile();
+
+ // Should be empty because no code was run since start.
+ InspectorTest.logMessage(typeProfiles.result.result);
+
+ Protocol.Profiler.stopTypeProfile();
+
+ Protocol.Profiler.disable();
+ await Protocol.Runtime.disable();
+ InspectorTest.completeTest();
+})();
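
Editor's note: the type-profiler tests added here all follow the same Profiler lifecycle; feedback is collected only between startTypeProfile and takeTypeProfile, and stopTypeProfile discards whatever was collected. A condensed sketch of that lifecycle, assuming the inspector-test Protocol helper and a scriptId from Runtime.compileScript:

async function collectTypeProfile(scriptId) {
  await Protocol.Profiler.enable();
  await Protocol.Profiler.startTypeProfile();  // begin collecting feedback
  Protocol.Runtime.runScript({scriptId});      // only code run now is profiled
  let profiles = await Protocol.Profiler.takeTypeProfile();
  await Protocol.Profiler.stopTypeProfile();   // deletes collected feedback
  await Protocol.Profiler.disable();
  // Each entry: {scriptId, url, entries: [{offset, types: [{name}]}]}.
  return profiles.result.result;
}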
diff --git a/deps/v8/test/inspector/type-profiler/type-profile-expected.txt b/deps/v8/test/inspector/type-profiler/type-profile-expected.txt
new file mode 100644
index 0000000000..8001d8278e
--- /dev/null
+++ b/deps/v8/test/inspector/type-profiler/type-profile-expected.txt
@@ -0,0 +1,8 @@
+Test collecting type profile data with Profiler.takeTypeProfile.
+
+function f(/*Object, number, undefined*/a, /*Array, number, null*/b, /*boolean, Object, symbol*/c) {
+ return 'bye';
+/*string*/};
+f({}, [], true);
+f(3, 2.3, {a: 42});
+f(undefined, null, Symbol('hello'));/*string*/
diff --git a/deps/v8/test/inspector/type-profiler/type-profile-start-stop-expected.txt b/deps/v8/test/inspector/type-profiler/type-profile-start-stop-expected.txt
new file mode 100644
index 0000000000..0db7882562
--- /dev/null
+++ b/deps/v8/test/inspector/type-profiler/type-profile-start-stop-expected.txt
@@ -0,0 +1,49 @@
+Turn Profiler.startTypeProfile on and off.
+
+Running test: testTypeProfile
+
+function g(/*Object*/a, /*Array*/b, /*null*/c) {
+ return 'first';
+/*string*/};
+g({}, [], null);
+
+
+Running test: testTypeProfileFromDifferentSource
+
+function f(/*null*/a) {
+ return 'second';
+/*string*/};
+f(null);
+
+
+Running test: testStopTypeProfileDeletesFeedback
+[
+]
+
+Running test: testTypeProfileWithoutStartingItFirst
+Type profile has not been started.
+
+Running test: testTypeProfileAfterStoppingIt
+Type profile has not been started.
+
+Running test: testStartTypeProfileAfterRunning
+{
+ id : <messageId>
+ result : {
+ result : [
+ ]
+ }
+}
+
+Running test: testTypeProfileForTwoSources
+
+function g(/*Object*/a, /*Array*/b, /*null*/c) {
+ return 'first';
+/*string*/};
+g({}, [], null);
+
+
+function f(/*null*/a) {
+ return 'second';
+/*string*/};
+f(null);
diff --git a/deps/v8/test/inspector/type-profiler/type-profile-start-stop.js b/deps/v8/test/inspector/type-profiler/type-profile-start-stop.js
new file mode 100644
index 0000000000..dad9874ff7
--- /dev/null
+++ b/deps/v8/test/inspector/type-profiler/type-profile-start-stop.js
@@ -0,0 +1,170 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --type-profile
+
+const source1 =
+ `
+function g(a, b, c) {
+ return 'first';
+};
+g({}, [], null);
+`;
+
+const source2 =
+ `
+function f(a) {
+ return 'second';
+};
+f(null);
+`;
+
+let {session, contextGroup, Protocol} = InspectorTest.start("Turn " +
+ "Profiler.startTypeProfile on and off.");
+
+InspectorTest.runAsyncTestSuite([
+ async function testTypeProfile() {
+ Protocol.Runtime.enable();
+ let {result: {scriptId}} = await Protocol.Runtime.compileScript({
+ expression: source1,
+ sourceURL: arguments.callee.name, persistScript: true
+ });
+ await Protocol.Profiler.enable();
+
+ // Start, run, take.
+ await Protocol.Profiler.startTypeProfile();
+ Protocol.Runtime.runScript({scriptId});
+
+ let typeProfiles = await Protocol.Profiler.takeTypeProfile();
+ await session.logTypeProfile(typeProfiles.result.result[0],
+ source1);
+
+ Protocol.Profiler.stopTypeProfile();
+ Protocol.Profiler.disable();
+ await Protocol.Runtime.disable();
+ },
+ async function testTypeProfileFromDifferentSource() {
+ Protocol.Runtime.enable();
+ let {result: {scriptId}} = await Protocol.Runtime.compileScript({
+ expression: source2,
+ sourceURL: arguments.callee.name, persistScript: true
+ });
+ await Protocol.Profiler.enable();
+
+ // Start, run different script, take.
+ await Protocol.Profiler.startTypeProfile();
+ Protocol.Runtime.runScript({scriptId});
+
+ let typeProfiles = await Protocol.Profiler.takeTypeProfile();
+ await session.logTypeProfile(typeProfiles.result.result[0],
+ source2);
+
+ Protocol.Profiler.stopTypeProfile();
+ Protocol.Profiler.disable();
+ await Protocol.Runtime.disable();
+ },
+ async function testStopTypeProfileDeletesFeedback() {
+ Protocol.Runtime.enable();
+ let {result: {scriptId}} = await Protocol.Runtime.compileScript({
+ expression: source1,
+ sourceURL: arguments.callee.name, persistScript: true
+ });
+ await Protocol.Profiler.enable();
+
+ // Start, run, stop.
+ await Protocol.Profiler.startTypeProfile();
+ Protocol.Runtime.runScript({scriptId});
+ await Protocol.Profiler.stopTypeProfile();
+
+ // Start, take. Should be empty, because no code was run.
+ await Protocol.Profiler.startTypeProfile();
+ let typeProfiles = await Protocol.Profiler.takeTypeProfile();
+ InspectorTest.logMessage(typeProfiles.result.result);
+ await Protocol.Profiler.stopTypeProfile();
+
+ Protocol.Profiler.disable();
+ await Protocol.Runtime.disable();
+ },
+ async function testTypeProfileWithoutStartingItFirst() {
+ Protocol.Runtime.enable();
+ let {result: {scriptId}} = await Protocol.Runtime.compileScript({ expression: source1,
+ sourceURL: arguments.callee.name, persistScript: true });
+ Protocol.Runtime.runScript({ scriptId });
+ await Protocol.Profiler.enable();
+
+ // This should return an error because type profile was never started.
+ let typeProfiles = await Protocol.Profiler.takeTypeProfile();
+ InspectorTest.logObject(typeProfiles.error.message);
+
+ Protocol.Profiler.disable();
+ await Protocol.Runtime.disable();
+ },
+ async function testTypeProfileAfterStoppingIt() {
+ Protocol.Runtime.enable();
+ let {result: {scriptId}} = await Protocol.Runtime.compileScript({ expression: source1,
+ sourceURL: arguments.callee.name, persistScript: true });
+ Protocol.Runtime.runScript({ scriptId });
+ await Protocol.Profiler.enable();
+ await Protocol.Profiler.startTypeProfile();
+
+ // Make sure that this turns off type profile.
+ await Protocol.Profiler.stopTypeProfile();
+
+ // This should return an error because type profile was stopped.
+ let typeProfiles = await Protocol.Profiler.takeTypeProfile();
+ InspectorTest.logObject(typeProfiles.error.message);
+
+ Protocol.Profiler.disable();
+ await Protocol.Runtime.disable();
+ },
+ async function testStartTypeProfileAfterRunning() {
+ Protocol.Runtime.enable();
+ let {result: {scriptId}} = await Protocol.Runtime.compileScript({
+ expression: source1,
+ sourceURL: arguments.callee.name, persistScript: true
+ });
+ Protocol.Runtime.runScript({scriptId});
+
+ await Protocol.Profiler.enable();
+ await Protocol.Profiler.startTypeProfile();
+
+ let typeProfiles = await Protocol.Profiler.takeTypeProfile();
+
+ // This should be empty because type profile was started after compilation.
+ // Only the outer script is annotated with return value "string" because
+ // that does not depend on runScript().
+ InspectorTest.logMessage(typeProfiles);
+
+ Protocol.Profiler.stopTypeProfile();
+ Protocol.Profiler.disable();
+ await Protocol.Runtime.disable();
+ },
+ async function testTypeProfileForTwoSources() {
+ Protocol.Runtime.enable();
+ let {result: {scriptId: scriptId1}} = await Protocol.Runtime.compileScript({
+ expression: source1,
+ sourceURL: arguments.callee.name, persistScript: true
+ });
+ let {result: {scriptId: scriptId2}} = await Protocol.Runtime.compileScript({
+ expression: source2,
+ sourceURL: arguments.callee.name, persistScript: true
+ });
+ await Protocol.Profiler.enable();
+
+ // Start, run different script, take.
+ await Protocol.Profiler.startTypeProfile();
+ Protocol.Runtime.runScript({scriptId: scriptId1});
+ Protocol.Runtime.runScript({scriptId: scriptId2});
+
+ let typeProfiles = await Protocol.Profiler.takeTypeProfile();
+ await session.logTypeProfile(typeProfiles.result.result[0],
+ source1);
+ await session.logTypeProfile(typeProfiles.result.result[1],
+ source2);
+
+ Protocol.Profiler.stopTypeProfile();
+ Protocol.Profiler.disable();
+ await Protocol.Runtime.disable();
+ }
+]);
diff --git a/deps/v8/test/inspector/type-profiler/type-profile-with-classes-expected.txt b/deps/v8/test/inspector/type-profiler/type-profile-with-classes-expected.txt
new file mode 100644
index 0000000000..3c1cb363d7
--- /dev/null
+++ b/deps/v8/test/inspector/type-profiler/type-profile-with-classes-expected.txt
@@ -0,0 +1,15 @@
+Test collecting type profile data with Profiler.takeTypeProfile.
+
+function f(/*number*/n) {
+/*undefined*/};
+f(5);
+function g(/*Object, number*/a, /*Array, number*/b, /*Flower, Object*/c) {
+ return 'bye';
+/*string*/};
+/*undefined*/class Tree {};
+/*Flower*/class Flower extends Tree{};
+var f = new Flower();
+f.constructor = {};
+f.constructor.name = "Not a flower.";
+g({}, [], f);
+g(3, 2.3, {a: 42});/*string*/
diff --git a/deps/v8/test/inspector/type-profiler/type-profile-with-classes.js b/deps/v8/test/inspector/type-profiler/type-profile-with-classes.js
new file mode 100644
index 0000000000..b697ebfd3f
--- /dev/null
+++ b/deps/v8/test/inspector/type-profiler/type-profile-with-classes.js
@@ -0,0 +1,43 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --type-profile
+
+const source =
+ `
+function f(n) {
+};
+f(5);
+function g(a, b, c) {
+ return 'bye';
+};
+class Tree {};
+class Flower extends Tree{};
+var f = new Flower();
+f.constructor = {};
+f.constructor.name = "Not a flower.";
+g({}, [], f);
+g(3, 2.3, {a: 42});
+`;
+
+let {session, contextGroup, Protocol} = InspectorTest.start("Test collecting type profile data with Profiler.takeTypeProfile.");
+
+(async function testTypeProfile(next) {
+ await Protocol.Profiler.enable();
+ await Protocol.Profiler.startTypeProfile();
+
+ Protocol.Runtime.enable();
+ let {result: {scriptId}} = await Protocol.Runtime.compileScript({ expression: source,
+ sourceURL: arguments.callee.name, persistScript: true });
+
+ Protocol.Runtime.runScript({ scriptId });
+
+ let typeProfiles = await Protocol.Profiler.takeTypeProfile();
+ await session.logTypeProfile(typeProfiles.result.result[0],
+ source);
+
+ Protocol.Profiler.disable();
+ await Protocol.Runtime.disable();
+ InspectorTest.completeTest();
+})();
diff --git a/deps/v8/test/inspector/type-profiler/type-profile-with-to-string-tag-expected.txt b/deps/v8/test/inspector/type-profiler/type-profile-with-to-string-tag-expected.txt
new file mode 100644
index 0000000000..fdba16517e
--- /dev/null
+++ b/deps/v8/test/inspector/type-profiler/type-profile-with-to-string-tag-expected.txt
@@ -0,0 +1,16 @@
+Test collecting type profile data with Profiler.takeTypeProfile.
+
+function g(/*Object, number*/a, /*Array, number*/b, /*Dog, Object*/c) {
+ return 'bye';
+/*string*/};
+/*undefined*/class Tree {};
+/*Flower*/class Flower extends Tree{};
+var f = new Flower();
+// We store the type when a variable is used. If a toStringTag
+// changes the type, we want to collect that changed feedback.
+// This test ensures that we collect that information rather than
+// for example infer the types from the internal map, which wouldn't
+// know about a toStringTag.
+f[Symbol.toStringTag] = 'Dog';
+g({}, [], f);
+g(3, 2.3, {a: 42});/*string*/
diff --git a/deps/v8/test/inspector/type-profiler/type-profile-with-to-string-tag.js b/deps/v8/test/inspector/type-profiler/type-profile-with-to-string-tag.js
new file mode 100644
index 0000000000..654f18afc5
--- /dev/null
+++ b/deps/v8/test/inspector/type-profiler/type-profile-with-to-string-tag.js
@@ -0,0 +1,46 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --type-profile
+
+const source =
+ `
+function g(a, b, c) {
+ return 'bye';
+};
+class Tree {};
+class Flower extends Tree{};
+var f = new Flower();
+// We store the type when a variable is used. If a toStringTag
+// changes the type, we want to collect that changed feedback.
+// This test ensures that we collect that information rather than
+// for example infer the types from the internal map, which wouldn't
+// know about a toStringTag.
+f[Symbol.toStringTag] = 'Dog';
+g({}, [], f);
+g(3, 2.3, {a: 42});
+`;
+
+let {session, contextGroup, Protocol} = InspectorTest.start("Test collecting " +
+ "type profile data with Profiler.takeTypeProfile.");
+
+(async function testTypeProfile() {
+ await Protocol.Profiler.enable();
+ await Protocol.Profiler.startTypeProfile();
+
+ Protocol.Runtime.enable();
+ let {result: {scriptId}} = await Protocol.Runtime.compileScript({ expression: source,
+ sourceURL: arguments.callee.name, persistScript: true });
+ Protocol.Runtime.runScript({ scriptId });
+ await Protocol.Profiler.startTypeProfile();
+
+ let typeProfiles = await Protocol.Profiler.takeTypeProfile();
+ await session.logTypeProfile(typeProfiles.result.result[0],
+ source);
+
+ Protocol.Profiler.stopTypeProfile();
+ Protocol.Profiler.disable();
+ await Protocol.Runtime.disable();
+ InspectorTest.completeTest();
+})();
diff --git a/deps/v8/test/inspector/type-profiler/type-profile.js b/deps/v8/test/inspector/type-profiler/type-profile.js
new file mode 100644
index 0000000000..e912a4b940
--- /dev/null
+++ b/deps/v8/test/inspector/type-profiler/type-profile.js
@@ -0,0 +1,39 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --type-profile
+
+const source =
+ `
+function f(a, b, c) {
+ return 'bye';
+};
+f({}, [], true);
+f(3, 2.3, {a: 42});
+f(undefined, null, Symbol('hello'));
+`;
+
+let {session, contextGroup, Protocol} = InspectorTest.start("Test collecting type profile data with Profiler.takeTypeProfile.");
+
+(async function testTypeProfile() {
+ await Protocol.Profiler.enable();
+ await Protocol.Profiler.startTypeProfile();
+
+ Protocol.Runtime.enable();
+ let {result: {scriptId}} = await Protocol.Runtime.compileScript({
+ expression: source,
+ sourceURL: arguments.callee.name,
+ persistScript: true
+ });
+ Protocol.Runtime.runScript({ scriptId });
+
+ let typeProfiles = await Protocol.Profiler.takeTypeProfile();
+ await session.logTypeProfile(typeProfiles.result.result[0],
+ source);
+
+  Protocol.Profiler.stopTypeProfile();
+ Protocol.Profiler.disable();
+ await Protocol.Runtime.disable();
+ InspectorTest.completeTest();
+})();
diff --git a/deps/v8/test/js-perf-test/Array/filter.js b/deps/v8/test/js-perf-test/Array/filter.js
index ccaef0e5c9..94ba2651ec 100644
--- a/deps/v8/test/js-perf-test/Array/filter.js
+++ b/deps/v8/test/js-perf-test/Array/filter.js
@@ -13,7 +13,7 @@ benchy('NaiveFilterReplacement', NaiveFilter, NaiveFilterSetup);
benchy('DoubleFilter', DoubleFilter, DoubleFilterSetup);
benchy('SmiFilter', SmiFilter, SmiFilterSetup);
benchy('FastFilter', FastFilter, FastFilterSetup);
-benchy('ObjectFilter', GenericFilter, ObjectFilterSetup);
+benchy('GenericFilter', GenericFilter, ObjectFilterSetup);
benchy('OptFastFilter', OptFastFilter, FastFilterSetup);
var array;
diff --git a/deps/v8/test/js-perf-test/Array/join.js b/deps/v8/test/js-perf-test/Array/join.js
index 59eaa24285..7330ae0459 100644
--- a/deps/v8/test/js-perf-test/Array/join.js
+++ b/deps/v8/test/js-perf-test/Array/join.js
@@ -10,7 +10,7 @@ function benchy(name, test, testSetup) {
}
benchy('SmiJoin', SmiJoin, SmiJoinSetup);
-benchy('SmiJoin', SmiJoin, SmiJoinSetup);
+benchy('StringJoin', StringJoin, StringJoinSetup);
benchy('SparseSmiJoin', SparseSmiJoin, SparseSmiJoinSetup);
benchy('SparseStringJoin', SparseStringJoin, SparseStringJoinSetup);
diff --git a/deps/v8/test/js-perf-test/Array/map.js b/deps/v8/test/js-perf-test/Array/map.js
index 33cd79af59..f4ab95b065 100644
--- a/deps/v8/test/js-perf-test/Array/map.js
+++ b/deps/v8/test/js-perf-test/Array/map.js
@@ -15,7 +15,7 @@ benchy('SmallSmiToDoubleMap', SmiMap, SmiToDoubleMapSetup);
benchy('SmallSmiToFastMap', SmiMap, SmiToFastMapSetup);
benchy('SmiMap', SmiMap, SmiMapSetup);
benchy('FastMap', FastMap, FastMapSetup);
-benchy('ObjectMap', GenericMap, ObjectMapSetup);
+benchy('GenericMap', GenericMap, ObjectMapSetup);
benchy('OptFastMap', OptFastMap, FastMapSetup);
var array;
diff --git a/deps/v8/test/js-perf-test/Array/to-string.js b/deps/v8/test/js-perf-test/Array/to-string.js
index 6bfb661a36..c6a66d726b 100644
--- a/deps/v8/test/js-perf-test/Array/to-string.js
+++ b/deps/v8/test/js-perf-test/Array/to-string.js
@@ -10,7 +10,7 @@ function benchy(name, test, testSetup) {
}
benchy('SmiToString', SmiToString, SmiToStringSetup);
-benchy('SmiToString', SmiToString, SmiToStringSetup);
+benchy('StringToString', StringToString, StringToStringSetup);
benchy('SparseSmiToString', SparseSmiToString, SparseSmiToStringSetup);
benchy('SparseStringToString', SparseStringToString, SparseStringToStringSetup);
diff --git a/deps/v8/test/js-perf-test/JSTests.json b/deps/v8/test/js-perf-test/JSTests.json
index 04aade4f45..420421099a 100644
--- a/deps/v8/test/js-perf-test/JSTests.json
+++ b/deps/v8/test/js-perf-test/JSTests.json
@@ -30,7 +30,8 @@
{"name": "SetIndexWithoutTrap"},
{"name": "SetIndexWithTrap"},
{"name": "SetSymbolWithoutTrap"},
- {"name": "SetSymbolWithTrap"}
+ {"name": "SetSymbolWithTrap"},
+ {"name": "HasInIdiom"}
]
},
{
@@ -385,42 +386,38 @@
{"name": "DoubleFilter"},
{"name": "SmiFilter"},
{"name": "FastFilter"},
- {"name": "ObjectFilter"},
+ {"name": "GenericFilter"},
{"name": "OptFastFilter"},
{"name": "NaiveMapReplacement"},
{"name": "DoubleMap"},
{"name": "SmiMap"},
{"name": "FastMap"},
- {"name": "ObjectMap"},
- {"name": "OptFastMap"},
+ {"name": "GenericMap"},
+ {"name": "OptFastMap"},
{"name": "DoubleEvery"},
{"name": "SmiEvery"},
{"name": "FastEvery"},
- {"name": "ObjectEvery"},
- {"name": "OptFastEvery"},
- {"name": "SmiJoin"},
- {"name": "StringJoin"},
- {"name": "SparseSmiJoin"},
- {"name": "SparseStringJoin"},
+ {"name": "OptFastEvery"},
+ {"name": "SmiJoin"},
+ {"name": "StringJoin"},
+ {"name": "SparseSmiJoin"},
+ {"name": "SparseStringJoin"},
{"name": "DoubleSome"},
{"name": "SmiSome"},
{"name": "FastSome"},
- {"name": "ObjectSome"},
- {"name": "OptFastSome"},
+ {"name": "OptFastSome"},
{"name": "DoubleReduce"},
{"name": "SmiReduce"},
{"name": "FastReduce"},
- {"name": "ObjectReduce"},
- {"name": "OptFastReduce"},
+ {"name": "OptFastReduce"},
{"name": "DoubleReduceRight"},
{"name": "SmiReduceRight"},
{"name": "FastReduceRight"},
- {"name": "ObjectReduceRight"},
- {"name": "OptFastReduceRight"},
- {"name": "SmiToString"},
- {"name": "StringToString"},
- {"name": "SparseSmiToString"},
- {"name": "SparseStringToString"}
+ {"name": "OptFastReduceRight"},
+ {"name": "SmiToString"},
+ {"name": "StringToString"},
+ {"name": "SparseSmiToString"},
+ {"name": "SparseStringToString"}
]
},
{
@@ -612,6 +609,19 @@
{"name": "Runtime.evaluate(String16Cstor)"},
{"name": "Debugger.getPossibleBreakpoints"}
]
+ },
+ {
+ "name": "Parsing",
+ "path": ["Parsing"],
+ "main": "run.js",
+ "flags": ["--no-compilation-cache", "--allow-natives-syntax"],
+ "resources": [ "comments.js"],
+ "results_regexp": "^%s\\-Parsing\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "OneLineComment"},
+ {"name": "OneLineComments"},
+ {"name": "MultiLineComment"}
+ ]
}
]
}
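
Editor's note: the new Parsing suite's results_regexp is matched against the lines printed by run.js (added below): PrintResult emits `<name>-Parsing(Score): <score>`, which the pattern captures once %s is replaced by the test name. A hedged one-off illustration of the match:

let line = 'OneLineComment-Parsing(Score): 123';  // shape printed by run.js
let re = new RegExp('^OneLineComment\\-Parsing\\(Score\\): (.+)$');
console.log(re.exec(line)[1]);                    // "123"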
diff --git a/deps/v8/test/js-perf-test/Parsing/comments.js b/deps/v8/test/js-perf-test/Parsing/comments.js
new file mode 100644
index 0000000000..e5dbab1c75
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Parsing/comments.js
@@ -0,0 +1,41 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const iterations = 100;
+
+new BenchmarkSuite('OneLineComment', [1000], [
+ new Benchmark('OneLineComment', false, true, iterations, Run, OneLineCommentSetup)
+]);
+
+new BenchmarkSuite('OneLineComments', [1000], [
+ new Benchmark('OneLineComments', false, true, iterations, Run, OneLineCommentsSetup)
+]);
+
+new BenchmarkSuite('MultiLineComment', [1000], [
+ new Benchmark('MultiLineComment', false, true, iterations, Run, MultiLineCommentSetup)
+]);
+
+let code;
+
+function OneLineCommentSetup() {
+ code = "//" + " This is a comment... ".repeat(600);
+ %FlattenString(code);
+}
+
+function OneLineCommentsSetup() {
+ code = "// This is a comment.\n".repeat(600);
+ %FlattenString(code);
+}
+
+function MultiLineCommentSetup() {
+ code = "/*" + " This is a comment... ".repeat(600) + "*/";
+ %FlattenString(code);
+}
+
+function Run() {
+ if (code == undefined) {
+ throw new Error("No test data");
+ }
+ eval(code);
+}
diff --git a/deps/v8/test/js-perf-test/Parsing/run.js b/deps/v8/test/js-perf-test/Parsing/run.js
new file mode 100644
index 0000000000..e6531af5bb
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Parsing/run.js
@@ -0,0 +1,27 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+load('../base.js');
+
+load('comments.js');
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '-Parsing(Score): ' + result);
+}
+
+
+function PrintError(name, error) {
+ PrintResult(name, error);
+ success = false;
+}
+
+
+BenchmarkSuite.config.doWarmup = undefined;
+BenchmarkSuite.config.doDeterministic = undefined;
+
+BenchmarkSuite.RunSuites({ NotifyResult: PrintResult,
+ NotifyError: PrintError });
diff --git a/deps/v8/test/js-perf-test/Proxies/proxies.js b/deps/v8/test/js-perf-test/Proxies/proxies.js
index 6bb56bd97d..79d064e931 100644
--- a/deps/v8/test/js-perf-test/Proxies/proxies.js
+++ b/deps/v8/test/js-perf-test/Proxies/proxies.js
@@ -463,3 +463,43 @@ newBenchmark("SetSymbolWithTrap", {
return value === SOME_OTHER_NUMBER;
}
});
+
+// ----------------------------------------------------------------------------
+
+var obj20prop = {};
+var measured;
+
+newBenchmark("HasInIdiom", {
+ setup() {
+ for (var i = 0; i < 20; ++i) {
+ obj20prop['prop' + i] = SOME_NUMBER;
+ }
+ p = new Proxy(obj20prop, {
+ has: function(target, propertyKey) {
+ return true;
+ },
+ get: function(target, propertyKey, receiver) {
+ if (typeof propertyKey == 'string' && propertyKey.match('prop'))
+ return SOME_NUMBER;
+ else
+ return Reflect.get(target, propertyKey, receiver);
+ },
+ });
+ measured = function measured(o) {
+ var result = 0;
+ for (var x in o) {
+      if (Object.prototype.hasOwnProperty.call(o, x)) {
+ var v = o[x];
+ result += v;
+ }
+ }
+ return result;
+ }
+ },
+ run() {
+ result = measured(p);
+ },
+ teardown() {
+ return result === 20 * SOME_NUMBER;
+ }
+});
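
Editor's note: the HasInIdiom benchmark above walks a proxied 20-property object with for-in and filters through hasOwnProperty, against a has trap that always answers true and a get trap that fakes values for prop* keys. A minimal usage sketch of that proxy shape (standalone, with hypothetical literals in place of SOME_NUMBER):

let target = {prop0: 1, prop1: 1};
let p = new Proxy(target, {
  has() { return true; },  // claims every property exists
  get(t, key, receiver) {
    return typeof key == 'string' && key.match('prop')
        ? 1                                // fake value for prop* keys
        : Reflect.get(t, key, receiver);
  },
});
console.log('anything' in p);  // true, via the has trap
console.log(p.prop42);         // 1, via the get trap
console.log(Object.keys(p));   // ['prop0', 'prop1'] - ownKeys is untrapped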
diff --git a/deps/v8/test/memory/Memory.json b/deps/v8/test/memory/Memory.json
index c6b8cd50a2..a3c8dbc0d5 100644
--- a/deps/v8/test/memory/Memory.json
+++ b/deps/v8/test/memory/Memory.json
@@ -19,6 +19,10 @@
"results_regexp": "(\\d+) bytes for startup$"
},
{
+ "name": "SnapshotSizeBuiltins",
+ "results_regexp": "(\\d+) bytes for builtins$"
+ },
+ {
"name": "SnapshotSizeContext",
"results_regexp": "(\\d+) bytes for context #0$"
}
diff --git a/deps/v8/test/message/regress/regress-crbug-691194.out b/deps/v8/test/message/regress/regress-crbug-691194.out
index 6d7db0f535..43453900db 100644
--- a/deps/v8/test/message/regress/regress-crbug-691194.out
+++ b/deps/v8/test/message/regress/regress-crbug-691194.out
@@ -6,5 +6,5 @@
"foo".repeat(1 << 30)
^
RangeError: Invalid string length
- at String.repeat (native)
+ at String.repeat (<anonymous>)
at *%(basename)s:5:7
diff --git a/deps/v8/test/message/settimeout.js b/deps/v8/test/message/settimeout.js
new file mode 100644
index 0000000000..59ed1c6517
--- /dev/null
+++ b/deps/v8/test/message/settimeout.js
@@ -0,0 +1,26 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-stress-opt
+
+var state = 0;
+function inc() {
+ console.log("increment state");
+ state++;
+}
+
+function repeat() {
+ console.log("current state: " + state);
+ if (state < 3) {
+ setTimeout(inc, 0);
+ setTimeout(repeat, 0);
+ } else {
+ setTimeout(function() { throw new Error(); });
+ }
+}
+
+setTimeout(inc, 0);
+console.log("state: " + state);
+setTimeout(repeat, 0);
+console.log("state: " + state);
diff --git a/deps/v8/test/message/settimeout.out b/deps/v8/test/message/settimeout.out
new file mode 100644
index 0000000000..7951436fdf
--- /dev/null
+++ b/deps/v8/test/message/settimeout.out
@@ -0,0 +1,13 @@
+state: 0
+state: 0
+increment state
+current state: 1
+increment state
+current state: 2
+increment state
+current state: 3
+*%(basename)s:19: Error
+ setTimeout(function() { throw new Error(); });
+ ^
+Error
+ at *%(basename)s:19:35
diff --git a/deps/v8/test/message/typedarray.out b/deps/v8/test/message/typedarray.out
index 038e49d52c..908dd7fa5e 100644
--- a/deps/v8/test/message/typedarray.out
+++ b/deps/v8/test/message/typedarray.out
@@ -1,9 +1,9 @@
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-*%(basename)s:6: RangeError: Start offset is negative
+*%(basename)s:6: RangeError: offset is out of bounds
a.set([2], -1);
^
-RangeError: Start offset is negative
- at Uint8Array.set (native)
+RangeError: offset is out of bounds
+ at Uint8Array.set (<anonymous>)
at *%(basename)s:6:3 \ No newline at end of file
diff --git a/deps/v8/test/message/wasm-trace-memory-interpreted.js b/deps/v8/test/message/wasm-trace-memory-interpreted.js
new file mode 100644
index 0000000000..75f42e82e6
--- /dev/null
+++ b/deps/v8/test/message/wasm-trace-memory-interpreted.js
@@ -0,0 +1,7 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-stress-opt --expose-wasm --wasm-trace-memory --wasm-interpret-all
+
+load("test/message/wasm-trace-memory.js");
diff --git a/deps/v8/test/message/wasm-trace-memory-interpreted.out b/deps/v8/test/message/wasm-trace-memory-interpreted.out
new file mode 100644
index 0000000000..6854727885
--- /dev/null
+++ b/deps/v8/test/message/wasm-trace-memory-interpreted.out
@@ -0,0 +1,9 @@
+I 0+0x3 read @00000004 i32:0 / 00000000
+I 1+0x3 read @00000001 i8:0 / 00
+I 3+0x5 store @00000004 i32:305419896 / 12345678
+I 0+0x3 read @00000002 i32:1450704896 / 56780000
+I 1+0x3 read @00000006 i8:52 / 34
+I 2+0x3 read @00000002 f32:68169720922112.000000 / 56780000
+I 4+0x5 store @00000004 i8:171 / ab
+I 0+0x3 read @00000002 i32:1454047232 / 56ab0000
+I 2+0x3 read @00000002 f32:94008244174848.000000 / 56ab0000
diff --git a/deps/v8/test/message/wasm-trace-memory.js b/deps/v8/test/message/wasm-trace-memory.js
new file mode 100644
index 0000000000..6c33b900b1
--- /dev/null
+++ b/deps/v8/test/message/wasm-trace-memory.js
@@ -0,0 +1,37 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-stress-opt --expose-wasm --wasm-trace-memory
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+var builder = new WasmModuleBuilder();
+builder.addMemory(1);
+builder.addFunction('load', kSig_v_i)
+ .addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0, kExprDrop])
+ .exportFunc();
+builder.addFunction('load8', kSig_v_i)
+ .addBody([kExprGetLocal, 0, kExprI32LoadMem8U, 0, 0, kExprDrop])
+ .exportFunc();
+builder.addFunction('loadf', kSig_v_i)
+ .addBody([kExprGetLocal, 0, kExprF32LoadMem, 0, 0, kExprDrop])
+ .exportFunc();
+builder.addFunction('store', kSig_v_ii)
+ .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem, 0, 0])
+ .exportFunc();
+builder.addFunction('store8', kSig_v_ii)
+ .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem8, 0, 0])
+ .exportFunc();
+var module = builder.instantiate();
+
+module.exports.load(4);
+module.exports.load8(1);
+module.exports.store(4, 0x12345678);
+module.exports.load(2);
+module.exports.load8(6);
+module.exports.loadf(2);
+module.exports.store8(4, 0xab);
+module.exports.load(2);
+module.exports.loadf(2);
diff --git a/deps/v8/test/message/wasm-trace-memory.out b/deps/v8/test/message/wasm-trace-memory.out
new file mode 100644
index 0000000000..12cbd180a0
--- /dev/null
+++ b/deps/v8/test/message/wasm-trace-memory.out
@@ -0,0 +1,9 @@
+C 0+0x3 read @00000004 i32:0 / 00000000
+C 1+0x3 read @00000001 i8:0 / 00
+C 3+0x5 store @00000004 i32:305419896 / 12345678
+C 0+0x3 read @00000002 i32:1450704896 / 56780000
+C 1+0x3 read @00000006 i8:52 / 34
+C 2+0x3 read @00000002 f32:68169720922112.000000 / 56780000
+C 4+0x5 store @00000004 i8:171 / ab
+C 0+0x3 read @00000002 i32:1454047232 / 56ab0000
+C 2+0x3 read @00000002 f32:94008244174848.000000 / 56ab0000
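
Editor's note: a hedged decode of one trace line above, inferred from the calls in wasm-trace-memory.js: in `C 3+0x5 store @00000004 i32:305419896 / 12345678`, 'C' appears to mark the compiled run (the interpreted variant logs 'I'), `3+0x5` is function index 3 ('store') at code offset 0x5, `@00000004` is the memory address, and the value is shown as decimal / hex:

// module.exports.store(4, 0x12345678) produces the line above;
// the decimal and hex columns are the same value:
console.log(0x12345678 === 305419896);  // true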
diff --git a/deps/v8/test/mjsunit/array-constructor-feedback.js b/deps/v8/test/mjsunit/array-constructor-feedback.js
index c1f386d983..fb4ad5a19a 100644
--- a/deps/v8/test/mjsunit/array-constructor-feedback.js
+++ b/deps/v8/test/mjsunit/array-constructor-feedback.js
@@ -106,14 +106,9 @@ function assertKind(expected, obj, name_opt) {
a = bar(10);
assertKind(elements_kind.fast, a);
assertOptimized(bar);
- bar(100000);
+ bar(10000);
assertOptimized(bar);
- // If the argument isn't a smi, things should still work.
- a = bar("oops");
- assertOptimized(bar);
- assertKind(elements_kind.fast, a);
-
function barn(one, two, three) {
return new Array(one, two, three);
}
diff --git a/deps/v8/test/mjsunit/array-reduce.js b/deps/v8/test/mjsunit/array-reduce.js
index f34d3ef6ff..4a4494a72c 100644
--- a/deps/v8/test/mjsunit/array-reduce.js
+++ b/deps/v8/test/mjsunit/array-reduce.js
@@ -537,3 +537,23 @@ var arr = [];
Object.defineProperty(arr, "0", { get: function() { delete this[0] },
configurable: true});
assertEquals(undefined, arr.reduceRight(function(val) { return val }));
+
+
+(function ReduceRightMaxIndex() {
+ const kMaxIndex = 0xffffffff-1;
+ let array = [];
+ array[kMaxIndex-2] = 'value-2';
+ array[kMaxIndex-1] = 'value-1';
+ // Use the maximum array index possible.
+ array[kMaxIndex] = 'value';
+ // Add the next index which is a normal property and thus will not show up.
+ array[kMaxIndex+1] = 'normal property';
+ assertThrowsEquals( () => {
+ array.reduceRight((sum, value) => {
+ assertEquals('initial', sum);
+ assertEquals('value', value);
+ // Throw at this point as we would very slowly loop down from kMaxIndex.
+ throw 'do not continue';
+ }, 'initial')
+ }, 'do not continue');
+})();
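
Editor's note: the new ReduceRightMaxIndex test leans on the fact that the largest valid array index is 2**32 - 2; an assignment one past that creates an ordinary property that array methods such as reduceRight never visit. A tiny illustration:

const kMaxIndex = 2 ** 32 - 2;  // 0xfffffffe, the largest array index
let array = [];
array[kMaxIndex] = 'value';
console.log(array.length);      // 4294967295 (kMaxIndex + 1)
array[kMaxIndex + 1] = 'normal property';
console.log(array.length);      // unchanged: index 2**32 - 1 is not valid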
diff --git a/deps/v8/test/mjsunit/asm/math-max.js b/deps/v8/test/mjsunit/asm/math-max.js
new file mode 100644
index 0000000000..d5dc0d1617
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/math-max.js
@@ -0,0 +1,78 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function Module(stdlib) {
+ "use asm";
+
+ var max = stdlib.Math.max;
+ var fround = stdlib.Math.fround;
+
+ // f: double, double -> double
+ function f(a, b) {
+ a = +a; b = +b;
+ return +max(a, b);
+ }
+
+ // g: signed, signed -> signed
+ function g(a, b) {
+ a = a | 0; b = b | 0;
+ return max(a >> 0, b >> 0) | 0;
+ }
+
+ // h: float, float -> float
+ function h(a, b) {
+ a = fround(a); b = fround(b);
+ return fround(max(a, b));
+ }
+
+ return { f: f, g: g, h: h };
+}
+
+var m = Module({ Math: Math });
+var f = m.f;
+var g = m.g;
+var h = m.h;
+
+assertTrue(isNaN(f(0, NaN)));
+assertFalse(isFinite(f(0, Infinity)));
+assertTrue(isFinite(f(0, -Infinity)));
+
+assertTrue(Object.is(+0, f(-0, +0)));
+assertTrue(Object.is(+0, f(+0, -0)));
+
+assertEquals(0.1, f( 0, 0.1));
+assertEquals(0.5, f( 0.1, 0.5));
+assertEquals(0.5, f( 0.5, -0.1));
+assertEquals(-0.1, f(-0.1, -0.5));
+assertEquals(1, f(-0.5, 1));
+assertEquals(1.1, f( 1, 1.1));
+assertEquals(1.1, f( 1.1, -1));
+assertEquals(-1, f(-1, -1.1));
+assertEquals(0, f(-1.1, 0));
+
+assertEquals( 1, g( 0, 1));
+assertEquals( 5, g( 1, 5));
+assertEquals( 5, g( 5, -1));
+assertEquals(-1, g(-1, -5));
+assertEquals( 1, g(-5, 1));
+assertEquals( 1, g( 1, -1));
+assertEquals( 0, g(-1, 0));
+
+assertEquals(Math.fround(0.1), h( 0, 0.1));
+assertEquals(Math.fround(0.5), h( 0.1, 0.5));
+assertEquals(Math.fround(0.5), h( 0.5, -0.1));
+assertEquals(Math.fround(-0.1), h(-0.1, -0.5));
+assertEquals(Math.fround(1), h(-0.5, 1));
+assertEquals(Math.fround(1.1), h( 1, 1.1));
+assertEquals(Math.fround(1.1), h( 1.1, -1));
+assertEquals(Math.fround(-1), h(-1, -1.1));
+assertEquals(Math.fround(0), h(-1.1, 0));
+
+assertEquals(1, g(0, Number.MIN_SAFE_INTEGER));
+assertEquals(0, g(0, Number.MAX_SAFE_INTEGER));
+
+assertEquals(Number.MAX_VALUE, f(Number.MIN_VALUE, Number.MAX_VALUE));
+assertEquals(Number.MAX_VALUE, f(Number.MAX_VALUE, Number.MIN_VALUE));
+assertEquals(Number.POSITIVE_INFINITY, f(Number.POSITIVE_INFINITY, 0));
+assertEquals(0, f(Number.NEGATIVE_INFINITY, 0));
diff --git a/deps/v8/test/mjsunit/asm/math-min.js b/deps/v8/test/mjsunit/asm/math-min.js
new file mode 100644
index 0000000000..5923d267d0
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/math-min.js
@@ -0,0 +1,78 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function Module(stdlib) {
+ "use asm";
+
+ var min = stdlib.Math.min;
+ var fround = stdlib.Math.fround;
+
+ // f: double, double -> double
+ function f(a, b) {
+ a = +a; b = +b;
+ return +min(a, b);
+ }
+
+ // g: signed, signed -> signed
+ function g(a, b) {
+ a = a | 0; b = b | 0;
+ return min(a >> 0, b >> 0) | 0;
+ }
+
+ // h: float, float -> float
+ function h(a, b) {
+ a = fround(a); b = fround(b);
+ return fround(min(a, b));
+ }
+
+ return { f: f, g: g, h: h };
+}
+
+var m = Module({ Math: Math });
+var f = m.f;
+var g = m.g;
+var h = m.h;
+
+assertTrue(isNaN(f(0, NaN)));
+assertTrue(isFinite(f(0, Infinity)));
+assertFalse(isFinite(f(0, -Infinity)));
+
+assertTrue(Object.is(-0, f(-0, +0)));
+assertTrue(Object.is(-0, f(+0, -0)));
+
+assertEquals(0, f( 0, 0.1));
+assertEquals(0.1, f( 0.1, 0.5));
+assertEquals(-0.1, f( 0.5, -0.1));
+assertEquals(-0.5, f(-0.1, -0.5));
+assertEquals(-0.5, f(-0.5, 1));
+assertEquals(1, f( 1, 1.1));
+assertEquals(-1, f( 1.1, -1));
+assertEquals(-1.1, f(-1, -1.1));
+assertEquals(-1.1, f(-1.1, 0));
+
+assertEquals( 0, g( 0, 1));
+assertEquals( 1, g( 1, 5));
+assertEquals(-1, g( 5, -1));
+assertEquals(-5, g(-1, -5));
+assertEquals(-5, g(-5, 1));
+assertEquals(-1, g( 1, -1));
+assertEquals(-1, g(-1, 0));
+
+assertEquals(Math.fround(0), h( 0, 0.1));
+assertEquals(Math.fround(0.1), h( 0.1, 0.5));
+assertEquals(Math.fround(-0.1), h( 0.5, -0.1));
+assertEquals(Math.fround(-0.5), h(-0.1, -0.5));
+assertEquals(Math.fround(-0.5), h(-0.5, 1));
+assertEquals(Math.fround(1), h( 1, 1.1));
+assertEquals(Math.fround(-1), h( 1.1, -1));
+assertEquals(Math.fround(-1.1), h(-1, -1.1));
+assertEquals(Math.fround(-1.1), h(-1.1, 0));
+
+assertEquals(0, g(0, Number.MIN_SAFE_INTEGER));
+assertEquals(-1, g(0, Number.MAX_SAFE_INTEGER));
+
+assertEquals(Number.MIN_VALUE, f(Number.MIN_VALUE, Number.MAX_VALUE));
+assertEquals(Number.MIN_VALUE, f(Number.MAX_VALUE, Number.MIN_VALUE));
+assertEquals(0, f(Number.POSITIVE_INFINITY, 0));
+assertEquals(Number.NEGATIVE_INFINITY, f(Number.NEGATIVE_INFINITY, 0));
diff --git a/deps/v8/test/mjsunit/asm/noexpose-wasm.js b/deps/v8/test/mjsunit/asm/noexpose-wasm.js
new file mode 100644
index 0000000000..7336ab2727
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/noexpose-wasm.js
@@ -0,0 +1,37 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --noexpose-wasm --validate-asm
+
+assertThrows(() => { let x = WebAssembly.Module; });
+
+function Module(stdlib, foreign, heap) {
+ "use asm";
+ function f1(i) {
+ i = i|0;
+ return (i | 0) / 3 | 0;
+ }
+ function f2(i) {
+ i = i|0;
+ return (i | 0) / 13 | 0;
+ }
+ function f3(i) {
+ i = i|0;
+ return (i | 0) / 1024 | 0;
+ }
+ function f4(i) {
+ i = i|0;
+ return (i | 0) / 3733331 | 0;
+ }
+ return { f1: f1, f2: f2, f3: f3, f4: f4 };
+}
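+
+// "(i | 0) / K | 0" is asm.js integer division: the trailing "| 0"
+// truncates toward zero, so e.g. -7 / 3 | 0 === -2 rather than
+// Math.floor(-7 / 3) === -3. The loop below cross-checks the compiled
+// divisions against the same expressions evaluated outside the module.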
+
+var m = Module(this, {}, new ArrayBuffer(1024));
+
+for (var i = -2147483648; i < 2147483648; i += 3999777) {
+ assertEquals(i / 3 | 0, m.f1(i));
+ assertEquals(i / 13 | 0, m.f2(i));
+ assertEquals(i / 1024 | 0, m.f3(i));
+ assertEquals(i / 3733331 | 0, m.f4(i));
+}
diff --git a/deps/v8/test/mjsunit/code-coverage-precise.js b/deps/v8/test/mjsunit/code-coverage-precise.js
index 3d5d5bac9e..c5569cf010 100644
--- a/deps/v8/test/mjsunit/code-coverage-precise.js
+++ b/deps/v8/test/mjsunit/code-coverage-precise.js
@@ -3,6 +3,7 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --no-always-opt
+// Flags: --no-stress-incremental-marking
// Files: test/mjsunit/code-coverage-utils.js
// Test precise code coverage.
diff --git a/deps/v8/test/mjsunit/compiler/array-push-1.js b/deps/v8/test/mjsunit/compiler/array-push-1.js
new file mode 100644
index 0000000000..58afd6ffe6
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/array-push-1.js
@@ -0,0 +1,239 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+// Test multiple arguments push for PACKED_SMI_ELEMENTS.
+(function() {
+ function push0(a) {
+ return a.push();
+ }
+
+ assertEquals(0, push0([]));
+ assertEquals(1, push0([1]));
+ %OptimizeFunctionOnNextCall(push0);
+ assertEquals(2, push0([1, 2]));
+
+ function push1(a) {
+ return a.push(1);
+ }
+
+ assertEquals(1, push1([]));
+ assertEquals(2, push1([1]));
+ %OptimizeFunctionOnNextCall(push1);
+ assertEquals(3, push1([1, 2]));
+
+ function push2(a) {
+ return a.push(1, 2);
+ }
+
+ assertEquals(2, push2([]));
+ assertEquals(3, push2([1]));
+ %OptimizeFunctionOnNextCall(push2);
+ assertEquals(4, push2([1, 2]));
+
+ function push3(a) {
+ return a.push(1, 2, 3);
+ }
+
+ assertEquals(3, push3([]));
+ assertEquals(4, push3([1]));
+ %OptimizeFunctionOnNextCall(push3);
+ assertEquals(5, push3([1, 2]));
+})();
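+
+// Array.prototype.push returns the new length, so e.g. push2([1]) is 3
+// above. The blocks below repeat the same warm-up / optimize / re-check
+// pattern for each elements kind the inlined push has to support.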
+
+// Test multiple arguments push for HOLEY_SMI_ELEMENTS.
+(function() {
+ function push0(a) {
+ return a.push();
+ }
+
+ assertEquals(1, push0(new Array(1)));
+ assertEquals(2, push0(new Array(2)));
+ %OptimizeFunctionOnNextCall(push0);
+ assertEquals(3, push0(new Array(3)));
+
+ function push1(a) {
+ return a.push(1);
+ }
+
+ assertEquals(2, push1(new Array(1)));
+ assertEquals(3, push1(new Array(2)));
+ %OptimizeFunctionOnNextCall(push1);
+ assertEquals(4, push1(new Array(3)));
+
+ function push2(a) {
+ return a.push(1, 2);
+ }
+
+ assertEquals(3, push2(new Array(1)));
+ assertEquals(4, push2(new Array(2)));
+ %OptimizeFunctionOnNextCall(push2);
+ assertEquals(5, push2(new Array(3)));
+
+ function push3(a) {
+ return a.push(1, 2, 3);
+ }
+
+ assertEquals(4, push3(new Array(1)));
+ assertEquals(5, push3(new Array(2)));
+ %OptimizeFunctionOnNextCall(push3);
+ assertEquals(6, push3(new Array(3)));
+})();
+
+// Test multiple arguments push for PACKED_DOUBLE_ELEMENTS.
+(function() {
+ function push0(a) {
+ return a.push();
+ }
+
+ assertEquals(1, push0([1.1]));
+ assertEquals(2, push0([1.1, 2.2]));
+ %OptimizeFunctionOnNextCall(push0);
+ assertEquals(3, push0([1.1, 2.2, 3.3]));
+
+ function push1(a) {
+ return a.push(1.1);
+ }
+
+ assertEquals(2, push1([1.1]));
+ assertEquals(3, push1([1.1, 2.2]));
+ %OptimizeFunctionOnNextCall(push1);
+ assertEquals(4, push1([1.1, 2.2, 3.3]));
+
+ function push2(a) {
+ return a.push(1.1, 2.2);
+ }
+
+ assertEquals(3, push2([1.1]));
+ assertEquals(4, push2([1.1, 2.2]));
+ %OptimizeFunctionOnNextCall(push2);
+ assertEquals(5, push2([1.1, 2.2, 3.3]));
+
+ function push3(a) {
+ return a.push(1.1, 2.2, 3.3);
+ }
+
+ assertEquals(4, push3([1.1]));
+ assertEquals(5, push3([1.1, 2.2]));
+ %OptimizeFunctionOnNextCall(push3);
+ assertEquals(6, push3([1.1, 2.2, 3.3]));
+})();
+
+// Test multiple arguments push for HOLEY_DOUBLE_ELEMENTS.
+(function() {
+ function push0(a) {
+ return a.push();
+ }
+
+ assertEquals(2, push0([, 1.1]));
+ assertEquals(3, push0([, 1.1, 2.2]));
+ %OptimizeFunctionOnNextCall(push0);
+ assertEquals(4, push0([, 1.1, 2.2, 3.3]));
+
+ function push1(a) {
+ return a.push(1.1);
+ }
+
+ assertEquals(3, push1([, 1.1]));
+ assertEquals(4, push1([, 1.1, 2.2]));
+ %OptimizeFunctionOnNextCall(push1);
+ assertEquals(5, push1([, 1.1, 2.2, 3.3]));
+
+ function push2(a) {
+ return a.push(1.1, 2.2);
+ }
+
+ assertEquals(4, push2([, 1.1]));
+ assertEquals(5, push2([, 1.1, 2.2]));
+ %OptimizeFunctionOnNextCall(push2);
+ assertEquals(6, push2([, 1.1, 2.2, 3.3]));
+
+ function push3(a) {
+ return a.push(1.1, 2.2, 3.3);
+ }
+
+ assertEquals(5, push3([, 1.1]));
+ assertEquals(6, push3([, 1.1, 2.2]));
+ %OptimizeFunctionOnNextCall(push3);
+ assertEquals(7, push3([, 1.1, 2.2, 3.3]));
+})();
+
+// Test multiple arguments push for PACKED_ELEMENTS.
+(function() {
+ function push0(a) {
+ return a.push();
+ }
+
+ assertEquals(1, push0(['1']));
+ assertEquals(2, push0(['1', '2']));
+ %OptimizeFunctionOnNextCall(push0);
+ assertEquals(3, push0(['1', '2', '3']));
+
+ function push1(a) {
+ return a.push('1');
+ }
+
+ assertEquals(2, push1(['1']));
+ assertEquals(3, push1(['1', '2']));
+ %OptimizeFunctionOnNextCall(push1);
+ assertEquals(4, push1(['1', '2', '3']));
+
+ function push2(a) {
+ return a.push('1', '2');
+ }
+
+ assertEquals(3, push2(['1']));
+ assertEquals(4, push2(['1', '2']));
+ %OptimizeFunctionOnNextCall(push2);
+ assertEquals(5, push2(['1', '2', '3']));
+
+ function push3(a) {
+ return a.push('1', '2', '3');
+ }
+
+ assertEquals(4, push3(['1']));
+ assertEquals(5, push3(['1', '2']));
+ %OptimizeFunctionOnNextCall(push3);
+ assertEquals(6, push3(['1', '2', '3']));
+})();
+
+// Test multiple arguments push for HOLEY_ELEMENTS.
+(function() {
+ function push0(a) {
+ return a.push();
+ }
+
+ assertEquals(2, push0([, '1']));
+ assertEquals(3, push0([, '1', '2']));
+ %OptimizeFunctionOnNextCall(push0);
+ assertEquals(4, push0([, '1', '2', '3']));
+
+ function push1(a) {
+ return a.push('1');
+ }
+
+ assertEquals(3, push1([, '1']));
+ assertEquals(4, push1([, '1', '2']));
+ %OptimizeFunctionOnNextCall(push1);
+ assertEquals(5, push1([, '1', '2', '3']));
+
+ function push2(a) {
+ return a.push('1', '2');
+ }
+
+ assertEquals(4, push2([, '1']));
+ assertEquals(5, push2([, '1', '2']));
+ %OptimizeFunctionOnNextCall(push2);
+ assertEquals(6, push2([, '1', '2', '3']));
+
+ function push3(a) {
+ return a.push('1', '2', '3');
+ }
+
+ assertEquals(5, push3([, '1']));
+ assertEquals(6, push3([, '1', '2']));
+ %OptimizeFunctionOnNextCall(push3);
+ assertEquals(7, push3([, '1', '2', '3']));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/array-push-2.js b/deps/v8/test/mjsunit/compiler/array-push-2.js
new file mode 100644
index 0000000000..cb18d71d63
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/array-push-2.js
@@ -0,0 +1,65 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+// Test elements transition from SMI to DOUBLE.
+(function() {
+ const a = [];
+ const foo = (x, y) => a.push(x, y);
+ foo(1, 2);
+ foo(3, 4);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(5, 6.6);
+ assertEquals([1, 2, 3, 4, 5, 6.6], a);
+})();
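+
+// Pushing the non-Smi value 6.6 above forces the backing store to
+// transition from PACKED_SMI_ELEMENTS to PACKED_DOUBLE_ELEMENTS; optimized
+// code specialized on the Smi kind must handle the transition or deopt.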
+(function() {
+ const a = [];
+ const foo = (x, y) => a.push(x, y);
+ foo(1, 2);
+ foo(3, 4);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(5.5, 6.6);
+ assertEquals([1, 2, 3, 4, 5.5, 6.6], a);
+})();
+
+// Test elements transition from SMI to OBJECT.
+(function() {
+ const a = [];
+ const foo = (x, y) => a.push(x, y);
+ foo(1, 2);
+ foo(3, 4);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(5, '6');
+ assertEquals([1, 2, 3, 4, 5, '6'], a);
+})();
+(function() {
+ const a = [];
+ const foo = (x, y) => a.push(x, y);
+ foo(1, 2);
+ foo(3, 4);
+ %OptimizeFunctionOnNextCall(foo);
+ foo('5', '6');
+ assertEquals([1, 2, 3, 4, '5', '6'], a);
+})();
+
+// Test elements transition from DOUBLE to OBJECT.
+(function() {
+ const a = [0.5];
+ const foo = (x, y) => a.push(x, y);
+ foo(1, 2);
+ foo(3, 4);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(5, '6');
+ assertEquals([0.5, 1, 2, 3, 4, 5, '6'], a);
+})();
+(function() {
+ const a = [0.5];
+ const foo = (x, y) => a.push(x, y);
+ foo(1, 2);
+ foo(3, 4);
+ %OptimizeFunctionOnNextCall(foo);
+ foo('5', '6');
+ assertEquals([0.5, 1, 2, 3, 4, '5', '6'], a);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/array-push-3.js b/deps/v8/test/mjsunit/compiler/array-push-3.js
new file mode 100644
index 0000000000..f648eb9ed9
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/array-push-3.js
@@ -0,0 +1,51 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+// Test side effects on arguments evaluation.
+(function() {
+ const a = [];
+ const bar = x => { a.push(x); return x; };
+ const foo = x => a.push(bar(x), bar(x));
+ foo(1);
+ foo(2);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(3);
+ assertEquals([1,1,1,1, 2,2,2,2, 3,3,3,3], a);
+})();
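+
+// Arguments are evaluated left to right before push runs, and each bar(x)
+// call itself appends to a, so every foo(x) call adds four elements: two
+// from the bar calls and two from push storing the returned values.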
+
+// Test invalidation on arguments evaluation.
+(function() {
+ let y = 1;
+ const a = [];
+ const bar = x => { a.push(y); return x; }
+ const foo = x => a.push(bar(x), bar(x));
+ foo(1);
+ y = 2;
+ foo(2);
+ %OptimizeFunctionOnNextCall(foo);
+ y = 3;
+ foo(3);
+ assertOptimized(foo);
+ y = 4.4;
+ foo(4);
+ assertEquals([1,1,1,1, 2,2,2,2, 3,3,3,3, 4.4,4.4,4,4], a);
+})();
+(function() {
+ let y = 1;
+ const a = [0.5];
+ const bar = x => { a.push(y); return x; }
+ const foo = x => a.push(bar(x), bar(x));
+ foo(1);
+ y = 2;
+ foo(2);
+ %OptimizeFunctionOnNextCall(foo);
+ y = 3;
+ foo(3);
+ assertOptimized(foo);
+ y = '4';
+ foo(4);
+ assertEquals([0.5, 1,1,1,1, 2,2,2,2, 3,3,3,3, '4','4',4,4], a);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/array-subclass.js b/deps/v8/test/mjsunit/compiler/array-subclass.js
new file mode 100644
index 0000000000..d20b669661
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/array-subclass.js
@@ -0,0 +1,396 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Test Array subclass default constructor with no parameters.
+(function() {
+ const A = class A extends Array { };
+
+ function foo() { return new A; }
+
+ assertInstanceof(foo(), A);
+ assertEquals(0, foo().length);
+ assertInstanceof(foo(), A);
+ assertEquals(0, foo().length);
+ %OptimizeFunctionOnNextCall(foo);
+ assertInstanceof(foo(), A);
+ assertEquals(0, foo().length);
+})();
+
+// Test Array subclass default constructor with small constant length.
+(function() {
+ const A = class A extends Array { };
+ const L = 4;
+
+ function foo() { return new A(L); }
+
+ assertInstanceof(foo(), A);
+ assertEquals(L, foo().length);
+ assertInstanceof(foo(), A);
+ assertEquals(L, foo().length);
+ %OptimizeFunctionOnNextCall(foo);
+ assertInstanceof(foo(), A);
+ assertEquals(L, foo().length);
+})();
+
+// Test Array subclass default constructor with large constant length.
+(function() {
+ const A = class A extends Array { };
+ const L = 1024 * 1024;
+
+ function foo() { return new A(L); }
+
+ assertInstanceof(foo(), A);
+ assertEquals(L, foo().length);
+ assertInstanceof(foo(), A);
+ assertEquals(L, foo().length);
+ %OptimizeFunctionOnNextCall(foo);
+ assertInstanceof(foo(), A);
+ assertEquals(L, foo().length);
+})();
+
+// Test Array subclass default constructor with known boolean.
+(function() {
+ const A = class A extends Array { };
+
+ function foo() { return new A(true); }
+
+ assertInstanceof(foo(), A);
+ assertEquals(1, foo().length);
+ assertEquals(true, foo()[0]);
+ assertInstanceof(foo(), A);
+ assertEquals(1, foo().length);
+ assertEquals(true, foo()[0]);
+ %OptimizeFunctionOnNextCall(foo);
+ assertInstanceof(foo(), A);
+ assertEquals(1, foo().length);
+ assertEquals(true, foo()[0]);
+})();
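+
+// Only a single numeric argument is interpreted as a length; any other
+// single argument (true, "", an object) becomes element 0 of a length-1
+// array, as the boolean case above and the string and object cases below
+// demonstrate.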
+
+// Test Array subclass default constructor with known string.
+(function() {
+ const A = class A extends Array { };
+
+ function foo() { return new A(""); }
+
+ assertInstanceof(foo(), A);
+ assertEquals(1, foo().length);
+ assertEquals("", foo()[0]);
+ assertInstanceof(foo(), A);
+ assertEquals(1, foo().length);
+ assertEquals("", foo()[0]);
+ %OptimizeFunctionOnNextCall(foo);
+ assertInstanceof(foo(), A);
+ assertEquals(1, foo().length);
+ assertEquals("", foo()[0]);
+})();
+
+// Test Array subclass default constructor with known object.
+(function() {
+ const A = class A extends Array { };
+ const O = {foo: "foo"};
+
+ function foo() { return new A(O); }
+
+ assertInstanceof(foo(), A);
+ assertEquals(1, foo().length);
+ assertSame(O, foo()[0]);
+ assertInstanceof(foo(), A);
+ assertEquals(1, foo().length);
+ assertSame(O, foo()[0]);
+ %OptimizeFunctionOnNextCall(foo);
+ assertInstanceof(foo(), A);
+ assertEquals(1, foo().length);
+ assertSame(O, foo()[0]);
+})();
+
+// Test Array subclass default constructor with known small integers.
+(function() {
+ const A = class A extends Array { };
+
+ function foo() { return new A(1, 2, 3); }
+
+ assertInstanceof(foo(), A);
+ assertEquals(3, foo().length);
+ assertEquals(1, foo()[0]);
+ assertEquals(2, foo()[1]);
+ assertEquals(3, foo()[2]);
+ %OptimizeFunctionOnNextCall(foo);
+ assertInstanceof(foo(), A);
+ assertEquals(3, foo().length);
+ assertEquals(1, foo()[0]);
+ assertEquals(2, foo()[1]);
+ assertEquals(3, foo()[2]);
+})();
+
+// Test Array subclass default constructor with known numbers.
+(function() {
+ const A = class A extends Array { };
+
+ function foo() { return new A(1.1, 2.2, 3.3); }
+
+ assertInstanceof(foo(), A);
+ assertEquals(3, foo().length);
+ assertEquals(1.1, foo()[0]);
+ assertEquals(2.2, foo()[1]);
+ assertEquals(3.3, foo()[2]);
+ %OptimizeFunctionOnNextCall(foo);
+ assertInstanceof(foo(), A);
+ assertEquals(3, foo().length);
+ assertEquals(1.1, foo()[0]);
+ assertEquals(2.2, foo()[1]);
+ assertEquals(3.3, foo()[2]);
+})();
+
+// Test Array subclass default constructor with known strings.
+(function() {
+ const A = class A extends Array { };
+
+ function foo() { return new A("a", "b", "c", "d"); }
+
+ assertInstanceof(foo(), A);
+ assertEquals(4, foo().length);
+ assertEquals("a", foo()[0]);
+ assertEquals("b", foo()[1]);
+ assertEquals("c", foo()[2]);
+ assertEquals("d", foo()[3]);
+ %OptimizeFunctionOnNextCall(foo);
+ assertInstanceof(foo(), A);
+ assertEquals(4, foo().length);
+ assertEquals("a", foo()[0]);
+ assertEquals("b", foo()[1]);
+ assertEquals("c", foo()[2]);
+ assertEquals("d", foo()[3]);
+})();
+
+// Test Array subclass constructor with no parameters.
+(function() {
+ const A = class A extends Array {
+ constructor() {
+ super();
+ this.bar = 1;
+ }
+ };
+
+ function foo() { return new A; }
+
+ assertInstanceof(foo(), A);
+ assertEquals(0, foo().length);
+ assertEquals(1, foo().bar);
+ assertInstanceof(foo(), A);
+ assertEquals(0, foo().length);
+ assertEquals(1, foo().bar);
+ %OptimizeFunctionOnNextCall(foo);
+ assertInstanceof(foo(), A);
+ assertEquals(0, foo().length);
+ assertEquals(1, foo().bar);
+})();
+
+// Test Array subclass constructor with small constant length.
+(function() {
+ const A = class A extends Array {
+ constructor(n) {
+ super(n);
+ this.bar = 1;
+ }
+ };
+ const L = 4;
+
+ function foo() { return new A(L); }
+
+ assertInstanceof(foo(), A);
+ assertEquals(L, foo().length);
+ assertEquals(1, foo().bar);
+ assertInstanceof(foo(), A);
+ assertEquals(L, foo().length);
+ assertEquals(1, foo().bar);
+ %OptimizeFunctionOnNextCall(foo);
+ assertInstanceof(foo(), A);
+ assertEquals(L, foo().length);
+ assertEquals(1, foo().bar);
+})();
+
+// Test Array subclass constructor with large constant length.
+(function() {
+ const A = class A extends Array {
+ constructor(n) {
+ super(n);
+ this.bar = 1;
+ }
+ };
+ const L = 1024 * 1024;
+
+ function foo() { return new A(L); }
+
+ assertInstanceof(foo(), A);
+ assertEquals(L, foo().length);
+ assertEquals(1, foo().bar);
+ assertInstanceof(foo(), A);
+ assertEquals(L, foo().length);
+ assertEquals(1, foo().bar);
+ %OptimizeFunctionOnNextCall(foo);
+ assertInstanceof(foo(), A);
+ assertEquals(L, foo().length);
+ assertEquals(1, foo().bar);
+})();
+
+// Test Array subclass constructor with known boolean.
+(function() {
+ const A = class A extends Array {
+ constructor(n) {
+ super(n);
+ this.bar = 1;
+ }
+ };
+
+ function foo() { return new A(true); }
+
+ assertInstanceof(foo(), A);
+ assertEquals(1, foo().length);
+ assertEquals(true, foo()[0]);
+ assertEquals(1, foo().bar);
+ assertInstanceof(foo(), A);
+ assertEquals(1, foo().length);
+ assertEquals(true, foo()[0]);
+ assertEquals(1, foo().bar);
+ %OptimizeFunctionOnNextCall(foo);
+ assertInstanceof(foo(), A);
+ assertEquals(1, foo().length);
+ assertEquals(true, foo()[0]);
+ assertEquals(1, foo().bar);
+})();
+
+// Test Array subclass constructor with known string.
+(function() {
+ const A = class A extends Array {
+ constructor(n) {
+ super(n);
+ this.bar = 1;
+ }
+ };
+
+ function foo() { return new A(""); }
+
+ assertInstanceof(foo(), A);
+ assertEquals(1, foo().length);
+ assertEquals("", foo()[0]);
+ assertEquals(1, foo().bar);
+ assertInstanceof(foo(), A);
+ assertEquals(1, foo().length);
+ assertEquals("", foo()[0]);
+ assertEquals(1, foo().bar);
+ %OptimizeFunctionOnNextCall(foo);
+ assertInstanceof(foo(), A);
+ assertEquals(1, foo().length);
+ assertEquals("", foo()[0]);
+ assertEquals(1, foo().bar);
+})();
+
+// Test Array subclass constructor with known object.
+(function() {
+ const A = class A extends Array {
+ constructor(n) {
+ super(n);
+ this.bar = 1;
+ }
+ };
+ const O = {foo: "foo"};
+
+ function foo() { return new A(O); }
+
+ assertInstanceof(foo(), A);
+ assertEquals(1, foo().length);
+ assertSame(O, foo()[0]);
+ assertEquals(1, foo().bar);
+ assertInstanceof(foo(), A);
+ assertEquals(1, foo().length);
+ assertSame(O, foo()[0]);
+ assertEquals(1, foo().bar);
+ %OptimizeFunctionOnNextCall(foo);
+ assertInstanceof(foo(), A);
+ assertEquals(1, foo().length);
+ assertSame(O, foo()[0]);
+ assertEquals(1, foo().bar);
+})();
+
+// Test Array subclass constructor with known small integers.
+(function() {
+ const A = class A extends Array {
+ constructor(x, y, z) {
+ super(x, y, z);
+ this.bar = 1;
+ }
+ };
+
+ function foo() { return new A(1, 2, 3); }
+
+ assertInstanceof(foo(), A);
+ assertEquals(3, foo().length);
+ assertEquals(1, foo()[0]);
+ assertEquals(2, foo()[1]);
+ assertEquals(3, foo()[2]);
+ assertEquals(1, foo().bar);
+ %OptimizeFunctionOnNextCall(foo);
+ assertInstanceof(foo(), A);
+ assertEquals(3, foo().length);
+ assertEquals(1, foo()[0]);
+ assertEquals(2, foo()[1]);
+ assertEquals(3, foo()[2]);
+ assertEquals(1, foo().bar);
+})();
+
+// Test Array subclass constructor with known numbers.
+(function() {
+ const A = class A extends Array {
+ constructor(x, y, z) {
+ super(x, y, z);
+ this.bar = 1;
+ }
+ };
+
+ function foo() { return new A(1.1, 2.2, 3.3); }
+
+ assertInstanceof(foo(), A);
+ assertEquals(3, foo().length);
+ assertEquals(1.1, foo()[0]);
+ assertEquals(2.2, foo()[1]);
+ assertEquals(3.3, foo()[2]);
+ assertEquals(1, foo().bar);
+ %OptimizeFunctionOnNextCall(foo);
+ assertInstanceof(foo(), A);
+ assertEquals(3, foo().length);
+ assertEquals(1.1, foo()[0]);
+ assertEquals(2.2, foo()[1]);
+ assertEquals(3.3, foo()[2]);
+ assertEquals(1, foo().bar);
+})();
+
+// Test Array subclass constructor with known strings.
+(function() {
+ const A = class A extends Array {
+ constructor(a, b, c, d) {
+ super(a, b, c, d);
+ this.bar = 1;
+ }
+ };
+
+ function foo() { return new A("a", "b", "c", "d"); }
+
+ assertInstanceof(foo(), A);
+ assertEquals(4, foo().length);
+ assertEquals("a", foo()[0]);
+ assertEquals("b", foo()[1]);
+ assertEquals("c", foo()[2]);
+ assertEquals("d", foo()[3]);
+ assertEquals(1, foo().bar);
+ %OptimizeFunctionOnNextCall(foo);
+ assertInstanceof(foo(), A);
+ assertEquals(4, foo().length);
+ assertEquals("a", foo()[0]);
+ assertEquals("b", foo()[1]);
+ assertEquals("c", foo()[2]);
+ assertEquals("d", foo()[3]);
+ assertEquals(1, foo().bar);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/constant-fold-cow-array.js b/deps/v8/test/mjsunit/compiler/constant-fold-cow-array.js
new file mode 100644
index 0000000000..1ab022611c
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/constant-fold-cow-array.js
@@ -0,0 +1,35 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-always-opt --opt
+
+// Check that we properly deoptimize TurboFan'ed code when we constant-fold
+// elements from a COW array and the length of the array later changes.
+(function() {
+ const a = [1, 2, 3];
+ const foo = () => a[0];
+ assertEquals(1, foo());
+ assertEquals(1, foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo());
+ assertOptimized(foo);
+ a.length = 1;
+ assertEquals(1, foo());
+ assertUnoptimized(foo);
+})();
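+
+// The literal [1, 2, 3] starts out on a shared copy-on-write (COW) backing
+// store; both the length change above and the element write below copy the
+// store, which must invalidate the constant-folded element load.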
+
+// Check that we properly deoptimize TurboFan'ed code when we constant-fold
+// elements from a COW array and an element of the array later changes.
+(function() {
+ const a = [1, 2, 3];
+ const foo = () => a[0];
+ assertEquals(1, foo());
+ assertEquals(1, foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo());
+ assertOptimized(foo);
+ a[0] = 42;
+ assertEquals(42, foo());
+ assertUnoptimized(foo);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/constant-fold-tostring.js b/deps/v8/test/mjsunit/compiler/constant-fold-tostring.js
new file mode 100644
index 0000000000..e9c6d916ce
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/constant-fold-tostring.js
@@ -0,0 +1,26 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-always-opt --opt
+
+// Check that constant-folding of ToString operations works properly for NaN.
+(function() {
+ const foo = () => `${NaN}`;
+ assertEquals("NaN", foo());
+ assertEquals("NaN", foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals("NaN", foo());
+})();
+
+// Check that constant-folding of ToString operations works properly for 0/-0.
+(function() {
+ const foo = x => `${x ? 0 : -0}`;
+ assertEquals("0", foo(true));
+ assertEquals("0", foo(false));
+ assertEquals("0", foo(true));
+ assertEquals("0", foo(false));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals("0", foo(true));
+ assertEquals("0", foo(false));
+})();
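+
+// ToString(-0) is "0" (the sign is dropped), so both branches stringify
+// identically and constant folding must not produce a "-0" string.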
diff --git a/deps/v8/test/mjsunit/compiler/constructor-inlining.js b/deps/v8/test/mjsunit/compiler/constructor-inlining.js
index d43545ada0..4ad426ff61 100644
--- a/deps/v8/test/mjsunit/compiler/constructor-inlining.js
+++ b/deps/v8/test/mjsunit/compiler/constructor-inlining.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-restrict-constructor-return
+// Flags: --harmony-restrict-constructor-return --allow-natives-syntax --stress-inline
if (this.FLAG_harmony_restrict_constructor_return === undefined)
this.FLAG_harmony_restrict_constructor_return = true;
@@ -54,9 +54,6 @@ function Constr(use, x){
}
}
-%SetForceInlineFlag(Base);
-%SetForceInlineFlag(Derived);
-%SetForceInlineFlag(Constr);
var a = {};
var b = {};
diff --git a/deps/v8/test/mjsunit/compiler/dead-string-char-code-at.js b/deps/v8/test/mjsunit/compiler/dead-string-char-code-at.js
deleted file mode 100644
index 9f01541c90..0000000000
--- a/deps/v8/test/mjsunit/compiler/dead-string-char-code-at.js
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax
-
-var S1 = "string1";
-var S2 = "@@string2";
-
-function dead1(a, b) {
- var x = %_StringCharCodeAt(a, 4);
- return a; // x is dead code
-}
-
-function dead2(a, b) {
- var x = %_StringCharCodeAt(a, 3);
- var y = %_StringCharCodeAt(b, 1);
- return a; // x and y are both dead
-}
-
-function dead3(a, b) {
- a = a ? "11" : "12";
- b = b ? "13" : "14";
- var x = %_StringCharCodeAt(a, 2);
- var y = %_StringCharCodeAt(b, 0);
- return a; // x and y are both dead
-}
-
-function test() {
- var S3 = S1 + S2;
-
- assertEquals(S1, dead1(S1, S2));
- assertEquals(S1, dead2(S1, S2));
- assertEquals("11", dead3(S1, S2));
-
- assertEquals(S2, dead1(S2, 677));
- assertEquals(S2, dead2(S2, S3));
- assertEquals("11", dead3(S2, S3));
-
- assertEquals(S3, dead1(S3, 399));
- assertEquals(S3, dead2(S3, "false"));
- assertEquals("12", dead3(0, 32));
-
- assertEquals(S3, dead1(S3, 0));
- assertEquals(S3, dead2(S3, S1));
- assertEquals("11", dead3(S3, 0));
-
- assertEquals("true", dead1("true", 0));
- assertEquals("true", dead2("true", S3));
- assertEquals("11", dead3("true", 0));
-}
-
-test();
-test();
-%OptimizeFunctionOnNextCall(dead1);
-%OptimizeFunctionOnNextCall(dead2);
-%OptimizeFunctionOnNextCall(dead3);
-test();
diff --git a/deps/v8/test/mjsunit/compiler/dead-string-char-code-at2.js b/deps/v8/test/mjsunit/compiler/dead-string-char-code-at2.js
deleted file mode 100644
index 9f01541c90..0000000000
--- a/deps/v8/test/mjsunit/compiler/dead-string-char-code-at2.js
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax
-
-var S1 = "string1";
-var S2 = "@@string2";
-
-function dead1(a, b) {
- var x = %_StringCharCodeAt(a, 4);
- return a; // x is dead code
-}
-
-function dead2(a, b) {
- var x = %_StringCharCodeAt(a, 3);
- var y = %_StringCharCodeAt(b, 1);
- return a; // x and y are both dead
-}
-
-function dead3(a, b) {
- a = a ? "11" : "12";
- b = b ? "13" : "14";
- var x = %_StringCharCodeAt(a, 2);
- var y = %_StringCharCodeAt(b, 0);
- return a; // x and y are both dead
-}
-
-function test() {
- var S3 = S1 + S2;
-
- assertEquals(S1, dead1(S1, S2));
- assertEquals(S1, dead2(S1, S2));
- assertEquals("11", dead3(S1, S2));
-
- assertEquals(S2, dead1(S2, 677));
- assertEquals(S2, dead2(S2, S3));
- assertEquals("11", dead3(S2, S3));
-
- assertEquals(S3, dead1(S3, 399));
- assertEquals(S3, dead2(S3, "false"));
- assertEquals("12", dead3(0, 32));
-
- assertEquals(S3, dead1(S3, 0));
- assertEquals(S3, dead2(S3, S1));
- assertEquals("11", dead3(S3, 0));
-
- assertEquals("true", dead1("true", 0));
- assertEquals("true", dead2("true", S3));
- assertEquals("11", dead3("true", 0));
-}
-
-test();
-test();
-%OptimizeFunctionOnNextCall(dead1);
-%OptimizeFunctionOnNextCall(dead2);
-%OptimizeFunctionOnNextCall(dead3);
-test();
diff --git a/deps/v8/test/mjsunit/compiler/deopt-closure.js b/deps/v8/test/mjsunit/compiler/deopt-closure.js
new file mode 100644
index 0000000000..2ce531faf0
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-closure.js
@@ -0,0 +1,47 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function TestMaterializeTargetOfInterpretedFrame() {
+ function f(x) {
+ function g() {
+ %_DeoptimizeNow();
+ return x + 1;
+ }
+ return g();
+ }
+ assertEquals(24, f(23));
+ assertEquals(43, f(42));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(66, f(65));
+})();
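+
+// %_DeoptimizeNow() fires while g is inlined into optimized f, so g's
+// frame exists only virtually and the deoptimizer has to re-materialize
+// the closure for the interpreted frame. The two variants below exercise
+// the same path through an arguments-adaptor and a construct-stub frame.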
+
+(function TestMaterializeTargetOfArgumentsAdaptorFrame() {
+ function f(x) {
+ function g(a, b, c) {
+ %_DeoptimizeNow();
+ return x + 1;
+ }
+ return g();
+ }
+ assertEquals(24, f(23));
+ assertEquals(43, f(42));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(66, f(65));
+})();
+
+(function TestMaterializeTargetOfConstructStubFrame() {
+ function f(x) {
+ function g() {
+ %_DeoptimizeNow();
+ this.val = x + 1;
+ }
+ return new g();
+ }
+ assertEquals({ val: 24 }, f(23));
+ assertEquals({ val: 43 }, f(42));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals({ val: 66 }, f(65));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/deopt-simple-try-catch.js b/deps/v8/test/mjsunit/compiler/deopt-simple-try-catch.js
new file mode 100644
index 0000000000..b0e382e38c
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-simple-try-catch.js
@@ -0,0 +1,28 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Small program to test deoptimization with exception handling.
+
+function g() {
+ %DeoptimizeFunction(f);
+ throw 42;
+}
+%NeverOptimizeFunction(g);
+
+function f() {
+ var a = 1;
+ try {
+ g();
+ } catch (e) {
+ return e + a;
+ }
+}
+
+assertEquals(43, f());
+assertEquals(43, f());
+%OptimizeFunctionOnNextCall(f);
+assertEquals(43, f());
diff --git a/deps/v8/test/mjsunit/compiler/for-in-1.js b/deps/v8/test/mjsunit/compiler/for-in-1.js
new file mode 100644
index 0000000000..80add89609
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/for-in-1.js
@@ -0,0 +1,20 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+// Ensure that we properly check for elements on the receiver.
+function foo(o) {
+ var s = "";
+ for (var i in o) s += i;
+ return s;
+}
+
+var a = [];
+assertEquals("", foo(a));
+assertEquals("", foo(a));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals("", foo(a));
+a[0] = 1;
+assertEquals("0", foo(a));
diff --git a/deps/v8/test/mjsunit/compiler/for-in-2.js b/deps/v8/test/mjsunit/compiler/for-in-2.js
new file mode 100644
index 0000000000..a586aff94e
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/for-in-2.js
@@ -0,0 +1,20 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+// Ensure that we properly check for elements on the prototypes.
+function foo(o) {
+ var s = "";
+ for (var i in o) s += i;
+ return s;
+}
+
+var a = [];
+assertEquals("", foo(a));
+assertEquals("", foo(a));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals("", foo(a));
+Array.prototype[0] = 1;
+assertEquals("0", foo(a));
diff --git a/deps/v8/test/mjsunit/compiler/for-in-3.js b/deps/v8/test/mjsunit/compiler/for-in-3.js
new file mode 100644
index 0000000000..80f3fa50c9
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/for-in-3.js
@@ -0,0 +1,20 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+// Ensure that we properly check for elements on the prototypes.
+function foo(o) {
+ var s = "";
+ for (var i in o) s += i;
+ return s;
+}
+
+var o = {};
+assertEquals("", foo(o));
+assertEquals("", foo(o));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals("", foo(o));
+Object.prototype[0] = 1;
+assertEquals("0", foo(o));
diff --git a/deps/v8/test/mjsunit/compiler/for-in-4.js b/deps/v8/test/mjsunit/compiler/for-in-4.js
new file mode 100644
index 0000000000..d15c3484dd
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/for-in-4.js
@@ -0,0 +1,20 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+// Ensure that we properly check for properties on the prototypes.
+function foo(o) {
+ var s = "";
+ for (var i in o) s += i;
+ return s;
+}
+
+var a = [];
+assertEquals("", foo(a));
+assertEquals("", foo(a));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals("", foo(a));
+Array.prototype.x = 4;
+assertEquals("x", foo(a));
diff --git a/deps/v8/test/mjsunit/compiler/for-in-5.js b/deps/v8/test/mjsunit/compiler/for-in-5.js
new file mode 100644
index 0000000000..8f469ab1b3
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/for-in-5.js
@@ -0,0 +1,19 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+// Ensure that we properly check for properties on the prototypes.
+function foo(o) {
+ var s = "";
+ for (var i in o) s += i;
+ return s;
+}
+var o = {a:1, b:2, c:3};
+assertEquals("abc", foo(o));
+assertEquals("abc", foo(o));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals("abc", foo(o));
+Object.prototype.d = 4;
+assertEquals("abcd", foo(o));
diff --git a/deps/v8/test/mjsunit/compiler/function-caller.js b/deps/v8/test/mjsunit/compiler/function-caller.js
new file mode 100644
index 0000000000..1192e680cb
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/function-caller.js
@@ -0,0 +1,25 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function TestInlineAllocatedCaller() {
+ function g() {
+ var caller = g.caller;
+ caller.foo = 23;
+ assertEquals(23, caller.foo);
+ assertEquals(23, g.caller.foo);
+ assertSame(caller, g.caller);
+ }
+ %NeverOptimizeFunction(g);
+
+ function f() {
+ (function caller() { g() })();
+ }
+
+ f();
+ f();
+ %OptimizeFunctionOnNextCall(f);
+ f();
+})();
diff --git a/deps/v8/test/mjsunit/compiler/object-constructor.js b/deps/v8/test/mjsunit/compiler/object-constructor.js
new file mode 100644
index 0000000000..162416fd57
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/object-constructor.js
@@ -0,0 +1,51 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Common pattern in Webpack 3 generated bundles; see
+// https://github.com/webpack/webpack/issues/5600 for details.
+(function ObjectConstructorWithKnownFunction() {
+ "use strict";
+ class A {
+ bar() { return this; }
+ };
+ function foo(a) {
+ return Object(a.bar)();
+ }
+ assertEquals(undefined, foo(new A));
+ assertEquals(undefined, foo(new A));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(undefined, foo(new A));
+})();
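+
+// Object(v) returns v unchanged when v is already an object, so
+// Object(a.bar)() invokes bar with an undefined receiver; in the strict
+// class body "this" then stays undefined, which is the returned value.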
+
+(function ObjectConstructorWithString() {
+ "use strict";
+ function foo() {
+ return Object("a");
+ }
+ assertEquals('object', typeof foo());
+ assertEquals('object', typeof foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals('object', typeof foo());
+})();
+
+// Object constructor subclassing via class factories; see
+// https://twitter.com/FremyCompany/status/905977048006402048
+// for the hint.
+(function ObjectConstructorSubClassing() {
+ "use strict";
+ const Factory = Base => class A extends Base {};
+ const A = Factory(Object);
+ function foo() {
+ return new A(1, 2, 3);
+ }
+ assertInstanceof(foo(), A);
+ assertInstanceof(foo(), Object);
+ assertInstanceof(foo(), A);
+ assertInstanceof(foo(), Object);
+ %OptimizeFunctionOnNextCall(foo);
+ assertInstanceof(foo(), A);
+ assertInstanceof(foo(), Object);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/object-is.js b/deps/v8/test/mjsunit/compiler/object-is.js
new file mode 100644
index 0000000000..9537d78e3b
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/object-is.js
@@ -0,0 +1,143 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function() {
+ function foo(o) { return Object.is(o, -0); }
+ assertTrue(foo(-0));
+ assertFalse(foo(0));
+ assertFalse(foo(NaN));
+ assertFalse(foo(''));
+ assertFalse(foo([]));
+ assertFalse(foo({}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(-0));
+ assertFalse(foo(0));
+ assertFalse(foo(NaN));
+ assertFalse(foo(''));
+ assertFalse(foo([]));
+ assertFalse(foo({}));
+})();
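+
+// Object.is implements SameValue: unlike ===, it distinguishes 0 from -0
+// and treats NaN as equal to itself. The "+o" variants below give the
+// compiler a known-number input that the comparison can specialize on.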
+
+(function() {
+ function foo(o) { return Object.is(-0, o); }
+ assertTrue(foo(-0));
+ assertFalse(foo(0));
+ assertFalse(foo(NaN));
+ assertFalse(foo(''));
+ assertFalse(foo([]));
+ assertFalse(foo({}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(-0));
+ assertFalse(foo(0));
+ assertFalse(foo(NaN));
+ assertFalse(foo(''));
+ assertFalse(foo([]));
+ assertFalse(foo({}));
+})();
+
+(function() {
+ function foo(o) { return Object.is(+o, -0); }
+ assertTrue(foo(-0));
+ assertFalse(foo(0));
+ assertFalse(foo(NaN));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(-0));
+ assertFalse(foo(0));
+ assertFalse(foo(NaN));
+})();
+
+(function() {
+ function foo(o) { return Object.is(-0, +o); }
+ assertTrue(foo(-0));
+ assertFalse(foo(0));
+ assertFalse(foo(NaN));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(-0));
+ assertFalse(foo(0));
+ assertFalse(foo(NaN));
+})();
+
+(function() {
+ function foo(o) { return Object.is(o, NaN); }
+ assertFalse(foo(-0));
+ assertFalse(foo(0));
+ assertTrue(foo(NaN));
+ assertFalse(foo(''));
+ assertFalse(foo([]));
+ assertFalse(foo({}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(-0));
+ assertFalse(foo(0));
+ assertTrue(foo(NaN));
+ assertFalse(foo(''));
+ assertFalse(foo([]));
+ assertFalse(foo({}));
+})();
+
+(function() {
+ function foo(o) { return Object.is(NaN, o); }
+ assertFalse(foo(-0));
+ assertFalse(foo(0));
+ assertTrue(foo(NaN));
+ assertFalse(foo(''));
+ assertFalse(foo([]));
+ assertFalse(foo({}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(-0));
+ assertFalse(foo(0));
+ assertTrue(foo(NaN));
+ assertFalse(foo(''));
+ assertFalse(foo([]));
+ assertFalse(foo({}));
+})();
+
+(function() {
+ function foo(o) { return Object.is(+o, NaN); }
+ assertFalse(foo(-0));
+ assertFalse(foo(0));
+ assertTrue(foo(NaN));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(-0));
+ assertFalse(foo(0));
+ assertTrue(foo(NaN));
+})();
+
+(function() {
+ function foo(o) { return Object.is(NaN, +o); }
+ assertFalse(foo(-0));
+ assertFalse(foo(0));
+ assertTrue(foo(NaN));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(-0));
+ assertFalse(foo(0));
+ assertTrue(foo(NaN));
+})();
+
+(function() {
+ function foo(o) { return Object.is(`${o}`, "foo"); }
+ assertFalse(foo("bar"));
+ assertTrue(foo("foo"));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo("bar"));
+ assertTrue(foo("foo"));
+})();
+
+(function() {
+ function foo(o) { return Object.is(o, o); }
+ assertTrue(foo(-0));
+ assertTrue(foo(0));
+ assertTrue(foo(NaN));
+ assertTrue(foo(''));
+ assertTrue(foo([]));
+ assertTrue(foo({}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(-0));
+ assertTrue(foo(0));
+ assertTrue(foo(NaN));
+ assertTrue(foo(''));
+ assertTrue(foo([]));
+ assertTrue(foo({}));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/polymorphic-symbols.js b/deps/v8/test/mjsunit/compiler/polymorphic-symbols.js
new file mode 100644
index 0000000000..e954d50fa8
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/polymorphic-symbols.js
@@ -0,0 +1,48 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function() {
+ const symbol = Symbol('symbol');
+ const OBJS = [
+ {[symbol]: 0, a: 1},
+ {[symbol]: 1, b: 2},
+ {[symbol]: 2, c: 3},
+ {[symbol]: 3, d: 4}
+ ];
+ function foo(o) { return o[symbol]; }
+ for (let i = 0; i < OBJS.length; ++i) {
+ assertEquals(i, foo(OBJS[i]));
+ assertEquals(i, foo(OBJS[i]));
+ }
+ %OptimizeFunctionOnNextCall(foo);
+ for (let i = 0; i < OBJS.length; ++i) {
+ assertEquals(i, foo(OBJS[i]));
+ assertEquals(i, foo(OBJS[i]));
+ }
+})();
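+
+// Four object shapes share the symbol-keyed property, so foo's access
+// site becomes polymorphic; the optimized code has to dispatch on the map
+// instead of specializing to a single shape.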
+
+(function() {
+ const symbol = Symbol('symbol');
+ const OBJS = [
+ {[symbol]: 0, a: 1},
+ {[symbol]: 1, b: 2},
+ {[symbol]: 2, c: 3},
+ {[symbol]: 3, d: 4}
+ ];
+ function foo(o) { o[symbol] = o; }
+ for (let i = 0; i < OBJS.length; ++i) {
+ foo(OBJS[i]);
+ foo(OBJS[i]);
+ }
+ %OptimizeFunctionOnNextCall(foo);
+ for (let i = 0; i < OBJS.length; ++i) {
+ foo(OBJS[i]);
+ foo(OBJS[i]);
+ }
+ for (const o of OBJS) {
+ assertEquals(o, o[symbol]);
+ }
+})();
diff --git a/deps/v8/test/mjsunit/compiler/regress-700883.js b/deps/v8/test/mjsunit/compiler/regress-700883.js
index 0b148b5e08..41440f3a3f 100644
--- a/deps/v8/test/mjsunit/compiler/regress-700883.js
+++ b/deps/v8/test/mjsunit/compiler/regress-700883.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --stress-inline
function add(x) {
return x + x;
@@ -10,7 +10,6 @@ function add(x) {
add(0);
add(1);
-%SetForceInlineFlag(add);
var min = Math.min;
function foo(x) {
@@ -19,5 +18,6 @@ function foo(x) {
return min(y, x);
}
+foo();
%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/compiler/regress-758096.js b/deps/v8/test/mjsunit/compiler/regress-758096.js
new file mode 100644
index 0000000000..1ed32c0263
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-758096.js
@@ -0,0 +1,54 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function() {
+ var x = 1;
+ x.__proto__.f = function() { return 1; }
+
+ function g() {}
+ g.prototype.f = function() { return 3; };
+ var y = new g();
+
+ function f(obj) {
+ return obj.f();
+ }
+
+ f(x);
+ f(y);
+ f(x);
+ f(y);
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(1, f(x));
+ assertEquals(3, f(y));
+})();
+
+(function() {
+ function f() { return 1; }
+ function g() { return 2; }
+
+ var global;
+
+ function h(s) {
+ var fg;
+ var a = 0;
+ if (s) {
+ global = 0;
+ a = 1;
+ fg = f;
+ } else {
+ global = 1;
+ fg = g;
+ }
+ return fg() + a;
+ }
+
+ h(0);
+ h(0);
+ h(1);
+ h(1);
+ %OptimizeFunctionOnNextCall(h);
+ assertEquals(2, h(0));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/regress-761892.js b/deps/v8/test/mjsunit/compiler/regress-761892.js
new file mode 100644
index 0000000000..5423c59c04
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-761892.js
@@ -0,0 +1,15 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(x) {
+ var x0 = (0 != Math.min(1, 1)) && 1;
+ 1.1!=(x||x0)
+}
+
+f(1.1);
+f(1.1);
+%OptimizeFunctionOnNextCall(f);
+f(1);
diff --git a/deps/v8/test/mjsunit/compiler/regress-762057.js b/deps/v8/test/mjsunit/compiler/regress-762057.js
new file mode 100644
index 0000000000..4b5cab6ef5
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-762057.js
@@ -0,0 +1,20 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function* foo() {
+ yield;
+ new Set();
+ for (let x of []) {
+ for (let y of []) {
+ yield;
+ }
+ }
+}
+
+let gaga = foo();
+gaga.next();
+%OptimizeFunctionOnNextCall(foo);
+gaga.next();
diff --git a/deps/v8/test/mjsunit/compiler/regress-780658.js b/deps/v8/test/mjsunit/compiler/regress-780658.js
new file mode 100644
index 0000000000..57fdbbabed
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-780658.js
@@ -0,0 +1,29 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo-escape
+
+function get1(l, b) {
+ return l[1];
+}
+
+function with_double(x) {
+ var o = {a: [x,x,x]};
+ o.a.some_property = 1;
+ return get1(o.a);
+}
+
+function with_tagged(x) {
+ var l = [{}, x,x];
+ return get1(l);
+}
+
+with_double(.5);
+with_tagged({});
+with_double(.6);
+with_tagged(null);
+with_double(.5);
+with_tagged({});
+%OptimizeFunctionOnNextCall(with_double);
+with_double(.7);
diff --git a/deps/v8/test/mjsunit/compiler/stress-deopt-count-1.js b/deps/v8/test/mjsunit/compiler/stress-deopt-count-1.js
new file mode 100644
index 0000000000..834a873e75
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/stress-deopt-count-1.js
@@ -0,0 +1,20 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --deopt-every-n-times=0 --opt --no-always-opt
+
+// Check that --deopt-every-n-times=0 never triggers a deopt.
+
+function f(x) {
+ return x + 1;
+}
+
+f(0);
+%OptimizeFunctionOnNextCall(f);
+
+f(1);
+assertOptimized(f, undefined, undefined, false);
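+// (The extra arguments presumably disable mjsunit's "skip if maybe
+// deopted" leniency so that the optimization status is always checked.)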
+
+f(1);
+assertOptimized(f, undefined, undefined, false);
diff --git a/deps/v8/test/mjsunit/compiler/stress-deopt-count-2.js b/deps/v8/test/mjsunit/compiler/stress-deopt-count-2.js
new file mode 100644
index 0000000000..641a9e8180
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/stress-deopt-count-2.js
@@ -0,0 +1,48 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --no-always-opt --deopt-every-n-times=6
+
+// Check that the stress-deopt counter resets correctly.
+
+// Function with two deopt points
+function f(x) {
+ return x + 1;
+}
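+
+// With --deopt-every-n-times=6, every optimized run of f consumes its two
+// deopt points from the global counter (6 -> 4 -> 2), so the third call
+// deopts and the counter restarts at 6, as traced below.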
+
+f(1);
+%OptimizeFunctionOnNextCall(f);
+
+// stress_deopt_count == 6
+
+f(1);
+assertOptimized(f, undefined, undefined, false);
+
+// stress_deopt_count == 4
+
+f(1);
+assertOptimized(f, undefined, undefined, false);
+
+// stress_deopt_count == 2
+
+f(1);
+// deopt & counter reset
+assertUnoptimized(f, undefined, undefined, false);
+
+// stress_deopt_count == 6
+
+%OptimizeFunctionOnNextCall(f);
+f(1);
+assertOptimized(f, undefined, undefined, false);
+
+// stress_deopt_count == 4
+
+f(1);
+assertOptimized(f, undefined, undefined, false);
+
+// stress_deopt_count == 2
+
+f(1);
+// deopt & counter reset
+assertUnoptimized(f, undefined, undefined, false);
diff --git a/deps/v8/test/mjsunit/compiler/typedarray-prototype-tostringtag.js b/deps/v8/test/mjsunit/compiler/typedarray-prototype-tostringtag.js
new file mode 100644
index 0000000000..de4b302017
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/typedarray-prototype-tostringtag.js
@@ -0,0 +1,84 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+const Classes = [
+ Uint8Array,
+ Int8Array,
+ Uint16Array,
+ Int16Array,
+ Uint32Array,
+ Int32Array,
+ Uint8ClampedArray,
+ Float32Array,
+ Float64Array
+];
+const TypedArrayPrototype_toStringTag =
+ Object.getOwnPropertyDescriptor(
+ Object.getPrototypeOf(Uint8Array.prototype),
+ Symbol.toStringTag).get;
+
+(function() {
+ function foo(o) {
+ return TypedArrayPrototype_toStringTag.call(o);
+ }
+ assertEquals(undefined, foo(1));
+ assertEquals(undefined, foo({}));
+ assertEquals(undefined, foo([]));
+ Classes.forEach(C => assertEquals(C.name, foo(new C(1))));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(undefined, foo(1));
+ assertEquals(undefined, foo({}));
+ assertEquals(undefined, foo([]));
+ Classes.forEach(C => assertEquals(C.name, foo(new C(1))));
+})();
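+
+// The shared %TypedArray%.prototype[Symbol.toStringTag] getter returns the
+// constructor name for typed arrays and undefined for any other receiver,
+// which makes it a reliable brand check; the blocks below uncurry it into
+// plain isTypedArray / isUint8Array predicates.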
+
+(function() {
+ const ReflectApply = Reflect.apply;
+ const uncurryThis = func => (thisArg, ...args) =>
+ ReflectApply(func, thisArg, args);
+ const TypedArrayProto_toStringTag =
+ uncurryThis(TypedArrayPrototype_toStringTag);
+
+ function isTypedArray(value) {
+ return TypedArrayProto_toStringTag(value) !== undefined;
+ }
+
+ assertFalse(isTypedArray(1));
+ assertFalse(isTypedArray({}));
+ assertFalse(isTypedArray([]));
+ assertFalse(isTypedArray('Lorem ipsum'));
+ Classes.forEach(C => assertTrue(isTypedArray(new C(1))));
+ %OptimizeFunctionOnNextCall(isTypedArray);
+ assertFalse(isTypedArray(1));
+ assertFalse(isTypedArray({}));
+ assertFalse(isTypedArray([]));
+ assertFalse(isTypedArray('Lorem ipsum'));
+ Classes.forEach(C => assertTrue(isTypedArray(new C(1))));
+})();
+
+(function() {
+ const ReflectApply = Reflect.apply;
+ const uncurryThis = func => (thisArg, ...args) =>
+ ReflectApply(func, thisArg, args);
+ const TypedArrayProto_toStringTag =
+ uncurryThis(TypedArrayPrototype_toStringTag);
+
+ function isUint8Array(value) {
+ return TypedArrayProto_toStringTag(value) === 'Uint8Array';
+ }
+
+ assertFalse(isUint8Array(1));
+ assertFalse(isUint8Array({}));
+ assertFalse(isUint8Array([]));
+ assertFalse(isUint8Array('Lorem ipsum'));
+ Classes.forEach(C => assertEquals(C === Uint8Array, isUint8Array(new C(1))));
+ %OptimizeFunctionOnNextCall(isUint8Array);
+ assertFalse(isUint8Array(1));
+ assertFalse(isUint8Array({}));
+ assertFalse(isUint8Array([]));
+ assertFalse(isUint8Array('Lorem ipsum'));
+ Classes.forEach(C => assertEquals(C === Uint8Array, isUint8Array(new C(1))));
+})();
diff --git a/deps/v8/test/mjsunit/es6/new-target.js b/deps/v8/test/mjsunit/es6/new-target.js
index 14446fc40f..c77c153cc0 100644
--- a/deps/v8/test/mjsunit/es6/new-target.js
+++ b/deps/v8/test/mjsunit/es6/new-target.js
@@ -472,3 +472,12 @@ function get_new_target() { return new.target; }
tagNewTargetProp.Prop = C;
assertEquals(new tagNewTargetProp, ["tagNewTargetProp"]);
})();
+
+(function testDeleteSloppy() {
+ assertTrue(delete new.target);
+})();
+
+(function testDeleteStrict() {
+ "use strict";
+ assertTrue(delete new.target);
+})();
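+
+// "delete" applied to a non-reference simply evaluates its operand and
+// returns true; new.target is a value, not a property reference, so the
+// deletion is a no-op in sloppy and strict mode alike.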
diff --git a/deps/v8/test/mjsunit/es6/proxies-get.js b/deps/v8/test/mjsunit/es6/proxies-get.js
index 4f7b2d0be4..f52ae1aa7e 100644
--- a/deps/v8/test/mjsunit/es6/proxies-get.js
+++ b/deps/v8/test/mjsunit/es6/proxies-get.js
@@ -193,7 +193,6 @@
'Error from proxy getOwnPropertyDescriptor trap');
})();
-
(function testGetPropertyDetailsBailout2() {
var obj = {};
Object.defineProperty(obj, 'prop', {
@@ -211,3 +210,13 @@
" property on the proxy target but the proxy did not return its actual" +
" value (expected '53' but got '42')");
})();
+
+(function test32BitIndex() {
+ var index = (1 << 31) + 1;
+ var obj = {};
+ obj[index] = 42;
+ var p = new Proxy(obj, {});
+ for (var i = 0; i < 3; ++i) {
+ assertEquals(42, p[index]);
+ }
+})();
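+
+// Note that 1 << 31 overflows int32 arithmetic to -2147483648, so the key
+// is actually the string "-2147483647"; a negative number is not an array
+// index, which sends the lookup through the named-property path.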
diff --git a/deps/v8/test/mjsunit/es6/proxies-set.js b/deps/v8/test/mjsunit/es6/proxies-set.js
index 19f39f9a65..d56cec52f9 100644
--- a/deps/v8/test/mjsunit/es6/proxies-set.js
+++ b/deps/v8/test/mjsunit/es6/proxies-set.js
@@ -308,3 +308,101 @@ TestTrapReceiverArgument(strictReflectSet);
}
}
})();
+
+
+function TestTargetProxy(mySet) {
+ var q = new Proxy({}, {});
+ var proxy = new Proxy(q, {
+ set: function(t, k, v) {
+ return Reflect.set(t, k, v);
+ }
+ });
+
+ for (var p of properties) {
+ assertTrueIf(mySet.returnsBool, mySet(proxy, p, 42));
+ assertSame(42, q[p]);
+ }
+};
+
+TestTargetProxy(sloppyDefaultSet);
+TestTargetProxy(sloppyReflectSet);
+TestTargetProxy(strictDefaultSet);
+TestTargetProxy(strictReflectSet);
+
+
+(function TestAccessorNoSet() {
+ var target = {
+ };
+ Object.defineProperty(target, 'prop', {
+ get: function() {
+ return 42;
+ },
+ configurable: false
+ })
+ var handler = {
+ set: function() { return true; }
+ }
+ var proxy = new Proxy(target, handler);
+ assertThrows(function() { proxy.prop = 0; }, TypeError);
+})();
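+
+// Proxy invariant: when the target has a non-configurable accessor
+// property without a setter, the "set" trap is not allowed to report
+// success, so the write above must throw a TypeError.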
+
+(function TestProxyInPrototype() {
+ var handler = {
+ set: function(t, k, v) {
+ Reflect.set(t, k, v);
+ }
+ };
+ var obj = {};
+ var proxy = new Proxy(obj, handler);
+ var o = Object.create(proxy);
+
+ for (var i = 0; i < 3; ++i) {
+ o.prop = 42 + i;
+ assertEquals(42 + i, obj.prop);
+ }
+})();
+
+(function TestProxyInPrototypeNoTrap() {
+ var handler = {
+ };
+ var obj = {};
+ var proxy = new Proxy(obj, handler);
+ var o = Object.create(proxy);
+
+ for (var i = 0; i < 3; ++i) {
+ o.prop = 42 + i;
+ assertEquals(42 + i, o.prop);
+ assertEquals(undefined, obj.prop);
+ }
+})();
+
+// Note: this case is currently handled by the runtime.
+(function TestDifferentHolder() {
+ var obj = {
+ '1337': 100
+ };
+ var handler = {
+ set(target, name, value, receiver) {
+ if (name != '1337') return Reflect.set(target, name, value, receiver);
+
+ assertSame(target, obj);
+ assertSame(receiver, p);
+ return target[name] = value;
+ }
+ };
+ var p = new Proxy(obj, handler);
+ for (var i = 0; i < 3; ++i) {
+ assertEquals(42, p[1337] = 42);
+ }
+})();
+
+(function test32BitIndex() {
+ var index = (1 << 31) + 1;
+ var obj = {};
+ obj[index] = 42;
+ var p = new Proxy(obj, {});
+ for (var i = 0; i < 3; ++i) {
+ p[index] = 100;
+ assertEquals(100, obj[index]);
+ }
+})();
diff --git a/deps/v8/test/mjsunit/es6/proxies.js b/deps/v8/test/mjsunit/es6/proxies.js
index 3b9a4c5119..75a80a15bd 100644
--- a/deps/v8/test/mjsunit/es6/proxies.js
+++ b/deps/v8/test/mjsunit/es6/proxies.js
@@ -50,6 +50,19 @@ function TestWithFunctionProxy(test, x, y, z) {
}
// ---------------------------------------------------------------------------
+// Test Proxy constructor properties
+
+(function TestProxyProperties() {
+ assertEquals(2, Proxy.length);
+ assertEquals(Function.__proto__, Proxy.__proto__);
+ assertEquals(null, Proxy.prototype);
+ assertEquals(undefined, Object.getOwnPropertyDescriptor(Proxy, "arguments"));
+ assertThrows(() => Proxy.arguments, TypeError);
+ assertEquals(undefined, Object.getOwnPropertyDescriptor(Proxy, "caller"));
+ assertThrows(() => Proxy.caller, TypeError);
+})();
+
+// ---------------------------------------------------------------------------
// Getting property descriptors (Object.getOwnPropertyDescriptor).
var key
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-777182.js b/deps/v8/test/mjsunit/es6/regress/regress-777182.js
new file mode 100644
index 0000000000..6d6eb55c82
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/regress/regress-777182.js
@@ -0,0 +1,9 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --enable-slow-asserts
+
+var __v_65159 = [1.3];
+__v_65159.length = 0;
+new Int8Array(10).set(__v_65159);
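What the fuzzer-generated case above reduces to, hedged since the original bug report is not quoted here: shrinking `length` to 0 presumably leaves the double-typed backing store in place, and TypedArray.prototype.set must still treat the source as empty instead of touching the stale element; the slow-asserts build is what catches any mismatch. A plain restatement:

var src = [1.3];                   // double-typed elements
src.length = 0;                    // now empty; backing store kind unchanged
var dst = new Int8Array(10);
dst.set(src);                      // must copy zero elements
console.log(dst[0]);               // 0 -- untouched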
diff --git a/deps/v8/test/mjsunit/es6/string-html.js b/deps/v8/test/mjsunit/es6/string-html.js
index 690830ba84..1eaa7b1eb1 100644
--- a/deps/v8/test/mjsunit/es6/string-html.js
+++ b/deps/v8/test/mjsunit/es6/string-html.js
@@ -8,7 +8,8 @@
assertEquals('_'.anchor('b'), '<a name="b">_</a>');
assertEquals('<'.anchor('<'), '<a name="<"><</a>');
assertEquals('_'.anchor(0x2A), '<a name="42">_</a>');
-assertEquals('_'.anchor('\x22'), '<a name="&quot;">_</a>');
+assertEquals('_'.anchor('\x22\x22'), '<a name="&quot;&quot;">_</a>');
+assertEquals('_'.anchor(), '<a name="undefined">_</a>');
assertEquals(String.prototype.anchor.call(0x2A, 0x2A), '<a name="42">42</a>');
assertThrows(function() {
String.prototype.anchor.call(undefined);
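The two new anchor() assertions pin down the Annex B CreateHTML algorithm: every '"' in the attribute value is replaced with '&quot;' (hence the two-quote case), nothing else is escaped, and a missing argument is stringified like any other value. For instance:

console.log('_'.anchor('a"b"c'));  // <a name="a&quot;b&quot;c">_</a>
console.log('_'.anchor('<&>'));    // <a name="<&>">_</a> -- only '"' is escaped
console.log('_'.anchor());         // <a name="undefined">_</a>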
diff --git a/deps/v8/test/mjsunit/es6/string-repeat.js b/deps/v8/test/mjsunit/es6/string-repeat.js
index d61aec066c..3649c5b09b 100644
--- a/deps/v8/test/mjsunit/es6/string-repeat.js
+++ b/deps/v8/test/mjsunit/es6/string-repeat.js
@@ -60,14 +60,19 @@ assertEquals("", "abc".repeat(0));
assertEquals("abcabc", "abc".repeat(2.0));
assertEquals("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "a".repeat(37));
+assertEquals("", "a".repeat(NaN));
assertThrows('"a".repeat(-1)', RangeError);
assertThrows('"a".repeat(Number.POSITIVE_INFINITY)', RangeError);
+assertThrows('"a".repeat(Number.NEGATIVE_INFINITY)', RangeError);
assertThrows('"a".repeat(Math.pow(2, 30))', RangeError);
assertThrows('"a".repeat(Math.pow(2, 40))', RangeError);
+assertThrows('"a".repeat(-Math.pow(2, 40))', RangeError);
// Handling empty strings
assertThrows('"".repeat(-1)', RangeError);
assertThrows('"".repeat(Number.POSITIVE_INFINITY)', RangeError);
+assertThrows('"".repeat(Number.NEGATIVE_INFINITY)', RangeError);
+assertThrows('"a".repeat(-Math.pow(2, 40))', RangeError);
assertEquals("", "".repeat(Math.pow(2, 30)));
assertEquals("", "".repeat(Math.pow(2, 40)));
diff --git a/deps/v8/test/mjsunit/es6/typedarray-set-bytelength-not-smi.js b/deps/v8/test/mjsunit/es6/typedarray-set-bytelength-not-smi.js
new file mode 100644
index 0000000000..1f842878dc
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/typedarray-set-bytelength-not-smi.js
@@ -0,0 +1,21 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --mock-arraybuffer-allocator
+
+(function TestBufferByteLengthNonSmi() {
+ const source_buffer_length = %_MaxSmi() + 1;
+ const source_buffer = new ArrayBuffer(source_buffer_length);
+ const source = new Uint16Array(source_buffer);
+ assertEquals(source_buffer_length, source_buffer.byteLength);
+ assertEquals(source_buffer_length / 2, source.length);
+
+ const target_buffer_length = %_MaxSmi() - 1;
+ const target_buffer = new ArrayBuffer(target_buffer_length);
+ const target = new Uint16Array(target_buffer);
+ assertEquals(target_buffer_length, target_buffer.byteLength);
+ assertEquals(target_buffer_length / 2, target.length);
+
+ assertThrows(() => target.set(source), RangeError);
+})();
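The RangeError above comes out of simple arithmetic once %_MaxSmi() is spelled out. On a 64-bit build it is 2**31 - 1 (assumption here; 32-bit builds use 2**30 - 1, where the same inequality holds), so the source is one Uint16 element longer than the target can hold:

const kMaxSmi = 2 ** 31 - 1;        // %_MaxSmi() on a 64-bit build (assumed)
const srcLen = (kMaxSmi + 1) / 2;   // 2**30 source elements
const dstLen = (kMaxSmi - 1) / 2;   // 2**30 - 1 target elements
console.log(srcLen > dstLen);       // true, so target.set(source) must throw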
diff --git a/deps/v8/test/mjsunit/es6/typedarray.js b/deps/v8/test/mjsunit/es6/typedarray.js
index 2f4c1b639d..5f4d3f0747 100644
--- a/deps/v8/test/mjsunit/es6/typedarray.js
+++ b/deps/v8/test/mjsunit/es6/typedarray.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --allow-natives-syntax
+
// ArrayBuffer
function TestByteLength(param, expectedByteLength) {
@@ -609,6 +611,103 @@ function TestTypedArraySet() {
a101[0] = 42;
b101.set(a101);
assertArrayPrefix([42], b101);
+
+ // Detached array buffer when accessing a source element
+ var a111 = new Int8Array(100);
+ var evilarr = new Array(100);
+ var detached = false;
+ evilarr[1] = {
+ [Symbol.toPrimitive]() {
+ %ArrayBufferNeuter(a111.buffer);
+ detached = true;
+ return 1;
+ }
+ };
+ assertThrows(() => a111.set(evilarr), TypeError);
+ assertEquals(true, detached);
+
+ // Detached array buffer when converting offset.
+ {
+ for (const klass of typedArrayConstructors) {
+ const xs = new klass(10);
+ let detached = false;
+ const offset = {
+ [Symbol.toPrimitive]() {
+ %ArrayBufferNeuter(xs.buffer);
+ detached = true;
+ return 0;
+ }
+ };
+ assertThrows(() => xs.set(xs, offset), TypeError);
+ assertEquals(true, detached);
+ }
+ }
+
+ // Detached JSTypedArray source argument.
+ {
+ for (const klass of typedArrayConstructors) {
+ const a = new klass(2);
+ for (let i = 0; i < a.length; i++) a[i] = i;
+ %ArrayBufferNeuter(a.buffer);
+
+ const b = new klass(2);
+ assertThrows(() => b.set(a), TypeError);
+ }
+ }
+
+ // Various offset edge cases.
+ {
+ for (const klass of typedArrayConstructors) {
+ const xs = new klass(10);
+ assertThrows(() => xs.set(xs, -1), RangeError);
+ assertThrows(() => xs.set(xs, -1 * 2**64), RangeError);
+ xs.set(xs, -0.0);
+ xs.set(xs, 0.0);
+ xs.set(xs, 0.5);
+ assertThrows(() => xs.set(xs, 2**64), RangeError);
+ }
+ }
+
+ // Exhaustively test elements kind combinations with JSArray source arg.
+ {
+ const kSize = 3;
+ const targets = typedArrayConstructors.map(klass => new klass(kSize));
+ const sources = [ [0,1,2] // PACKED_SMI
+ , [0,,2] // HOLEY_SMI
+ , [0.1,0.2,0.3] // PACKED_DOUBLE
+ , [0.1,,0.3] // HOLEY_DOUBLE
+ , [{},{},{}] // PACKED
+ , [{},,{}] // HOLEY
+ , [] // DICTIONARY (patched later)
+ ];
+
+ // Migrate to DICTIONARY_ELEMENTS.
+ Object.defineProperty(sources[6], 0, {});
+
+ assertTrue(%HasSmiElements(sources[0]));
+ assertTrue(%HasFastElements(sources[0]) && !%HasHoleyElements(sources[0]));
+ assertTrue(%HasSmiElements(sources[1]));
+ assertTrue(%HasFastElements(sources[1]) && %HasHoleyElements(sources[1]));
+ assertTrue(%HasDoubleElements(sources[2]));
+ assertTrue(%HasFastElements(sources[2]) && !%HasHoleyElements(sources[2]));
+ assertTrue(%HasDoubleElements(sources[3]));
+ assertTrue(%HasFastElements(sources[3]) && %HasHoleyElements(sources[3]));
+ assertTrue(%HasObjectElements(sources[4]));
+ assertTrue(%HasFastElements(sources[4]) && !%HasHoleyElements(sources[4]));
+ assertTrue(%HasObjectElements(sources[5]));
+ assertTrue(%HasFastElements(sources[5]) && %HasHoleyElements(sources[5]));
+ assertTrue(%HasDictionaryElements(sources[6]));
+
+ for (const target of targets) {
+ for (const source of sources) {
+ target.set(source);
+ %HeapObjectVerify(target);
+ %HeapObjectVerify(source);
+ }
+ }
+ }
}
TestTypedArraySet();
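The elements-kind matrix above works for every row because of how holes read back: Get() on a missing index returns undefined, and %TypedArray%.prototype.set runs each element through ToNumber, giving NaN, which integer typed arrays then truncate to 0. Concretely:

const t = new Int8Array(3);
t.set([0, , 2]);                 // holey source; the hole reads as undefined
console.log(Array.from(t));      // [0, 0, 2] -- undefined -> NaN -> 0
const f = new Float32Array(3);
f.set([0.1, , 0.3]);
console.log(f[1]);               // NaN survives in a float array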
@@ -885,3 +984,12 @@ for(i = 0; i < typedArrayConstructors.length; i++) {
e.message);
}
})();
+
+// Regression test 761654
+assertThrows(function LargeSourceArray() {
+ let v0 = {};
+ v0.length = 2 ** 32; // too large for uint32
+ let a = new Int8Array();
+
+ a.set(v0);
+});
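A common thread in the detachment tests added above: every user-visible coercion (the Symbol.toPrimitive hooks) runs before set() reads or writes any buffer, so a hook that detaches mid-operation forces a TypeError instead of a read from a freed backing store. The ordering itself is observable without %ArrayBufferNeuter:

const ta = new Uint8Array(4);
const offset = {
  [Symbol.toPrimitive]() {
    console.log("offset coerced first");  // runs before any element is copied
    return 1;
  }
};
ta.set([7, 8], offset);           // logs, then writes at indices 1 and 2
console.log(Array.from(ta));      // [0, 7, 8, 0]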
diff --git a/deps/v8/test/mjsunit/function-bind.js b/deps/v8/test/mjsunit/function-bind.js
index 826986943b..7d37da57d2 100644
--- a/deps/v8/test/mjsunit/function-bind.js
+++ b/deps/v8/test/mjsunit/function-bind.js
@@ -41,21 +41,25 @@ var f = foo.bind(foo);
assertEquals([foo, 3, 1], f(1, 2, 3));
assertEquals(3, f.length);
assertEquals("function () { [native code] }", f.toString());
+%HeapObjectVerify(f);
f = foo.bind(foo, 1);
assertEquals([foo, 3, 1], f(2, 3));
assertEquals(2, f.length);
assertEquals("function () { [native code] }", f.toString());
+%HeapObjectVerify(f);
f = foo.bind(foo, 1, 2);
assertEquals([foo, 3, 1], f(3));
assertEquals(1, f.length);
assertEquals("function () { [native code] }", f.toString());
+%HeapObjectVerify(f);
f = foo.bind(foo, 1, 2, 3);
assertEquals([foo, 3, 1], f());
assertEquals(0, f.length);
assertEquals("function () { [native code] }", f.toString());
+%HeapObjectVerify(f);
// Test that length works correctly even if more than the actual number
// of arguments are given when binding.
@@ -63,6 +67,7 @@ f = foo.bind(foo, 1, 2, 3, 4, 5, 6, 7, 8, 9);
assertEquals([foo, 9, 1], f());
assertEquals(0, f.length);
assertEquals("function () { [native code] }", f.toString());
+%HeapObjectVerify(f);
// Use a different bound object.
var obj = {x: 42, y: 43};
@@ -78,11 +83,13 @@ assertEquals(3, f_bound_this(1))
f = f_bound_this.bind(obj);
assertEquals(2, f(1));
assertEquals(1, f.length);
+%HeapObjectVerify(f);
f = f_bound_this.bind(obj, 2);
assertEquals(3, f());
assertEquals(0, f.length);
assertEquals('[object Function]', Object.prototype.toString.call(f));
+%HeapObjectVerify(f);
// Test chained binds.
@@ -90,65 +97,80 @@ assertEquals('[object Function]', Object.prototype.toString.call(f));
// the same effect.
f = foo.bind(foo);
assertEquals([foo, 3, 1], f(1, 2, 3));
+%HeapObjectVerify(f);
var not_foo = {};
f = foo.bind(foo).bind(not_foo).bind(not_foo).bind(not_foo);
assertEquals([foo, 3, 1], f(1, 2, 3));
assertEquals(3, f.length);
+%HeapObjectVerify(f);
// Giving bound parameters should work at any place in the chain.
f = foo.bind(foo, 1).bind(not_foo).bind(not_foo).bind(not_foo);
assertEquals([foo, 3, 1], f(2, 3));
assertEquals(2, f.length);
+%HeapObjectVerify(f);
f = foo.bind(foo).bind(not_foo, 1).bind(not_foo).bind(not_foo);
assertEquals([foo, 3, 1], f(2, 3));
assertEquals(2, f.length);
+%HeapObjectVerify(f);
f = foo.bind(foo).bind(not_foo).bind(not_foo,1 ).bind(not_foo);
assertEquals([foo, 3, 1], f(2, 3));
assertEquals(2, f.length);
+%HeapObjectVerify(f);
f = foo.bind(foo).bind(not_foo).bind(not_foo).bind(not_foo, 1);
assertEquals([foo, 3, 1], f(2, 3));
assertEquals(2, f.length);
+%HeapObjectVerify(f);
// Several parameters can be given, and given in different bind invocations.
f = foo.bind(foo, 1, 2).bind(not_foo).bind(not_foo).bind(not_foo);
assertEquals([foo, 3, 1], f(3));
assertEquals(1, f.length);
+%HeapObjectVerify(f);
f = foo.bind(foo).bind(not_foo, 1, 2).bind(not_foo).bind(not_foo);
assertEquals([foo, 3, 1], f(1));
assertEquals(1, f.length);
+%HeapObjectVerify(f);
f = foo.bind(foo).bind(not_foo, 1, 2).bind(not_foo).bind(not_foo);
assertEquals([foo, 3, 1], f(3));
assertEquals(1, f.length);
+%HeapObjectVerify(f);
f = foo.bind(foo).bind(not_foo).bind(not_foo, 1, 2).bind(not_foo);
assertEquals([foo, 3, 1], f(1));
assertEquals(1, f.length);
+%HeapObjectVerify(f);
f = foo.bind(foo).bind(not_foo).bind(not_foo).bind(not_foo, 1, 2);
assertEquals([foo, 3, 1], f(3));
assertEquals(1, f.length);
+%HeapObjectVerify(f);
f = foo.bind(foo, 1).bind(not_foo, 2).bind(not_foo).bind(not_foo);
assertEquals([foo, 3, 1], f(3));
assertEquals(1, f.length);
+%HeapObjectVerify(f);
f = foo.bind(foo, 1).bind(not_foo).bind(not_foo, 2).bind(not_foo);
assertEquals([foo, 3, 1], f(3));
assertEquals(1, f.length);
+%HeapObjectVerify(f);
f = foo.bind(foo, 1).bind(not_foo).bind(not_foo).bind(not_foo, 2);
assertEquals([foo, 3, 1], f(3));
assertEquals(1, f.length);
+%HeapObjectVerify(f);
f = foo.bind(foo).bind(not_foo, 1).bind(not_foo).bind(not_foo, 2);
assertEquals([foo, 3, 1], f(3));
assertEquals(1, f.length);
+%HeapObjectVerify(f);
// The wrong number of arguments can be given to bound functions too.
f = foo.bind(foo);
@@ -158,6 +180,7 @@ assertEquals([foo, 1, 1], f(1));
assertEquals([foo, 2, 1], f(1, 2));
assertEquals([foo, 3, 1], f(1, 2, 3));
assertEquals([foo, 4, 1], f(1, 2, 3, 4));
+%HeapObjectVerify(f);
f = foo.bind(foo, 1);
assertEquals(2, f.length);
@@ -165,21 +188,25 @@ assertEquals([foo, 1, 1], f());
assertEquals([foo, 2, 1], f(2));
assertEquals([foo, 3, 1], f(2, 3));
assertEquals([foo, 4, 1], f(2, 3, 4));
+%HeapObjectVerify(f);
f = foo.bind(foo, 1, 2);
assertEquals(1, f.length);
assertEquals([foo, 2, 1], f());
assertEquals([foo, 3, 1], f(3));
assertEquals([foo, 4, 1], f(3, 4));
+%HeapObjectVerify(f);
f = foo.bind(foo, 1, 2, 3);
assertEquals(0, f.length);
assertEquals([foo, 3, 1], f());
assertEquals([foo, 4, 1], f(4));
+%HeapObjectVerify(f);
f = foo.bind(foo, 1, 2, 3, 4);
assertEquals(0, f.length);
assertEquals([foo, 4, 1], f());
+%HeapObjectVerify(f);
// Test constructor calls.
@@ -194,24 +221,32 @@ var obj2 = new f(1,2,3);
assertEquals(1, obj2.x);
assertEquals(2, obj2.y);
assertEquals(3, obj2.z);
+%HeapObjectVerify(f);
+%HeapObjectVerify(obj2);
f = bar.bind(bar, 1);
obj2 = new f(2,3);
assertEquals(1, obj2.x);
assertEquals(2, obj2.y);
assertEquals(3, obj2.z);
+%HeapObjectVerify(f);
+%HeapObjectVerify(obj2);
f = bar.bind(bar, 1, 2);
obj2 = new f(3);
assertEquals(1, obj2.x);
assertEquals(2, obj2.y);
assertEquals(3, obj2.z);
+%HeapObjectVerify(f);
+%HeapObjectVerify(obj2);
f = bar.bind(bar, 1, 2, 3);
obj2 = new f();
assertEquals(1, obj2.x);
assertEquals(2, obj2.y);
assertEquals(3, obj2.z);
+%HeapObjectVerify(f);
+%HeapObjectVerify(obj2);
// Test bind chains when used as a constructor.
@@ -220,6 +255,8 @@ obj2 = new f();
assertEquals(1, obj2.x);
assertEquals(2, obj2.y);
assertEquals(3, obj2.z);
+%HeapObjectVerify(f);
+%HeapObjectVerify(obj2);
// Test obj2 is instanceof both bar and f.
assertTrue(obj2 instanceof bar);
@@ -235,22 +272,29 @@ assertTrue(obj3 instanceof f);
assertFalse(obj3 instanceof foo);
assertFalse(obj3 instanceof Function);
assertFalse(obj3 instanceof String);
+%HeapObjectVerify(f);
+%HeapObjectVerify(obj3);
// thisArg is converted to object.
f = foo.bind(undefined);
assertEquals([this, 0, undefined], f());
+%HeapObjectVerify(f);
f = foo.bind(null);
assertEquals([this, 0, undefined], f());
+%HeapObjectVerify(f);
f = foo.bind(42);
assertEquals([Object(42), 0, undefined], f());
+%HeapObjectVerify(f);
f = foo.bind("foo");
assertEquals([Object("foo"), 0, undefined], f());
+%HeapObjectVerify(f);
f = foo.bind(true);
assertEquals([Object(true), 0, undefined], f());
+%HeapObjectVerify(f);
// Strict functions don't convert thisArg.
function soo(x, y, z) {
@@ -260,18 +304,23 @@ function soo(x, y, z) {
var s = soo.bind(undefined);
assertEquals([undefined, 0, undefined], s());
+%HeapObjectVerify(s);
s = soo.bind(null);
assertEquals([null, 0, undefined], s());
+%HeapObjectVerify(s);
s = soo.bind(42);
assertEquals([42, 0, undefined], s());
+%HeapObjectVerify(s);
s = soo.bind("foo");
assertEquals(["foo", 0, undefined], s());
+%HeapObjectVerify(s);
s = soo.bind(true);
assertEquals([true, 0, undefined], s());
+%HeapObjectVerify(s);
// Test that .arguments and .caller are poisoned according to the ES5 spec.
@@ -316,11 +365,14 @@ assertThrows(function() { f.arguments = 42; }, TypeError);
Object.setPrototypeOf(fun, proto);
var bound = fun.bind({});
assertEquals(proto, Object.getPrototypeOf(bound));
+ %HeapObjectVerify(bound);
var bound2 = fun.bind({});
assertTrue(%HaveSameMap(new bound, new bound2));
+ %HeapObjectVerify(bound2);
Object.setPrototypeOf(fun, null);
bound = Function.prototype.bind.call(fun, {});
assertEquals(null, Object.getPrototypeOf(bound));
+ %HeapObjectVerify(bound);
})();
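The length assertions that %HeapObjectVerify is sprinkled between all follow one rule from the spec's bind: the bound function's length is the target's length minus the number of bound arguments after thisArg, clamped at zero. Stated once:

function add3(a, b, c) { return a + b + c; }
console.log(add3.bind(null).length);              // 3
console.log(add3.bind(null, 1).length);           // 2
console.log(add3.bind(null, 1, 2, 3, 4).length);  // 0 -- clamped, never negative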
diff --git a/deps/v8/test/mjsunit/harmony/array-sort-comparefn.js b/deps/v8/test/mjsunit/harmony/array-sort-comparefn.js
new file mode 100644
index 0000000000..1ae470a351
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/array-sort-comparefn.js
@@ -0,0 +1,38 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Array.prototype.sort + TypedArray.prototype.sort: comparefn must be either a
+// function or undefined.
+// https://github.com/tc39/ecma262/pull/785
+
+const types = [
+ Array,
+ Int8Array, Uint8Array,
+ Int16Array, Uint16Array,
+ Int32Array, Uint32Array,
+ Uint8ClampedArray,
+ Float32Array, Float64Array,
+];
+
+for (const type of types) {
+ const array = new type();
+ array[0] = 1;
+ array[1] = 2;
+ array[2] = 3;
+
+ array.sort();
+ array.sort(undefined);
+ array.sort(() => {});
+
+ assertThrows(() => { array.sort(null); }, TypeError);
+ assertThrows(() => { array.sort(true); }, TypeError);
+ assertThrows(() => { array.sort(false); }, TypeError);
+ assertThrows(() => { array.sort(''); }, TypeError);
+ assertThrows(() => { array.sort(0); }, TypeError);
+ assertThrows(() => { array.sort(42); }, TypeError);
+ assertThrows(() => { array.sort([]); }, TypeError);
+ assertThrows(() => { array.sort(/./); }, TypeError);
+ assertThrows(() => { array.sort({}); }, TypeError);
+ assertThrows(() => { array.sort(Symbol()); }, TypeError);
+}
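The normative change this test tracks (tc39/ecma262#785) moves the comparefn type check to the front of Array.prototype.sort and %TypedArray%.prototype.sort, so an invalid comparator throws even when no comparison would ever run:

try {
  [].sort(null);                  // nothing to compare, still a TypeError
} catch (e) {
  console.log(e instanceof TypeError);   // true
}
console.log([3, 1, 2].sort(undefined));  // [1, 2, 3] -- undefined means default order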
diff --git a/deps/v8/test/mjsunit/harmony/bigint.js b/deps/v8/test/mjsunit/harmony/bigint.js
new file mode 100644
index 0000000000..4406f12b11
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint.js
@@ -0,0 +1,355 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-bigint --no-opt
+
+'use strict'
+
+const zero = BigInt(0);
+const another_zero = BigInt(0);
+const one = BigInt(1);
+const another_one = BigInt(1);
+const two = BigInt(2);
+const three = BigInt(3);
+const six = BigInt(6);
+
+// BigInt
+{
+ assertSame(BigInt, BigInt.prototype.constructor)
+}
+
+// typeof
+{
+ assertEquals(typeof zero, "bigint");
+ assertEquals(typeof one, "bigint");
+}
+{
+ // TODO(neis): Enable once --no-opt can be removed.
+ //
+ // function Typeof(x) { return typeof x }
+ // assertEquals(Typeof(zero), "bigint");
+ // assertEquals(Typeof(zero), "bigint");
+ // %OptimizeFunctionOnNextCall(Typeof);
+ // assertEquals(Typeof(zero), "bigint");
+}
+
+// ToString
+{
+ assertEquals(String(zero), "0");
+ assertEquals(String(one), "1");
+}
+
+// .toString(radix)
+{
+ // Single-digit BigInts: random-generated inputs close to kMaxInt.
+ // Expectations computed with the following Python program:
+ // def Format(x, base):
+ // s = ""
+ // while x > 0:
+ // s = "0123456789abcdefghijklmnopqrstuvwxyz"[x % base] + s
+ // x = x / base
+ // return s
+ assertEquals("10100110000100101000011100101", BigInt(0x14c250e5).toString(2));
+ assertEquals("-110110100010011111001011111", BigInt(-0x6d13e5f).toString(2));
+ assertEquals("1001222020000100000", BigInt(0x18c72873).toString(3));
+ assertEquals("-1212101122110102020", BigInt(-0x2b19aebe).toString(3));
+ assertEquals("120303133110120", BigInt(0x18cdf518).toString(4));
+ assertEquals("-113203101020122", BigInt(-0x178d121a).toString(4));
+ assertEquals("1323302233400", BigInt(0x18de6256).toString(5));
+ assertEquals("-2301033210212", BigInt(-0x25f7f454).toString(5));
+ assertEquals("131050115130", BigInt(0x211f0d5e).toString(6));
+ assertEquals("-104353333321", BigInt(-0x186bbe91).toString(6));
+ assertEquals("25466260221", BigInt(0x2f69f47e).toString(7));
+ assertEquals("-31051540346", BigInt(-0x352c7efa).toString(7));
+ assertEquals("5004630525", BigInt(0x28133155).toString(8));
+ assertEquals("-7633240703", BigInt(-0x3e6d41c3).toString(8));
+ assertEquals("705082365", BigInt(0x121f4264).toString(9));
+ assertEquals("-780654431", BigInt(-0x1443b36e).toString(9));
+ assertEquals("297019028", BigInt(0x11b42694).toString(10));
+ assertEquals("-721151126", BigInt(-0x2afbe496).toString(10));
+ assertEquals("312914074", BigInt(0x27ca6879).toString(11));
+ assertEquals("-198025592", BigInt(-0x1813d3a7).toString(11));
+ assertEquals("191370997", BigInt(0x2d14f083).toString(12));
+ assertEquals("-1b8aab4a2", BigInt(-0x32b52efa).toString(12));
+ assertEquals("7818062c", BigInt(0x1c84a48c).toString(13));
+ assertEquals("-7529695b", BigInt(-0x1badffee).toString(13));
+ assertEquals("6bc929c4", BigInt(0x2b0a91d0).toString(14));
+ assertEquals("-63042008", BigInt(-0x270dff78).toString(14));
+ assertEquals("5e8b8dec", BigInt(0x3cd27d7f).toString(15));
+ assertEquals("-4005433d", BigInt(-0x28c0821a).toString(15));
+ assertEquals("10b35ca3", BigInt(0x10b35ca3).toString(16));
+ assertEquals("-23d4d9d6", BigInt(-0x23d4d9d6).toString(16));
+ assertEquals("28c3d5e3", BigInt(0x3d75d48c).toString(17));
+ assertEquals("-10c06328", BigInt(-0x1979b7f0).toString(17));
+ assertEquals("eb8d349", BigInt(0x1dacf0a5).toString(18));
+ assertEquals("-1217015h", BigInt(-0x28b3c23f).toString(18));
+ assertEquals("1018520b", BigInt(0x357da01a).toString(19));
+ assertEquals("-9c64e33", BigInt(-0x1b0e9571).toString(19));
+ assertEquals("d7bf9ab", BigInt(0x3309daa3).toString(20));
+ assertEquals("-58h0h9h", BigInt(-0x14c30c55).toString(20));
+ assertEquals("64igi9h", BigInt(0x1fdd329c).toString(21));
+ assertEquals("-45cbc4a", BigInt(-0x15cf9682).toString(21));
+ assertEquals("7bi7d1h", BigInt(0x32f0dfe3).toString(22));
+ assertEquals("-61j743l", BigInt(-0x291ff61f).toString(22));
+ assertEquals("5g5gg25", BigInt(0x325a10bd).toString(23));
+ assertEquals("-3359flb", BigInt(-0x1bb653c9).toString(23));
+ assertEquals("392f5ec", BigInt(0x267ed69c).toString(24));
+ assertEquals("-2ab3icb", BigInt(-0x1bbf7bab).toString(24));
+ assertEquals("3jb2afo", BigInt(0x36f93c24).toString(25));
+ assertEquals("-30bcheh", BigInt(-0x2bec76fa).toString(25));
+ assertEquals("3845agk", BigInt(0x3d04bf64).toString(26));
+ assertEquals("-1gpjl3g", BigInt(-0x1e720b1a).toString(26));
+ assertEquals("20bpaf0", BigInt(0x2e8ff627).toString(27));
+ assertEquals("-292i3c2", BigInt(-0x35f751fe).toString(27));
+ assertEquals("266113k", BigInt(0x3fd26738).toString(28));
+ assertEquals("-1eh16bo", BigInt(-0x2bb5726c).toString(28));
+ assertEquals("19gj7qa", BigInt(0x2f28e8d8).toString(29));
+ assertEquals("-13a0apf", BigInt(-0x278b4588).toString(29));
+ assertEquals("iasrb8", BigInt(0x1a99b3be).toString(30));
+ assertEquals("-frlhoc", BigInt(-0x17106f48).toString(30));
+ assertEquals("bfe4p2", BigInt(0x139f1ea3).toString(31));
+ assertEquals("-ioal1a", BigInt(-0x200e49fa).toString(31));
+ assertEquals("m0v0kf", BigInt(0x2c0f828f).toString(32));
+ assertEquals("-g4bab5", BigInt(-0x2045a965).toString(32));
+ assertEquals("9i1kit", BigInt(0x16450a9f).toString(33));
+ assertEquals("-fqb0e7", BigInt(-0x24d9e889).toString(33));
+ assertEquals("gb9r6m", BigInt(0x2c3acf46).toString(34));
+ assertEquals("-jcaemv", BigInt(-0x346f72b3).toString(34));
+ assertEquals("cw4mbk", BigInt(0x2870cdcb).toString(35));
+ assertEquals("-hw4eki", BigInt(-0x3817c29b).toString(35));
+ assertEquals("alzwgj", BigInt(0x263e2c13).toString(36));
+ assertEquals("-bo4ukz", BigInt(-0x2a0f97d3).toString(36));
+
+ // Multi-digit BigInts.
+ // Test parseInt/toString round trip on a list of randomly generated
+ // string representations of numbers in various bases.
+ var positive = [0, 0, // Skip base 0 and 1.
+ "1100110001100010110011110110010010001011100111100101111000111101100001000",
+ "1001200022210010220101120212021002011002201122200002211102120120021011020",
+ "1111113020012203332320220022231110130001001320122012131311333110012023232",
+ "4214313040222110434114402342013144321401424143322013320403411012033300312",
+ "5025302003542512450341430541203424555035430434034243510233043041501130015",
+ "6231052230016515343200525230300322104013130605414211331345043144525012021",
+ "1146340505617030644211355340006353546230356336306352536433054143503442135",
+ "7262360724624787621528668212168232276348417717770383567066203032200270570",
+ "7573792356581293501680046955899735043496925151216904903504319328753434194",
+ "4a627927557579898720a42647639128174a8689889766a219342133671449069a2235011",
+ "1a574a5848289924996342a32893380690322330393633b587ba5a15b7b82080222400464",
+ "5163304c74c387b7a443c92466688595b671a3329b42083b1499b0c10a74a9298a06c3a5a",
+ "4b63c834356a03c80946133284a709cbbc2a75022757207dc31c14abd4c160dc122327c17",
+ "d8d59cbb4ca2860de7c002eee4ab3c215b90069200d20dbdc0111cb1e1bab97e8c7609670",
+ "22d4b69398a7f848e6ae36798811cd1a63d90f340d8607f3ce5566c97c18468787eb2b9fd",
+ "1176gf69afd32cc105fa70c705927a384dbdb1g8d952f28028g31ebdc9e32a89f16e825ee",
+ "5d64b74f4d70632h4ee07h7c1e2da9125c42g2727f4b6d95e5cec6ga49566hh731ab5f544",
+ "7ff8cg7f05dd72916a09a4761ii7b0ibcg68ba39b10436f14efg76ge817317badcbi4gffc",
+ "6d7c4hci6cd72e4ja26j354i12i71gb0cbj12gi145j91h02hde3b72c65geb7ff9bi9d0c2b",
+ "c96997f50abe425d13a53kk4af631kg7db208ka5j5bfg8ca5f9c0bjf69j5kgg4jb5h7hi86",
+ "3g5fd800d9ib9j0i8all5jgb23dh9483ab6le5ad9g4kja8a0b3j5jbjfge7k5fffg2kbheee",
+ "9j1119d1cd61kmdm7kma105cki313f678fc3h25f4664281bbmg3fk97kfbh7d48j89j178ch",
+ "d2933cdc9jfe4hl3794kb3e13dg2lihad968ib9jg19dgf1fi482b27ji0d10c6kfkdge5764",
+ "bf6o0njkm1ij5in5nh7h94584bd80el02b07el5ojk9k9g0gn906do70gbbnckl048c0kdmao",
+ "8gb7jnge9p9cdgigo394oa33gfaenc3gnb53eceg4b8511gkkm88b0dod85e5bggpc861d7d5",
+ "qbbnqhkpleb4o8ndaddpc34h5b2iljn3jgnjdn5k57bi3n9i09hjle9hqgqdpgbnk499mak56",
+ "akg7e2976arn8i2m53gif0dp59bmfd7mk9erlg2qm3fc76da9glf397eh4ooij9il0nfl9gac",
+ "mehpbfrj5ah2ef3p2hl637gjp1pm5grqn4037pm1qfgfpr9cfljfc145hljehjjb48bb1n6en",
+ "rg6ik3agnb3p6t2rtja9h4il76i8fkqlt6gplap3fq6pfr7bbcfcp5ffncf3nm4kamap39hse",
+ "bk8rp9r9r8pltdqpb7euc6s9rcm33969pcq6uk3mtfoktt86di8589oacbam5tn29b9b6dq3j",
+ "npth8juld44rss3e57iigjg65po3d1h02heo4r103jmg3ocv89buqtgiov35k39rdf8j9t4ca",
+ "vrmqlwrrrd0uml1womae49jpa9tadh44fw7mucgk06l0uk4uqwuo37t6kwn7wwrm3a6oq081s",
+ "n5cft6gvufqd8iksquu2amghokk17gbtpguidc290af634p7k7rhmfu7bf1s62ej4megoa1j4",
+ "3v3gcrmlfc2tl0tefgkiogj41f6y2tmj9w5bxke8y03xqf49ox8gh9wbrhycrkluicqajtnur",
+ "z2m7b0sy2tzergtkqts5yj0dkrlfkxls81ijgxgfequizpntcwggv2d4rdzcncd0kj9mrmnrb",
+ ];
+ var negative = [0, 0, // Skip base 0 and 1.
+ "-100010011110111010111111110001100100111010101000001011010010101100101000",
+ "-110012122000122102021210112200001000122011010120101201001122000002022102",
+ "-203210320111001002200122200001312300221100221321010300023323201113122333",
+ "-133042441230110320040323303341320302144241224443231311022240124413104131",
+ "-311325230504055004330150145105331121322231155401110315251422505233103112",
+ "-643153641664240231336166403516403454646560261062114326443664602606315326",
+ "-200057252627665476551635525303641543165622340301637556323453513664337277",
+ "-826688166214270516331644053744613530235020517172322840763172114078364165",
+ "-743042397390679269240157150971957535458122650450558451124173993544604852",
+ "-73528688500003573942a56a504a2996a1384129563098512a63196697975038692aaa63",
+ "-616576a2948a9029316290168b71137b027851639a0283150b125b664b74b767a3597805",
+ "-b875467540719b371b7a36047a7886872a5399c4c630c37149bc3182917a7a7c124475bb",
+ "-3860411b61d35977721bc81bd715c386c9b70a752940913d265505d8c7c5dd2624b591d7",
+ "-bad5dd79b083ee0da9a6296664e72c246d827762357116ae7076a22bb369acbc3a201d03",
+ "-f9b37352aff265124303942a463917a252ff1a2ff4a33777f490b4c103bdcd1a655dbe2c",
+ "-805fg8c74125214g383a8d8g573c49fa7c4035fbc6db61g5gb5g6beb8f90dae4a9a5g7cc",
+ "-70aae113459d3h5084b1gg209g3695d20e78d01gcbb71bh1bd4gdge31haf5hc02dghf14e",
+ "-c55a57haf47b7ih2gh6ea93098ig02b42icga6ead254e0aeeic7g53h5fd6637ge03b2e20",
+ "-e32f7204624ie596j731g72136cejc25ebbgb0140i4997fcdf477f021d86ci4e10db543a",
+ "-i7f32c817i3cac1c24c7786k6ig185f47cj1471ki6bb7agiae838027gjge9g59if9f88g6",
+ "-i30aha2030a9605c270h92e1ca3i02j996hl918gh52fbhb7i16ik1i919ieak3cj384kb61",
+ "-58jmem8e59li67aellid2083dabh4kh51ci1jg7c6a3k4l1hdgfkdha0fglfm4805kida5b9",
+ "-cl9iecjg9ak087cad4151lll44296heae2349g70fbjj37998m2ddn6427fgcl2aknhgn1a1",
+ "-alfjfhho4gf8bi4j2bi3743mhg2aache4c6jcinkmf5ddm7kf9gg350hlja16ealbdlk201j",
+ "-bhh1146ho3o2m3b839c565hbgjnhjh96oofbmdl7gn8h4f94kli94hkk180o79pc4d2l0721",
+ "-p00gknh7e05k6a3apg6i9lb46f4a9qeeiq1778ak8il5dcponk5gl2fiednb4pmo1agmoqph",
+ "-4j8lo4d4p508fnd2hkfb76e8ri81k6hq0op3pr14ca0cn96pccplk7rbahc9cdkdce1q16dn",
+ "-ednlo3ogf2i8annrel9rm323bpf00meed3oi47n0qrdgnd2n3il4bnsc9s2jd7loh44im8ra",
+ "-bjjg6fsbpcc2tc1o09m9r6fd6eoq5480har62a5offn9thcfahbno9kf9magl2akl0jgncj9",
+ "-sonuhat2h60glpbpej9jjado2s5l86122d26tudoc1d6aic2oitu793gk0mlac3dk1dufp1q",
+ "-i9pbvm53ubh8jqifuarauch8cbgk9cjsl6rlioka1phs1lskg1oosll23hjoli2subgr1rto",
+ "-w1ncn5t60b5dv669ekwnvk8n2g7djrsl8cdkwun8o3m5divc3jhnkp2381rhj70gc71a6wff",
+ "-buiq8v33p5ex44ps4s45enj6lrluivm19lcowkvntu72u0xguw13bxgxxe7mdlwt1a4qksae",
+ "-woiycfmea6i12r2yai49mf4lbd7w2jdoebiogfhnh1i4rwgox57obci8qbsfpb4w07nu19m5",
+ "-tbttuip1r6ioca6g6dw354o4m78qep9yh03nojx47yq29fqime6zstwllb74501qct8eskxn",
+ ];
+ for (var base = 2; base <= 36; base++) {
+ var input = positive[base];
+ assertEquals(input, BigInt.parseInt(input, base).toString(base));
+ input = negative[base];
+ assertEquals(input, BigInt.parseInt(input, base).toString(base));
+ }
+}
+
+// .parseInt
+{
+ assertEquals("hellobigint", BigInt.parseInt("hellobigint", 32).toString(32));
+ assertEquals("abc", BigInt.parseInt("101010111100", 2).toString(16));
+ // Detect "0x" prefix.
+ assertEquals("f00dcafe", BigInt.parseInt("0xf00dcafe").toString(16));
+ // Default base is 10, trailing junk is skipped.
+ assertEquals("abc", BigInt.parseInt("2748junk").toString(16));
+ // Objects are converted to string.
+ let obj = {toString: () => "0x12345"};
+ assertEquals("12345", BigInt.parseInt(obj).toString(16));
+ // Empty and invalid strings throw.
+ assertThrows("BigInt.parseInt('')", SyntaxError);
+ assertThrows("BigInt.parseInt('nope', 2)", SyntaxError);
+}
+
+// .valueOf
+{
+ assertEquals(Object(zero).valueOf(), another_zero);
+ assertThrows(() => { return BigInt.prototype.valueOf.call("string"); },
+ TypeError);
+ // TODO(jkummerow): Add tests for (new BigInt(...)).valueOf() when we
+ // can construct BigInt wrappers.
+}
+
+// ToBoolean
+{
+ assertTrue(!zero);
+ assertFalse(!!zero);
+ assertTrue(!!!zero);
+
+ assertFalse(!one);
+ assertTrue(!!one);
+ assertFalse(!!!one);
+}
+
+// Strict equality
+{
+ assertTrue(zero === zero);
+ assertFalse(zero !== zero);
+
+ assertTrue(zero === another_zero);
+ assertFalse(zero !== another_zero);
+
+ assertFalse(zero === one);
+ assertTrue(zero !== one);
+ assertTrue(one !== zero);
+ assertFalse(one === zero);
+
+ assertFalse(zero === 0);
+ assertTrue(zero !== 0);
+ assertFalse(0 === zero);
+ assertTrue(0 !== zero);
+}
+
+// SameValue
+{
+ const obj = Object.defineProperty({}, 'foo',
+ {value: zero, writable: false, configurable: false});
+
+ assertTrue(Reflect.defineProperty(obj, 'foo', {value: zero}));
+ assertTrue(Reflect.defineProperty(obj, 'foo', {value: another_zero}));
+ assertFalse(Reflect.defineProperty(obj, 'foo', {value: one}));
+}
+
+// SameValueZero
+{
+ assertTrue([zero].includes(zero));
+ assertTrue([zero].includes(another_zero));
+
+ assertFalse([zero].includes(+0));
+ assertFalse([zero].includes(-0));
+
+ assertFalse([+0].includes(zero));
+ assertFalse([-0].includes(zero));
+
+ assertTrue([one].includes(one));
+ assertTrue([one].includes(another_one));
+
+ assertFalse([one].includes(1));
+ assertFalse([1].includes(one));
+}
+{
+ assertTrue(new Set([zero]).has(zero));
+ assertTrue(new Set([zero]).has(another_zero));
+
+ assertFalse(new Set([zero]).has(+0));
+ assertFalse(new Set([zero]).has(-0));
+
+ assertFalse(new Set([+0]).has(zero));
+ assertFalse(new Set([-0]).has(zero));
+
+ assertTrue(new Set([one]).has(one));
+ assertTrue(new Set([one]).has(another_one));
+}
+{
+ assertTrue(new Map([[zero, 42]]).has(zero));
+ assertTrue(new Map([[zero, 42]]).has(another_zero));
+
+ assertFalse(new Map([[zero, 42]]).has(+0));
+ assertFalse(new Map([[zero, 42]]).has(-0));
+
+ assertFalse(new Map([[+0, 42]]).has(zero));
+ assertFalse(new Map([[-0, 42]]).has(zero));
+
+ assertTrue(new Map([[one, 42]]).has(one));
+ assertTrue(new Map([[one, 42]]).has(another_one));
+}
+
+// Binary ops.
+{
+ assertTrue(one + two === three);
+ assertEquals("hello1", "hello" + one);
+ assertEquals("2hello", two + "hello");
+ assertThrows("one + 2", TypeError);
+ assertThrows("2 + one", TypeError);
+ assertThrows("one + 0.5", TypeError);
+ assertThrows("0.5 + one", TypeError);
+ assertThrows("one + null", TypeError);
+ assertThrows("null + one", TypeError);
+
+ assertTrue(three - two === one);
+ assertThrows("two - 1", TypeError);
+ assertThrows("2 - one", TypeError);
+ assertThrows("two - 0.5", TypeError);
+ assertThrows("2.5 - one", TypeError);
+
+ assertTrue(two * three === six);
+ assertThrows("two * 1", TypeError);
+ assertThrows("1 * two", TypeError);
+ assertThrows("two * 1.5", TypeError);
+ assertThrows("1.5 * two", TypeError);
+
+ assertTrue(six / three === two);
+ assertThrows("six / 3", TypeError);
+ assertThrows("3 / three", TypeError);
+ assertThrows("six / 0.5", TypeError);
+ assertThrows("0.5 / six", TypeError);
+ assertThrows("zero / zero", RangeError);
+ assertThrows("zero / 0", TypeError);
+
+ assertTrue(three % two === one);
+ assertThrows("three % 2", TypeError);
+ assertThrows("3 % two", TypeError);
+ assertThrows("three % 2.5", TypeError);
+ assertThrows("3.5 % two", TypeError);
+ assertThrows("three % zero", RangeError);
+ assertThrows("three % 0", TypeError);
+}
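Two things worth flagging in bigint.js: it apparently predates BigInt literals, hence the BigInt(0)-style constants, and BigInt.parseInt is an early --harmony-bigint extension that did not survive into the finished proposal. The same behaviors are expressible today with the standardized surface:

console.log(typeof 0n);                   // "bigint"
console.log((0x14c250e5n).toString(2));   // "10100110000100101000011100101"
console.log(1n + 2n === 3n);              // true
try { 1n + 1; } catch (e) { console.log(e instanceof TypeError); }  // true:
// arithmetic never implicitly mixes BigInt and Number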
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-large.js b/deps/v8/test/mjsunit/harmony/modules-import-large.js
new file mode 100644
index 0000000000..250a41bd82
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-large.js
@@ -0,0 +1,1120 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+// Flags: --allow-natives-syntax
+
+import * as m1 from "modules-skip-large1.js";
+import * as m2 from "modules-skip-large2.js";
+
+assertFalse(%HasFastProperties(m1));
+assertFalse(%HasFastProperties(m2));
+assertFalse(%HaveSameMap(m1, m2));
+
+function verify(m) {
+ assertEquals(m.a0, 0);
+ assertEquals(m.a1, 1);
+ assertEquals(m.a2, 2);
+ assertEquals(m.a3, 3);
+ assertEquals(m.a4, 4);
+ assertEquals(m.a5, 5);
+ assertEquals(m.a6, 6);
+ assertEquals(m.a7, 7);
+ assertEquals(m.a8, 8);
+ assertEquals(m.a9, 9);
+ assertEquals(m.a10, 10);
+ assertEquals(m.a11, 11);
+ assertEquals(m.a12, 12);
+ assertEquals(m.a13, 13);
+ assertEquals(m.a14, 14);
+ assertEquals(m.a15, 15);
+ assertEquals(m.a16, 16);
+ assertEquals(m.a17, 17);
+ assertEquals(m.a18, 18);
+ assertEquals(m.a19, 19);
+ assertEquals(m.a20, 20);
+ assertEquals(m.a21, 21);
+ assertEquals(m.a22, 22);
+ assertEquals(m.a23, 23);
+ assertEquals(m.a24, 24);
+ assertEquals(m.a25, 25);
+ assertEquals(m.a26, 26);
+ assertEquals(m.a27, 27);
+ assertEquals(m.a28, 28);
+ assertEquals(m.a29, 29);
+ assertEquals(m.a30, 30);
+ assertEquals(m.a31, 31);
+ assertEquals(m.a32, 32);
+ assertEquals(m.a33, 33);
+ assertEquals(m.a34, 34);
+ assertEquals(m.a35, 35);
+ assertEquals(m.a36, 36);
+ assertEquals(m.a37, 37);
+ assertEquals(m.a38, 38);
+ assertEquals(m.a39, 39);
+ assertEquals(m.a40, 40);
+ assertEquals(m.a41, 41);
+ assertEquals(m.a42, 42);
+ assertEquals(m.a43, 43);
+ assertEquals(m.a44, 44);
+ assertEquals(m.a45, 45);
+ assertEquals(m.a46, 46);
+ assertEquals(m.a47, 47);
+ assertEquals(m.a48, 48);
+ assertEquals(m.a49, 49);
+ assertEquals(m.a50, 50);
+ assertEquals(m.a51, 51);
+ assertEquals(m.a52, 52);
+ assertEquals(m.a53, 53);
+ assertEquals(m.a54, 54);
+ assertEquals(m.a55, 55);
+ assertEquals(m.a56, 56);
+ assertEquals(m.a57, 57);
+ assertEquals(m.a58, 58);
+ assertEquals(m.a59, 59);
+ assertEquals(m.a60, 60);
+ assertEquals(m.a61, 61);
+ assertEquals(m.a62, 62);
+ assertEquals(m.a63, 63);
+ assertEquals(m.a64, 64);
+ assertEquals(m.a65, 65);
+ assertEquals(m.a66, 66);
+ assertEquals(m.a67, 67);
+ assertEquals(m.a68, 68);
+ assertEquals(m.a69, 69);
+ assertEquals(m.a70, 70);
+ assertEquals(m.a71, 71);
+ assertEquals(m.a72, 72);
+ assertEquals(m.a73, 73);
+ assertEquals(m.a74, 74);
+ assertEquals(m.a75, 75);
+ assertEquals(m.a76, 76);
+ assertEquals(m.a77, 77);
+ assertEquals(m.a78, 78);
+ assertEquals(m.a79, 79);
+ assertEquals(m.a80, 80);
+ assertEquals(m.a81, 81);
+ assertEquals(m.a82, 82);
+ assertEquals(m.a83, 83);
+ assertEquals(m.a84, 84);
+ assertEquals(m.a85, 85);
+ assertEquals(m.a86, 86);
+ assertEquals(m.a87, 87);
+ assertEquals(m.a88, 88);
+ assertEquals(m.a89, 89);
+ assertEquals(m.a90, 90);
+ assertEquals(m.a91, 91);
+ assertEquals(m.a92, 92);
+ assertEquals(m.a93, 93);
+ assertEquals(m.a94, 94);
+ assertEquals(m.a95, 95);
+ assertEquals(m.a96, 96);
+ assertEquals(m.a97, 97);
+ assertEquals(m.a98, 98);
+ assertEquals(m.a99, 99);
+ assertEquals(m.a100, 100);
+ assertEquals(m.a101, 101);
+ assertEquals(m.a102, 102);
+ assertEquals(m.a103, 103);
+ assertEquals(m.a104, 104);
+ assertEquals(m.a105, 105);
+ assertEquals(m.a106, 106);
+ assertEquals(m.a107, 107);
+ assertEquals(m.a108, 108);
+ assertEquals(m.a109, 109);
+ assertEquals(m.a110, 110);
+ assertEquals(m.a111, 111);
+ assertEquals(m.a112, 112);
+ assertEquals(m.a113, 113);
+ assertEquals(m.a114, 114);
+ assertEquals(m.a115, 115);
+ assertEquals(m.a116, 116);
+ assertEquals(m.a117, 117);
+ assertEquals(m.a118, 118);
+ assertEquals(m.a119, 119);
+ assertEquals(m.a120, 120);
+ assertEquals(m.a121, 121);
+ assertEquals(m.a122, 122);
+ assertEquals(m.a123, 123);
+ assertEquals(m.a124, 124);
+ assertEquals(m.a125, 125);
+ assertEquals(m.a126, 126);
+ assertEquals(m.a127, 127);
+ assertEquals(m.a128, 128);
+ assertEquals(m.a129, 129);
+ assertEquals(m.a130, 130);
+ assertEquals(m.a131, 131);
+ assertEquals(m.a132, 132);
+ assertEquals(m.a133, 133);
+ assertEquals(m.a134, 134);
+ assertEquals(m.a135, 135);
+ assertEquals(m.a136, 136);
+ assertEquals(m.a137, 137);
+ assertEquals(m.a138, 138);
+ assertEquals(m.a139, 139);
+ assertEquals(m.a140, 140);
+ assertEquals(m.a141, 141);
+ assertEquals(m.a142, 142);
+ assertEquals(m.a143, 143);
+ assertEquals(m.a144, 144);
+ assertEquals(m.a145, 145);
+ assertEquals(m.a146, 146);
+ assertEquals(m.a147, 147);
+ assertEquals(m.a148, 148);
+ assertEquals(m.a149, 149);
+ assertEquals(m.a150, 150);
+ assertEquals(m.a151, 151);
+ assertEquals(m.a152, 152);
+ assertEquals(m.a153, 153);
+ assertEquals(m.a154, 154);
+ assertEquals(m.a155, 155);
+ assertEquals(m.a156, 156);
+ assertEquals(m.a157, 157);
+ assertEquals(m.a158, 158);
+ assertEquals(m.a159, 159);
+ assertEquals(m.a160, 160);
+ assertEquals(m.a161, 161);
+ assertEquals(m.a162, 162);
+ assertEquals(m.a163, 163);
+ assertEquals(m.a164, 164);
+ assertEquals(m.a165, 165);
+ assertEquals(m.a166, 166);
+ assertEquals(m.a167, 167);
+ assertEquals(m.a168, 168);
+ assertEquals(m.a169, 169);
+ assertEquals(m.a170, 170);
+ assertEquals(m.a171, 171);
+ assertEquals(m.a172, 172);
+ assertEquals(m.a173, 173);
+ assertEquals(m.a174, 174);
+ assertEquals(m.a175, 175);
+ assertEquals(m.a176, 176);
+ assertEquals(m.a177, 177);
+ assertEquals(m.a178, 178);
+ assertEquals(m.a179, 179);
+ assertEquals(m.a180, 180);
+ assertEquals(m.a181, 181);
+ assertEquals(m.a182, 182);
+ assertEquals(m.a183, 183);
+ assertEquals(m.a184, 184);
+ assertEquals(m.a185, 185);
+ assertEquals(m.a186, 186);
+ assertEquals(m.a187, 187);
+ assertEquals(m.a188, 188);
+ assertEquals(m.a189, 189);
+ assertEquals(m.a190, 190);
+ assertEquals(m.a191, 191);
+ assertEquals(m.a192, 192);
+ assertEquals(m.a193, 193);
+ assertEquals(m.a194, 194);
+ assertEquals(m.a195, 195);
+ assertEquals(m.a196, 196);
+ assertEquals(m.a197, 197);
+ assertEquals(m.a198, 198);
+ assertEquals(m.a199, 199);
+ assertEquals(m.a200, 200);
+ assertEquals(m.a201, 201);
+ assertEquals(m.a202, 202);
+ assertEquals(m.a203, 203);
+ assertEquals(m.a204, 204);
+ assertEquals(m.a205, 205);
+ assertEquals(m.a206, 206);
+ assertEquals(m.a207, 207);
+ assertEquals(m.a208, 208);
+ assertEquals(m.a209, 209);
+ assertEquals(m.a210, 210);
+ assertEquals(m.a211, 211);
+ assertEquals(m.a212, 212);
+ assertEquals(m.a213, 213);
+ assertEquals(m.a214, 214);
+ assertEquals(m.a215, 215);
+ assertEquals(m.a216, 216);
+ assertEquals(m.a217, 217);
+ assertEquals(m.a218, 218);
+ assertEquals(m.a219, 219);
+ assertEquals(m.a220, 220);
+ assertEquals(m.a221, 221);
+ assertEquals(m.a222, 222);
+ assertEquals(m.a223, 223);
+ assertEquals(m.a224, 224);
+ assertEquals(m.a225, 225);
+ assertEquals(m.a226, 226);
+ assertEquals(m.a227, 227);
+ assertEquals(m.a228, 228);
+ assertEquals(m.a229, 229);
+ assertEquals(m.a230, 230);
+ assertEquals(m.a231, 231);
+ assertEquals(m.a232, 232);
+ assertEquals(m.a233, 233);
+ assertEquals(m.a234, 234);
+ assertEquals(m.a235, 235);
+ assertEquals(m.a236, 236);
+ assertEquals(m.a237, 237);
+ assertEquals(m.a238, 238);
+ assertEquals(m.a239, 239);
+ assertEquals(m.a240, 240);
+ assertEquals(m.a241, 241);
+ assertEquals(m.a242, 242);
+ assertEquals(m.a243, 243);
+ assertEquals(m.a244, 244);
+ assertEquals(m.a245, 245);
+ assertEquals(m.a246, 246);
+ assertEquals(m.a247, 247);
+ assertEquals(m.a248, 248);
+ assertEquals(m.a249, 249);
+ assertEquals(m.a250, 250);
+ assertEquals(m.a251, 251);
+ assertEquals(m.a252, 252);
+ assertEquals(m.a253, 253);
+ assertEquals(m.a254, 254);
+ assertEquals(m.a255, 255);
+ assertEquals(m.a256, 256);
+ assertEquals(m.a257, 257);
+ assertEquals(m.a258, 258);
+ assertEquals(m.a259, 259);
+ assertEquals(m.a260, 260);
+ assertEquals(m.a261, 261);
+ assertEquals(m.a262, 262);
+ assertEquals(m.a263, 263);
+ assertEquals(m.a264, 264);
+ assertEquals(m.a265, 265);
+ assertEquals(m.a266, 266);
+ assertEquals(m.a267, 267);
+ assertEquals(m.a268, 268);
+ assertEquals(m.a269, 269);
+ assertEquals(m.a270, 270);
+ assertEquals(m.a271, 271);
+ assertEquals(m.a272, 272);
+ assertEquals(m.a273, 273);
+ assertEquals(m.a274, 274);
+ assertEquals(m.a275, 275);
+ assertEquals(m.a276, 276);
+ assertEquals(m.a277, 277);
+ assertEquals(m.a278, 278);
+ assertEquals(m.a279, 279);
+ assertEquals(m.a280, 280);
+ assertEquals(m.a281, 281);
+ assertEquals(m.a282, 282);
+ assertEquals(m.a283, 283);
+ assertEquals(m.a284, 284);
+ assertEquals(m.a285, 285);
+ assertEquals(m.a286, 286);
+ assertEquals(m.a287, 287);
+ assertEquals(m.a288, 288);
+ assertEquals(m.a289, 289);
+ assertEquals(m.a290, 290);
+ assertEquals(m.a291, 291);
+ assertEquals(m.a292, 292);
+ assertEquals(m.a293, 293);
+ assertEquals(m.a294, 294);
+ assertEquals(m.a295, 295);
+ assertEquals(m.a296, 296);
+ assertEquals(m.a297, 297);
+ assertEquals(m.a298, 298);
+ assertEquals(m.a299, 299);
+ assertEquals(m.a300, 300);
+ assertEquals(m.a301, 301);
+ assertEquals(m.a302, 302);
+ assertEquals(m.a303, 303);
+ assertEquals(m.a304, 304);
+ assertEquals(m.a305, 305);
+ assertEquals(m.a306, 306);
+ assertEquals(m.a307, 307);
+ assertEquals(m.a308, 308);
+ assertEquals(m.a309, 309);
+ assertEquals(m.a310, 310);
+ assertEquals(m.a311, 311);
+ assertEquals(m.a312, 312);
+ assertEquals(m.a313, 313);
+ assertEquals(m.a314, 314);
+ assertEquals(m.a315, 315);
+ assertEquals(m.a316, 316);
+ assertEquals(m.a317, 317);
+ assertEquals(m.a318, 318);
+ assertEquals(m.a319, 319);
+ assertEquals(m.a320, 320);
+ assertEquals(m.a321, 321);
+ assertEquals(m.a322, 322);
+ assertEquals(m.a323, 323);
+ assertEquals(m.a324, 324);
+ assertEquals(m.a325, 325);
+ assertEquals(m.a326, 326);
+ assertEquals(m.a327, 327);
+ assertEquals(m.a328, 328);
+ assertEquals(m.a329, 329);
+ assertEquals(m.a330, 330);
+ assertEquals(m.a331, 331);
+ assertEquals(m.a332, 332);
+ assertEquals(m.a333, 333);
+ assertEquals(m.a334, 334);
+ assertEquals(m.a335, 335);
+ assertEquals(m.a336, 336);
+ assertEquals(m.a337, 337);
+ assertEquals(m.a338, 338);
+ assertEquals(m.a339, 339);
+ assertEquals(m.a340, 340);
+ assertEquals(m.a341, 341);
+ assertEquals(m.a342, 342);
+ assertEquals(m.a343, 343);
+ assertEquals(m.a344, 344);
+ assertEquals(m.a345, 345);
+ assertEquals(m.a346, 346);
+ assertEquals(m.a347, 347);
+ assertEquals(m.a348, 348);
+ assertEquals(m.a349, 349);
+ assertEquals(m.a350, 350);
+ assertEquals(m.a351, 351);
+ assertEquals(m.a352, 352);
+ assertEquals(m.a353, 353);
+ assertEquals(m.a354, 354);
+ assertEquals(m.a355, 355);
+ assertEquals(m.a356, 356);
+ assertEquals(m.a357, 357);
+ assertEquals(m.a358, 358);
+ assertEquals(m.a359, 359);
+ assertEquals(m.a360, 360);
+ assertEquals(m.a361, 361);
+ assertEquals(m.a362, 362);
+ assertEquals(m.a363, 363);
+ assertEquals(m.a364, 364);
+ assertEquals(m.a365, 365);
+ assertEquals(m.a366, 366);
+ assertEquals(m.a367, 367);
+ assertEquals(m.a368, 368);
+ assertEquals(m.a369, 369);
+ assertEquals(m.a370, 370);
+ assertEquals(m.a371, 371);
+ assertEquals(m.a372, 372);
+ assertEquals(m.a373, 373);
+ assertEquals(m.a374, 374);
+ assertEquals(m.a375, 375);
+ assertEquals(m.a376, 376);
+ assertEquals(m.a377, 377);
+ assertEquals(m.a378, 378);
+ assertEquals(m.a379, 379);
+ assertEquals(m.a380, 380);
+ assertEquals(m.a381, 381);
+ assertEquals(m.a382, 382);
+ assertEquals(m.a383, 383);
+ assertEquals(m.a384, 384);
+ assertEquals(m.a385, 385);
+ assertEquals(m.a386, 386);
+ assertEquals(m.a387, 387);
+ assertEquals(m.a388, 388);
+ assertEquals(m.a389, 389);
+ assertEquals(m.a390, 390);
+ assertEquals(m.a391, 391);
+ assertEquals(m.a392, 392);
+ assertEquals(m.a393, 393);
+ assertEquals(m.a394, 394);
+ assertEquals(m.a395, 395);
+ assertEquals(m.a396, 396);
+ assertEquals(m.a397, 397);
+ assertEquals(m.a398, 398);
+ assertEquals(m.a399, 399);
+ assertEquals(m.a400, 400);
+ assertEquals(m.a401, 401);
+ assertEquals(m.a402, 402);
+ assertEquals(m.a403, 403);
+ assertEquals(m.a404, 404);
+ assertEquals(m.a405, 405);
+ assertEquals(m.a406, 406);
+ assertEquals(m.a407, 407);
+ assertEquals(m.a408, 408);
+ assertEquals(m.a409, 409);
+ assertEquals(m.a410, 410);
+ assertEquals(m.a411, 411);
+ assertEquals(m.a412, 412);
+ assertEquals(m.a413, 413);
+ assertEquals(m.a414, 414);
+ assertEquals(m.a415, 415);
+ assertEquals(m.a416, 416);
+ assertEquals(m.a417, 417);
+ assertEquals(m.a418, 418);
+ assertEquals(m.a419, 419);
+ assertEquals(m.a420, 420);
+ assertEquals(m.a421, 421);
+ assertEquals(m.a422, 422);
+ assertEquals(m.a423, 423);
+ assertEquals(m.a424, 424);
+ assertEquals(m.a425, 425);
+ assertEquals(m.a426, 426);
+ assertEquals(m.a427, 427);
+ assertEquals(m.a428, 428);
+ assertEquals(m.a429, 429);
+ assertEquals(m.a430, 430);
+ assertEquals(m.a431, 431);
+ assertEquals(m.a432, 432);
+ assertEquals(m.a433, 433);
+ assertEquals(m.a434, 434);
+ assertEquals(m.a435, 435);
+ assertEquals(m.a436, 436);
+ assertEquals(m.a437, 437);
+ assertEquals(m.a438, 438);
+ assertEquals(m.a439, 439);
+ assertEquals(m.a440, 440);
+ assertEquals(m.a441, 441);
+ assertEquals(m.a442, 442);
+ assertEquals(m.a443, 443);
+ assertEquals(m.a444, 444);
+ assertEquals(m.a445, 445);
+ assertEquals(m.a446, 446);
+ assertEquals(m.a447, 447);
+ assertEquals(m.a448, 448);
+ assertEquals(m.a449, 449);
+ assertEquals(m.a450, 450);
+ assertEquals(m.a451, 451);
+ assertEquals(m.a452, 452);
+ assertEquals(m.a453, 453);
+ assertEquals(m.a454, 454);
+ assertEquals(m.a455, 455);
+ assertEquals(m.a456, 456);
+ assertEquals(m.a457, 457);
+ assertEquals(m.a458, 458);
+ assertEquals(m.a459, 459);
+ assertEquals(m.a460, 460);
+ assertEquals(m.a461, 461);
+ assertEquals(m.a462, 462);
+ assertEquals(m.a463, 463);
+ assertEquals(m.a464, 464);
+ assertEquals(m.a465, 465);
+ assertEquals(m.a466, 466);
+ assertEquals(m.a467, 467);
+ assertEquals(m.a468, 468);
+ assertEquals(m.a469, 469);
+ assertEquals(m.a470, 470);
+ assertEquals(m.a471, 471);
+ assertEquals(m.a472, 472);
+ assertEquals(m.a473, 473);
+ assertEquals(m.a474, 474);
+ assertEquals(m.a475, 475);
+ assertEquals(m.a476, 476);
+ assertEquals(m.a477, 477);
+ assertEquals(m.a478, 478);
+ assertEquals(m.a479, 479);
+ assertEquals(m.a480, 480);
+ assertEquals(m.a481, 481);
+ assertEquals(m.a482, 482);
+ assertEquals(m.a483, 483);
+ assertEquals(m.a484, 484);
+ assertEquals(m.a485, 485);
+ assertEquals(m.a486, 486);
+ assertEquals(m.a487, 487);
+ assertEquals(m.a488, 488);
+ assertEquals(m.a489, 489);
+ assertEquals(m.a490, 490);
+ assertEquals(m.a491, 491);
+ assertEquals(m.a492, 492);
+ assertEquals(m.a493, 493);
+ assertEquals(m.a494, 494);
+ assertEquals(m.a495, 495);
+ assertEquals(m.a496, 496);
+ assertEquals(m.a497, 497);
+ assertEquals(m.a498, 498);
+ assertEquals(m.a499, 499);
+ assertEquals(m.a500, 500);
+ assertEquals(m.a501, 501);
+ assertEquals(m.a502, 502);
+ assertEquals(m.a503, 503);
+ assertEquals(m.a504, 504);
+ assertEquals(m.a505, 505);
+ assertEquals(m.a506, 506);
+ assertEquals(m.a507, 507);
+ assertEquals(m.a508, 508);
+ assertEquals(m.a509, 509);
+ assertEquals(m.a510, 510);
+ assertEquals(m.a511, 511);
+ assertEquals(m.a512, 512);
+ assertEquals(m.a513, 513);
+ assertEquals(m.a514, 514);
+ assertEquals(m.a515, 515);
+ assertEquals(m.a516, 516);
+ assertEquals(m.a517, 517);
+ assertEquals(m.a518, 518);
+ assertEquals(m.a519, 519);
+ assertEquals(m.a520, 520);
+ assertEquals(m.a521, 521);
+ assertEquals(m.a522, 522);
+ assertEquals(m.a523, 523);
+ assertEquals(m.a524, 524);
+ assertEquals(m.a525, 525);
+ assertEquals(m.a526, 526);
+ assertEquals(m.a527, 527);
+ assertEquals(m.a528, 528);
+ assertEquals(m.a529, 529);
+ assertEquals(m.a530, 530);
+ assertEquals(m.a531, 531);
+ assertEquals(m.a532, 532);
+ assertEquals(m.a533, 533);
+ assertEquals(m.a534, 534);
+ assertEquals(m.a535, 535);
+ assertEquals(m.a536, 536);
+ assertEquals(m.a537, 537);
+ assertEquals(m.a538, 538);
+ assertEquals(m.a539, 539);
+ assertEquals(m.a540, 540);
+ assertEquals(m.a541, 541);
+ assertEquals(m.a542, 542);
+ assertEquals(m.a543, 543);
+ assertEquals(m.a544, 544);
+ assertEquals(m.a545, 545);
+ assertEquals(m.a546, 546);
+ assertEquals(m.a547, 547);
+ assertEquals(m.a548, 548);
+ assertEquals(m.a549, 549);
+ assertEquals(m.a550, 550);
+ assertEquals(m.a551, 551);
+ assertEquals(m.a552, 552);
+ assertEquals(m.a553, 553);
+ assertEquals(m.a554, 554);
+ assertEquals(m.a555, 555);
+ assertEquals(m.a556, 556);
+ assertEquals(m.a557, 557);
+ assertEquals(m.a558, 558);
+ assertEquals(m.a559, 559);
+ assertEquals(m.a560, 560);
+ assertEquals(m.a561, 561);
+ assertEquals(m.a562, 562);
+ assertEquals(m.a563, 563);
+ assertEquals(m.a564, 564);
+ assertEquals(m.a565, 565);
+ assertEquals(m.a566, 566);
+ assertEquals(m.a567, 567);
+ assertEquals(m.a568, 568);
+ assertEquals(m.a569, 569);
+ assertEquals(m.a570, 570);
+ assertEquals(m.a571, 571);
+ assertEquals(m.a572, 572);
+ assertEquals(m.a573, 573);
+ assertEquals(m.a574, 574);
+ assertEquals(m.a575, 575);
+ assertEquals(m.a576, 576);
+ assertEquals(m.a577, 577);
+ assertEquals(m.a578, 578);
+ assertEquals(m.a579, 579);
+ assertEquals(m.a580, 580);
+ assertEquals(m.a581, 581);
+ assertEquals(m.a582, 582);
+ assertEquals(m.a583, 583);
+ assertEquals(m.a584, 584);
+ assertEquals(m.a585, 585);
+ assertEquals(m.a586, 586);
+ assertEquals(m.a587, 587);
+ assertEquals(m.a588, 588);
+ assertEquals(m.a589, 589);
+ assertEquals(m.a590, 590);
+ assertEquals(m.a591, 591);
+ assertEquals(m.a592, 592);
+ assertEquals(m.a593, 593);
+ assertEquals(m.a594, 594);
+ assertEquals(m.a595, 595);
+ assertEquals(m.a596, 596);
+ assertEquals(m.a597, 597);
+ assertEquals(m.a598, 598);
+ assertEquals(m.a599, 599);
+ assertEquals(m.a600, 600);
+ assertEquals(m.a601, 601);
+ assertEquals(m.a602, 602);
+ assertEquals(m.a603, 603);
+ assertEquals(m.a604, 604);
+ assertEquals(m.a605, 605);
+ assertEquals(m.a606, 606);
+ assertEquals(m.a607, 607);
+ assertEquals(m.a608, 608);
+ assertEquals(m.a609, 609);
+ assertEquals(m.a610, 610);
+ assertEquals(m.a611, 611);
+ assertEquals(m.a612, 612);
+ assertEquals(m.a613, 613);
+ assertEquals(m.a614, 614);
+ assertEquals(m.a615, 615);
+ assertEquals(m.a616, 616);
+ assertEquals(m.a617, 617);
+ assertEquals(m.a618, 618);
+ assertEquals(m.a619, 619);
+ assertEquals(m.a620, 620);
+ assertEquals(m.a621, 621);
+ assertEquals(m.a622, 622);
+ assertEquals(m.a623, 623);
+ assertEquals(m.a624, 624);
+ assertEquals(m.a625, 625);
+ assertEquals(m.a626, 626);
+ assertEquals(m.a627, 627);
+ assertEquals(m.a628, 628);
+ assertEquals(m.a629, 629);
+ assertEquals(m.a630, 630);
+ assertEquals(m.a631, 631);
+ assertEquals(m.a632, 632);
+ assertEquals(m.a633, 633);
+ assertEquals(m.a634, 634);
+ assertEquals(m.a635, 635);
+ assertEquals(m.a636, 636);
+ assertEquals(m.a637, 637);
+ assertEquals(m.a638, 638);
+ assertEquals(m.a639, 639);
+ assertEquals(m.a640, 640);
+ assertEquals(m.a641, 641);
+ assertEquals(m.a642, 642);
+ assertEquals(m.a643, 643);
+ assertEquals(m.a644, 644);
+ assertEquals(m.a645, 645);
+ assertEquals(m.a646, 646);
+ assertEquals(m.a647, 647);
+ assertEquals(m.a648, 648);
+ assertEquals(m.a649, 649);
+ assertEquals(m.a650, 650);
+ assertEquals(m.a651, 651);
+ assertEquals(m.a652, 652);
+ assertEquals(m.a653, 653);
+ assertEquals(m.a654, 654);
+ assertEquals(m.a655, 655);
+ assertEquals(m.a656, 656);
+ assertEquals(m.a657, 657);
+ assertEquals(m.a658, 658);
+ assertEquals(m.a659, 659);
+ assertEquals(m.a660, 660);
+ assertEquals(m.a661, 661);
+ assertEquals(m.a662, 662);
+ assertEquals(m.a663, 663);
+ assertEquals(m.a664, 664);
+ assertEquals(m.a665, 665);
+ assertEquals(m.a666, 666);
+ assertEquals(m.a667, 667);
+ assertEquals(m.a668, 668);
+ assertEquals(m.a669, 669);
+ assertEquals(m.a670, 670);
+ assertEquals(m.a671, 671);
+ assertEquals(m.a672, 672);
+ assertEquals(m.a673, 673);
+ assertEquals(m.a674, 674);
+ assertEquals(m.a675, 675);
+ assertEquals(m.a676, 676);
+ assertEquals(m.a677, 677);
+ assertEquals(m.a678, 678);
+ assertEquals(m.a679, 679);
+ assertEquals(m.a680, 680);
+ assertEquals(m.a681, 681);
+ assertEquals(m.a682, 682);
+ assertEquals(m.a683, 683);
+ assertEquals(m.a684, 684);
+ assertEquals(m.a685, 685);
+ assertEquals(m.a686, 686);
+ assertEquals(m.a687, 687);
+ assertEquals(m.a688, 688);
+ assertEquals(m.a689, 689);
+ assertEquals(m.a690, 690);
+ assertEquals(m.a691, 691);
+ assertEquals(m.a692, 692);
+ assertEquals(m.a693, 693);
+ assertEquals(m.a694, 694);
+ assertEquals(m.a695, 695);
+ assertEquals(m.a696, 696);
+ assertEquals(m.a697, 697);
+ assertEquals(m.a698, 698);
+ assertEquals(m.a699, 699);
+ assertEquals(m.a700, 700);
+ assertEquals(m.a701, 701);
+ assertEquals(m.a702, 702);
+ assertEquals(m.a703, 703);
+ assertEquals(m.a704, 704);
+ assertEquals(m.a705, 705);
+ assertEquals(m.a706, 706);
+ assertEquals(m.a707, 707);
+ assertEquals(m.a708, 708);
+ assertEquals(m.a709, 709);
+ assertEquals(m.a710, 710);
+ assertEquals(m.a711, 711);
+ assertEquals(m.a712, 712);
+ assertEquals(m.a713, 713);
+ assertEquals(m.a714, 714);
+ assertEquals(m.a715, 715);
+ assertEquals(m.a716, 716);
+ assertEquals(m.a717, 717);
+ assertEquals(m.a718, 718);
+ assertEquals(m.a719, 719);
+ assertEquals(m.a720, 720);
+ assertEquals(m.a721, 721);
+ assertEquals(m.a722, 722);
+ assertEquals(m.a723, 723);
+ assertEquals(m.a724, 724);
+ assertEquals(m.a725, 725);
+ assertEquals(m.a726, 726);
+ assertEquals(m.a727, 727);
+ assertEquals(m.a728, 728);
+ assertEquals(m.a729, 729);
+ assertEquals(m.a730, 730);
+ assertEquals(m.a731, 731);
+ assertEquals(m.a732, 732);
+ assertEquals(m.a733, 733);
+ assertEquals(m.a734, 734);
+ assertEquals(m.a735, 735);
+ assertEquals(m.a736, 736);
+ assertEquals(m.a737, 737);
+ assertEquals(m.a738, 738);
+ assertEquals(m.a739, 739);
+ assertEquals(m.a740, 740);
+ assertEquals(m.a741, 741);
+ assertEquals(m.a742, 742);
+ assertEquals(m.a743, 743);
+ assertEquals(m.a744, 744);
+ assertEquals(m.a745, 745);
+ assertEquals(m.a746, 746);
+ assertEquals(m.a747, 747);
+ assertEquals(m.a748, 748);
+ assertEquals(m.a749, 749);
+ assertEquals(m.a750, 750);
+ assertEquals(m.a751, 751);
+ assertEquals(m.a752, 752);
+ assertEquals(m.a753, 753);
+ assertEquals(m.a754, 754);
+ assertEquals(m.a755, 755);
+ assertEquals(m.a756, 756);
+ assertEquals(m.a757, 757);
+ assertEquals(m.a758, 758);
+ assertEquals(m.a759, 759);
+ assertEquals(m.a760, 760);
+ assertEquals(m.a761, 761);
+ assertEquals(m.a762, 762);
+ assertEquals(m.a763, 763);
+ assertEquals(m.a764, 764);
+ assertEquals(m.a765, 765);
+ assertEquals(m.a766, 766);
+ assertEquals(m.a767, 767);
+ assertEquals(m.a768, 768);
+ assertEquals(m.a769, 769);
+ assertEquals(m.a770, 770);
+ assertEquals(m.a771, 771);
+ assertEquals(m.a772, 772);
+ assertEquals(m.a773, 773);
+ assertEquals(m.a774, 774);
+ assertEquals(m.a775, 775);
+ assertEquals(m.a776, 776);
+ assertEquals(m.a777, 777);
+ assertEquals(m.a778, 778);
+ assertEquals(m.a779, 779);
+ assertEquals(m.a780, 780);
+ assertEquals(m.a781, 781);
+ assertEquals(m.a782, 782);
+ assertEquals(m.a783, 783);
+ assertEquals(m.a784, 784);
+ assertEquals(m.a785, 785);
+ assertEquals(m.a786, 786);
+ assertEquals(m.a787, 787);
+ assertEquals(m.a788, 788);
+ assertEquals(m.a789, 789);
+ assertEquals(m.a790, 790);
+ assertEquals(m.a791, 791);
+ assertEquals(m.a792, 792);
+ assertEquals(m.a793, 793);
+ assertEquals(m.a794, 794);
+ assertEquals(m.a795, 795);
+ assertEquals(m.a796, 796);
+ assertEquals(m.a797, 797);
+ assertEquals(m.a798, 798);
+ assertEquals(m.a799, 799);
+ assertEquals(m.a800, 800);
+ assertEquals(m.a801, 801);
+ assertEquals(m.a802, 802);
+ assertEquals(m.a803, 803);
+ assertEquals(m.a804, 804);
+ assertEquals(m.a805, 805);
+ assertEquals(m.a806, 806);
+ assertEquals(m.a807, 807);
+ assertEquals(m.a808, 808);
+ assertEquals(m.a809, 809);
+ assertEquals(m.a810, 810);
+ assertEquals(m.a811, 811);
+ assertEquals(m.a812, 812);
+ assertEquals(m.a813, 813);
+ assertEquals(m.a814, 814);
+ assertEquals(m.a815, 815);
+ assertEquals(m.a816, 816);
+ assertEquals(m.a817, 817);
+ assertEquals(m.a818, 818);
+ assertEquals(m.a819, 819);
+ assertEquals(m.a820, 820);
+ assertEquals(m.a821, 821);
+ assertEquals(m.a822, 822);
+ assertEquals(m.a823, 823);
+ assertEquals(m.a824, 824);
+ assertEquals(m.a825, 825);
+ assertEquals(m.a826, 826);
+ assertEquals(m.a827, 827);
+ assertEquals(m.a828, 828);
+ assertEquals(m.a829, 829);
+ assertEquals(m.a830, 830);
+ assertEquals(m.a831, 831);
+ assertEquals(m.a832, 832);
+ assertEquals(m.a833, 833);
+ assertEquals(m.a834, 834);
+ assertEquals(m.a835, 835);
+ assertEquals(m.a836, 836);
+ assertEquals(m.a837, 837);
+ assertEquals(m.a838, 838);
+ assertEquals(m.a839, 839);
+ assertEquals(m.a840, 840);
+ assertEquals(m.a841, 841);
+ assertEquals(m.a842, 842);
+ assertEquals(m.a843, 843);
+ assertEquals(m.a844, 844);
+ assertEquals(m.a845, 845);
+ assertEquals(m.a846, 846);
+ assertEquals(m.a847, 847);
+ assertEquals(m.a848, 848);
+ assertEquals(m.a849, 849);
+ assertEquals(m.a850, 850);
+ assertEquals(m.a851, 851);
+ assertEquals(m.a852, 852);
+ assertEquals(m.a853, 853);
+ assertEquals(m.a854, 854);
+ assertEquals(m.a855, 855);
+ assertEquals(m.a856, 856);
+ assertEquals(m.a857, 857);
+ assertEquals(m.a858, 858);
+ assertEquals(m.a859, 859);
+ assertEquals(m.a860, 860);
+ assertEquals(m.a861, 861);
+ assertEquals(m.a862, 862);
+ assertEquals(m.a863, 863);
+ assertEquals(m.a864, 864);
+ assertEquals(m.a865, 865);
+ assertEquals(m.a866, 866);
+ assertEquals(m.a867, 867);
+ assertEquals(m.a868, 868);
+ assertEquals(m.a869, 869);
+ assertEquals(m.a870, 870);
+ assertEquals(m.a871, 871);
+ assertEquals(m.a872, 872);
+ assertEquals(m.a873, 873);
+ assertEquals(m.a874, 874);
+ assertEquals(m.a875, 875);
+ assertEquals(m.a876, 876);
+ assertEquals(m.a877, 877);
+ assertEquals(m.a878, 878);
+ assertEquals(m.a879, 879);
+ assertEquals(m.a880, 880);
+ assertEquals(m.a881, 881);
+ assertEquals(m.a882, 882);
+ assertEquals(m.a883, 883);
+ assertEquals(m.a884, 884);
+ assertEquals(m.a885, 885);
+ assertEquals(m.a886, 886);
+ assertEquals(m.a887, 887);
+ assertEquals(m.a888, 888);
+ assertEquals(m.a889, 889);
+ assertEquals(m.a890, 890);
+ assertEquals(m.a891, 891);
+ assertEquals(m.a892, 892);
+ assertEquals(m.a893, 893);
+ assertEquals(m.a894, 894);
+ assertEquals(m.a895, 895);
+ assertEquals(m.a896, 896);
+ assertEquals(m.a897, 897);
+ assertEquals(m.a898, 898);
+ assertEquals(m.a899, 899);
+ assertEquals(m.a900, 900);
+ assertEquals(m.a901, 901);
+ assertEquals(m.a902, 902);
+ assertEquals(m.a903, 903);
+ assertEquals(m.a904, 904);
+ assertEquals(m.a905, 905);
+ assertEquals(m.a906, 906);
+ assertEquals(m.a907, 907);
+ assertEquals(m.a908, 908);
+ assertEquals(m.a909, 909);
+ assertEquals(m.a910, 910);
+ assertEquals(m.a911, 911);
+ assertEquals(m.a912, 912);
+ assertEquals(m.a913, 913);
+ assertEquals(m.a914, 914);
+ assertEquals(m.a915, 915);
+ assertEquals(m.a916, 916);
+ assertEquals(m.a917, 917);
+ assertEquals(m.a918, 918);
+ assertEquals(m.a919, 919);
+ assertEquals(m.a920, 920);
+ assertEquals(m.a921, 921);
+ assertEquals(m.a922, 922);
+ assertEquals(m.a923, 923);
+ assertEquals(m.a924, 924);
+ assertEquals(m.a925, 925);
+ assertEquals(m.a926, 926);
+ assertEquals(m.a927, 927);
+ assertEquals(m.a928, 928);
+ assertEquals(m.a929, 929);
+ assertEquals(m.a930, 930);
+ assertEquals(m.a931, 931);
+ assertEquals(m.a932, 932);
+ assertEquals(m.a933, 933);
+ assertEquals(m.a934, 934);
+ assertEquals(m.a935, 935);
+ assertEquals(m.a936, 936);
+ assertEquals(m.a937, 937);
+ assertEquals(m.a938, 938);
+ assertEquals(m.a939, 939);
+ assertEquals(m.a940, 940);
+ assertEquals(m.a941, 941);
+ assertEquals(m.a942, 942);
+ assertEquals(m.a943, 943);
+ assertEquals(m.a944, 944);
+ assertEquals(m.a945, 945);
+ assertEquals(m.a946, 946);
+ assertEquals(m.a947, 947);
+ assertEquals(m.a948, 948);
+ assertEquals(m.a949, 949);
+ assertEquals(m.a950, 950);
+ assertEquals(m.a951, 951);
+ assertEquals(m.a952, 952);
+ assertEquals(m.a953, 953);
+ assertEquals(m.a954, 954);
+ assertEquals(m.a955, 955);
+ assertEquals(m.a956, 956);
+ assertEquals(m.a957, 957);
+ assertEquals(m.a958, 958);
+ assertEquals(m.a959, 959);
+ assertEquals(m.a960, 960);
+ assertEquals(m.a961, 961);
+ assertEquals(m.a962, 962);
+ assertEquals(m.a963, 963);
+ assertEquals(m.a964, 964);
+ assertEquals(m.a965, 965);
+ assertEquals(m.a966, 966);
+ assertEquals(m.a967, 967);
+ assertEquals(m.a968, 968);
+ assertEquals(m.a969, 969);
+ assertEquals(m.a970, 970);
+ assertEquals(m.a971, 971);
+ assertEquals(m.a972, 972);
+ assertEquals(m.a973, 973);
+ assertEquals(m.a974, 974);
+ assertEquals(m.a975, 975);
+ assertEquals(m.a976, 976);
+ assertEquals(m.a977, 977);
+ assertEquals(m.a978, 978);
+ assertEquals(m.a979, 979);
+ assertEquals(m.a980, 980);
+ assertEquals(m.a981, 981);
+ assertEquals(m.a982, 982);
+ assertEquals(m.a983, 983);
+ assertEquals(m.a984, 984);
+ assertEquals(m.a985, 985);
+ assertEquals(m.a986, 986);
+ assertEquals(m.a987, 987);
+ assertEquals(m.a988, 988);
+ assertEquals(m.a989, 989);
+ assertEquals(m.a990, 990);
+ assertEquals(m.a991, 991);
+ assertEquals(m.a992, 992);
+ assertEquals(m.a993, 993);
+ assertEquals(m.a994, 994);
+ assertEquals(m.a995, 995);
+ assertEquals(m.a996, 996);
+ assertEquals(m.a997, 997);
+ assertEquals(m.a998, 998);
+ assertEquals(m.a999, 999);
+ assertEquals(m.a1000, 1000);
+ assertEquals(m.a1001, 1001);
+ assertEquals(m.a1002, 1002);
+ assertEquals(m.a1003, 1003);
+ assertEquals(m.a1004, 1004);
+ assertEquals(m.a1005, 1005);
+ assertEquals(m.a1006, 1006);
+ assertEquals(m.a1007, 1007);
+ assertEquals(m.a1008, 1008);
+ assertEquals(m.a1009, 1009);
+ assertEquals(m.a1010, 1010);
+ assertEquals(m.a1011, 1011);
+ assertEquals(m.a1012, 1012);
+ assertEquals(m.a1013, 1013);
+ assertEquals(m.a1014, 1014);
+ assertEquals(m.a1015, 1015);
+ assertEquals(m.a1016, 1016);
+ assertEquals(m.a1017, 1017);
+ assertEquals(m.a1018, 1018);
+ assertEquals(m.a1019, 1019);
+ assertEquals(m.a1020, 1020);
+ assertEquals(m.a1021, 1021);
+ assertEquals(m.a1022, 1022);
+ assertEquals(m.a1023, 1023);
+ assertEquals(m.a1024, 1024);
+ assertEquals(m.a1025, 1025);
+ assertEquals(m.a1026, 1026);
+ assertEquals(m.a1027, 1027);
+ assertEquals(m.a1028, 1028);
+ assertEquals(m.a1029, 1029);
+ assertEquals(m.a1030, 1030);
+ assertEquals(m.a1031, 1031);
+ assertEquals(m.a1032, 1032);
+ assertEquals(m.a1033, 1033);
+ assertEquals(m.a1034, 1034);
+ assertEquals(m.a1035, 1035);
+ assertEquals(m.a1036, 1036);
+ assertEquals(m.a1037, 1037);
+ assertEquals(m.a1038, 1038);
+ assertEquals(m.a1039, 1039);
+ assertEquals(m.a1040, 1040);
+ assertEquals(m.a1041, 1041);
+ assertEquals(m.a1042, 1042);
+ assertEquals(m.a1043, 1043);
+ assertEquals(m.a1044, 1044);
+ assertEquals(m.a1045, 1045);
+ assertEquals(m.a1046, 1046);
+ assertEquals(m.a1047, 1047);
+ assertEquals(m.a1048, 1048);
+ assertEquals(m.a1049, 1049);
+ assertEquals(m.a1050, 1050);
+ assertEquals(m.a1051, 1051);
+ assertEquals(m.a1052, 1052);
+ assertEquals(m.a1053, 1053);
+ assertEquals(m.a1054, 1054);
+ assertEquals(m.a1055, 1055);
+ assertEquals(m.a1056, 1056);
+ assertEquals(m.a1057, 1057);
+ assertEquals(m.a1058, 1058);
+ assertEquals(m.a1059, 1059);
+ assertEquals(m.a1060, 1060);
+ assertEquals(m.a1061, 1061);
+ assertEquals(m.a1062, 1062);
+ assertEquals(m.a1063, 1063);
+ assertEquals(m.a1064, 1064);
+ assertEquals(m.a1065, 1065);
+ assertEquals(m.a1066, 1066);
+ assertEquals(m.a1067, 1067);
+ assertEquals(m.a1068, 1068);
+ assertEquals(m.a1069, 1069);
+ assertEquals(m.a1070, 1070);
+ assertEquals(m.a1071, 1071);
+ assertEquals(m.a1072, 1072);
+ assertEquals(m.a1073, 1073);
+ assertEquals(m.a1074, 1074);
+ assertEquals(m.a1075, 1075);
+ assertEquals(m.a1076, 1076);
+ assertEquals(m.a1077, 1077);
+ assertEquals(m.a1078, 1078);
+ assertEquals(m.a1079, 1079);
+ assertEquals(m.a1080, 1080);
+ assertEquals(m.a1081, 1081);
+ assertEquals(m.a1082, 1082);
+ assertEquals(m.a1083, 1083);
+ assertEquals(m.a1084, 1084);
+ assertEquals(m.a1085, 1085);
+ assertEquals(m.a1086, 1086);
+ assertEquals(m.a1087, 1087);
+ assertEquals(m.a1088, 1088);
+ assertEquals(m.a1089, 1089);
+ assertEquals(m.a1090, 1090);
+ assertEquals(m.a1091, 1091);
+ assertEquals(m.a1092, 1092);
+ assertEquals(m.a1093, 1093);
+ assertEquals(m.a1094, 1094);
+ assertEquals(m.a1095, 1095);
+ assertEquals(m.a1096, 1096);
+ assertEquals(m.a1097, 1097);
+ assertEquals(m.a1098, 1098);
+ assertEquals(m.a1099, 1099);
+}
+verify(m1); // Uninitialized.
+verify(m1); // Premonomorphic.
+verify(m2); // Monomorphic.
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-large1.js b/deps/v8/test/mjsunit/harmony/modules-skip-large1.js
new file mode 100644
index 0000000000..e643df32a9
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-large1.js
@@ -0,0 +1,2204 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export let a0 = 0;
+export let a1 = 1;
+export let a2 = 2;
+export let a3 = 3;
+export let a4 = 4;
+export let a5 = 5;
+export let a6 = 6;
+export let a7 = 7;
+export let a8 = 8;
+export let a9 = 9;
+export let a10 = 10;
+export let a11 = 11;
+export let a12 = 12;
+export let a13 = 13;
+export let a14 = 14;
+export let a15 = 15;
+export let a16 = 16;
+export let a17 = 17;
+export let a18 = 18;
+export let a19 = 19;
+export let a20 = 20;
+export let a21 = 21;
+export let a22 = 22;
+export let a23 = 23;
+export let a24 = 24;
+export let a25 = 25;
+export let a26 = 26;
+export let a27 = 27;
+export let a28 = 28;
+export let a29 = 29;
+export let a30 = 30;
+export let a31 = 31;
+export let a32 = 32;
+export let a33 = 33;
+export let a34 = 34;
+export let a35 = 35;
+export let a36 = 36;
+export let a37 = 37;
+export let a38 = 38;
+export let a39 = 39;
+export let a40 = 40;
+export let a41 = 41;
+export let a42 = 42;
+export let a43 = 43;
+export let a44 = 44;
+export let a45 = 45;
+export let a46 = 46;
+export let a47 = 47;
+export let a48 = 48;
+export let a49 = 49;
+export let a50 = 50;
+export let a51 = 51;
+export let a52 = 52;
+export let a53 = 53;
+export let a54 = 54;
+export let a55 = 55;
+export let a56 = 56;
+export let a57 = 57;
+export let a58 = 58;
+export let a59 = 59;
+export let a60 = 60;
+export let a61 = 61;
+export let a62 = 62;
+export let a63 = 63;
+export let a64 = 64;
+export let a65 = 65;
+export let a66 = 66;
+export let a67 = 67;
+export let a68 = 68;
+export let a69 = 69;
+export let a70 = 70;
+export let a71 = 71;
+export let a72 = 72;
+export let a73 = 73;
+export let a74 = 74;
+export let a75 = 75;
+export let a76 = 76;
+export let a77 = 77;
+export let a78 = 78;
+export let a79 = 79;
+export let a80 = 80;
+export let a81 = 81;
+export let a82 = 82;
+export let a83 = 83;
+export let a84 = 84;
+export let a85 = 85;
+export let a86 = 86;
+export let a87 = 87;
+export let a88 = 88;
+export let a89 = 89;
+export let a90 = 90;
+export let a91 = 91;
+export let a92 = 92;
+export let a93 = 93;
+export let a94 = 94;
+export let a95 = 95;
+export let a96 = 96;
+export let a97 = 97;
+export let a98 = 98;
+export let a99 = 99;
+export let a100 = 100;
+export let a101 = 101;
+export let a102 = 102;
+export let a103 = 103;
+export let a104 = 104;
+export let a105 = 105;
+export let a106 = 106;
+export let a107 = 107;
+export let a108 = 108;
+export let a109 = 109;
+export let a110 = 110;
+export let a111 = 111;
+export let a112 = 112;
+export let a113 = 113;
+export let a114 = 114;
+export let a115 = 115;
+export let a116 = 116;
+export let a117 = 117;
+export let a118 = 118;
+export let a119 = 119;
+export let a120 = 120;
+export let a121 = 121;
+export let a122 = 122;
+export let a123 = 123;
+export let a124 = 124;
+export let a125 = 125;
+export let a126 = 126;
+export let a127 = 127;
+export let a128 = 128;
+export let a129 = 129;
+export let a130 = 130;
+export let a131 = 131;
+export let a132 = 132;
+export let a133 = 133;
+export let a134 = 134;
+export let a135 = 135;
+export let a136 = 136;
+export let a137 = 137;
+export let a138 = 138;
+export let a139 = 139;
+export let a140 = 140;
+export let a141 = 141;
+export let a142 = 142;
+export let a143 = 143;
+export let a144 = 144;
+export let a145 = 145;
+export let a146 = 146;
+export let a147 = 147;
+export let a148 = 148;
+export let a149 = 149;
+export let a150 = 150;
+export let a151 = 151;
+export let a152 = 152;
+export let a153 = 153;
+export let a154 = 154;
+export let a155 = 155;
+export let a156 = 156;
+export let a157 = 157;
+export let a158 = 158;
+export let a159 = 159;
+export let a160 = 160;
+export let a161 = 161;
+export let a162 = 162;
+export let a163 = 163;
+export let a164 = 164;
+export let a165 = 165;
+export let a166 = 166;
+export let a167 = 167;
+export let a168 = 168;
+export let a169 = 169;
+export let a170 = 170;
+export let a171 = 171;
+export let a172 = 172;
+export let a173 = 173;
+export let a174 = 174;
+export let a175 = 175;
+export let a176 = 176;
+export let a177 = 177;
+export let a178 = 178;
+export let a179 = 179;
+export let a180 = 180;
+export let a181 = 181;
+export let a182 = 182;
+export let a183 = 183;
+export let a184 = 184;
+export let a185 = 185;
+export let a186 = 186;
+export let a187 = 187;
+export let a188 = 188;
+export let a189 = 189;
+export let a190 = 190;
+export let a191 = 191;
+export let a192 = 192;
+export let a193 = 193;
+export let a194 = 194;
+export let a195 = 195;
+export let a196 = 196;
+export let a197 = 197;
+export let a198 = 198;
+export let a199 = 199;
+export let a200 = 200;
+export let a201 = 201;
+export let a202 = 202;
+export let a203 = 203;
+export let a204 = 204;
+export let a205 = 205;
+export let a206 = 206;
+export let a207 = 207;
+export let a208 = 208;
+export let a209 = 209;
+export let a210 = 210;
+export let a211 = 211;
+export let a212 = 212;
+export let a213 = 213;
+export let a214 = 214;
+export let a215 = 215;
+export let a216 = 216;
+export let a217 = 217;
+export let a218 = 218;
+export let a219 = 219;
+export let a220 = 220;
+export let a221 = 221;
+export let a222 = 222;
+export let a223 = 223;
+export let a224 = 224;
+export let a225 = 225;
+export let a226 = 226;
+export let a227 = 227;
+export let a228 = 228;
+export let a229 = 229;
+export let a230 = 230;
+export let a231 = 231;
+export let a232 = 232;
+export let a233 = 233;
+export let a234 = 234;
+export let a235 = 235;
+export let a236 = 236;
+export let a237 = 237;
+export let a238 = 238;
+export let a239 = 239;
+export let a240 = 240;
+export let a241 = 241;
+export let a242 = 242;
+export let a243 = 243;
+export let a244 = 244;
+export let a245 = 245;
+export let a246 = 246;
+export let a247 = 247;
+export let a248 = 248;
+export let a249 = 249;
+export let a250 = 250;
+export let a251 = 251;
+export let a252 = 252;
+export let a253 = 253;
+export let a254 = 254;
+export let a255 = 255;
+export let a256 = 256;
+export let a257 = 257;
+export let a258 = 258;
+export let a259 = 259;
+export let a260 = 260;
+export let a261 = 261;
+export let a262 = 262;
+export let a263 = 263;
+export let a264 = 264;
+export let a265 = 265;
+export let a266 = 266;
+export let a267 = 267;
+export let a268 = 268;
+export let a269 = 269;
+export let a270 = 270;
+export let a271 = 271;
+export let a272 = 272;
+export let a273 = 273;
+export let a274 = 274;
+export let a275 = 275;
+export let a276 = 276;
+export let a277 = 277;
+export let a278 = 278;
+export let a279 = 279;
+export let a280 = 280;
+export let a281 = 281;
+export let a282 = 282;
+export let a283 = 283;
+export let a284 = 284;
+export let a285 = 285;
+export let a286 = 286;
+export let a287 = 287;
+export let a288 = 288;
+export let a289 = 289;
+export let a290 = 290;
+export let a291 = 291;
+export let a292 = 292;
+export let a293 = 293;
+export let a294 = 294;
+export let a295 = 295;
+export let a296 = 296;
+export let a297 = 297;
+export let a298 = 298;
+export let a299 = 299;
+export let a300 = 300;
+export let a301 = 301;
+export let a302 = 302;
+export let a303 = 303;
+export let a304 = 304;
+export let a305 = 305;
+export let a306 = 306;
+export let a307 = 307;
+export let a308 = 308;
+export let a309 = 309;
+export let a310 = 310;
+export let a311 = 311;
+export let a312 = 312;
+export let a313 = 313;
+export let a314 = 314;
+export let a315 = 315;
+export let a316 = 316;
+export let a317 = 317;
+export let a318 = 318;
+export let a319 = 319;
+export let a320 = 320;
+export let a321 = 321;
+export let a322 = 322;
+export let a323 = 323;
+export let a324 = 324;
+export let a325 = 325;
+export let a326 = 326;
+export let a327 = 327;
+export let a328 = 328;
+export let a329 = 329;
+export let a330 = 330;
+export let a331 = 331;
+export let a332 = 332;
+export let a333 = 333;
+export let a334 = 334;
+export let a335 = 335;
+export let a336 = 336;
+export let a337 = 337;
+export let a338 = 338;
+export let a339 = 339;
+export let a340 = 340;
+export let a341 = 341;
+export let a342 = 342;
+export let a343 = 343;
+export let a344 = 344;
+export let a345 = 345;
+export let a346 = 346;
+export let a347 = 347;
+export let a348 = 348;
+export let a349 = 349;
+export let a350 = 350;
+export let a351 = 351;
+export let a352 = 352;
+export let a353 = 353;
+export let a354 = 354;
+export let a355 = 355;
+export let a356 = 356;
+export let a357 = 357;
+export let a358 = 358;
+export let a359 = 359;
+export let a360 = 360;
+export let a361 = 361;
+export let a362 = 362;
+export let a363 = 363;
+export let a364 = 364;
+export let a365 = 365;
+export let a366 = 366;
+export let a367 = 367;
+export let a368 = 368;
+export let a369 = 369;
+export let a370 = 370;
+export let a371 = 371;
+export let a372 = 372;
+export let a373 = 373;
+export let a374 = 374;
+export let a375 = 375;
+export let a376 = 376;
+export let a377 = 377;
+export let a378 = 378;
+export let a379 = 379;
+export let a380 = 380;
+export let a381 = 381;
+export let a382 = 382;
+export let a383 = 383;
+export let a384 = 384;
+export let a385 = 385;
+export let a386 = 386;
+export let a387 = 387;
+export let a388 = 388;
+export let a389 = 389;
+export let a390 = 390;
+export let a391 = 391;
+export let a392 = 392;
+export let a393 = 393;
+export let a394 = 394;
+export let a395 = 395;
+export let a396 = 396;
+export let a397 = 397;
+export let a398 = 398;
+export let a399 = 399;
+export let a400 = 400;
+export let a401 = 401;
+export let a402 = 402;
+export let a403 = 403;
+export let a404 = 404;
+export let a405 = 405;
+export let a406 = 406;
+export let a407 = 407;
+export let a408 = 408;
+export let a409 = 409;
+export let a410 = 410;
+export let a411 = 411;
+export let a412 = 412;
+export let a413 = 413;
+export let a414 = 414;
+export let a415 = 415;
+export let a416 = 416;
+export let a417 = 417;
+export let a418 = 418;
+export let a419 = 419;
+export let a420 = 420;
+export let a421 = 421;
+export let a422 = 422;
+export let a423 = 423;
+export let a424 = 424;
+export let a425 = 425;
+export let a426 = 426;
+export let a427 = 427;
+export let a428 = 428;
+export let a429 = 429;
+export let a430 = 430;
+export let a431 = 431;
+export let a432 = 432;
+export let a433 = 433;
+export let a434 = 434;
+export let a435 = 435;
+export let a436 = 436;
+export let a437 = 437;
+export let a438 = 438;
+export let a439 = 439;
+export let a440 = 440;
+export let a441 = 441;
+export let a442 = 442;
+export let a443 = 443;
+export let a444 = 444;
+export let a445 = 445;
+export let a446 = 446;
+export let a447 = 447;
+export let a448 = 448;
+export let a449 = 449;
+export let a450 = 450;
+export let a451 = 451;
+export let a452 = 452;
+export let a453 = 453;
+export let a454 = 454;
+export let a455 = 455;
+export let a456 = 456;
+export let a457 = 457;
+export let a458 = 458;
+export let a459 = 459;
+export let a460 = 460;
+export let a461 = 461;
+export let a462 = 462;
+export let a463 = 463;
+export let a464 = 464;
+export let a465 = 465;
+export let a466 = 466;
+export let a467 = 467;
+export let a468 = 468;
+export let a469 = 469;
+export let a470 = 470;
+export let a471 = 471;
+export let a472 = 472;
+export let a473 = 473;
+export let a474 = 474;
+export let a475 = 475;
+export let a476 = 476;
+export let a477 = 477;
+export let a478 = 478;
+export let a479 = 479;
+export let a480 = 480;
+export let a481 = 481;
+export let a482 = 482;
+export let a483 = 483;
+export let a484 = 484;
+export let a485 = 485;
+export let a486 = 486;
+export let a487 = 487;
+export let a488 = 488;
+export let a489 = 489;
+export let a490 = 490;
+export let a491 = 491;
+export let a492 = 492;
+export let a493 = 493;
+export let a494 = 494;
+export let a495 = 495;
+export let a496 = 496;
+export let a497 = 497;
+export let a498 = 498;
+export let a499 = 499;
+export let a500 = 500;
+export let a501 = 501;
+export let a502 = 502;
+export let a503 = 503;
+export let a504 = 504;
+export let a505 = 505;
+export let a506 = 506;
+export let a507 = 507;
+export let a508 = 508;
+export let a509 = 509;
+export let a510 = 510;
+export let a511 = 511;
+export let a512 = 512;
+export let a513 = 513;
+export let a514 = 514;
+export let a515 = 515;
+export let a516 = 516;
+export let a517 = 517;
+export let a518 = 518;
+export let a519 = 519;
+export let a520 = 520;
+export let a521 = 521;
+export let a522 = 522;
+export let a523 = 523;
+export let a524 = 524;
+export let a525 = 525;
+export let a526 = 526;
+export let a527 = 527;
+export let a528 = 528;
+export let a529 = 529;
+export let a530 = 530;
+export let a531 = 531;
+export let a532 = 532;
+export let a533 = 533;
+export let a534 = 534;
+export let a535 = 535;
+export let a536 = 536;
+export let a537 = 537;
+export let a538 = 538;
+export let a539 = 539;
+export let a540 = 540;
+export let a541 = 541;
+export let a542 = 542;
+export let a543 = 543;
+export let a544 = 544;
+export let a545 = 545;
+export let a546 = 546;
+export let a547 = 547;
+export let a548 = 548;
+export let a549 = 549;
+export let a550 = 550;
+export let a551 = 551;
+export let a552 = 552;
+export let a553 = 553;
+export let a554 = 554;
+export let a555 = 555;
+export let a556 = 556;
+export let a557 = 557;
+export let a558 = 558;
+export let a559 = 559;
+export let a560 = 560;
+export let a561 = 561;
+export let a562 = 562;
+export let a563 = 563;
+export let a564 = 564;
+export let a565 = 565;
+export let a566 = 566;
+export let a567 = 567;
+export let a568 = 568;
+export let a569 = 569;
+export let a570 = 570;
+export let a571 = 571;
+export let a572 = 572;
+export let a573 = 573;
+export let a574 = 574;
+export let a575 = 575;
+export let a576 = 576;
+export let a577 = 577;
+export let a578 = 578;
+export let a579 = 579;
+export let a580 = 580;
+export let a581 = 581;
+export let a582 = 582;
+export let a583 = 583;
+export let a584 = 584;
+export let a585 = 585;
+export let a586 = 586;
+export let a587 = 587;
+export let a588 = 588;
+export let a589 = 589;
+export let a590 = 590;
+export let a591 = 591;
+export let a592 = 592;
+export let a593 = 593;
+export let a594 = 594;
+export let a595 = 595;
+export let a596 = 596;
+export let a597 = 597;
+export let a598 = 598;
+export let a599 = 599;
+export let a600 = 600;
+export let a601 = 601;
+export let a602 = 602;
+export let a603 = 603;
+export let a604 = 604;
+export let a605 = 605;
+export let a606 = 606;
+export let a607 = 607;
+export let a608 = 608;
+export let a609 = 609;
+export let a610 = 610;
+export let a611 = 611;
+export let a612 = 612;
+export let a613 = 613;
+export let a614 = 614;
+export let a615 = 615;
+export let a616 = 616;
+export let a617 = 617;
+export let a618 = 618;
+export let a619 = 619;
+export let a620 = 620;
+export let a621 = 621;
+export let a622 = 622;
+export let a623 = 623;
+export let a624 = 624;
+export let a625 = 625;
+export let a626 = 626;
+export let a627 = 627;
+export let a628 = 628;
+export let a629 = 629;
+export let a630 = 630;
+export let a631 = 631;
+export let a632 = 632;
+export let a633 = 633;
+export let a634 = 634;
+export let a635 = 635;
+export let a636 = 636;
+export let a637 = 637;
+export let a638 = 638;
+export let a639 = 639;
+export let a640 = 640;
+export let a641 = 641;
+export let a642 = 642;
+export let a643 = 643;
+export let a644 = 644;
+export let a645 = 645;
+export let a646 = 646;
+export let a647 = 647;
+export let a648 = 648;
+export let a649 = 649;
+export let a650 = 650;
+export let a651 = 651;
+export let a652 = 652;
+export let a653 = 653;
+export let a654 = 654;
+export let a655 = 655;
+export let a656 = 656;
+export let a657 = 657;
+export let a658 = 658;
+export let a659 = 659;
+export let a660 = 660;
+export let a661 = 661;
+export let a662 = 662;
+export let a663 = 663;
+export let a664 = 664;
+export let a665 = 665;
+export let a666 = 666;
+export let a667 = 667;
+export let a668 = 668;
+export let a669 = 669;
+export let a670 = 670;
+export let a671 = 671;
+export let a672 = 672;
+export let a673 = 673;
+export let a674 = 674;
+export let a675 = 675;
+export let a676 = 676;
+export let a677 = 677;
+export let a678 = 678;
+export let a679 = 679;
+export let a680 = 680;
+export let a681 = 681;
+export let a682 = 682;
+export let a683 = 683;
+export let a684 = 684;
+export let a685 = 685;
+export let a686 = 686;
+export let a687 = 687;
+export let a688 = 688;
+export let a689 = 689;
+export let a690 = 690;
+export let a691 = 691;
+export let a692 = 692;
+export let a693 = 693;
+export let a694 = 694;
+export let a695 = 695;
+export let a696 = 696;
+export let a697 = 697;
+export let a698 = 698;
+export let a699 = 699;
+export let a700 = 700;
+export let a701 = 701;
+export let a702 = 702;
+export let a703 = 703;
+export let a704 = 704;
+export let a705 = 705;
+export let a706 = 706;
+export let a707 = 707;
+export let a708 = 708;
+export let a709 = 709;
+export let a710 = 710;
+export let a711 = 711;
+export let a712 = 712;
+export let a713 = 713;
+export let a714 = 714;
+export let a715 = 715;
+export let a716 = 716;
+export let a717 = 717;
+export let a718 = 718;
+export let a719 = 719;
+export let a720 = 720;
+export let a721 = 721;
+export let a722 = 722;
+export let a723 = 723;
+export let a724 = 724;
+export let a725 = 725;
+export let a726 = 726;
+export let a727 = 727;
+export let a728 = 728;
+export let a729 = 729;
+export let a730 = 730;
+export let a731 = 731;
+export let a732 = 732;
+export let a733 = 733;
+export let a734 = 734;
+export let a735 = 735;
+export let a736 = 736;
+export let a737 = 737;
+export let a738 = 738;
+export let a739 = 739;
+export let a740 = 740;
+export let a741 = 741;
+export let a742 = 742;
+export let a743 = 743;
+export let a744 = 744;
+export let a745 = 745;
+export let a746 = 746;
+export let a747 = 747;
+export let a748 = 748;
+export let a749 = 749;
+export let a750 = 750;
+export let a751 = 751;
+export let a752 = 752;
+export let a753 = 753;
+export let a754 = 754;
+export let a755 = 755;
+export let a756 = 756;
+export let a757 = 757;
+export let a758 = 758;
+export let a759 = 759;
+export let a760 = 760;
+export let a761 = 761;
+export let a762 = 762;
+export let a763 = 763;
+export let a764 = 764;
+export let a765 = 765;
+export let a766 = 766;
+export let a767 = 767;
+export let a768 = 768;
+export let a769 = 769;
+export let a770 = 770;
+export let a771 = 771;
+export let a772 = 772;
+export let a773 = 773;
+export let a774 = 774;
+export let a775 = 775;
+export let a776 = 776;
+export let a777 = 777;
+export let a778 = 778;
+export let a779 = 779;
+export let a780 = 780;
+export let a781 = 781;
+export let a782 = 782;
+export let a783 = 783;
+export let a784 = 784;
+export let a785 = 785;
+export let a786 = 786;
+export let a787 = 787;
+export let a788 = 788;
+export let a789 = 789;
+export let a790 = 790;
+export let a791 = 791;
+export let a792 = 792;
+export let a793 = 793;
+export let a794 = 794;
+export let a795 = 795;
+export let a796 = 796;
+export let a797 = 797;
+export let a798 = 798;
+export let a799 = 799;
+export let a800 = 800;
+export let a801 = 801;
+export let a802 = 802;
+export let a803 = 803;
+export let a804 = 804;
+export let a805 = 805;
+export let a806 = 806;
+export let a807 = 807;
+export let a808 = 808;
+export let a809 = 809;
+export let a810 = 810;
+export let a811 = 811;
+export let a812 = 812;
+export let a813 = 813;
+export let a814 = 814;
+export let a815 = 815;
+export let a816 = 816;
+export let a817 = 817;
+export let a818 = 818;
+export let a819 = 819;
+export let a820 = 820;
+export let a821 = 821;
+export let a822 = 822;
+export let a823 = 823;
+export let a824 = 824;
+export let a825 = 825;
+export let a826 = 826;
+export let a827 = 827;
+export let a828 = 828;
+export let a829 = 829;
+export let a830 = 830;
+export let a831 = 831;
+export let a832 = 832;
+export let a833 = 833;
+export let a834 = 834;
+export let a835 = 835;
+export let a836 = 836;
+export let a837 = 837;
+export let a838 = 838;
+export let a839 = 839;
+export let a840 = 840;
+export let a841 = 841;
+export let a842 = 842;
+export let a843 = 843;
+export let a844 = 844;
+export let a845 = 845;
+export let a846 = 846;
+export let a847 = 847;
+export let a848 = 848;
+export let a849 = 849;
+export let a850 = 850;
+export let a851 = 851;
+export let a852 = 852;
+export let a853 = 853;
+export let a854 = 854;
+export let a855 = 855;
+export let a856 = 856;
+export let a857 = 857;
+export let a858 = 858;
+export let a859 = 859;
+export let a860 = 860;
+export let a861 = 861;
+export let a862 = 862;
+export let a863 = 863;
+export let a864 = 864;
+export let a865 = 865;
+export let a866 = 866;
+export let a867 = 867;
+export let a868 = 868;
+export let a869 = 869;
+export let a870 = 870;
+export let a871 = 871;
+export let a872 = 872;
+export let a873 = 873;
+export let a874 = 874;
+export let a875 = 875;
+export let a876 = 876;
+export let a877 = 877;
+export let a878 = 878;
+export let a879 = 879;
+export let a880 = 880;
+export let a881 = 881;
+export let a882 = 882;
+export let a883 = 883;
+export let a884 = 884;
+export let a885 = 885;
+export let a886 = 886;
+export let a887 = 887;
+export let a888 = 888;
+export let a889 = 889;
+export let a890 = 890;
+export let a891 = 891;
+export let a892 = 892;
+export let a893 = 893;
+export let a894 = 894;
+export let a895 = 895;
+export let a896 = 896;
+export let a897 = 897;
+export let a898 = 898;
+export let a899 = 899;
+export let a900 = 900;
+export let a901 = 901;
+export let a902 = 902;
+export let a903 = 903;
+export let a904 = 904;
+export let a905 = 905;
+export let a906 = 906;
+export let a907 = 907;
+export let a908 = 908;
+export let a909 = 909;
+export let a910 = 910;
+export let a911 = 911;
+export let a912 = 912;
+export let a913 = 913;
+export let a914 = 914;
+export let a915 = 915;
+export let a916 = 916;
+export let a917 = 917;
+export let a918 = 918;
+export let a919 = 919;
+export let a920 = 920;
+export let a921 = 921;
+export let a922 = 922;
+export let a923 = 923;
+export let a924 = 924;
+export let a925 = 925;
+export let a926 = 926;
+export let a927 = 927;
+export let a928 = 928;
+export let a929 = 929;
+export let a930 = 930;
+export let a931 = 931;
+export let a932 = 932;
+export let a933 = 933;
+export let a934 = 934;
+export let a935 = 935;
+export let a936 = 936;
+export let a937 = 937;
+export let a938 = 938;
+export let a939 = 939;
+export let a940 = 940;
+export let a941 = 941;
+export let a942 = 942;
+export let a943 = 943;
+export let a944 = 944;
+export let a945 = 945;
+export let a946 = 946;
+export let a947 = 947;
+export let a948 = 948;
+export let a949 = 949;
+export let a950 = 950;
+export let a951 = 951;
+export let a952 = 952;
+export let a953 = 953;
+export let a954 = 954;
+export let a955 = 955;
+export let a956 = 956;
+export let a957 = 957;
+export let a958 = 958;
+export let a959 = 959;
+export let a960 = 960;
+export let a961 = 961;
+export let a962 = 962;
+export let a963 = 963;
+export let a964 = 964;
+export let a965 = 965;
+export let a966 = 966;
+export let a967 = 967;
+export let a968 = 968;
+export let a969 = 969;
+export let a970 = 970;
+export let a971 = 971;
+export let a972 = 972;
+export let a973 = 973;
+export let a974 = 974;
+export let a975 = 975;
+export let a976 = 976;
+export let a977 = 977;
+export let a978 = 978;
+export let a979 = 979;
+export let a980 = 980;
+export let a981 = 981;
+export let a982 = 982;
+export let a983 = 983;
+export let a984 = 984;
+export let a985 = 985;
+export let a986 = 986;
+export let a987 = 987;
+export let a988 = 988;
+export let a989 = 989;
+export let a990 = 990;
+export let a991 = 991;
+export let a992 = 992;
+export let a993 = 993;
+export let a994 = 994;
+export let a995 = 995;
+export let a996 = 996;
+export let a997 = 997;
+export let a998 = 998;
+export let a999 = 999;
+export let a1000 = 1000;
+export let a1001 = 1001;
+export let a1002 = 1002;
+export let a1003 = 1003;
+export let a1004 = 1004;
+export let a1005 = 1005;
+export let a1006 = 1006;
+export let a1007 = 1007;
+export let a1008 = 1008;
+export let a1009 = 1009;
+export let a1010 = 1010;
+export let a1011 = 1011;
+export let a1012 = 1012;
+export let a1013 = 1013;
+export let a1014 = 1014;
+export let a1015 = 1015;
+export let a1016 = 1016;
+export let a1017 = 1017;
+export let a1018 = 1018;
+export let a1019 = 1019;
+export let a1020 = 1020;
+export let a1021 = 1021;
+export let a1022 = 1022;
+export let a1023 = 1023;
+export let a1024 = 1024;
+export let a1025 = 1025;
+export let a1026 = 1026;
+export let a1027 = 1027;
+export let a1028 = 1028;
+export let a1029 = 1029;
+export let a1030 = 1030;
+export let a1031 = 1031;
+export let a1032 = 1032;
+export let a1033 = 1033;
+export let a1034 = 1034;
+export let a1035 = 1035;
+export let a1036 = 1036;
+export let a1037 = 1037;
+export let a1038 = 1038;
+export let a1039 = 1039;
+export let a1040 = 1040;
+export let a1041 = 1041;
+export let a1042 = 1042;
+export let a1043 = 1043;
+export let a1044 = 1044;
+export let a1045 = 1045;
+export let a1046 = 1046;
+export let a1047 = 1047;
+export let a1048 = 1048;
+export let a1049 = 1049;
+export let a1050 = 1050;
+export let a1051 = 1051;
+export let a1052 = 1052;
+export let a1053 = 1053;
+export let a1054 = 1054;
+export let a1055 = 1055;
+export let a1056 = 1056;
+export let a1057 = 1057;
+export let a1058 = 1058;
+export let a1059 = 1059;
+export let a1060 = 1060;
+export let a1061 = 1061;
+export let a1062 = 1062;
+export let a1063 = 1063;
+export let a1064 = 1064;
+export let a1065 = 1065;
+export let a1066 = 1066;
+export let a1067 = 1067;
+export let a1068 = 1068;
+export let a1069 = 1069;
+export let a1070 = 1070;
+export let a1071 = 1071;
+export let a1072 = 1072;
+export let a1073 = 1073;
+export let a1074 = 1074;
+export let a1075 = 1075;
+export let a1076 = 1076;
+export let a1077 = 1077;
+export let a1078 = 1078;
+export let a1079 = 1079;
+export let a1080 = 1080;
+export let a1081 = 1081;
+export let a1082 = 1082;
+export let a1083 = 1083;
+export let a1084 = 1084;
+export let a1085 = 1085;
+export let a1086 = 1086;
+export let a1087 = 1087;
+export let a1088 = 1088;
+export let a1089 = 1089;
+export let a1090 = 1090;
+export let a1091 = 1091;
+export let a1092 = 1092;
+export let a1093 = 1093;
+export let a1094 = 1094;
+export let a1095 = 1095;
+export let a1096 = 1096;
+export let a1097 = 1097;
+export let a1098 = 1098;
+export let a1099 = 1099;
+export let a1100 = 1100;
+export let a1101 = 1101;
+export let a1102 = 1102;
+export let a1103 = 1103;
+export let a1104 = 1104;
+export let a1105 = 1105;
+export let a1106 = 1106;
+export let a1107 = 1107;
+export let a1108 = 1108;
+export let a1109 = 1109;
+export let a1110 = 1110;
+export let a1111 = 1111;
+export let a1112 = 1112;
+export let a1113 = 1113;
+export let a1114 = 1114;
+export let a1115 = 1115;
+export let a1116 = 1116;
+export let a1117 = 1117;
+export let a1118 = 1118;
+export let a1119 = 1119;
+export let a1120 = 1120;
+export let a1121 = 1121;
+export let a1122 = 1122;
+export let a1123 = 1123;
+export let a1124 = 1124;
+export let a1125 = 1125;
+export let a1126 = 1126;
+export let a1127 = 1127;
+export let a1128 = 1128;
+export let a1129 = 1129;
+export let a1130 = 1130;
+export let a1131 = 1131;
+export let a1132 = 1132;
+export let a1133 = 1133;
+export let a1134 = 1134;
+export let a1135 = 1135;
+export let a1136 = 1136;
+export let a1137 = 1137;
+export let a1138 = 1138;
+export let a1139 = 1139;
+export let a1140 = 1140;
+export let a1141 = 1141;
+export let a1142 = 1142;
+export let a1143 = 1143;
+export let a1144 = 1144;
+export let a1145 = 1145;
+export let a1146 = 1146;
+export let a1147 = 1147;
+export let a1148 = 1148;
+export let a1149 = 1149;
+export let a1150 = 1150;
+export let a1151 = 1151;
+export let a1152 = 1152;
+export let a1153 = 1153;
+export let a1154 = 1154;
+export let a1155 = 1155;
+export let a1156 = 1156;
+export let a1157 = 1157;
+export let a1158 = 1158;
+export let a1159 = 1159;
+export let a1160 = 1160;
+export let a1161 = 1161;
+export let a1162 = 1162;
+export let a1163 = 1163;
+export let a1164 = 1164;
+export let a1165 = 1165;
+export let a1166 = 1166;
+export let a1167 = 1167;
+export let a1168 = 1168;
+export let a1169 = 1169;
+export let a1170 = 1170;
+export let a1171 = 1171;
+export let a1172 = 1172;
+export let a1173 = 1173;
+export let a1174 = 1174;
+export let a1175 = 1175;
+export let a1176 = 1176;
+export let a1177 = 1177;
+export let a1178 = 1178;
+export let a1179 = 1179;
+export let a1180 = 1180;
+export let a1181 = 1181;
+export let a1182 = 1182;
+export let a1183 = 1183;
+export let a1184 = 1184;
+export let a1185 = 1185;
+export let a1186 = 1186;
+export let a1187 = 1187;
+export let a1188 = 1188;
+export let a1189 = 1189;
+export let a1190 = 1190;
+export let a1191 = 1191;
+export let a1192 = 1192;
+export let a1193 = 1193;
+export let a1194 = 1194;
+export let a1195 = 1195;
+export let a1196 = 1196;
+export let a1197 = 1197;
+export let a1198 = 1198;
+export let a1199 = 1199;
+export let a1200 = 1200;
+export let a1201 = 1201;
+export let a1202 = 1202;
+export let a1203 = 1203;
+export let a1204 = 1204;
+export let a1205 = 1205;
+export let a1206 = 1206;
+export let a1207 = 1207;
+export let a1208 = 1208;
+export let a1209 = 1209;
+export let a1210 = 1210;
+export let a1211 = 1211;
+export let a1212 = 1212;
+export let a1213 = 1213;
+export let a1214 = 1214;
+export let a1215 = 1215;
+export let a1216 = 1216;
+export let a1217 = 1217;
+export let a1218 = 1218;
+export let a1219 = 1219;
+export let a1220 = 1220;
+export let a1221 = 1221;
+export let a1222 = 1222;
+export let a1223 = 1223;
+export let a1224 = 1224;
+export let a1225 = 1225;
+export let a1226 = 1226;
+export let a1227 = 1227;
+export let a1228 = 1228;
+export let a1229 = 1229;
+export let a1230 = 1230;
+export let a1231 = 1231;
+export let a1232 = 1232;
+export let a1233 = 1233;
+export let a1234 = 1234;
+export let a1235 = 1235;
+export let a1236 = 1236;
+export let a1237 = 1237;
+export let a1238 = 1238;
+export let a1239 = 1239;
+export let a1240 = 1240;
+export let a1241 = 1241;
+export let a1242 = 1242;
+export let a1243 = 1243;
+export let a1244 = 1244;
+export let a1245 = 1245;
+export let a1246 = 1246;
+export let a1247 = 1247;
+export let a1248 = 1248;
+export let a1249 = 1249;
+export let a1250 = 1250;
+export let a1251 = 1251;
+export let a1252 = 1252;
+export let a1253 = 1253;
+export let a1254 = 1254;
+export let a1255 = 1255;
+export let a1256 = 1256;
+export let a1257 = 1257;
+export let a1258 = 1258;
+export let a1259 = 1259;
+export let a1260 = 1260;
+export let a1261 = 1261;
+export let a1262 = 1262;
+export let a1263 = 1263;
+export let a1264 = 1264;
+export let a1265 = 1265;
+export let a1266 = 1266;
+export let a1267 = 1267;
+export let a1268 = 1268;
+export let a1269 = 1269;
+export let a1270 = 1270;
+export let a1271 = 1271;
+export let a1272 = 1272;
+export let a1273 = 1273;
+export let a1274 = 1274;
+export let a1275 = 1275;
+export let a1276 = 1276;
+export let a1277 = 1277;
+export let a1278 = 1278;
+export let a1279 = 1279;
+export let a1280 = 1280;
+export let a1281 = 1281;
+export let a1282 = 1282;
+export let a1283 = 1283;
+export let a1284 = 1284;
+export let a1285 = 1285;
+export let a1286 = 1286;
+export let a1287 = 1287;
+export let a1288 = 1288;
+export let a1289 = 1289;
+export let a1290 = 1290;
+export let a1291 = 1291;
+export let a1292 = 1292;
+export let a1293 = 1293;
+export let a1294 = 1294;
+export let a1295 = 1295;
+export let a1296 = 1296;
+export let a1297 = 1297;
+export let a1298 = 1298;
+export let a1299 = 1299;
+export let a1300 = 1300;
+export let a1301 = 1301;
+export let a1302 = 1302;
+export let a1303 = 1303;
+export let a1304 = 1304;
+export let a1305 = 1305;
+export let a1306 = 1306;
+export let a1307 = 1307;
+export let a1308 = 1308;
+export let a1309 = 1309;
+export let a1310 = 1310;
+export let a1311 = 1311;
+export let a1312 = 1312;
+export let a1313 = 1313;
+export let a1314 = 1314;
+export let a1315 = 1315;
+export let a1316 = 1316;
+export let a1317 = 1317;
+export let a1318 = 1318;
+export let a1319 = 1319;
+export let a1320 = 1320;
+export let a1321 = 1321;
+export let a1322 = 1322;
+export let a1323 = 1323;
+export let a1324 = 1324;
+export let a1325 = 1325;
+export let a1326 = 1326;
+export let a1327 = 1327;
+export let a1328 = 1328;
+export let a1329 = 1329;
+export let a1330 = 1330;
+export let a1331 = 1331;
+export let a1332 = 1332;
+export let a1333 = 1333;
+export let a1334 = 1334;
+export let a1335 = 1335;
+export let a1336 = 1336;
+export let a1337 = 1337;
+export let a1338 = 1338;
+export let a1339 = 1339;
+export let a1340 = 1340;
+export let a1341 = 1341;
+export let a1342 = 1342;
+export let a1343 = 1343;
+export let a1344 = 1344;
+export let a1345 = 1345;
+export let a1346 = 1346;
+export let a1347 = 1347;
+export let a1348 = 1348;
+export let a1349 = 1349;
+export let a1350 = 1350;
+export let a1351 = 1351;
+export let a1352 = 1352;
+export let a1353 = 1353;
+export let a1354 = 1354;
+export let a1355 = 1355;
+export let a1356 = 1356;
+export let a1357 = 1357;
+export let a1358 = 1358;
+export let a1359 = 1359;
+export let a1360 = 1360;
+export let a1361 = 1361;
+export let a1362 = 1362;
+export let a1363 = 1363;
+export let a1364 = 1364;
+export let a1365 = 1365;
+export let a1366 = 1366;
+export let a1367 = 1367;
+export let a1368 = 1368;
+export let a1369 = 1369;
+export let a1370 = 1370;
+export let a1371 = 1371;
+export let a1372 = 1372;
+export let a1373 = 1373;
+export let a1374 = 1374;
+export let a1375 = 1375;
+export let a1376 = 1376;
+export let a1377 = 1377;
+export let a1378 = 1378;
+export let a1379 = 1379;
+export let a1380 = 1380;
+export let a1381 = 1381;
+export let a1382 = 1382;
+export let a1383 = 1383;
+export let a1384 = 1384;
+export let a1385 = 1385;
+export let a1386 = 1386;
+export let a1387 = 1387;
+export let a1388 = 1388;
+export let a1389 = 1389;
+export let a1390 = 1390;
+export let a1391 = 1391;
+export let a1392 = 1392;
+export let a1393 = 1393;
+export let a1394 = 1394;
+export let a1395 = 1395;
+export let a1396 = 1396;
+export let a1397 = 1397;
+export let a1398 = 1398;
+export let a1399 = 1399;
+export let a1400 = 1400;
+export let a1401 = 1401;
+export let a1402 = 1402;
+export let a1403 = 1403;
+export let a1404 = 1404;
+export let a1405 = 1405;
+export let a1406 = 1406;
+export let a1407 = 1407;
+export let a1408 = 1408;
+export let a1409 = 1409;
+export let a1410 = 1410;
+export let a1411 = 1411;
+export let a1412 = 1412;
+export let a1413 = 1413;
+export let a1414 = 1414;
+export let a1415 = 1415;
+export let a1416 = 1416;
+export let a1417 = 1417;
+export let a1418 = 1418;
+export let a1419 = 1419;
+export let a1420 = 1420;
+export let a1421 = 1421;
+export let a1422 = 1422;
+export let a1423 = 1423;
+export let a1424 = 1424;
+export let a1425 = 1425;
+export let a1426 = 1426;
+export let a1427 = 1427;
+export let a1428 = 1428;
+export let a1429 = 1429;
+export let a1430 = 1430;
+export let a1431 = 1431;
+export let a1432 = 1432;
+export let a1433 = 1433;
+export let a1434 = 1434;
+export let a1435 = 1435;
+export let a1436 = 1436;
+export let a1437 = 1437;
+export let a1438 = 1438;
+export let a1439 = 1439;
+export let a1440 = 1440;
+export let a1441 = 1441;
+export let a1442 = 1442;
+export let a1443 = 1443;
+export let a1444 = 1444;
+export let a1445 = 1445;
+export let a1446 = 1446;
+export let a1447 = 1447;
+export let a1448 = 1448;
+export let a1449 = 1449;
+export let a1450 = 1450;
+export let a1451 = 1451;
+export let a1452 = 1452;
+export let a1453 = 1453;
+export let a1454 = 1454;
+export let a1455 = 1455;
+export let a1456 = 1456;
+export let a1457 = 1457;
+export let a1458 = 1458;
+export let a1459 = 1459;
+export let a1460 = 1460;
+export let a1461 = 1461;
+export let a1462 = 1462;
+export let a1463 = 1463;
+export let a1464 = 1464;
+export let a1465 = 1465;
+export let a1466 = 1466;
+export let a1467 = 1467;
+export let a1468 = 1468;
+export let a1469 = 1469;
+export let a1470 = 1470;
+export let a1471 = 1471;
+export let a1472 = 1472;
+export let a1473 = 1473;
+export let a1474 = 1474;
+export let a1475 = 1475;
+export let a1476 = 1476;
+export let a1477 = 1477;
+export let a1478 = 1478;
+export let a1479 = 1479;
+export let a1480 = 1480;
+export let a1481 = 1481;
+export let a1482 = 1482;
+export let a1483 = 1483;
+export let a1484 = 1484;
+export let a1485 = 1485;
+export let a1486 = 1486;
+export let a1487 = 1487;
+export let a1488 = 1488;
+export let a1489 = 1489;
+export let a1490 = 1490;
+export let a1491 = 1491;
+export let a1492 = 1492;
+export let a1493 = 1493;
+export let a1494 = 1494;
+export let a1495 = 1495;
+export let a1496 = 1496;
+export let a1497 = 1497;
+export let a1498 = 1498;
+export let a1499 = 1499;
+export let a1500 = 1500;
+export let a1501 = 1501;
+export let a1502 = 1502;
+export let a1503 = 1503;
+export let a1504 = 1504;
+export let a1505 = 1505;
+export let a1506 = 1506;
+export let a1507 = 1507;
+export let a1508 = 1508;
+export let a1509 = 1509;
+export let a1510 = 1510;
+export let a1511 = 1511;
+export let a1512 = 1512;
+export let a1513 = 1513;
+export let a1514 = 1514;
+export let a1515 = 1515;
+export let a1516 = 1516;
+export let a1517 = 1517;
+export let a1518 = 1518;
+export let a1519 = 1519;
+export let a1520 = 1520;
+export let a1521 = 1521;
+export let a1522 = 1522;
+export let a1523 = 1523;
+export let a1524 = 1524;
+export let a1525 = 1525;
+export let a1526 = 1526;
+export let a1527 = 1527;
+export let a1528 = 1528;
+export let a1529 = 1529;
+export let a1530 = 1530;
+export let a1531 = 1531;
+export let a1532 = 1532;
+export let a1533 = 1533;
+export let a1534 = 1534;
+export let a1535 = 1535;
+export let a1536 = 1536;
+export let a1537 = 1537;
+export let a1538 = 1538;
+export let a1539 = 1539;
+export let a1540 = 1540;
+export let a1541 = 1541;
+export let a1542 = 1542;
+export let a1543 = 1543;
+export let a1544 = 1544;
+export let a1545 = 1545;
+export let a1546 = 1546;
+export let a1547 = 1547;
+export let a1548 = 1548;
+export let a1549 = 1549;
+export let a1550 = 1550;
+export let a1551 = 1551;
+export let a1552 = 1552;
+export let a1553 = 1553;
+export let a1554 = 1554;
+export let a1555 = 1555;
+export let a1556 = 1556;
+export let a1557 = 1557;
+export let a1558 = 1558;
+export let a1559 = 1559;
+export let a1560 = 1560;
+export let a1561 = 1561;
+export let a1562 = 1562;
+export let a1563 = 1563;
+export let a1564 = 1564;
+export let a1565 = 1565;
+export let a1566 = 1566;
+export let a1567 = 1567;
+export let a1568 = 1568;
+export let a1569 = 1569;
+export let a1570 = 1570;
+export let a1571 = 1571;
+export let a1572 = 1572;
+export let a1573 = 1573;
+export let a1574 = 1574;
+export let a1575 = 1575;
+export let a1576 = 1576;
+export let a1577 = 1577;
+export let a1578 = 1578;
+export let a1579 = 1579;
+export let a1580 = 1580;
+export let a1581 = 1581;
+export let a1582 = 1582;
+export let a1583 = 1583;
+export let a1584 = 1584;
+export let a1585 = 1585;
+export let a1586 = 1586;
+export let a1587 = 1587;
+export let a1588 = 1588;
+export let a1589 = 1589;
+export let a1590 = 1590;
+export let a1591 = 1591;
+export let a1592 = 1592;
+export let a1593 = 1593;
+export let a1594 = 1594;
+export let a1595 = 1595;
+export let a1596 = 1596;
+export let a1597 = 1597;
+export let a1598 = 1598;
+export let a1599 = 1599;
+export let a1600 = 1600;
+export let a1601 = 1601;
+export let a1602 = 1602;
+export let a1603 = 1603;
+export let a1604 = 1604;
+export let a1605 = 1605;
+export let a1606 = 1606;
+export let a1607 = 1607;
+export let a1608 = 1608;
+export let a1609 = 1609;
+export let a1610 = 1610;
+export let a1611 = 1611;
+export let a1612 = 1612;
+export let a1613 = 1613;
+export let a1614 = 1614;
+export let a1615 = 1615;
+export let a1616 = 1616;
+export let a1617 = 1617;
+export let a1618 = 1618;
+export let a1619 = 1619;
+export let a1620 = 1620;
+export let a1621 = 1621;
+export let a1622 = 1622;
+export let a1623 = 1623;
+export let a1624 = 1624;
+export let a1625 = 1625;
+export let a1626 = 1626;
+export let a1627 = 1627;
+export let a1628 = 1628;
+export let a1629 = 1629;
+export let a1630 = 1630;
+export let a1631 = 1631;
+export let a1632 = 1632;
+export let a1633 = 1633;
+export let a1634 = 1634;
+export let a1635 = 1635;
+export let a1636 = 1636;
+export let a1637 = 1637;
+export let a1638 = 1638;
+export let a1639 = 1639;
+export let a1640 = 1640;
+export let a1641 = 1641;
+export let a1642 = 1642;
+export let a1643 = 1643;
+export let a1644 = 1644;
+export let a1645 = 1645;
+export let a1646 = 1646;
+export let a1647 = 1647;
+export let a1648 = 1648;
+export let a1649 = 1649;
+export let a1650 = 1650;
+export let a1651 = 1651;
+export let a1652 = 1652;
+export let a1653 = 1653;
+export let a1654 = 1654;
+export let a1655 = 1655;
+export let a1656 = 1656;
+export let a1657 = 1657;
+export let a1658 = 1658;
+export let a1659 = 1659;
+export let a1660 = 1660;
+export let a1661 = 1661;
+export let a1662 = 1662;
+export let a1663 = 1663;
+export let a1664 = 1664;
+export let a1665 = 1665;
+export let a1666 = 1666;
+export let a1667 = 1667;
+export let a1668 = 1668;
+export let a1669 = 1669;
+export let a1670 = 1670;
+export let a1671 = 1671;
+export let a1672 = 1672;
+export let a1673 = 1673;
+export let a1674 = 1674;
+export let a1675 = 1675;
+export let a1676 = 1676;
+export let a1677 = 1677;
+export let a1678 = 1678;
+export let a1679 = 1679;
+export let a1680 = 1680;
+export let a1681 = 1681;
+export let a1682 = 1682;
+export let a1683 = 1683;
+export let a1684 = 1684;
+export let a1685 = 1685;
+export let a1686 = 1686;
+export let a1687 = 1687;
+export let a1688 = 1688;
+export let a1689 = 1689;
+export let a1690 = 1690;
+export let a1691 = 1691;
+export let a1692 = 1692;
+export let a1693 = 1693;
+export let a1694 = 1694;
+export let a1695 = 1695;
+export let a1696 = 1696;
+export let a1697 = 1697;
+export let a1698 = 1698;
+export let a1699 = 1699;
+export let a1700 = 1700;
+export let a1701 = 1701;
+export let a1702 = 1702;
+export let a1703 = 1703;
+export let a1704 = 1704;
+export let a1705 = 1705;
+export let a1706 = 1706;
+export let a1707 = 1707;
+export let a1708 = 1708;
+export let a1709 = 1709;
+export let a1710 = 1710;
+export let a1711 = 1711;
+export let a1712 = 1712;
+export let a1713 = 1713;
+export let a1714 = 1714;
+export let a1715 = 1715;
+export let a1716 = 1716;
+export let a1717 = 1717;
+export let a1718 = 1718;
+export let a1719 = 1719;
+export let a1720 = 1720;
+export let a1721 = 1721;
+export let a1722 = 1722;
+export let a1723 = 1723;
+export let a1724 = 1724;
+export let a1725 = 1725;
+export let a1726 = 1726;
+export let a1727 = 1727;
+export let a1728 = 1728;
+export let a1729 = 1729;
+export let a1730 = 1730;
+export let a1731 = 1731;
+export let a1732 = 1732;
+export let a1733 = 1733;
+export let a1734 = 1734;
+export let a1735 = 1735;
+export let a1736 = 1736;
+export let a1737 = 1737;
+export let a1738 = 1738;
+export let a1739 = 1739;
+export let a1740 = 1740;
+export let a1741 = 1741;
+export let a1742 = 1742;
+export let a1743 = 1743;
+export let a1744 = 1744;
+export let a1745 = 1745;
+export let a1746 = 1746;
+export let a1747 = 1747;
+export let a1748 = 1748;
+export let a1749 = 1749;
+export let a1750 = 1750;
+export let a1751 = 1751;
+export let a1752 = 1752;
+export let a1753 = 1753;
+export let a1754 = 1754;
+export let a1755 = 1755;
+export let a1756 = 1756;
+export let a1757 = 1757;
+export let a1758 = 1758;
+export let a1759 = 1759;
+export let a1760 = 1760;
+export let a1761 = 1761;
+export let a1762 = 1762;
+export let a1763 = 1763;
+export let a1764 = 1764;
+export let a1765 = 1765;
+export let a1766 = 1766;
+export let a1767 = 1767;
+export let a1768 = 1768;
+export let a1769 = 1769;
+export let a1770 = 1770;
+export let a1771 = 1771;
+export let a1772 = 1772;
+export let a1773 = 1773;
+export let a1774 = 1774;
+export let a1775 = 1775;
+export let a1776 = 1776;
+export let a1777 = 1777;
+export let a1778 = 1778;
+export let a1779 = 1779;
+export let a1780 = 1780;
+export let a1781 = 1781;
+export let a1782 = 1782;
+export let a1783 = 1783;
+export let a1784 = 1784;
+export let a1785 = 1785;
+export let a1786 = 1786;
+export let a1787 = 1787;
+export let a1788 = 1788;
+export let a1789 = 1789;
+export let a1790 = 1790;
+export let a1791 = 1791;
+export let a1792 = 1792;
+export let a1793 = 1793;
+export let a1794 = 1794;
+export let a1795 = 1795;
+export let a1796 = 1796;
+export let a1797 = 1797;
+export let a1798 = 1798;
+export let a1799 = 1799;
+export let a1800 = 1800;
+export let a1801 = 1801;
+export let a1802 = 1802;
+export let a1803 = 1803;
+export let a1804 = 1804;
+export let a1805 = 1805;
+export let a1806 = 1806;
+export let a1807 = 1807;
+export let a1808 = 1808;
+export let a1809 = 1809;
+export let a1810 = 1810;
+export let a1811 = 1811;
+export let a1812 = 1812;
+export let a1813 = 1813;
+export let a1814 = 1814;
+export let a1815 = 1815;
+export let a1816 = 1816;
+export let a1817 = 1817;
+export let a1818 = 1818;
+export let a1819 = 1819;
+export let a1820 = 1820;
+export let a1821 = 1821;
+export let a1822 = 1822;
+export let a1823 = 1823;
+export let a1824 = 1824;
+export let a1825 = 1825;
+export let a1826 = 1826;
+export let a1827 = 1827;
+export let a1828 = 1828;
+export let a1829 = 1829;
+export let a1830 = 1830;
+export let a1831 = 1831;
+export let a1832 = 1832;
+export let a1833 = 1833;
+export let a1834 = 1834;
+export let a1835 = 1835;
+export let a1836 = 1836;
+export let a1837 = 1837;
+export let a1838 = 1838;
+export let a1839 = 1839;
+export let a1840 = 1840;
+export let a1841 = 1841;
+export let a1842 = 1842;
+export let a1843 = 1843;
+export let a1844 = 1844;
+export let a1845 = 1845;
+export let a1846 = 1846;
+export let a1847 = 1847;
+export let a1848 = 1848;
+export let a1849 = 1849;
+export let a1850 = 1850;
+export let a1851 = 1851;
+export let a1852 = 1852;
+export let a1853 = 1853;
+export let a1854 = 1854;
+export let a1855 = 1855;
+export let a1856 = 1856;
+export let a1857 = 1857;
+export let a1858 = 1858;
+export let a1859 = 1859;
+export let a1860 = 1860;
+export let a1861 = 1861;
+export let a1862 = 1862;
+export let a1863 = 1863;
+export let a1864 = 1864;
+export let a1865 = 1865;
+export let a1866 = 1866;
+export let a1867 = 1867;
+export let a1868 = 1868;
+export let a1869 = 1869;
+export let a1870 = 1870;
+export let a1871 = 1871;
+export let a1872 = 1872;
+export let a1873 = 1873;
+export let a1874 = 1874;
+export let a1875 = 1875;
+export let a1876 = 1876;
+export let a1877 = 1877;
+export let a1878 = 1878;
+export let a1879 = 1879;
+export let a1880 = 1880;
+export let a1881 = 1881;
+export let a1882 = 1882;
+export let a1883 = 1883;
+export let a1884 = 1884;
+export let a1885 = 1885;
+export let a1886 = 1886;
+export let a1887 = 1887;
+export let a1888 = 1888;
+export let a1889 = 1889;
+export let a1890 = 1890;
+export let a1891 = 1891;
+export let a1892 = 1892;
+export let a1893 = 1893;
+export let a1894 = 1894;
+export let a1895 = 1895;
+export let a1896 = 1896;
+export let a1897 = 1897;
+export let a1898 = 1898;
+export let a1899 = 1899;
+export let a1900 = 1900;
+export let a1901 = 1901;
+export let a1902 = 1902;
+export let a1903 = 1903;
+export let a1904 = 1904;
+export let a1905 = 1905;
+export let a1906 = 1906;
+export let a1907 = 1907;
+export let a1908 = 1908;
+export let a1909 = 1909;
+export let a1910 = 1910;
+export let a1911 = 1911;
+export let a1912 = 1912;
+export let a1913 = 1913;
+export let a1914 = 1914;
+export let a1915 = 1915;
+export let a1916 = 1916;
+export let a1917 = 1917;
+export let a1918 = 1918;
+export let a1919 = 1919;
+export let a1920 = 1920;
+export let a1921 = 1921;
+export let a1922 = 1922;
+export let a1923 = 1923;
+export let a1924 = 1924;
+export let a1925 = 1925;
+export let a1926 = 1926;
+export let a1927 = 1927;
+export let a1928 = 1928;
+export let a1929 = 1929;
+export let a1930 = 1930;
+export let a1931 = 1931;
+export let a1932 = 1932;
+export let a1933 = 1933;
+export let a1934 = 1934;
+export let a1935 = 1935;
+export let a1936 = 1936;
+export let a1937 = 1937;
+export let a1938 = 1938;
+export let a1939 = 1939;
+export let a1940 = 1940;
+export let a1941 = 1941;
+export let a1942 = 1942;
+export let a1943 = 1943;
+export let a1944 = 1944;
+export let a1945 = 1945;
+export let a1946 = 1946;
+export let a1947 = 1947;
+export let a1948 = 1948;
+export let a1949 = 1949;
+export let a1950 = 1950;
+export let a1951 = 1951;
+export let a1952 = 1952;
+export let a1953 = 1953;
+export let a1954 = 1954;
+export let a1955 = 1955;
+export let a1956 = 1956;
+export let a1957 = 1957;
+export let a1958 = 1958;
+export let a1959 = 1959;
+export let a1960 = 1960;
+export let a1961 = 1961;
+export let a1962 = 1962;
+export let a1963 = 1963;
+export let a1964 = 1964;
+export let a1965 = 1965;
+export let a1966 = 1966;
+export let a1967 = 1967;
+export let a1968 = 1968;
+export let a1969 = 1969;
+export let a1970 = 1970;
+export let a1971 = 1971;
+export let a1972 = 1972;
+export let a1973 = 1973;
+export let a1974 = 1974;
+export let a1975 = 1975;
+export let a1976 = 1976;
+export let a1977 = 1977;
+export let a1978 = 1978;
+export let a1979 = 1979;
+export let a1980 = 1980;
+export let a1981 = 1981;
+export let a1982 = 1982;
+export let a1983 = 1983;
+export let a1984 = 1984;
+export let a1985 = 1985;
+export let a1986 = 1986;
+export let a1987 = 1987;
+export let a1988 = 1988;
+export let a1989 = 1989;
+export let a1990 = 1990;
+export let a1991 = 1991;
+export let a1992 = 1992;
+export let a1993 = 1993;
+export let a1994 = 1994;
+export let a1995 = 1995;
+export let a1996 = 1996;
+export let a1997 = 1997;
+export let a1998 = 1998;
+export let a1999 = 1999;
+export let a2000 = 2000;
+export let a2001 = 2001;
+export let a2002 = 2002;
+export let a2003 = 2003;
+export let a2004 = 2004;
+export let a2005 = 2005;
+export let a2006 = 2006;
+export let a2007 = 2007;
+export let a2008 = 2008;
+export let a2009 = 2009;
+export let a2010 = 2010;
+export let a2011 = 2011;
+export let a2012 = 2012;
+export let a2013 = 2013;
+export let a2014 = 2014;
+export let a2015 = 2015;
+export let a2016 = 2016;
+export let a2017 = 2017;
+export let a2018 = 2018;
+export let a2019 = 2019;
+export let a2020 = 2020;
+export let a2021 = 2021;
+export let a2022 = 2022;
+export let a2023 = 2023;
+export let a2024 = 2024;
+export let a2025 = 2025;
+export let a2026 = 2026;
+export let a2027 = 2027;
+export let a2028 = 2028;
+export let a2029 = 2029;
+export let a2030 = 2030;
+export let a2031 = 2031;
+export let a2032 = 2032;
+export let a2033 = 2033;
+export let a2034 = 2034;
+export let a2035 = 2035;
+export let a2036 = 2036;
+export let a2037 = 2037;
+export let a2038 = 2038;
+export let a2039 = 2039;
+export let a2040 = 2040;
+export let a2041 = 2041;
+export let a2042 = 2042;
+export let a2043 = 2043;
+export let a2044 = 2044;
+export let a2045 = 2045;
+export let a2046 = 2046;
+export let a2047 = 2047;
+export let a2048 = 2048;
+export let a2049 = 2049;
+export let a2050 = 2050;
+export let a2051 = 2051;
+export let a2052 = 2052;
+export let a2053 = 2053;
+export let a2054 = 2054;
+export let a2055 = 2055;
+export let a2056 = 2056;
+export let a2057 = 2057;
+export let a2058 = 2058;
+export let a2059 = 2059;
+export let a2060 = 2060;
+export let a2061 = 2061;
+export let a2062 = 2062;
+export let a2063 = 2063;
+export let a2064 = 2064;
+export let a2065 = 2065;
+export let a2066 = 2066;
+export let a2067 = 2067;
+export let a2068 = 2068;
+export let a2069 = 2069;
+export let a2070 = 2070;
+export let a2071 = 2071;
+export let a2072 = 2072;
+export let a2073 = 2073;
+export let a2074 = 2074;
+export let a2075 = 2075;
+export let a2076 = 2076;
+export let a2077 = 2077;
+export let a2078 = 2078;
+export let a2079 = 2079;
+export let a2080 = 2080;
+export let a2081 = 2081;
+export let a2082 = 2082;
+export let a2083 = 2083;
+export let a2084 = 2084;
+export let a2085 = 2085;
+export let a2086 = 2086;
+export let a2087 = 2087;
+export let a2088 = 2088;
+export let a2089 = 2089;
+export let a2090 = 2090;
+export let a2091 = 2091;
+export let a2092 = 2092;
+export let a2093 = 2093;
+export let a2094 = 2094;
+export let a2095 = 2095;
+export let a2096 = 2096;
+export let a2097 = 2097;
+export let a2098 = 2098;
+export let a2099 = 2099;
+export let a2100 = 2100;
+export let a2101 = 2101;
+export let a2102 = 2102;
+export let a2103 = 2103;
+export let a2104 = 2104;
+export let a2105 = 2105;
+export let a2106 = 2106;
+export let a2107 = 2107;
+export let a2108 = 2108;
+export let a2109 = 2109;
+export let a2110 = 2110;
+export let a2111 = 2111;
+export let a2112 = 2112;
+export let a2113 = 2113;
+export let a2114 = 2114;
+export let a2115 = 2115;
+export let a2116 = 2116;
+export let a2117 = 2117;
+export let a2118 = 2118;
+export let a2119 = 2119;
+export let a2120 = 2120;
+export let a2121 = 2121;
+export let a2122 = 2122;
+export let a2123 = 2123;
+export let a2124 = 2124;
+export let a2125 = 2125;
+export let a2126 = 2126;
+export let a2127 = 2127;
+export let a2128 = 2128;
+export let a2129 = 2129;
+export let a2130 = 2130;
+export let a2131 = 2131;
+export let a2132 = 2132;
+export let a2133 = 2133;
+export let a2134 = 2134;
+export let a2135 = 2135;
+export let a2136 = 2136;
+export let a2137 = 2137;
+export let a2138 = 2138;
+export let a2139 = 2139;
+export let a2140 = 2140;
+export let a2141 = 2141;
+export let a2142 = 2142;
+export let a2143 = 2143;
+export let a2144 = 2144;
+export let a2145 = 2145;
+export let a2146 = 2146;
+export let a2147 = 2147;
+export let a2148 = 2148;
+export let a2149 = 2149;
+export let a2150 = 2150;
+export let a2151 = 2151;
+export let a2152 = 2152;
+export let a2153 = 2153;
+export let a2154 = 2154;
+export let a2155 = 2155;
+export let a2156 = 2156;
+export let a2157 = 2157;
+export let a2158 = 2158;
+export let a2159 = 2159;
+export let a2160 = 2160;
+export let a2161 = 2161;
+export let a2162 = 2162;
+export let a2163 = 2163;
+export let a2164 = 2164;
+export let a2165 = 2165;
+export let a2166 = 2166;
+export let a2167 = 2167;
+export let a2168 = 2168;
+export let a2169 = 2169;
+export let a2170 = 2170;
+export let a2171 = 2171;
+export let a2172 = 2172;
+export let a2173 = 2173;
+export let a2174 = 2174;
+export let a2175 = 2175;
+export let a2176 = 2176;
+export let a2177 = 2177;
+export let a2178 = 2178;
+export let a2179 = 2179;
+export let a2180 = 2180;
+export let a2181 = 2181;
+export let a2182 = 2182;
+export let a2183 = 2183;
+export let a2184 = 2184;
+export let a2185 = 2185;
+export let a2186 = 2186;
+export let a2187 = 2187;
+export let a2188 = 2188;
+export let a2189 = 2189;
+export let a2190 = 2190;
+export let a2191 = 2191;
+export let a2192 = 2192;
+export let a2193 = 2193;
+export let a2194 = 2194;
+export let a2195 = 2195;
+export let a2196 = 2196;
+export let a2197 = 2197;
+export let a2198 = 2198;
+export let a2199 = 2199;
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-large2.js b/deps/v8/test/mjsunit/harmony/modules-skip-large2.js
new file mode 100644
index 0000000000..2c03557197
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-large2.js
@@ -0,0 +1,1104 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export let a0 = 0;
+export let a1 = 1;
+export let a2 = 2;
+export let a3 = 3;
+export let a4 = 4;
+export let a5 = 5;
+export let a6 = 6;
+export let a7 = 7;
+export let a8 = 8;
+export let a9 = 9;
+export let a10 = 10;
+export let a11 = 11;
+export let a12 = 12;
+export let a13 = 13;
+export let a14 = 14;
+export let a15 = 15;
+export let a16 = 16;
+export let a17 = 17;
+export let a18 = 18;
+export let a19 = 19;
+export let a20 = 20;
+export let a21 = 21;
+export let a22 = 22;
+export let a23 = 23;
+export let a24 = 24;
+export let a25 = 25;
+export let a26 = 26;
+export let a27 = 27;
+export let a28 = 28;
+export let a29 = 29;
+export let a30 = 30;
+export let a31 = 31;
+export let a32 = 32;
+export let a33 = 33;
+export let a34 = 34;
+export let a35 = 35;
+export let a36 = 36;
+export let a37 = 37;
+export let a38 = 38;
+export let a39 = 39;
+export let a40 = 40;
+export let a41 = 41;
+export let a42 = 42;
+export let a43 = 43;
+export let a44 = 44;
+export let a45 = 45;
+export let a46 = 46;
+export let a47 = 47;
+export let a48 = 48;
+export let a49 = 49;
+export let a50 = 50;
+export let a51 = 51;
+export let a52 = 52;
+export let a53 = 53;
+export let a54 = 54;
+export let a55 = 55;
+export let a56 = 56;
+export let a57 = 57;
+export let a58 = 58;
+export let a59 = 59;
+export let a60 = 60;
+export let a61 = 61;
+export let a62 = 62;
+export let a63 = 63;
+export let a64 = 64;
+export let a65 = 65;
+export let a66 = 66;
+export let a67 = 67;
+export let a68 = 68;
+export let a69 = 69;
+export let a70 = 70;
+export let a71 = 71;
+export let a72 = 72;
+export let a73 = 73;
+export let a74 = 74;
+export let a75 = 75;
+export let a76 = 76;
+export let a77 = 77;
+export let a78 = 78;
+export let a79 = 79;
+export let a80 = 80;
+export let a81 = 81;
+export let a82 = 82;
+export let a83 = 83;
+export let a84 = 84;
+export let a85 = 85;
+export let a86 = 86;
+export let a87 = 87;
+export let a88 = 88;
+export let a89 = 89;
+export let a90 = 90;
+export let a91 = 91;
+export let a92 = 92;
+export let a93 = 93;
+export let a94 = 94;
+export let a95 = 95;
+export let a96 = 96;
+export let a97 = 97;
+export let a98 = 98;
+export let a99 = 99;
+export let a100 = 100;
+export let a101 = 101;
+export let a102 = 102;
+export let a103 = 103;
+export let a104 = 104;
+export let a105 = 105;
+export let a106 = 106;
+export let a107 = 107;
+export let a108 = 108;
+export let a109 = 109;
+export let a110 = 110;
+export let a111 = 111;
+export let a112 = 112;
+export let a113 = 113;
+export let a114 = 114;
+export let a115 = 115;
+export let a116 = 116;
+export let a117 = 117;
+export let a118 = 118;
+export let a119 = 119;
+export let a120 = 120;
+export let a121 = 121;
+export let a122 = 122;
+export let a123 = 123;
+export let a124 = 124;
+export let a125 = 125;
+export let a126 = 126;
+export let a127 = 127;
+export let a128 = 128;
+export let a129 = 129;
+export let a130 = 130;
+export let a131 = 131;
+export let a132 = 132;
+export let a133 = 133;
+export let a134 = 134;
+export let a135 = 135;
+export let a136 = 136;
+export let a137 = 137;
+export let a138 = 138;
+export let a139 = 139;
+export let a140 = 140;
+export let a141 = 141;
+export let a142 = 142;
+export let a143 = 143;
+export let a144 = 144;
+export let a145 = 145;
+export let a146 = 146;
+export let a147 = 147;
+export let a148 = 148;
+export let a149 = 149;
+export let a150 = 150;
+export let a151 = 151;
+export let a152 = 152;
+export let a153 = 153;
+export let a154 = 154;
+export let a155 = 155;
+export let a156 = 156;
+export let a157 = 157;
+export let a158 = 158;
+export let a159 = 159;
+export let a160 = 160;
+export let a161 = 161;
+export let a162 = 162;
+export let a163 = 163;
+export let a164 = 164;
+export let a165 = 165;
+export let a166 = 166;
+export let a167 = 167;
+export let a168 = 168;
+export let a169 = 169;
+export let a170 = 170;
+export let a171 = 171;
+export let a172 = 172;
+export let a173 = 173;
+export let a174 = 174;
+export let a175 = 175;
+export let a176 = 176;
+export let a177 = 177;
+export let a178 = 178;
+export let a179 = 179;
+export let a180 = 180;
+export let a181 = 181;
+export let a182 = 182;
+export let a183 = 183;
+export let a184 = 184;
+export let a185 = 185;
+export let a186 = 186;
+export let a187 = 187;
+export let a188 = 188;
+export let a189 = 189;
+export let a190 = 190;
+export let a191 = 191;
+export let a192 = 192;
+export let a193 = 193;
+export let a194 = 194;
+export let a195 = 195;
+export let a196 = 196;
+export let a197 = 197;
+export let a198 = 198;
+export let a199 = 199;
+export let a200 = 200;
+export let a201 = 201;
+export let a202 = 202;
+export let a203 = 203;
+export let a204 = 204;
+export let a205 = 205;
+export let a206 = 206;
+export let a207 = 207;
+export let a208 = 208;
+export let a209 = 209;
+export let a210 = 210;
+export let a211 = 211;
+export let a212 = 212;
+export let a213 = 213;
+export let a214 = 214;
+export let a215 = 215;
+export let a216 = 216;
+export let a217 = 217;
+export let a218 = 218;
+export let a219 = 219;
+export let a220 = 220;
+export let a221 = 221;
+export let a222 = 222;
+export let a223 = 223;
+export let a224 = 224;
+export let a225 = 225;
+export let a226 = 226;
+export let a227 = 227;
+export let a228 = 228;
+export let a229 = 229;
+export let a230 = 230;
+export let a231 = 231;
+export let a232 = 232;
+export let a233 = 233;
+export let a234 = 234;
+export let a235 = 235;
+export let a236 = 236;
+export let a237 = 237;
+export let a238 = 238;
+export let a239 = 239;
+export let a240 = 240;
+export let a241 = 241;
+export let a242 = 242;
+export let a243 = 243;
+export let a244 = 244;
+export let a245 = 245;
+export let a246 = 246;
+export let a247 = 247;
+export let a248 = 248;
+export let a249 = 249;
+export let a250 = 250;
+export let a251 = 251;
+export let a252 = 252;
+export let a253 = 253;
+export let a254 = 254;
+export let a255 = 255;
+export let a256 = 256;
+export let a257 = 257;
+export let a258 = 258;
+export let a259 = 259;
+export let a260 = 260;
+export let a261 = 261;
+export let a262 = 262;
+export let a263 = 263;
+export let a264 = 264;
+export let a265 = 265;
+export let a266 = 266;
+export let a267 = 267;
+export let a268 = 268;
+export let a269 = 269;
+export let a270 = 270;
+export let a271 = 271;
+export let a272 = 272;
+export let a273 = 273;
+export let a274 = 274;
+export let a275 = 275;
+export let a276 = 276;
+export let a277 = 277;
+export let a278 = 278;
+export let a279 = 279;
+export let a280 = 280;
+export let a281 = 281;
+export let a282 = 282;
+export let a283 = 283;
+export let a284 = 284;
+export let a285 = 285;
+export let a286 = 286;
+export let a287 = 287;
+export let a288 = 288;
+export let a289 = 289;
+export let a290 = 290;
+export let a291 = 291;
+export let a292 = 292;
+export let a293 = 293;
+export let a294 = 294;
+export let a295 = 295;
+export let a296 = 296;
+export let a297 = 297;
+export let a298 = 298;
+export let a299 = 299;
+export let a300 = 300;
+export let a301 = 301;
+export let a302 = 302;
+export let a303 = 303;
+export let a304 = 304;
+export let a305 = 305;
+export let a306 = 306;
+export let a307 = 307;
+export let a308 = 308;
+export let a309 = 309;
+export let a310 = 310;
+export let a311 = 311;
+export let a312 = 312;
+export let a313 = 313;
+export let a314 = 314;
+export let a315 = 315;
+export let a316 = 316;
+export let a317 = 317;
+export let a318 = 318;
+export let a319 = 319;
+export let a320 = 320;
+export let a321 = 321;
+export let a322 = 322;
+export let a323 = 323;
+export let a324 = 324;
+export let a325 = 325;
+export let a326 = 326;
+export let a327 = 327;
+export let a328 = 328;
+export let a329 = 329;
+export let a330 = 330;
+export let a331 = 331;
+export let a332 = 332;
+export let a333 = 333;
+export let a334 = 334;
+export let a335 = 335;
+export let a336 = 336;
+export let a337 = 337;
+export let a338 = 338;
+export let a339 = 339;
+export let a340 = 340;
+export let a341 = 341;
+export let a342 = 342;
+export let a343 = 343;
+export let a344 = 344;
+export let a345 = 345;
+export let a346 = 346;
+export let a347 = 347;
+export let a348 = 348;
+export let a349 = 349;
+export let a350 = 350;
+export let a351 = 351;
+export let a352 = 352;
+export let a353 = 353;
+export let a354 = 354;
+export let a355 = 355;
+export let a356 = 356;
+export let a357 = 357;
+export let a358 = 358;
+export let a359 = 359;
+export let a360 = 360;
+export let a361 = 361;
+export let a362 = 362;
+export let a363 = 363;
+export let a364 = 364;
+export let a365 = 365;
+export let a366 = 366;
+export let a367 = 367;
+export let a368 = 368;
+export let a369 = 369;
+export let a370 = 370;
+export let a371 = 371;
+export let a372 = 372;
+export let a373 = 373;
+export let a374 = 374;
+export let a375 = 375;
+export let a376 = 376;
+export let a377 = 377;
+export let a378 = 378;
+export let a379 = 379;
+export let a380 = 380;
+export let a381 = 381;
+export let a382 = 382;
+export let a383 = 383;
+export let a384 = 384;
+export let a385 = 385;
+export let a386 = 386;
+export let a387 = 387;
+export let a388 = 388;
+export let a389 = 389;
+export let a390 = 390;
+export let a391 = 391;
+export let a392 = 392;
+export let a393 = 393;
+export let a394 = 394;
+export let a395 = 395;
+export let a396 = 396;
+export let a397 = 397;
+export let a398 = 398;
+export let a399 = 399;
+export let a400 = 400;
+export let a401 = 401;
+export let a402 = 402;
+export let a403 = 403;
+export let a404 = 404;
+export let a405 = 405;
+export let a406 = 406;
+export let a407 = 407;
+export let a408 = 408;
+export let a409 = 409;
+export let a410 = 410;
+export let a411 = 411;
+export let a412 = 412;
+export let a413 = 413;
+export let a414 = 414;
+export let a415 = 415;
+export let a416 = 416;
+export let a417 = 417;
+export let a418 = 418;
+export let a419 = 419;
+export let a420 = 420;
+export let a421 = 421;
+export let a422 = 422;
+export let a423 = 423;
+export let a424 = 424;
+export let a425 = 425;
+export let a426 = 426;
+export let a427 = 427;
+export let a428 = 428;
+export let a429 = 429;
+export let a430 = 430;
+export let a431 = 431;
+export let a432 = 432;
+export let a433 = 433;
+export let a434 = 434;
+export let a435 = 435;
+export let a436 = 436;
+export let a437 = 437;
+export let a438 = 438;
+export let a439 = 439;
+export let a440 = 440;
+export let a441 = 441;
+export let a442 = 442;
+export let a443 = 443;
+export let a444 = 444;
+export let a445 = 445;
+export let a446 = 446;
+export let a447 = 447;
+export let a448 = 448;
+export let a449 = 449;
+export let a450 = 450;
+export let a451 = 451;
+export let a452 = 452;
+export let a453 = 453;
+export let a454 = 454;
+export let a455 = 455;
+export let a456 = 456;
+export let a457 = 457;
+export let a458 = 458;
+export let a459 = 459;
+export let a460 = 460;
+export let a461 = 461;
+export let a462 = 462;
+export let a463 = 463;
+export let a464 = 464;
+export let a465 = 465;
+export let a466 = 466;
+export let a467 = 467;
+export let a468 = 468;
+export let a469 = 469;
+export let a470 = 470;
+export let a471 = 471;
+export let a472 = 472;
+export let a473 = 473;
+export let a474 = 474;
+export let a475 = 475;
+export let a476 = 476;
+export let a477 = 477;
+export let a478 = 478;
+export let a479 = 479;
+export let a480 = 480;
+export let a481 = 481;
+export let a482 = 482;
+export let a483 = 483;
+export let a484 = 484;
+export let a485 = 485;
+export let a486 = 486;
+export let a487 = 487;
+export let a488 = 488;
+export let a489 = 489;
+export let a490 = 490;
+export let a491 = 491;
+export let a492 = 492;
+export let a493 = 493;
+export let a494 = 494;
+export let a495 = 495;
+export let a496 = 496;
+export let a497 = 497;
+export let a498 = 498;
+export let a499 = 499;
+export let a500 = 500;
+export let a501 = 501;
+export let a502 = 502;
+export let a503 = 503;
+export let a504 = 504;
+export let a505 = 505;
+export let a506 = 506;
+export let a507 = 507;
+export let a508 = 508;
+export let a509 = 509;
+export let a510 = 510;
+export let a511 = 511;
+export let a512 = 512;
+export let a513 = 513;
+export let a514 = 514;
+export let a515 = 515;
+export let a516 = 516;
+export let a517 = 517;
+export let a518 = 518;
+export let a519 = 519;
+export let a520 = 520;
+export let a521 = 521;
+export let a522 = 522;
+export let a523 = 523;
+export let a524 = 524;
+export let a525 = 525;
+export let a526 = 526;
+export let a527 = 527;
+export let a528 = 528;
+export let a529 = 529;
+export let a530 = 530;
+export let a531 = 531;
+export let a532 = 532;
+export let a533 = 533;
+export let a534 = 534;
+export let a535 = 535;
+export let a536 = 536;
+export let a537 = 537;
+export let a538 = 538;
+export let a539 = 539;
+export let a540 = 540;
+export let a541 = 541;
+export let a542 = 542;
+export let a543 = 543;
+export let a544 = 544;
+export let a545 = 545;
+export let a546 = 546;
+export let a547 = 547;
+export let a548 = 548;
+export let a549 = 549;
+export let a550 = 550;
+export let a551 = 551;
+export let a552 = 552;
+export let a553 = 553;
+export let a554 = 554;
+export let a555 = 555;
+export let a556 = 556;
+export let a557 = 557;
+export let a558 = 558;
+export let a559 = 559;
+export let a560 = 560;
+export let a561 = 561;
+export let a562 = 562;
+export let a563 = 563;
+export let a564 = 564;
+export let a565 = 565;
+export let a566 = 566;
+export let a567 = 567;
+export let a568 = 568;
+export let a569 = 569;
+export let a570 = 570;
+export let a571 = 571;
+export let a572 = 572;
+export let a573 = 573;
+export let a574 = 574;
+export let a575 = 575;
+export let a576 = 576;
+export let a577 = 577;
+export let a578 = 578;
+export let a579 = 579;
+export let a580 = 580;
+export let a581 = 581;
+export let a582 = 582;
+export let a583 = 583;
+export let a584 = 584;
+export let a585 = 585;
+export let a586 = 586;
+export let a587 = 587;
+export let a588 = 588;
+export let a589 = 589;
+export let a590 = 590;
+export let a591 = 591;
+export let a592 = 592;
+export let a593 = 593;
+export let a594 = 594;
+export let a595 = 595;
+export let a596 = 596;
+export let a597 = 597;
+export let a598 = 598;
+export let a599 = 599;
+export let a600 = 600;
+export let a601 = 601;
+export let a602 = 602;
+export let a603 = 603;
+export let a604 = 604;
+export let a605 = 605;
+export let a606 = 606;
+export let a607 = 607;
+export let a608 = 608;
+export let a609 = 609;
+export let a610 = 610;
+export let a611 = 611;
+export let a612 = 612;
+export let a613 = 613;
+export let a614 = 614;
+export let a615 = 615;
+export let a616 = 616;
+export let a617 = 617;
+export let a618 = 618;
+export let a619 = 619;
+export let a620 = 620;
+export let a621 = 621;
+export let a622 = 622;
+export let a623 = 623;
+export let a624 = 624;
+export let a625 = 625;
+export let a626 = 626;
+export let a627 = 627;
+export let a628 = 628;
+export let a629 = 629;
+export let a630 = 630;
+export let a631 = 631;
+export let a632 = 632;
+export let a633 = 633;
+export let a634 = 634;
+export let a635 = 635;
+export let a636 = 636;
+export let a637 = 637;
+export let a638 = 638;
+export let a639 = 639;
+export let a640 = 640;
+export let a641 = 641;
+export let a642 = 642;
+export let a643 = 643;
+export let a644 = 644;
+export let a645 = 645;
+export let a646 = 646;
+export let a647 = 647;
+export let a648 = 648;
+export let a649 = 649;
+export let a650 = 650;
+export let a651 = 651;
+export let a652 = 652;
+export let a653 = 653;
+export let a654 = 654;
+export let a655 = 655;
+export let a656 = 656;
+export let a657 = 657;
+export let a658 = 658;
+export let a659 = 659;
+export let a660 = 660;
+export let a661 = 661;
+export let a662 = 662;
+export let a663 = 663;
+export let a664 = 664;
+export let a665 = 665;
+export let a666 = 666;
+export let a667 = 667;
+export let a668 = 668;
+export let a669 = 669;
+export let a670 = 670;
+export let a671 = 671;
+export let a672 = 672;
+export let a673 = 673;
+export let a674 = 674;
+export let a675 = 675;
+export let a676 = 676;
+export let a677 = 677;
+export let a678 = 678;
+export let a679 = 679;
+export let a680 = 680;
+export let a681 = 681;
+export let a682 = 682;
+export let a683 = 683;
+export let a684 = 684;
+export let a685 = 685;
+export let a686 = 686;
+export let a687 = 687;
+export let a688 = 688;
+export let a689 = 689;
+export let a690 = 690;
+export let a691 = 691;
+export let a692 = 692;
+export let a693 = 693;
+export let a694 = 694;
+export let a695 = 695;
+export let a696 = 696;
+export let a697 = 697;
+export let a698 = 698;
+export let a699 = 699;
+export let a700 = 700;
+export let a701 = 701;
+export let a702 = 702;
+export let a703 = 703;
+export let a704 = 704;
+export let a705 = 705;
+export let a706 = 706;
+export let a707 = 707;
+export let a708 = 708;
+export let a709 = 709;
+export let a710 = 710;
+export let a711 = 711;
+export let a712 = 712;
+export let a713 = 713;
+export let a714 = 714;
+export let a715 = 715;
+export let a716 = 716;
+export let a717 = 717;
+export let a718 = 718;
+export let a719 = 719;
+export let a720 = 720;
+export let a721 = 721;
+export let a722 = 722;
+export let a723 = 723;
+export let a724 = 724;
+export let a725 = 725;
+export let a726 = 726;
+export let a727 = 727;
+export let a728 = 728;
+export let a729 = 729;
+export let a730 = 730;
+export let a731 = 731;
+export let a732 = 732;
+export let a733 = 733;
+export let a734 = 734;
+export let a735 = 735;
+export let a736 = 736;
+export let a737 = 737;
+export let a738 = 738;
+export let a739 = 739;
+export let a740 = 740;
+export let a741 = 741;
+export let a742 = 742;
+export let a743 = 743;
+export let a744 = 744;
+export let a745 = 745;
+export let a746 = 746;
+export let a747 = 747;
+export let a748 = 748;
+export let a749 = 749;
+export let a750 = 750;
+export let a751 = 751;
+export let a752 = 752;
+export let a753 = 753;
+export let a754 = 754;
+export let a755 = 755;
+export let a756 = 756;
+export let a757 = 757;
+export let a758 = 758;
+export let a759 = 759;
+export let a760 = 760;
+export let a761 = 761;
+export let a762 = 762;
+export let a763 = 763;
+export let a764 = 764;
+export let a765 = 765;
+export let a766 = 766;
+export let a767 = 767;
+export let a768 = 768;
+export let a769 = 769;
+export let a770 = 770;
+export let a771 = 771;
+export let a772 = 772;
+export let a773 = 773;
+export let a774 = 774;
+export let a775 = 775;
+export let a776 = 776;
+export let a777 = 777;
+export let a778 = 778;
+export let a779 = 779;
+export let a780 = 780;
+export let a781 = 781;
+export let a782 = 782;
+export let a783 = 783;
+export let a784 = 784;
+export let a785 = 785;
+export let a786 = 786;
+export let a787 = 787;
+export let a788 = 788;
+export let a789 = 789;
+export let a790 = 790;
+export let a791 = 791;
+export let a792 = 792;
+export let a793 = 793;
+export let a794 = 794;
+export let a795 = 795;
+export let a796 = 796;
+export let a797 = 797;
+export let a798 = 798;
+export let a799 = 799;
+export let a800 = 800;
+export let a801 = 801;
+export let a802 = 802;
+export let a803 = 803;
+export let a804 = 804;
+export let a805 = 805;
+export let a806 = 806;
+export let a807 = 807;
+export let a808 = 808;
+export let a809 = 809;
+export let a810 = 810;
+export let a811 = 811;
+export let a812 = 812;
+export let a813 = 813;
+export let a814 = 814;
+export let a815 = 815;
+export let a816 = 816;
+export let a817 = 817;
+export let a818 = 818;
+export let a819 = 819;
+export let a820 = 820;
+export let a821 = 821;
+export let a822 = 822;
+export let a823 = 823;
+export let a824 = 824;
+export let a825 = 825;
+export let a826 = 826;
+export let a827 = 827;
+export let a828 = 828;
+export let a829 = 829;
+export let a830 = 830;
+export let a831 = 831;
+export let a832 = 832;
+export let a833 = 833;
+export let a834 = 834;
+export let a835 = 835;
+export let a836 = 836;
+export let a837 = 837;
+export let a838 = 838;
+export let a839 = 839;
+export let a840 = 840;
+export let a841 = 841;
+export let a842 = 842;
+export let a843 = 843;
+export let a844 = 844;
+export let a845 = 845;
+export let a846 = 846;
+export let a847 = 847;
+export let a848 = 848;
+export let a849 = 849;
+export let a850 = 850;
+export let a851 = 851;
+export let a852 = 852;
+export let a853 = 853;
+export let a854 = 854;
+export let a855 = 855;
+export let a856 = 856;
+export let a857 = 857;
+export let a858 = 858;
+export let a859 = 859;
+export let a860 = 860;
+export let a861 = 861;
+export let a862 = 862;
+export let a863 = 863;
+export let a864 = 864;
+export let a865 = 865;
+export let a866 = 866;
+export let a867 = 867;
+export let a868 = 868;
+export let a869 = 869;
+export let a870 = 870;
+export let a871 = 871;
+export let a872 = 872;
+export let a873 = 873;
+export let a874 = 874;
+export let a875 = 875;
+export let a876 = 876;
+export let a877 = 877;
+export let a878 = 878;
+export let a879 = 879;
+export let a880 = 880;
+export let a881 = 881;
+export let a882 = 882;
+export let a883 = 883;
+export let a884 = 884;
+export let a885 = 885;
+export let a886 = 886;
+export let a887 = 887;
+export let a888 = 888;
+export let a889 = 889;
+export let a890 = 890;
+export let a891 = 891;
+export let a892 = 892;
+export let a893 = 893;
+export let a894 = 894;
+export let a895 = 895;
+export let a896 = 896;
+export let a897 = 897;
+export let a898 = 898;
+export let a899 = 899;
+export let a900 = 900;
+export let a901 = 901;
+export let a902 = 902;
+export let a903 = 903;
+export let a904 = 904;
+export let a905 = 905;
+export let a906 = 906;
+export let a907 = 907;
+export let a908 = 908;
+export let a909 = 909;
+export let a910 = 910;
+export let a911 = 911;
+export let a912 = 912;
+export let a913 = 913;
+export let a914 = 914;
+export let a915 = 915;
+export let a916 = 916;
+export let a917 = 917;
+export let a918 = 918;
+export let a919 = 919;
+export let a920 = 920;
+export let a921 = 921;
+export let a922 = 922;
+export let a923 = 923;
+export let a924 = 924;
+export let a925 = 925;
+export let a926 = 926;
+export let a927 = 927;
+export let a928 = 928;
+export let a929 = 929;
+export let a930 = 930;
+export let a931 = 931;
+export let a932 = 932;
+export let a933 = 933;
+export let a934 = 934;
+export let a935 = 935;
+export let a936 = 936;
+export let a937 = 937;
+export let a938 = 938;
+export let a939 = 939;
+export let a940 = 940;
+export let a941 = 941;
+export let a942 = 942;
+export let a943 = 943;
+export let a944 = 944;
+export let a945 = 945;
+export let a946 = 946;
+export let a947 = 947;
+export let a948 = 948;
+export let a949 = 949;
+export let a950 = 950;
+export let a951 = 951;
+export let a952 = 952;
+export let a953 = 953;
+export let a954 = 954;
+export let a955 = 955;
+export let a956 = 956;
+export let a957 = 957;
+export let a958 = 958;
+export let a959 = 959;
+export let a960 = 960;
+export let a961 = 961;
+export let a962 = 962;
+export let a963 = 963;
+export let a964 = 964;
+export let a965 = 965;
+export let a966 = 966;
+export let a967 = 967;
+export let a968 = 968;
+export let a969 = 969;
+export let a970 = 970;
+export let a971 = 971;
+export let a972 = 972;
+export let a973 = 973;
+export let a974 = 974;
+export let a975 = 975;
+export let a976 = 976;
+export let a977 = 977;
+export let a978 = 978;
+export let a979 = 979;
+export let a980 = 980;
+export let a981 = 981;
+export let a982 = 982;
+export let a983 = 983;
+export let a984 = 984;
+export let a985 = 985;
+export let a986 = 986;
+export let a987 = 987;
+export let a988 = 988;
+export let a989 = 989;
+export let a990 = 990;
+export let a991 = 991;
+export let a992 = 992;
+export let a993 = 993;
+export let a994 = 994;
+export let a995 = 995;
+export let a996 = 996;
+export let a997 = 997;
+export let a998 = 998;
+export let a999 = 999;
+export let a1000 = 1000;
+export let a1001 = 1001;
+export let a1002 = 1002;
+export let a1003 = 1003;
+export let a1004 = 1004;
+export let a1005 = 1005;
+export let a1006 = 1006;
+export let a1007 = 1007;
+export let a1008 = 1008;
+export let a1009 = 1009;
+export let a1010 = 1010;
+export let a1011 = 1011;
+export let a1012 = 1012;
+export let a1013 = 1013;
+export let a1014 = 1014;
+export let a1015 = 1015;
+export let a1016 = 1016;
+export let a1017 = 1017;
+export let a1018 = 1018;
+export let a1019 = 1019;
+export let a1020 = 1020;
+export let a1021 = 1021;
+export let a1022 = 1022;
+export let a1023 = 1023;
+export let a1024 = 1024;
+export let a1025 = 1025;
+export let a1026 = 1026;
+export let a1027 = 1027;
+export let a1028 = 1028;
+export let a1029 = 1029;
+export let a1030 = 1030;
+export let a1031 = 1031;
+export let a1032 = 1032;
+export let a1033 = 1033;
+export let a1034 = 1034;
+export let a1035 = 1035;
+export let a1036 = 1036;
+export let a1037 = 1037;
+export let a1038 = 1038;
+export let a1039 = 1039;
+export let a1040 = 1040;
+export let a1041 = 1041;
+export let a1042 = 1042;
+export let a1043 = 1043;
+export let a1044 = 1044;
+export let a1045 = 1045;
+export let a1046 = 1046;
+export let a1047 = 1047;
+export let a1048 = 1048;
+export let a1049 = 1049;
+export let a1050 = 1050;
+export let a1051 = 1051;
+export let a1052 = 1052;
+export let a1053 = 1053;
+export let a1054 = 1054;
+export let a1055 = 1055;
+export let a1056 = 1056;
+export let a1057 = 1057;
+export let a1058 = 1058;
+export let a1059 = 1059;
+export let a1060 = 1060;
+export let a1061 = 1061;
+export let a1062 = 1062;
+export let a1063 = 1063;
+export let a1064 = 1064;
+export let a1065 = 1065;
+export let a1066 = 1066;
+export let a1067 = 1067;
+export let a1068 = 1068;
+export let a1069 = 1069;
+export let a1070 = 1070;
+export let a1071 = 1071;
+export let a1072 = 1072;
+export let a1073 = 1073;
+export let a1074 = 1074;
+export let a1075 = 1075;
+export let a1076 = 1076;
+export let a1077 = 1077;
+export let a1078 = 1078;
+export let a1079 = 1079;
+export let a1080 = 1080;
+export let a1081 = 1081;
+export let a1082 = 1082;
+export let a1083 = 1083;
+export let a1084 = 1084;
+export let a1085 = 1085;
+export let a1086 = 1086;
+export let a1087 = 1087;
+export let a1088 = 1088;
+export let a1089 = 1089;
+export let a1090 = 1090;
+export let a1091 = 1091;
+export let a1092 = 1092;
+export let a1093 = 1093;
+export let a1094 = 1094;
+export let a1095 = 1095;
+export let a1096 = 1096;
+export let a1097 = 1097;
+export let a1098 = 1098;
+export let a1099 = 1099;
diff --git a/deps/v8/test/mjsunit/harmony/promise-prototype-finally.js b/deps/v8/test/mjsunit/harmony/promise-prototype-finally.js
index d80065f10d..3668ab5538 100644
--- a/deps/v8/test/mjsunit/harmony/promise-prototype-finally.js
+++ b/deps/v8/test/mjsunit/harmony/promise-prototype-finally.js
@@ -524,3 +524,84 @@ assertTrue(descriptor.configurable);
assertFalse(descriptor.enumerable);
assertEquals("finally", Promise.prototype.finally.name);
assertEquals(1, Promise.prototype.finally.length);
+
+var count = 0;
+class FooPromise extends Promise {
+ constructor(resolve, reject) {
+ count++;
+ return super(resolve, reject);
+ }
+}
+
+testAsync(assert => {
+ assert.plan(1);
+ count = 0;
+
+ new FooPromise(r => r()).finally(() => {}).then(() => {
+ assert.equals(6, count);
+ });
+}, "finally/speciesconstructor");
+
+testAsync(assert => {
+ assert.plan(1);
+ count = 0;
+
+ FooPromise.resolve().finally(() => {}).then(() => {
+ assert.equals(6, count);
+  });
+}, "resolve/finally/speciesconstructor");
+
+testAsync(assert => {
+ assert.plan(1);
+ count = 0;
+
+ FooPromise.reject().finally(() => {}).catch(() => {
+ assert.equals(6, count);
+  });
+}, "reject/finally/speciesconstructor");
+
+testAsync(assert => {
+ assert.plan(2);
+
+ class MyPromise extends Promise {
+ static get [Symbol.species]() { return Promise; }
+ }
+
+ var p = Promise
+ .resolve()
+ .finally(() => MyPromise.resolve());
+
+ assert.equals(true, p instanceof Promise);
+ assert.equals(false, p instanceof MyPromise);
+}, "finally/Symbol.Species");
+
+testAsync(assert => {
+ assert.plan(3);
+ let resolve;
+ let value = 0;
+
+ let p = new Promise(r => { resolve = r });
+
+ Promise.resolve()
+ .finally(() => {
+ return p;
+ })
+ .then(() => {
+ value = 1;
+ });
+
+  // This makes sure we take the fast path in PromiseResolve that simply
+  // returns the promise it receives as its value. If we had to create
+ // another wrapper promise, that would cause an additional tick in
+ // the microtask queue.
+ Promise.resolve()
+ // onFinally has run.
+ .then(() => { resolve(); })
+ // thenFinally has run.
+ .then(() => assert.equals(0, value))
+ // promise returned by .finally has been resolved.
+ .then(() => assert.equals(0, value))
+ // onFulfilled callback of .then() has run.
+ .then(() => assert.equals(1, value));
+
+}, "PromiseResolve-ordering");
diff --git a/deps/v8/test/mjsunit/harmony/regexp-named-captures.js b/deps/v8/test/mjsunit/harmony/regexp-named-captures.js
index be90427cfa..3ce947d1e5 100644
--- a/deps/v8/test/mjsunit/harmony/regexp-named-captures.js
+++ b/deps/v8/test/mjsunit/harmony/regexp-named-captures.js
@@ -360,6 +360,7 @@ function toSlowMode(re) {
assertEquals("bacd", "abcd".replace(re, "$2$1"));
assertEquals("cd", "abcd".replace(re, "$3"));
assertEquals("$<sndcd", "abcd".replace(re, "$<snd"));
+ assertEquals("$<sndacd", "abcd".replace(re, "$<snd$1"));
assertEquals("$<42a>cd", "abcd".replace(re, "$<42$1>"));
assertEquals("$<fth>cd", "abcd".replace(re, "$<fth>"));
assertEquals("$<a>cd", "abcd".replace(re, "$<$1>"));
@@ -371,10 +372,11 @@ function toSlowMode(re) {
assertEquals("badc", "abcd".replace(re, "$<snd>$<fst>"));
assertEquals("badc", "abcd".replace(re, "$2$1"));
assertEquals("", "abcd".replace(re, "$<thd>"));
- assertThrows(() => "abcd".replace(re, "$<snd"), SyntaxError);
- assertThrows(() => "abcd".replace(re, "$<42$1>"), SyntaxError);
- assertThrows(() => "abcd".replace(re, "$<fth>"), SyntaxError);
- assertThrows(() => "abcd".replace(re, "$<$1>"), SyntaxError);
+ assertEquals("$<snd$<snd", "abcd".replace(re, "$<snd"));
+ assertEquals("$<snda$<sndc", "abcd".replace(re, "$<snd$1"));
+ assertEquals("", "abcd".replace(re, "$<42$1>"));
+ assertEquals("", "abcd".replace(re, "$<fth>"));
+ assertEquals("", "abcd".replace(re, "$<$1>"));
}
// @@replace with a string replacement argument (non-global, named captures).
@@ -383,10 +385,11 @@ function toSlowMode(re) {
assertEquals("bacd", "abcd".replace(re, "$<snd>$<fst>"));
assertEquals("bacd", "abcd".replace(re, "$2$1"));
assertEquals("cd", "abcd".replace(re, "$<thd>"));
- assertThrows(() => "abcd".replace(re, "$<snd"), SyntaxError);
- assertThrows(() => "abcd".replace(re, "$<42$1>"), SyntaxError);
- assertThrows(() => "abcd".replace(re, "$<fth>"), SyntaxError);
- assertThrows(() => "abcd".replace(re, "$<$1>"), SyntaxError);
+ assertEquals("$<sndcd", "abcd".replace(re, "$<snd"));
+ assertEquals("$<sndacd", "abcd".replace(re, "$<snd$1"));
+ assertEquals("cd", "abcd".replace(re, "$<42$1>"));
+ assertEquals("cd", "abcd".replace(re, "$<fth>"));
+ assertEquals("cd", "abcd".replace(re, "$<$1>"));
}
// @@replace with a string replacement argument (slow, global, named captures).
@@ -395,10 +398,11 @@ function toSlowMode(re) {
assertEquals("badc", "abcd".replace(re, "$<snd>$<fst>"));
assertEquals("badc", "abcd".replace(re, "$2$1"));
assertEquals("", "abcd".replace(re, "$<thd>"));
- assertThrows(() => "abcd".replace(re, "$<snd"), SyntaxError);
- assertThrows(() => "abcd".replace(re, "$<42$1>"), SyntaxError);
- assertThrows(() => "abcd".replace(re, "$<fth>"), SyntaxError);
- assertThrows(() => "abcd".replace(re, "$<$1>"), SyntaxError);
+ assertEquals("$<snd$<snd", "abcd".replace(re, "$<snd"));
+ assertEquals("$<snda$<sndc", "abcd".replace(re, "$<snd$1"));
+ assertEquals("", "abcd".replace(re, "$<42$1>"));
+ assertEquals("", "abcd".replace(re, "$<fth>"));
+ assertEquals("", "abcd".replace(re, "$<$1>"));
}
// @@replace with a string replacement argument (slow, non-global,
@@ -408,8 +412,9 @@ function toSlowMode(re) {
assertEquals("bacd", "abcd".replace(re, "$<snd>$<fst>"));
assertEquals("bacd", "abcd".replace(re, "$2$1"));
assertEquals("cd", "abcd".replace(re, "$<thd>"));
- assertThrows(() => "abcd".replace(re, "$<snd"), SyntaxError);
- assertThrows(() => "abcd".replace(re, "$<42$1>"), SyntaxError);
- assertThrows(() => "abcd".replace(re, "$<fth>"), SyntaxError);
- assertThrows(() => "abcd".replace(re, "$<$1>"), SyntaxError);
+ assertEquals("$<sndcd", "abcd".replace(re, "$<snd"));
+ assertEquals("$<sndacd", "abcd".replace(re, "$<snd$1"));
+ assertEquals("cd", "abcd".replace(re, "$<42$1>"));
+ assertEquals("cd", "abcd".replace(re, "$<fth>"));
+ assertEquals("cd", "abcd".replace(re, "$<$1>"));
}
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-771470.js b/deps/v8/test/mjsunit/harmony/regress/regress-771470.js
new file mode 100644
index 0000000000..9776fb0bad
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-771470.js
@@ -0,0 +1,8 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --enable-slow-asserts
+
+async function* gen() { };
+gen.prototype = 1;
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-772649.js b/deps/v8/test/mjsunit/harmony/regress/regress-772649.js
new file mode 100644
index 0000000000..d080410226
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-772649.js
@@ -0,0 +1,11 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-async-iteration
+
+async function* gen([[notIterable]] = [null]) {}
+assertThrows(() => gen(), TypeError);
+assertThrows(() => gen(), TypeError);
+%OptimizeFunctionOnNextCall(gen);
+assertThrows(() => gen(), TypeError);
diff --git a/deps/v8/test/mjsunit/messages.js b/deps/v8/test/mjsunit/messages.js
index a57e1fcf1c..934a731e8f 100644
--- a/deps/v8/test/mjsunit/messages.js
+++ b/deps/v8/test/mjsunit/messages.js
@@ -70,12 +70,24 @@ test(function() {
// kCalledOnNullOrUndefined
test(function() {
+ String.prototype.includes.call(null);
+}, "String.prototype.includes called on null or undefined", TypeError);
+
+test(function() {
Array.prototype.shift.call(null);
}, "Array.prototype.shift called on null or undefined", TypeError);
test(function() {
- String.prototype.includes.call(null);
-}, "String.prototype.includes called on null or undefined", TypeError);
+ String.prototype.trim.call(null);
+}, "String.prototype.trim called on null or undefined", TypeError);
+
+test(function() {
+ String.prototype.trimLeft.call(null);
+}, "String.prototype.trimLeft called on null or undefined", TypeError);
+
+test(function() {
+ String.prototype.trimRight.call(null);
+}, "String.prototype.trimRight called on null or undefined", TypeError);
// kCannotFreezeArrayBufferView
test(function() {
@@ -428,6 +440,11 @@ test(function() {
new Uint16Array(-1);
}, "Invalid typed array length: -1", RangeError);
+// kThrowInvalidStringLength
+test(function() {
+ "a".repeat(1 << 30);
+}, "Invalid string length", RangeError);
+
// kNormalizationForm
test(function() {
"".normalize("ABC");
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index 77f8b7605b..d3db2e2e94 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -550,7 +550,7 @@ var failWithMessage;
try {
success(result);
} catch (e) {
- failWithMessage(e);
+ failWithMessage(String(e));
}
},
result => {
@@ -582,7 +582,8 @@ var failWithMessage;
return OptimizationStatusImpl(fun, sync_opt);
}
- assertUnoptimized = function assertUnoptimized(fun, sync_opt, name_opt) {
+ assertUnoptimized = function assertUnoptimized(fun, sync_opt, name_opt,
+ skip_if_maybe_deopted = true) {
if (sync_opt === undefined) sync_opt = "";
var opt_status = OptimizationStatus(fun, sync_opt);
// Tests that use assertUnoptimized() do not make sense if --always-opt
@@ -590,7 +591,8 @@ var failWithMessage;
assertFalse((opt_status & V8OptimizationStatus.kAlwaysOptimize) !== 0,
"test does not make sense with --always-opt");
assertTrue((opt_status & V8OptimizationStatus.kIsFunction) !== 0, name_opt);
- if ((opt_status & V8OptimizationStatus.kMaybeDeopted) !== 0) {
+ if (skip_if_maybe_deopted &&
+ (opt_status & V8OptimizationStatus.kMaybeDeopted) !== 0) {
// When --deopt-every-n-times flag is specified it's no longer guaranteed
// that particular function is still deoptimized, so keep running the test
// to stress test the deoptimizer.
@@ -599,7 +601,8 @@ var failWithMessage;
assertFalse((opt_status & V8OptimizationStatus.kOptimized) !== 0, name_opt);
}
- assertOptimized = function assertOptimized(fun, sync_opt, name_opt) {
+ assertOptimized = function assertOptimized(fun, sync_opt, name_opt,
+ skip_if_maybe_deopted = true) {
if (sync_opt === undefined) sync_opt = "";
var opt_status = OptimizationStatus(fun, sync_opt);
// Tests that use assertOptimized() do not make sense if --no-opt
@@ -607,7 +610,8 @@ var failWithMessage;
assertFalse((opt_status & V8OptimizationStatus.kNeverOptimize) !== 0,
"test does not make sense with --no-opt");
assertTrue((opt_status & V8OptimizationStatus.kIsFunction) !== 0, name_opt);
- if ((opt_status & V8OptimizationStatus.kMaybeDeopted) !== 0) {
+ if (skip_if_maybe_deopted &&
+ (opt_status & V8OptimizationStatus.kMaybeDeopted) !== 0) {
// When --deopt-every-n-times flag is specified it's no longer guaranteed
// that particular function is still optimized, so keep running the test
// to stress test the deoptimizer.
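// A hypothetical call site for the new skip_if_maybe_deopted parameter
// (the default true keeps the old lenient behavior; false makes the
// assertion strict even under --deopt-every-n-times). The function name
// here is illustrative only:
function probe() { return 1; }
probe();
%OptimizeFunctionOnNextCall(probe);
probe();
assertOptimized(probe, undefined, "probe stays optimized", false);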
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 4f49fdd595..b1b6f6aeb1 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -328,6 +328,7 @@
'unicodelctest-no-optimization': [PASS, SLOW],
'unicodelctest': [PASS, SLOW],
'unicode-test': [PASS, SLOW],
+ 'wasm/atomics': [PASS, SLOW],
'whitespaces': [PASS, SLOW],
}], # 'arch == arm64'
@@ -656,4 +657,10 @@
'mjsunit-assertion-error' : [SKIP],
}], # no_harness
+##############################################################################
+['arch != x64 or deopt_fuzzer', {
+  # Skip stress-deopt-count tests since they are x64-only.
+ 'compiler/stress-deopt-count-*': [SKIP],
+}], # arch != x64 or deopt_fuzzer
+
]
diff --git a/deps/v8/test/mjsunit/optimized-foreach-polymorph.js b/deps/v8/test/mjsunit/optimized-foreach-polymorph.js
new file mode 100644
index 0000000000..ed4958354f
--- /dev/null
+++ b/deps/v8/test/mjsunit/optimized-foreach-polymorph.js
@@ -0,0 +1,111 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-gc --turbo-inline-array-builtins
+
+var a = [0, 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,0,0];
+var b = [{}, {}];
+var c = [,,,,,2,3,4];
+var d = [0.5,3,4];
+var e = [,,,,0.5,3,4];
+
+// Make sure that calls to forEach handle a certain degree of polymorphism
+// (packed arrays only, so no hole check is needed).
+(function() {
+ var result = 0;
+ var polymorph1 = function(arg) {
+ var sum = function(v,i,o) {
+ result += i;
+ }
+ arg.forEach(sum);
+ }
+ polymorph1(a);
+ polymorph1(a);
+ polymorph1(b);
+ polymorph1(a);
+ polymorph1(a);
+ %OptimizeFunctionOnNextCall(polymorph1);
+ polymorph1(a);
+ polymorph1(b);
+ assertEquals(1757, result);
+})();
+
+// Make sure that calls to forEach handle a certain degree of polymorphism.
+(function() {
+ var result = 0;
+ var polymorph1 = function(arg) {
+ var sum = function(v,i,o) {
+ result += i;
+ }
+ arg.forEach(sum);
+ }
+ polymorph1(a);
+ polymorph1(a);
+ polymorph1(b);
+ polymorph1(a);
+ polymorph1(c);
+ polymorph1(a);
+ %OptimizeFunctionOnNextCall(polymorph1);
+ polymorph1(a);
+ polymorph1(b);
+ assertEquals(1775, result);
+})();
+
+// Make sure that calls to forEach with mixed object/double arrays don't inline
+// forEach.
+(function() {
+ var result = 0;
+ var polymorph1 = function(arg) {
+ var sum = function(v,i,o) {
+ result += i;
+ }
+ arg.forEach(sum);
+ }
+ polymorph1(a);
+ polymorph1(a);
+ polymorph1(b);
+ polymorph1(a);
+ polymorph1(d);
+ polymorph1(a);
+ %OptimizeFunctionOnNextCall(polymorph1);
+ polymorph1(a);
+ polymorph1(b);
+ assertEquals(1760, result);
+})();
+
+// Make sure that calls to forEach with double arrays get the right result.
+(function() {
+ var result = 0;
+ var polymorph1 = function(arg) {
+ var sum = function(v,i,o) {
+ result += v;
+ }
+ arg.forEach(sum);
+ }
+ polymorph1(d);
+ polymorph1(d);
+ polymorph1(d);
+ %OptimizeFunctionOnNextCall(polymorph1);
+ polymorph1(d);
+ polymorph1(d);
+ assertEquals(37.5, result);
+})();
+
+// Make sure that calls to forEach with mixed double arrays get the right result.
+(function() {
+ var result = 0;
+ var polymorph1 = function(arg) {
+ var sum = function(v,i,o) {
+ result += v;
+ }
+ arg.forEach(sum);
+ }
+ polymorph1(d);
+ polymorph1(e);
+ polymorph1(d);
+ %OptimizeFunctionOnNextCall(polymorph1);
+ polymorph1(d);
+ polymorph1(e);
+ assertEquals(37.5, result);
+})();
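// The hole-y arrays above contribute less because Array.prototype.forEach
// skips holes entirely: for c = [,,,,,2,3,4] the callback only sees indices
// 5, 6 and 7, which sum to 18 (exactly the difference between the 1775 and
// 1757 totals). A minimal standalone check:
let holeSum = 0;
[,,,,,2,3,4].forEach((v, i) => { holeSum += i; });
console.log(holeSum);  // 18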
diff --git a/deps/v8/test/mjsunit/regress/regress-2435.js b/deps/v8/test/mjsunit/regress/regress-2435.js
new file mode 100644
index 0000000000..05a4027339
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2435.js
@@ -0,0 +1,25 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function arrayLikeToString(a) {
+ return String.fromCharCode.apply(this, a);
+}
+
+const klasses = [
+ Int8Array,
+ Uint8Array,
+ Uint8ClampedArray,
+ Int16Array,
+ Uint16Array,
+ Int32Array,
+ Uint32Array,
+ Float32Array,
+ Float64Array
+];
+const string = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789';
+
+for (const klass of klasses) {
+ const array = klass.from(string, s => s.charCodeAt(0));
+ assertEquals(string, arrayLikeToString(array));
+}
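// The idiom above relies on Function.prototype.apply accepting any
// array-like, including typed arrays, as its arguments list. A standalone
// check of the same pattern:
console.log(String.fromCharCode.apply(null, new Uint8Array([72, 105])));  // "Hi"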
diff --git a/deps/v8/test/mjsunit/regress/regress-6838-1.js b/deps/v8/test/mjsunit/regress/regress-6838-1.js
new file mode 100644
index 0000000000..bab6a194d3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-6838-1.js
@@ -0,0 +1,33 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function TestMathMaxOnLargeInt() {
+ function Module(stdlib) {
+ "use asm";
+ var max = stdlib.Math.max;
+ function f() {
+ return max(42,0xffffffff);
+ }
+ return f;
+ }
+ var f = Module(this);
+ assertEquals(0xffffffff, f());
+ assertFalse(%IsAsmWasmCode(Module));
+})();
+
+(function TestMathMinOnLargeInt() {
+ function Module(stdlib) {
+ "use asm";
+ var min = stdlib.Math.min;
+ function f() {
+ return min(42,0xffffffff);
+ }
+ return f;
+ }
+ var f = Module(this);
+ assertEquals(42, f());
+ assertFalse(%IsAsmWasmCode(Module));
+})();
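// A plausible reading of the failures above: in the asm.js type system the
// literal 0xffffffff (4294967295) is outside the signed int range that
// Math.min/Math.max accept for integer arguments, so validation fails and
// the module falls back to regular JavaScript, where the expected results
// (0xffffffff and 42) still hold. A hypothetical variant with an in-range
// literal that should validate:
function ModuleInRange(stdlib) {
  "use asm";
  var max = stdlib.Math.max;
  function f() {
    return max(42, 2147483647) | 0;  // both arguments fit in signed int range
  }
  return f;
}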
diff --git a/deps/v8/test/mjsunit/regress/regress-6838-2.js b/deps/v8/test/mjsunit/regress/regress-6838-2.js
new file mode 100644
index 0000000000..31b94b43c2
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-6838-2.js
@@ -0,0 +1,101 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function TestMathCeilReturningFloatish() {
+ function Module(stdlib) {
+ "use asm";
+ var ceil = stdlib.Math.ceil;
+ var fround = stdlib.Math.fround;
+ function f(a) {
+ a = fround(a);
+ return ceil(a);
+ }
+ return f;
+ }
+ var f = Module(this);
+ assertEquals(3, f(2.2));
+ assertFalse(%IsAsmWasmCode(Module));
+})();
+
+(function TestMathFloorReturningFloatish() {
+ function Module(stdlib) {
+ "use asm";
+ var floor = stdlib.Math.floor;
+ var fround = stdlib.Math.fround;
+ function f(a) {
+ a = fround(a);
+ return floor(a);
+ }
+ return f;
+ }
+ var f = Module(this);
+ assertEquals(2, f(2.2));
+ assertFalse(%IsAsmWasmCode(Module));
+})();
+
+(function TestMathSqrtReturningFloatish() {
+ function Module(stdlib) {
+ "use asm";
+ var sqrt = stdlib.Math.sqrt;
+ var fround = stdlib.Math.fround;
+ function f(a) {
+ a = fround(a);
+ return sqrt(a);
+ }
+ return f;
+ }
+ var f = Module(this);
+ assertEquals(Math.sqrt(Math.fround(2.2)), f(2.2));
+ assertFalse(%IsAsmWasmCode(Module));
+})();
+
+(function TestMathAbsReturningFloatish() {
+ function Module(stdlib) {
+ "use asm";
+ var abs = stdlib.Math.abs;
+ var fround = stdlib.Math.fround;
+ function f(a) {
+ a = fround(a);
+ return abs(a);
+ }
+ return f;
+ }
+ var f = Module(this);
+ assertEquals(Math.fround(2.2), f(-2.2));
+ assertFalse(%IsAsmWasmCode(Module));
+})();
+
+(function TestMathMinReturningFloat() {
+ function Module(stdlib) {
+ "use asm";
+ var min = stdlib.Math.min;
+ var fround = stdlib.Math.fround;
+ function f(a) {
+ a = fround(a);
+ return min(a, a);
+ }
+ return f;
+ }
+ var f = Module(this);
+ assertEquals(Math.fround(2.2), f(2.2));
+ assertTrue(%IsAsmWasmCode(Module));
+})();
+
+(function TestMathMaxReturningFloat() {
+ function Module(stdlib) {
+ "use asm";
+ var max = stdlib.Math.max;
+ var fround = stdlib.Math.fround;
+ function f(a) {
+ a = fround(a);
+ return max(a, a);
+ }
+ return f;
+ }
+ var f = Module(this);
+ assertEquals(Math.fround(2.2), f(2.2));
+ assertTrue(%IsAsmWasmCode(Module));
+})();
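// The split above matches the asm.js typing rules: ceil/floor/sqrt/abs
// applied to a float produce "floatish", which is not a legal return type
// until coerced back through fround, while min/max on floats produce float
// directly. A hypothetical variant of the failing shape that should
// validate, assuming those rules:
function ModuleCoerced(stdlib) {
  "use asm";
  var ceil = stdlib.Math.ceil;
  var fround = stdlib.Math.fround;
  function f(a) {
    a = fround(a);
    return fround(ceil(a));  // fround() coerces floatish back to float
  }
  return f;
}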
diff --git a/deps/v8/test/mjsunit/regress/regress-6838-3.js b/deps/v8/test/mjsunit/regress/regress-6838-3.js
new file mode 100644
index 0000000000..639ffa5da7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-6838-3.js
@@ -0,0 +1,39 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function TestMathAbsReturningUnsigned() {
+ function Module(stdlib) {
+ "use asm";
+    var abs = stdlib.Math.abs;
+ function f(a, b) {
+ a = a | 0;
+ b = b | 0;
+ return (abs(a >> 0) == (b >>> 0)) | 0;
+ }
+ return f;
+ }
+ var f = Module(this);
+ assertEquals(0, f(1, 2));
+ assertEquals(1, f(23, 23));
+ assertEquals(1, f(-42, 42));
+ assertEquals(1, f(-0x7fffffff, 0x7fffffff));
+ assertEquals(1, f(-0x80000000, 0x80000000));
+ assertTrue(%IsAsmWasmCode(Module));
+})();
+
+(function TestMathAbsOverflowSignedValue() {
+ function Module(stdlib) {
+ "use asm";
+    var abs = stdlib.Math.abs;
+ function f() {
+ return (abs(-0x80000000) > 0) | 0;
+ }
+ return f;
+ }
+ var f = Module(this);
+ assertEquals(1, f());
+ assertTrue(%IsAsmWasmCode(Module));
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-6907.js b/deps/v8/test/mjsunit/regress/regress-6907.js
new file mode 100644
index 0000000000..0749365fed
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-6907.js
@@ -0,0 +1,21 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function TestDematerializedContextInBuiltin() {
+ var f = function() {
+ var b = [1,2,3];
+ var callback = function(v,i,o) {
+ %_DeoptimizeNow();
+ };
+ try { throw 0 } catch(e) {
+ return b.forEach(callback);
+ }
+ }
+ f();
+ f();
+ %OptimizeFunctionOnNextCall(f);
+ f();
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-719380.js b/deps/v8/test/mjsunit/regress/regress-719380.js
index 18d541a5fe..4d41609c51 100644
--- a/deps/v8/test/mjsunit/regress/regress-719380.js
+++ b/deps/v8/test/mjsunit/regress/regress-719380.js
@@ -3,5 +3,5 @@
// found in the LICENSE file.
TypeError.prototype.__defineGetter__("name", () => { throw 42; });
-console.log({ toString: () => { throw new TypeError() }});
+try { console.log({ toString: () => { throw new TypeError() }}); } catch (e) {}
try { new WebAssembly.Table({}); } catch (e) {}
diff --git a/deps/v8/test/mjsunit/regress/regress-760268.js b/deps/v8/test/mjsunit/regress/regress-760268.js
new file mode 100644
index 0000000000..6884ae1dca
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-760268.js
@@ -0,0 +1,11 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var obj = this;
+var handler = {
+ has: function() { return false; }
+};
+var proxy = new Proxy(obj, handler);
+Object.defineProperty(obj, "nonconf", {});
+assertThrows("'nonconf' in proxy");
diff --git a/deps/v8/test/mjsunit/regress/regress-760790.js b/deps/v8/test/mjsunit/regress/regress-760790.js
new file mode 100644
index 0000000000..75770bad18
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-760790.js
@@ -0,0 +1,11 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function g() {
+ var a = Array(0);
+ a[0]++;
+}
+g();
+g();
+g();
diff --git a/deps/v8/test/mjsunit/regress/regress-761639.js b/deps/v8/test/mjsunit/regress/regress-761639.js
new file mode 100644
index 0000000000..a5fa69f6d8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-761639.js
@@ -0,0 +1,10 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Regression test for hitting a DCHECK in StoreProxy.
+
+
+for (var i = 0; i < 10; i++) {
+ __proto__ = new Proxy({}, { getPrototypeOf() { } });
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-772190.js b/deps/v8/test/mjsunit/regress/regress-772190.js
new file mode 100644
index 0000000000..008ed63b4d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-772190.js
@@ -0,0 +1,10 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --always-opt
+
+assertThrows(function() {
+ __v_13383[4];
+ let __v_13383 = {};
+});
diff --git a/deps/v8/test/mjsunit/regress/regress-774475.js b/deps/v8/test/mjsunit/regress/regress-774475.js
new file mode 100644
index 0000000000..b11ea6c231
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-774475.js
@@ -0,0 +1,938 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var o = function f3() {
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-781218.js b/deps/v8/test/mjsunit/regress/regress-781218.js
new file mode 100644
index 0000000000..ae00cc5c08
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-781218.js
@@ -0,0 +1,43 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var m = new Map();
+
+function C() { }
+
+// Make sure slack tracking kicks in and shrinks the default size to prevent
+// any further in-object properties.
+%CompleteInobjectSlackTracking(new C());
+
+function f(o) {
+ o.x = true;
+}
+
+// Warm up {f}.
+f(new C());
+f(new C());
+
+
+var o = new C();
+%HeapObjectVerify(o);
+
+m.set(o, 1); // This creates hash code on o.
+
+// Add an out-of-object property.
+o.x = true;
+%HeapObjectVerify(o);
+// Delete the property (so we have no out-of-object properties).
+delete o.x;
+%HeapObjectVerify(o);
+
+
+// Ensure that growing the properties backing store in optimized code preserves
+// the hash.
+%OptimizeFunctionOnNextCall(f);
+f(o);
+
+%HeapObjectVerify(o);
+assertEquals(1, m.get(o));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-537444.js b/deps/v8/test/mjsunit/regress/regress-crbug-537444.js
index 6f56fd148d..d6fe6b89a3 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-537444.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-537444.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --stress-inline
"use strict";
@@ -19,8 +19,6 @@ function h(x) {
return z + 1;
}
-%SetForceInlineFlag(g);
-%SetForceInlineFlag(f);
h(1);
h(1);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-593697-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-593697-2.js
index c8af4a4e08..c26fdfdf67 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-593697-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-593697-2.js
@@ -2,12 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --stress-inline
"use strict";
-%SetForceInlineFlag(Math.cos);
-
var f5 = (function f6(stdlib) {
"use asm";
var cos = stdlib.Math.cos;
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-598998.js b/deps/v8/test/mjsunit/regress/regress-crbug-598998.js
index bbabf1e31d..a2a02623f5 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-598998.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-598998.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --stress-inline
"use strict";
@@ -22,8 +22,6 @@ function h(x) {
g(x, 1);
}
-%SetForceInlineFlag(g);
-%SetForceInlineFlag(f);
%NeverOptimizeFunction(deopt_function);
h(1);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-608278.js b/deps/v8/test/mjsunit/regress/regress-crbug-608278.js
index 251ecad86c..c8d2fcc745 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-608278.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-608278.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --stress-inline
"use strict";
@@ -21,7 +21,6 @@ function h() {
function g(v) {
return h();
}
-%SetForceInlineFlag(g);
function f1() {
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-647217.js b/deps/v8/test/mjsunit/regress/regress-crbug-647217.js
index e3968a19d6..ab45c9afa9 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-647217.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-647217.js
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --stack-size=100
+// Flags: --allow-natives-syntax --stack-size=100 --stress-inline
var source = "return 1" + new Array(2048).join(' + a') + "";
eval("function g(a) {" + source + "}");
-%SetForceInlineFlag(g);
function f(a) { return g(a) }
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-714696.js b/deps/v8/test/mjsunit/regress/regress-crbug-714696.js
index 16b09604e9..ad5925478d 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-714696.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-714696.js
@@ -5,6 +5,6 @@
if (this.Intl) {
new Intl.v8BreakIterator();
new Intl.DateTimeFormat();
- console.log({ toString: function() { throw 1; }});
+ try { console.log({ toString: function() { throw 1; }}); } catch (e) {}
new Intl.v8BreakIterator();
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-722871.js b/deps/v8/test/mjsunit/regress/regress-crbug-722871.js
new file mode 100644
index 0000000000..c5b7958f49
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-722871.js
@@ -0,0 +1,113 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+let sab = new SharedArrayBuffer(10 * 4);
+let memory = new Int32Array(sab);
+let workers = [];
+let runningWorkers = 0;
+
+function startWorker(script) {
+ let worker = new Worker(script);
+ worker.done = false;
+ worker.idx = workers.length;
+ workers.push(worker);
+ worker.postMessage(memory);
+ ++runningWorkers;
+};
+
+let shared = `
+ function wait(memory, index, waitCondition, wakeCondition) {
+ while (memory[index] == waitCondition) {
+ var result = Atomics.wait(memory, index, waitCondition);
+ switch (result) {
+ case 'not-equal':
+ case 'ok':
+ break;
+ default:
+ postMessage('Error: bad result from wait: ' + result);
+ break;
+ }
+ var value = memory[index];
+ if (value != wakeCondition) {
+ postMessage(
+ 'Error: wait returned not-equal but the memory has a bad value: ' +
+ value);
+ }
+ }
+ var value = memory[index];
+ if (value != wakeCondition) {
+ postMessage(
+ 'Error: done waiting but the memory has a bad value: ' + value);
+ }
+ }
+
+ function wake(memory, index) {
+ var result = Atomics.wake(memory, index, 1);
+ if (result != 0 && result != 1) {
+ postMessage('Error: bad result from wake: ' + result);
+ }
+ }
+`;
+
+let worker1 = startWorker(shared + `
+ onmessage = function(msg) {
+ let memory = msg;
+ const didStartIdx = 0;
+ const shouldGoIdx = 1;
+ const didEndIdx = 2;
+
+ postMessage("started");
+ postMessage("memory: " + memory);
+ wait(memory, didStartIdx, 0, 1);
+ memory[shouldGoIdx] = 1;
+ wake(memory, shouldGoIdx);
+ wait(memory, didEndIdx, 0, 1);
+ postMessage("memory: " + memory);
+ postMessage("done");
+ };
+`);
+
+let worker2 = startWorker(shared + `
+ onmessage = function(msg) {
+ let memory = msg;
+ const didStartIdx = 0;
+ const shouldGoIdx = 1;
+ const didEndIdx = 2;
+
+ postMessage("started");
+ postMessage("memory: " + memory);
+ Atomics.store(memory, didStartIdx, 1);
+ wake(memory, didStartIdx);
+ wait(memory, shouldGoIdx, 0, 1);
+ Atomics.store(memory, didEndIdx, 1);
+ wake(memory, didEndIdx);
+ postMessage("memory: " + memory);
+ postMessage("done");
+ };
+`);
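+
+// Handshake between the two workers (sketch derived from the scripts above):
+//   worker2 stores didStartIdx=1 and wakes worker1, which waits on it;
+//   worker1 stores shouldGoIdx=1 and wakes worker2, which waits on it;
+//   worker2 stores didEndIdx=1 and wakes worker1, completing the handshake.
+// wait() loops until the wake condition is visible, so both workers reach
+// "done" regardless of how the waits and wakes interleave.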
+
+let running = true;
+while (running) {
+ for (let worker of workers) {
+ if (worker.done) continue;
+
+ let msg = worker.getMessage();
+ if (msg) {
+ switch (msg) {
+ case "done":
+ if (worker.done === false) {
+ print("worker #" + worker.idx + " done.");
+ worker.done = true;
+ if (--runningWorkers === 0) {
+ running = false;
+ }
+ }
+ break;
+
+ default:
+ print("msg from worker #" + worker.idx + ": " + msg);
+ break;
+ }
+ }
+ }
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-764219.js b/deps/v8/test/mjsunit/regress/regress-crbug-764219.js
new file mode 100644
index 0000000000..2a92d66d2e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-764219.js
@@ -0,0 +1,35 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function() {
+ function f(o) {
+ o.x = 42;
+ };
+
+ f({});
+ f(this);
+ f(this);
+})();
+
+(function() {
+ function f(o) {
+ o.y = 153;
+ };
+
+ Object.setPrototypeOf(this, new Proxy({}, {}));
+ f({});
+ f(this);
+ f(this);
+})();
+
+(function() {
+ function f(o) {
+ o.z = 153;
+ };
+
+ Object.setPrototypeOf(this, new Proxy({get z(){}}, {}));
+ f({});
+ f(this);
+ f(this);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-768080.js b/deps/v8/test/mjsunit/regress/regress-crbug-768080.js
new file mode 100644
index 0000000000..cfd1fc1f35
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-768080.js
@@ -0,0 +1,64 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function TestReflectConstructBogusNewTarget1() {
+ class C {}
+ function g() {
+ Reflect.construct(C, arguments, 23);
+ }
+ function f() {
+ return new g();
+ }
+ new C(); // Warm-up!
+ assertThrows(f, TypeError);
+ assertThrows(f, TypeError);
+ %OptimizeFunctionOnNextCall(f);
+ assertThrows(f, TypeError);
+})();
+
+(function TestReflectConstructBogusNewTarget2() {
+ class C {}
+ // Note that {unescape} is an example of a non-constructable function. If that
+ // ever changes and this test needs to be adapted, make sure to choose another
+ // non-constructable {JSFunction} object instead.
+ function g() {
+ Reflect.construct(C, arguments, unescape);
+ }
+ function f() {
+ return new g();
+ }
+ new C(); // Warm-up!
+ assertThrows(f, TypeError);
+ assertThrows(f, TypeError);
+ %OptimizeFunctionOnNextCall(f);
+ assertThrows(f, TypeError);
+})();
+
+(function TestReflectConstructBogusTarget() {
+ function g() {
+ Reflect.construct(23, arguments);
+ }
+ function f() {
+ return new g();
+ }
+ assertThrows(f, TypeError);
+ assertThrows(f, TypeError);
+ %OptimizeFunctionOnNextCall(f);
+ assertThrows(f, TypeError);
+})();
+
+(function TestReflectApplyBogusTarget() {
+ function g() {
+ Reflect.apply(23, this, arguments);
+ }
+ function f() {
+ return g();
+ }
+ assertThrows(f, TypeError);
+ assertThrows(f, TypeError);
+ %OptimizeFunctionOnNextCall(f);
+ assertThrows(f, TypeError);
+})();
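+
+// All four cases above throw for the same underlying reason: Reflect.construct
+// requires both its target and its new.target argument to be constructors, and
+// Reflect.apply requires a callable target; 23 and the non-constructable
+// unescape satisfy neither requirement.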
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-768367.js b/deps/v8/test/mjsunit/regress/regress-crbug-768367.js
new file mode 100644
index 0000000000..d1041f32ce
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-768367.js
@@ -0,0 +1,14 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+const o = {};
+
+function foo() { return o[4294967295]; }
+
+assertEquals(undefined, foo());
+assertEquals(undefined, foo());
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(undefined, foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-769852.js b/deps/v8/test/mjsunit/regress/regress-crbug-769852.js
new file mode 100644
index 0000000000..120ea0109e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-769852.js
@@ -0,0 +1,14 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(o) {
+ function g() {}
+ Object.keys(o).forEach(suite => g());
+}
+assertDoesNotThrow(() => f({}));
+assertDoesNotThrow(() => f({ x:0 }));
+%OptimizeFunctionOnNextCall(f);
+assertDoesNotThrow(() => f({ x:0 }));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-770543.js b/deps/v8/test/mjsunit/regress/regress-crbug-770543.js
new file mode 100644
index 0000000000..5397a499c3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-770543.js
@@ -0,0 +1,31 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function FunctionCallerFromInlinedBuiltin() {
+ function f() {
+ function g() {
+ Object.getOwnPropertyDescriptor(g, "caller");
+ };
+ [0].forEach(g);
+ }
+ f();
+ f();
+ %OptimizeFunctionOnNextCall(f);
+ f();
+})();
+
+(function FunctionArgumentsFromInlinedBuiltin() {
+ function g() {
+ g.arguments;
+ }
+ function f() {
+ [0].forEach(g);
+ }
+ f();
+ f();
+ %OptimizeFunctionOnNextCall(f);
+ f();
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-770581.js b/deps/v8/test/mjsunit/regress/regress-crbug-770581.js
new file mode 100644
index 0000000000..64edec97cd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-770581.js
@@ -0,0 +1,22 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(callback) {
+ [Object].forEach(callback);
+}
+
+function message_of_f() {
+ try {
+ f("a teapot");
+ } catch(e) {
+ return String(e);
+ }
+}
+
+assertEquals("TypeError: a teapot is not a function", message_of_f());
+assertEquals("TypeError: a teapot is not a function", message_of_f());
+%OptimizeFunctionOnNextCall(f);
+assertEquals("TypeError: a teapot is not a function", message_of_f());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-771971.js b/deps/v8/test/mjsunit/regress/regress-crbug-771971.js
new file mode 100644
index 0000000000..cb40db5aa3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-771971.js
@@ -0,0 +1,12 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f() { Object.is(); }
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-772056.js b/deps/v8/test/mjsunit/regress/regress-crbug-772056.js
new file mode 100644
index 0000000000..380bae356d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-772056.js
@@ -0,0 +1,17 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+var builder = new WasmModuleBuilder();
+builder.addImportedTable("x", "table", 1, 10000000);
+let module = new WebAssembly.Module(builder.toBuffer());
+let table = new WebAssembly.Table({element: "anyfunc",
+ initial: 1, maximum:1000000});
+let instance = new WebAssembly.Instance(module, {x: {table:table}});
+
+assertThrows(() => table.grow(Infinity), RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-772610.js b/deps/v8/test/mjsunit/regress/regress-crbug-772610.js
new file mode 100644
index 0000000000..d68ebbf2be
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-772610.js
@@ -0,0 +1,18 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --verify-heap --expose-gc
+
+function f() {
+ var o = [{
+ [Symbol.toPrimitive]() {}
+ }];
+ %_DeoptimizeNow();
+ return o.length;
+}
+assertEquals(1, f());
+assertEquals(1, f());
+%OptimizeFunctionOnNextCall(f);
+assertEquals(1, f());
+gc();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-772672.js b/deps/v8/test/mjsunit/regress/regress-crbug-772672.js
new file mode 100644
index 0000000000..86e738344a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-772672.js
@@ -0,0 +1,11 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() { return new Array(120 * 1024); }
+
+foo()[0] = 0.1;
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-772689.js b/deps/v8/test/mjsunit/regress/regress-crbug-772689.js
new file mode 100644
index 0000000000..32e220daa7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-772689.js
@@ -0,0 +1,23 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+const A = class A extends Array {
+ constructor() {
+ super();
+ this.y = 1;
+ }
+}
+
+function foo(x) {
+ var a = new A();
+ if (x) return a.y;
+}
+
+assertEquals(undefined, foo(false));
+assertEquals(undefined, foo(false));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(undefined, foo(false));
+assertEquals(1, foo(true));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-772720.js b/deps/v8/test/mjsunit/regress/regress-crbug-772720.js
new file mode 100644
index 0000000000..3e359f6c16
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-772720.js
@@ -0,0 +1,15 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var global;
+function f() {
+ var local = 'abcdefghijklmnopqrst';
+ local += 'abcdefghijkl' + (0 + global);
+ global += 'abcdefghijkl';
+}
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-772897.js b/deps/v8/test/mjsunit/regress/regress-crbug-772897.js
new file mode 100644
index 0000000000..c2e4b25fb0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-772897.js
@@ -0,0 +1,20 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function store(obj, name) {
+ return obj[name] = 0;
+}
+
+function f(obj) {
+ var key = {
+ toString() { throw new Error("boom"); }
+ };
+ store(obj, key);
+}
+
+(function() {
+ var proxy = new Proxy({}, {});
+ store(proxy, 0);
+ assertThrows(() => f(proxy), Error);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-774994.js b/deps/v8/test/mjsunit/regress/regress-crbug-774994.js
new file mode 100644
index 0000000000..5810417f79
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-774994.js
@@ -0,0 +1,34 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --preparser-scope-analysis
+
+function f() {
+ new class extends Object {
+ constructor() {
+ eval("super(); super.__f_10();");
+ }
+ }
+}
+assertThrows(f, TypeError);
+
+function g() {
+ let obj = {
+ m() {
+ eval("super.foo()");
+ }
+ }
+ obj.m();
+}
+assertThrows(g, TypeError);
+
+function h() {
+ let obj = {
+ get m() {
+ eval("super.foo()");
+ }
+ }
+ obj.m;
+}
+assertThrows(h, TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-783132.js b/deps/v8/test/mjsunit/regress/regress-crbug-783132.js
new file mode 100644
index 0000000000..600a6bf5b6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-783132.js
@@ -0,0 +1,15 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --verify-heap
+
+function f(o, v) {
+ try {
+ f(o, v + 1);
+ } catch (e) {
+ }
+ o[v] = 43.35 + v * 5.3;
+}
+
+f(Array.prototype, 0);
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-6940.js b/deps/v8/test/mjsunit/regress/regress-v8-6940.js
new file mode 100644
index 0000000000..c5bb6a950a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-6940.js
@@ -0,0 +1,9 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertTrue(/[ŸŶ]/i.test('ÿ'));
+assertTrue(/[ŸY]/i.test('ÿ'));
+
+assertTrue(/[YÝŸŶỲ]/i.test('ÿ'));
+assertTrue(/[YÝŸŶỲ]/iu.test('ÿ'));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-775710.js b/deps/v8/test/mjsunit/regress/wasm/regress-775710.js
new file mode 100644
index 0000000000..5e6fb8c50b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-775710.js
@@ -0,0 +1,20 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --allow-natives-syntax
+
+const kMaxLocals = 50000;
+const fn_template = '"use asm";\nfunction f() { LOCALS }\nreturn f;';
+for (var num_locals = kMaxLocals; num_locals < kMaxLocals + 2; ++num_locals) {
+ const fn_code = fn_template.replace(
+ 'LOCALS',
+ Array(num_locals)
+ .fill()
+ .map((_, idx) => 'var l' + idx + ' = 0;')
+ .join('\n'));
+ const asm_fn = new Function(fn_code);
+ const f = asm_fn();
+ f();
+ assertEquals(num_locals <= kMaxLocals, %IsAsmWasmCode(asm_fn));
+}
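+
+// Shape of the generated source for a given num_locals (illustration only):
+//   "use asm";
+//   function f() { var l0 = 0;
+//   var l1 = 0;
+//   ...
+//   var l49999 = 0; }
+//   return f;
+// The asm-to-wasm pipeline accepts at most kMaxLocals locals here, so
+// %IsAsmWasmCode(asm_fn) is true on the first iteration and false on the
+// second.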
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-648079.js b/deps/v8/test/mjsunit/regress/wasm/regression-648079.js
index 2fa6b4db7a..acc6146ef5 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-648079.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-648079.js
@@ -8,7 +8,6 @@ load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
// Non-standard opcodes.
-let kWasmS128 = 0x7b;
let kSig_s_v = makeSig([], [kWasmS128]);
let kExprS128LoadMem = 0xc0;
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-702460.js b/deps/v8/test/mjsunit/regress/wasm/regression-702460.js
index 2d63440255..73c01e13a0 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-702460.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-702460.js
@@ -6,7 +6,6 @@ load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
// Non-standard opcodes.
-let kWasmS128 = 0x7b;
let kSig_s_v = makeSig([], [kWasmS128]);
let kExprS128LoadMem = 0xc0;
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-763697.js b/deps/v8/test/mjsunit/regress/wasm/regression-763697.js
new file mode 100644
index 0000000000..faf74e1cff
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-763697.js
@@ -0,0 +1,15 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --no-experimental-wasm-simd
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+let builder = new WasmModuleBuilder();
+ builder.addFunction("main", kSig_i_i)
+ .addBody([kExprGetLocal, 0])
+ .addLocals({s128_count: 1});
+
+ assertFalse(WebAssembly.validate(builder.toBuffer()));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-769846.js b/deps/v8/test/mjsunit/regress/wasm/regression-769846.js
new file mode 100644
index 0000000000..297da84f5f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-769846.js
@@ -0,0 +1,15 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function Module() {
+ "use asm";
+ function div_(__v_6) {
+ __v_6 = __v_6 | 0;
+ }
+ return { f: div_}
+};
+var __f_0 = Module().f;
+__v_8 = [0];
+__v_8.__defineGetter__(0, function() { return __f_0(__v_8); });
+__v_8[0];
diff --git a/deps/v8/test/mjsunit/skipping-inner-functions-bailout.js b/deps/v8/test/mjsunit/skipping-inner-functions-bailout.js
index f09f1d9bb7..f2b6c5bc41 100644
--- a/deps/v8/test/mjsunit/skipping-inner-functions-bailout.js
+++ b/deps/v8/test/mjsunit/skipping-inner-functions-bailout.js
@@ -76,3 +76,9 @@ function TestMultiBailout3() {
function not_skippable_either() {}
}
TestMultiBailout3();
+
+// Regression test for
+// https://bugs.chromium.org/p/chromium/issues/detail?id=761980. The conditions
+// triggering a bailout occur in a context where we're not generating data
+// anyway (inside an arrow function). (This needs to be at top level.)
+x => { (y=eval()) => {} }
diff --git a/deps/v8/test/mjsunit/skipping-inner-functions.js b/deps/v8/test/mjsunit/skipping-inner-functions.js
index 51c9fd5534..e228b258c4 100644
--- a/deps/v8/test/mjsunit/skipping-inner-functions.js
+++ b/deps/v8/test/mjsunit/skipping-inner-functions.js
@@ -295,3 +295,73 @@ function TestSkippableFunctionInForOfHeaderAndBody() {
}
TestSkippableFunctionInForOfHeaderAndBody();
+
+(function TestSkippableGeneratorInSloppyBlock() {
+ var result = 0;
+
+ function lazy(ctxt_alloc_param) {
+ var ctxt_alloc_var = 10;
+ {
+ function *skip_me() {
+ result = ctxt_alloc_param + ctxt_alloc_var;
+ yield 3;
+ }
+ return skip_me;
+ }
+ }
+ // Test that parameters and variables of the outer function get context
+ // allocated even if we skip the inner function.
+ assertEquals(3, lazy(9)().next().value);
+ assertEquals(19, result);
+})();
+
+(function TestRestoringDataToAsyncArrowFunctionWithNonSimpleParams_1() {
+ // Regression test for
+ // https://bugs.chromium.org/p/chromium/issues/detail?id=765532
+ function lazy() {
+ // The arrow function is not skippable, but we need to traverse its scopes
+ // and restore data to them.
+ async(a=0) => { const d = 0; }
+ function skippable() {}
+ }
+ lazy();
+})();
+
+(function TestRestoringDataToAsyncArrowFunctionWithNonSimpleParams_2() {
+ // Regression test for
+ // https://bugs.chromium.org/p/chromium/issues/detail?id=765532
+ function lazy() {
+ // The arrow function is not skippable, but we need to traverse its scopes
+ // and restore data to them.
+ async(...a) => { const d = 0; }
+ function skippable() {}
+ }
+ lazy();
+})();
+
+(function TestSloppyBlockFunctionShadowingCatchVariable() {
+ // Regression test for
+ // https://bugs.chromium.org/p/chromium/issues/detail?id=771474
+ function lazy() {
+ try {
+ } catch (my_var) {
+ if (false) {
+ function my_var() { }
+ }
+ }
+ }
+ lazy();
+})();
+
+
+(function TestLazinessDecisionWithDefaultConstructors() {
+ // Regression test for
+ // https://bugs.chromium.org/p/chromium/issues/detail?id=773576
+
+ // The problem was that Parser and PreParser treated default constructors
+ // differently, and that threw off the "next / previous function is likely
+ // called" logic.
+
+ function lazy(p = (function() {}, class {}, function() {}, class { method1() { } })) { }
+ lazy();
+})();
diff --git a/deps/v8/test/mjsunit/string-charcodeat.js b/deps/v8/test/mjsunit/string-charcodeat.js
index 335c4b317f..6031096e0b 100644
--- a/deps/v8/test/mjsunit/string-charcodeat.js
+++ b/deps/v8/test/mjsunit/string-charcodeat.js
@@ -239,6 +239,3 @@ for (var i = 0; i < 5; i++) {
}
%OptimizeFunctionOnNextCall(directlyOnPrototype);
directlyOnPrototype();
-
-assertTrue(isNaN(%_StringCharCodeAt("ABC", -1)));
-assertTrue(isNaN(%_StringCharCodeAt("ABC", 4)));
diff --git a/deps/v8/test/mjsunit/third_party/regexp-pcre/regexp-pcre.js b/deps/v8/test/mjsunit/third_party/regexp-pcre/regexp-pcre.js
index 68b8f0525b..8b603e45d1 100644
--- a/deps/v8/test/mjsunit/third_party/regexp-pcre/regexp-pcre.js
+++ b/deps/v8/test/mjsunit/third_party/regexp-pcre/regexp-pcre.js
@@ -899,7 +899,7 @@ res[819] = /:/;
res[820] = /([\da-f:]+)$/i;
res[821] = /^.*\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})$/;
res[822] = /^(\d+)\s+IN\s+SOA\s+(\S+)\s+(\S+)\s*\(\s*$/;
-res[823] = /^[a-zA-Z\d][a-zA-Z\d\-]*(\.[a-zA-Z\d][a-zA-z\d\-]*)*\.$/;
+res[823] = /^[a-zA-Z\d][a-zA-Z\d\-]*(\.[a-zA-Z\d][a-zA-Z\d\-]*)*\.$/;
res[824] = /^\*\.[a-z]([a-z\-\d]*[a-z\d]+)?(\.[a-z]([a-z\-\d]*[a-z\d]+)?)*$/;
res[825] = /^(?=ab(de))(abd)(e)/;
res[826] = /^(?!(ab)de|x)(abd)(f)/;
diff --git a/deps/v8/test/mjsunit/type-profile/collect-type-profile.js b/deps/v8/test/mjsunit/type-profile/collect-type-profile.js
deleted file mode 100644
index d21d11d9d2..0000000000
--- a/deps/v8/test/mjsunit/type-profile/collect-type-profile.js
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --type-profile --allow-natives-syntax
-
-function check_collect_types(name, expected) {
- const type_profile = %TypeProfile(name);
- if (type_profile !== undefined) {
- const result = JSON.stringify(type_profile);
- print(result);
- assertEquals(expected, result, name + " failed");
-
- }
-}
-
-function testFunction(param, flag) {
- // We want to test 2 different return positions in one function.
- if (flag) {
- var first_var = param;
- return first_var;
- }
- var second_var = param;
- return second_var;
-}
-
-class MyClass {
- constructor() {}
-}
-
-var expected = `{}`;
-check_collect_types(testFunction, expected);
-
-testFunction({});
-testFunction(123, true);
-testFunction('hello');
-testFunction(123);
-
-expected = `{\"495\":[\"Object\",\"number\",\"string\",\"number\"],\"502\":[\"undefined\",\"boolean\",\"undefined\",\"undefined\"],\"691\":[\"Object\",\"number\",\"string\",\"number\"]}`;
-check_collect_types(testFunction, expected);
-
-testFunction(undefined);
-testFunction('hello', true);
-testFunction({x: 12}, true);
-testFunction({x: 12});
-testFunction(new MyClass());
-
-expected = `{\"495\":[\"Object\",\"number\",\"string\",\"number\",\"undefined\",\"string\",\"Object\",\"Object\",\"MyClass\"],\"502\":[\"undefined\",\"boolean\",\"undefined\",\"undefined\",\"undefined\",\"boolean\",\"boolean\",\"undefined\",\"undefined\"],\"691\":[\"Object\",\"number\",\"string\",\"number\",\"undefined\",\"string\",\"Object\",\"Object\",\"MyClass\"]}`;
-check_collect_types(testFunction, expected);
-
-
-function testReturnOfNonVariable() {
- return 32;
-}
-testReturnOfNonVariable();
-expected = `{\"1724\":[\"number\"]}`;
-check_collect_types(testReturnOfNonVariable, expected);
-
-// Return statement is reached but its expression is never really returned.
-function try_finally() {
- try {
- return 23;
- } finally {
- return "nope, string is better"
- }
-}
-try_finally();
-expected = `{\"2026\":[\"string\"]}`;
-check_collect_types(try_finally, expected);
-
-// Fall-off return.
-function fall_off() {
- //nothing
-}
-fall_off();
-expected = `{\"2180\":[\"undefined\"]}`;
-check_collect_types(fall_off, expected);
-
-// Do not collect types when the function is never run.
-function never_called() {}
-expected = `{}`;
-check_collect_types(never_called, expected);
-
-
-function several_params(a, b, c, d) {
- //nothing
-}
-several_params(2, 'foo', {}, new MyClass());
-expected = `{\"2448\":[\"number\"],\"2451\":[\"string\"],\"2454\":[\"Object\"],\"2457\":[\"MyClass\"],\"2474\":[\"undefined\"]}`;
-check_collect_types(several_params, expected);
diff --git a/deps/v8/test/mjsunit/unbox-double-arrays.js b/deps/v8/test/mjsunit/unbox-double-arrays.js
index 2bebddb449..d6fc0938f9 100644
--- a/deps/v8/test/mjsunit/unbox-double-arrays.js
+++ b/deps/v8/test/mjsunit/unbox-double-arrays.js
@@ -50,11 +50,6 @@ function force_to_fast_double_array(a) {
assertTrue(%HasDoubleElements(a));
}
-function make_object_like_array(size) {
- obj = new Object();
- obj.length = size;
- return obj;
-}
function testOneArrayType(allocator) {
var large_array = new allocator(large_array_size);
@@ -349,11 +344,18 @@ function testOneArrayType(allocator) {
assertTrue(%HasDoubleElements(large_array));
}
+class ArraySubclass extends Array {
+ constructor(...args) {
+ super(...args);
+ this.marker = 42;
+ }
+}
+
// Force gc here to start with a clean heap if we repeat this test multiple
// times.
gc();
-testOneArrayType(make_object_like_array);
testOneArrayType(Array);
+testOneArrayType(ArraySubclass);
var large_array = new Array(large_array_size);
force_to_fast_double_array(large_array);
diff --git a/deps/v8/test/mjsunit/value-of.js b/deps/v8/test/mjsunit/value-of.js
index 1a242c0ec4..88dad0a9b9 100644
--- a/deps/v8/test/mjsunit/value-of.js
+++ b/deps/v8/test/mjsunit/value-of.js
@@ -25,8 +25,18 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-function MyException() { }
+let valueOf = Object.prototype.valueOf;
+
+assertEquals('object', typeof valueOf.call(true));
+assertEquals('object', typeof valueOf.call(false));
+assertEquals('object', typeof valueOf.call(1.23));
+assertEquals('object', typeof valueOf.call(0));
+assertEquals('object', typeof valueOf.call('a'));
+assertEquals('object', typeof valueOf.call(Symbol.isConcatSpreadable));
+assertThrows(() => valueOf.call(undefined), TypeError);
+assertThrows(() => valueOf.call(null), TypeError);
+function MyException() { }
var o = new Object();
o.valueOf = function() { throw new MyException(); }
diff --git a/deps/v8/test/mjsunit/wasm/asm-with-wasm-off.js b/deps/v8/test/mjsunit/wasm/asm-with-wasm-off.js
index bf3bbe712f..2fec37d6e8 100644
--- a/deps/v8/test/mjsunit/wasm/asm-with-wasm-off.js
+++ b/deps/v8/test/mjsunit/wasm/asm-with-wasm-off.js
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --allow-natives-syntax
+// Flags: --noexpose-wasm --validate-asm --allow-natives-syntax
-// NOTE: This is in it's own file because it calls %DisallowCodegenFromStrings,
+// NOTE: This is in its own file because it calls %DisallowCodegenFromStrings,
// which messes with the isolate's state.
(function testAsmWithWasmOff() {
- %DisallowCodegenFromStrings();
+ %DisallowCodegenFromStrings(true);
function Module() {
'use asm';
function foo() {
diff --git a/deps/v8/test/mjsunit/wasm/atomics.js b/deps/v8/test/mjsunit/wasm/atomics.js
index 089accb45b..a8940af1d1 100644
--- a/deps/v8/test/mjsunit/wasm/atomics.js
+++ b/deps/v8/test/mjsunit/wasm/atomics.js
@@ -21,43 +21,77 @@ function Exchange(a, b) { return b; }
let maxSize = 10;
let memory = new WebAssembly.Memory({initial: 1, maximum: maxSize, shared: true});
-function GetAtomicBinOpFunction(wasmExpression) {
+function GetAtomicBinOpFunction(wasmExpression, alignment, offset) {
let builder = new WasmModuleBuilder();
- builder.addImportedMemory("m", "imported_mem");
+ builder.addImportedMemory("m", "imported_mem", 0, maxSize, "shared");
builder.addFunction("main", kSig_i_ii)
.addBody([
kExprGetLocal, 0,
kExprGetLocal, 1,
kAtomicPrefix,
- wasmExpression])
+ wasmExpression, alignment, offset])
.exportAs("main");
// Instantiate module, get function exports
let module = new WebAssembly.Module(builder.toBuffer());
- let instance = (new WebAssembly.Instance(module,
- {m: {imported_mem: memory}}));
+ let instance = new WebAssembly.Instance(module,
+ {m: {imported_mem: memory}});
return instance.exports.main;
}
-function GetAtomicCmpExchangeFunction(wasmExpression) {
+function GetAtomicCmpExchangeFunction(wasmExpression, alignment, offset) {
let builder = new WasmModuleBuilder();
- builder.addImportedMemory("m", "imported_mem");
+ builder.addImportedMemory("m", "imported_mem", 0, maxSize, "shared");
builder.addFunction("main", kSig_i_iii)
.addBody([
kExprGetLocal, 0,
kExprGetLocal, 1,
kExprGetLocal, 2,
kAtomicPrefix,
- wasmExpression])
+ wasmExpression, alignment, offset])
.exportAs("main");
// Instantiate module, get function exports
let module = new WebAssembly.Module(builder.toBuffer());
- let instance = (new WebAssembly.Instance(module,
- {m: {imported_mem: memory}}));
+ let instance = new WebAssembly.Instance(module,
+ {m: {imported_mem: memory}});
return instance.exports.main;
}
+function GetAtomicLoadFunction(wasmExpression, alignment, offset) {
+ let builder = new WasmModuleBuilder();
+ builder.addImportedMemory("m", "imported_mem", 0, maxSize, "shared");
+ builder.addFunction("main", kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0,
+ kAtomicPrefix,
+ wasmExpression, alignment, offset])
+ .exportAs("main");
+
+ // Instantiate module, get function exports
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module,
+ {m: {imported_mem: memory}});
+ return instance.exports.main;
+}
+
+function GetAtomicStoreFunction(wasmExpression, alignment, offset) {
+ let builder = new WasmModuleBuilder();
+ builder.addImportedMemory("m", "imported_mem", 0, maxSize, "shared");
+ builder.addFunction("main", kSig_v_ii)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kAtomicPrefix,
+ wasmExpression, alignment, offset])
+ .exportAs("main");
+
+ // Instantiate module, get function exports
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module,
+ {m: {imported_mem: memory}});
+ return instance.exports.main;
+}
function VerifyBoundsCheck(func, memtype_size) {
const kPageSize = 65536;
@@ -108,109 +142,109 @@ function Test8Op(operation, func) {
(function TestAtomicAdd() {
print("TestAtomicAdd");
- let wasmAdd = GetAtomicBinOpFunction(kExprI32AtomicAdd);
+ let wasmAdd = GetAtomicBinOpFunction(kExprI32AtomicAdd, 2, 0);
Test32Op(Add, wasmAdd);
})();
(function TestAtomicAdd16U() {
print("TestAtomicAdd16U");
- let wasmAdd = GetAtomicBinOpFunction(kExprI32AtomicAdd16U);
+ let wasmAdd = GetAtomicBinOpFunction(kExprI32AtomicAdd16U, 1, 0);
Test16Op(Add, wasmAdd);
})();
(function TestAtomicAdd8U() {
print("TestAtomicAdd8U");
- let wasmAdd = GetAtomicBinOpFunction(kExprI32AtomicAdd8U);
+ let wasmAdd = GetAtomicBinOpFunction(kExprI32AtomicAdd8U, 0, 0);
Test8Op(Add, wasmAdd);
})();
(function TestAtomicSub() {
print("TestAtomicSub");
- let wasmSub = GetAtomicBinOpFunction(kExprI32AtomicSub);
+ let wasmSub = GetAtomicBinOpFunction(kExprI32AtomicSub, 2, 0);
Test32Op(Sub, wasmSub);
})();
(function TestAtomicSub16U() {
print("TestAtomicSub16U");
- let wasmSub = GetAtomicBinOpFunction(kExprI32AtomicSub16U);
+ let wasmSub = GetAtomicBinOpFunction(kExprI32AtomicSub16U, 1, 0);
Test16Op(Sub, wasmSub);
})();
(function TestAtomicSub8U() {
print("TestAtomicSub8U");
- let wasmSub = GetAtomicBinOpFunction(kExprI32AtomicSub8U);
+ let wasmSub = GetAtomicBinOpFunction(kExprI32AtomicSub8U, 0, 0);
Test8Op(Sub, wasmSub);
})();
(function TestAtomicAnd() {
print("TestAtomicAnd");
- let wasmAnd = GetAtomicBinOpFunction(kExprI32AtomicAnd);
+ let wasmAnd = GetAtomicBinOpFunction(kExprI32AtomicAnd, 2, 0);
Test32Op(And, wasmAnd);
})();
(function TestAtomicAnd16U() {
print("TestAtomicAnd16U");
- let wasmAnd = GetAtomicBinOpFunction(kExprI32AtomicAnd16U);
+ let wasmAnd = GetAtomicBinOpFunction(kExprI32AtomicAnd16U, 1, 0);
Test16Op(And, wasmAnd);
})();
(function TestAtomicAnd8U() {
print("TestAtomicAnd8U");
- let wasmAnd = GetAtomicBinOpFunction(kExprI32AtomicAnd8U);
+ let wasmAnd = GetAtomicBinOpFunction(kExprI32AtomicAnd8U, 0, 0);
Test8Op(And, wasmAnd);
})();
(function TestAtomicOr() {
print("TestAtomicOr");
- let wasmOr = GetAtomicBinOpFunction(kExprI32AtomicOr);
+ let wasmOr = GetAtomicBinOpFunction(kExprI32AtomicOr, 2, 0);
Test32Op(Or, wasmOr);
})();
(function TestAtomicOr16U() {
print("TestAtomicOr16U");
- let wasmOr = GetAtomicBinOpFunction(kExprI32AtomicOr16U);
+ let wasmOr = GetAtomicBinOpFunction(kExprI32AtomicOr16U, 1, 0);
Test16Op(Or, wasmOr);
})();
(function TestAtomicOr8U() {
print("TestAtomicOr8U");
- let wasmOr = GetAtomicBinOpFunction(kExprI32AtomicOr8U);
+ let wasmOr = GetAtomicBinOpFunction(kExprI32AtomicOr8U, 0, 0);
Test8Op(Or, wasmOr);
})();
(function TestAtomicXor() {
print("TestAtomicXor");
- let wasmXor = GetAtomicBinOpFunction(kExprI32AtomicXor);
+ let wasmXor = GetAtomicBinOpFunction(kExprI32AtomicXor, 2, 0);
Test32Op(Xor, wasmXor);
})();
(function TestAtomicXor16U() {
print("TestAtomicXor16U");
- let wasmXor = GetAtomicBinOpFunction(kExprI32AtomicXor16U);
+ let wasmXor = GetAtomicBinOpFunction(kExprI32AtomicXor16U, 1, 0);
Test16Op(Xor, wasmXor);
})();
(function TestAtomicXor8U() {
print("TestAtomicXor8U");
- let wasmXor = GetAtomicBinOpFunction(kExprI32AtomicXor8U);
+ let wasmXor = GetAtomicBinOpFunction(kExprI32AtomicXor8U, 0, 0);
Test8Op(Xor, wasmXor);
})();
(function TestAtomicExchange() {
print("TestAtomicExchange");
- let wasmExchange = GetAtomicBinOpFunction(kExprI32AtomicExchange);
+ let wasmExchange = GetAtomicBinOpFunction(kExprI32AtomicExchange, 2, 0);
Test32Op(Exchange, wasmExchange);
})();
(function TestAtomicExchange16U() {
print("TestAtomicExchange16U");
- let wasmExchange = GetAtomicBinOpFunction(kExprI32AtomicExchange16U);
+ let wasmExchange = GetAtomicBinOpFunction(kExprI32AtomicExchange16U, 1, 0);
Test16Op(Exchange, wasmExchange);
})();
(function TestAtomicExchange8U() {
print("TestAtomicExchange8U");
- let wasmExchange = GetAtomicBinOpFunction(kExprI32AtomicExchange8U);
+ let wasmExchange = GetAtomicBinOpFunction(kExprI32AtomicExchange8U, 0, 0);
Test8Op(Exchange, wasmExchange);
})();
@@ -232,7 +266,7 @@ function TestCmpExchange(func, buffer, params, size) {
(function TestAtomicCompareExchange() {
print("TestAtomicCompareExchange");
let wasmCmpExchange =
- GetAtomicCmpExchangeFunction(kExprI32AtomicCompareExchange);
+ GetAtomicCmpExchangeFunction(kExprI32AtomicCompareExchange, 2, 0);
let i32 = new Uint32Array(memory.buffer);
let params = [0x00000001, 0x00000555, 0x00099999, 0xffffffff];
TestCmpExchange(wasmCmpExchange, i32, params, kMemtypeSize32);
@@ -241,7 +275,7 @@ function TestCmpExchange(func, buffer, params, size) {
(function TestAtomicCompareExchange16U() {
print("TestAtomicCompareExchange16U");
let wasmCmpExchange =
- GetAtomicCmpExchangeFunction(kExprI32AtomicCompareExchange16U);
+ GetAtomicCmpExchangeFunction(kExprI32AtomicCompareExchange16U, 1, 0);
let i16 = new Uint16Array(memory.buffer);
let params = [0x0001, 0x0555, 0x9999];
TestCmpExchange(wasmCmpExchange, i16, params, kMemtypeSize16);
@@ -250,8 +284,130 @@ function TestCmpExchange(func, buffer, params, size) {
(function TestAtomicCompareExchange8U() {
print("TestAtomicCompareExchange8U");
let wasmCmpExchange =
- GetAtomicCmpExchangeFunction(kExprI32AtomicCompareExchange8U);
+ GetAtomicCmpExchangeFunction(kExprI32AtomicCompareExchange8U, 0, 0);
let i8 = new Uint8Array(memory.buffer);
let params = [0x01, 0x0d, 0xf9];
TestCmpExchange(wasmCmpExchange, i8, params, kMemtypeSize8);
})();
+
+function TestLoad(func, buffer, value, size) {
+ for (let i = 0; i < buffer.length; i++) {
+ buffer[i] = value;
+ assertEquals(value, func(i * size) >>> 0);
+ }
+ VerifyBoundsCheck(func, size);
+}
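+// Note: the `>>> 0` is needed because wasm i32 results surface in JS as
+// signed 32-bit numbers; e.g. the 0xacedaced pattern used below would
+// otherwise read back as -1393709843.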
+
+(function TestAtomicLoad() {
+ print("TestAtomicLoad");
+ let wasmLoad = GetAtomicLoadFunction(kExprI32AtomicLoad, 2, 0);
+ let i32 = new Uint32Array(memory.buffer);
+ let value = 0xacedaced;
+ TestLoad(wasmLoad, i32, value, kMemtypeSize32);
+})();
+
+(function TestAtomicLoad16U() {
+ print("TestAtomicLoad16U");
+ let wasmLoad = GetAtomicLoadFunction(kExprI32AtomicLoad16U, 1, 0);
+ let i16 = new Uint16Array(memory.buffer);
+ let value = 0xaced;
+ TestLoad(wasmLoad, i16, value, kMemtypeSize16);
+})();
+
+(function TestAtomicLoad8U() {
+ print("TestAtomicLoad8U");
+ let wasmLoad = GetAtomicLoadFunction(kExprI32AtomicLoad8U, 0, 0);
+ let i8 = new Uint8Array(memory.buffer);
+ let value = 0xac;
+ TestLoad(wasmLoad, i8, value, kMemtypeSize8);
+})();
+
+function TestStore(func, buffer, value, size) {
+ for (let i = 0; i < buffer.length; i++) {
+ func(i * size, value)
+ assertEquals(value, buffer[i]);
+ }
+ VerifyBoundsCheck(func, size);
+}
+
+(function TestAtomicStore() {
+ print("TestAtomicStore");
+ let wasmStore = GetAtomicStoreFunction(kExprI32AtomicStore, 2, 0);
+ let i32 = new Uint32Array(memory.buffer);
+ let value = 0xacedaced;
+ TestStore(wasmStore, i32, value, kMemtypeSize32);
+})();
+
+(function TestAtomicStore16U() {
+ print("TestAtomicStore16U");
+ let wasmStore = GetAtomicStoreFunction(kExprI32AtomicStore16U, 1, 0);
+ let i16 = new Uint16Array(memory.buffer);
+ let value = 0xaced;
+ TestStore(wasmStore, i16, value, kMemtypeSize16);
+})();
+
+(function TestAtomicStore8U() {
+ print("TestAtomicStore8U");
+ let wasmStore = GetAtomicStoreFunction(kExprI32AtomicStore8U, 0, 0);
+ let i8 = new Uint8Array(memory.buffer);
+ let value = 0xac;
+ TestStore(wasmStore, i8, value, kMemtypeSize8);
+})();
+
+(function TestAtomicLoadStoreOffset() {
+ print("TestAtomicLoadStoreOffset");
+ var builder = new WasmModuleBuilder();
+ let memory = new WebAssembly.Memory({
+ initial: 16, maximum: 128, shared: true});
+ builder.addImportedMemory("m", "imported_mem", 16, 128, "shared");
+ builder.addFunction("loadStore", kSig_i_v)
+ .addBody([
+ kExprI32Const, 16,
+ kExprI32Const, 20,
+ kAtomicPrefix,
+ kExprI32AtomicStore, 0, 0xFC, 0xFF, 0x3a,
+ kExprI32Const, 16,
+ kAtomicPrefix,
+ kExprI32AtomicLoad, 0, 0xFC, 0xFF, 0x3a])
+ .exportAs("loadStore");
+ builder.addFunction("storeOob", kSig_v_v)
+ .addBody([
+ kExprI32Const, 16,
+ kExprI32Const, 20,
+ kAtomicPrefix,
+ kExprI32AtomicStore, 0, 0xFC, 0xFF, 0xFF, 0x3a])
+ .exportAs("storeOob");
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = (new WebAssembly.Instance(module,
+ {m: {imported_mem: memory}}));
+ let buf = memory.buffer;
+ assertEquals(20, instance.exports.loadStore());
+ assertTraps(kTrapMemOutOfBounds, instance.exports.storeOob);
+})();
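+// For reference (illustration only): after the alignment immediate, the bytes
+// form an unsigned LEB128 memory offset.
+//   0xFC, 0xFF, 0x3a       -> 124 + (127 << 7) + (58 << 14) = 966652, which
+//                             plus index 16 still fits in the 16-page memory
+//                             (16 * 65536 = 1048576 bytes), so loadStore
+//                             succeeds;
+//   0xFC, 0xFF, 0xFF, 0x3a -> 123731964, far past the end of memory, so
+//                             storeOob traps with kTrapMemOutOfBounds.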
+
+(function TestAtomicOpinLoop() {
+ print("TestAtomicOpinLoop");
+ var builder = new WasmModuleBuilder();
+ let memory = new WebAssembly.Memory({
+ initial: 16, maximum: 128, shared: true});
+ builder.addImportedMemory("m", "imported_mem", 16, 128, "shared");
+ builder.addFunction("main", kSig_i_v)
+ .addBody([
+ kExprLoop, kWasmStmt,
+ kExprI32Const, 16,
+ kExprI32Const, 20,
+ kAtomicPrefix,
+ kExprI32AtomicStore, 2, 0,
+ kExprI32Const, 16,
+ kAtomicPrefix,
+ kExprI32AtomicLoad, 2, 0,
+ kExprReturn,
+ kExprEnd,
+ kExprI32Const, 0
+ ])
+ .exportFunc();
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = (new WebAssembly.Instance(module,
+ {m: {imported_mem: memory}}));
+ assertEquals(20, instance.exports.main());
+})();
diff --git a/deps/v8/test/mjsunit/wasm/disable-trap-handler.js b/deps/v8/test/mjsunit/wasm/disable-trap-handler.js
new file mode 100644
index 0000000000..3389ba8cad
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/disable-trap-handler.js
@@ -0,0 +1,9 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-wasm-trap-handler
+
+// This test makes sure that --no-wasm-trap-handler has the correct effect.
+
+assertFalse(%IsWasmTrapHandlerEnabled());
diff --git a/deps/v8/test/mjsunit/wasm/disallow-codegen.js b/deps/v8/test/mjsunit/wasm/disallow-codegen.js
new file mode 100644
index 0000000000..3374a9efd7
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/disallow-codegen.js
@@ -0,0 +1,104 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --allow-natives-syntax
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+let kReturnValue = 19;
+
+let buffer = (function CreateBuffer() {
+ let builder = new WasmModuleBuilder();
+ builder.addMemory(1, 1, true);
+ builder.addFunction('main', kSig_i_v)
+ .addBody([kExprI32Const, kReturnValue])
+ .exportFunc();
+
+ return builder.toBuffer();
+})();
+
+%DisallowCodegenFromStrings(true);
+
+async function SyncTestOk() {
+ print('sync module compile (ok)...');
+ %DisallowCodegenFromStrings(false);
+ let module = new WebAssembly.Module(buffer);
+ assertInstanceof(module, WebAssembly.Module);
+}
+
+async function SyncTestFail() {
+ print('sync module compile (fail)...');
+ %DisallowCodegenFromStrings(true);
+ try {
+ let module = new WebAssembly.Module(buffer);
+ assertUnreachable();
+ } catch (e) {
+ print(" " + e);
+ assertInstanceof(e, WebAssembly.CompileError);
+ }
+}
+
+async function AsyncTestOk() {
+ print('async module compile (ok)...');
+ %DisallowCodegenFromStrings(false);
+ let promise = WebAssembly.compile(buffer);
+ assertPromiseResult(
+ promise, module => assertInstanceof(module, WebAssembly.Module));
+}
+
+async function AsyncTestFail() {
+ print('async module compile (fail)...');
+ %DisallowCodegenFromStrings(true);
+ try {
+ let m = await WebAssembly.compile(buffer);
+ assertUnreachable();
+ } catch (e) {
+ print(" " + e);
+ assertInstanceof(e, WebAssembly.CompileError);
+ }
+}
+
+async function StreamingTestOk() {
+ print('streaming module compile (ok)...');
+ // TODO(titzer): compileStreaming must be supplied by embedder.
+ // (and it takes a response, not a buffer)
+ %DisallowCodegenFromStrings(false);
+ if ("Function" != typeof WebAssembly.compileStreaming) {
+ print(" no embedder for streaming compilation");
+ return;
+ }
+ let promise = WebAssembly.compileStreaming(buffer);
+ assertPromiseResult(
+ promise, module => assertInstanceof(module, WebAssembly.Module));
+}
+
+async function StreamingTestFail() {
+ print('streaming module compile (fail)...');
+ %DisallowCodegenFromStrings(true);
+ // TODO(titzer): compileStreaming must be supplied by embedder.
+ // (and it takes a response, not a buffer)
+ if ("Function" != typeof WebAssembly.compileStreaming) {
+ print(" no embedder for streaming compilation");
+ return;
+ }
+ try {
+ let m = await WebAssembly.compileStreaming(buffer);
+ assertUnreachable();
+ } catch (e) {
+ print(" " + e);
+ assertInstanceof(e, WebAssembly.CompileError);
+ }
+}
+
+async function RunAll() {
+ await SyncTestOk();
+ await SyncTestFail();
+ await AsyncTestOk();
+ await AsyncTestFail();
+ await StreamingTestOk();
+ await StreamingTestFail();
+}
+
+assertPromiseResult(RunAll());
diff --git a/deps/v8/test/mjsunit/wasm/exceptions.js b/deps/v8/test/mjsunit/wasm/exceptions.js
index 8fcfd15ff4..74d8e7dfb5 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions.js
@@ -19,7 +19,7 @@ var test_throw = (function () {
kExprI32Const, 0,
kExprI32Ne,
kExprIf, kWasmStmt,
- kExprThrow, 0,
+ kExprThrow, 0,
kExprEnd,
kExprI32Const, 1
]).exportFunc();
@@ -36,8 +36,8 @@ assertEquals("function", typeof test_throw.exports.throw_if_param_not_zero);
// Test expected behavior of throws
assertEquals(1, test_throw.exports.throw_if_param_not_zero(0));
-assertWasmThrows([], function() { test_throw.exports.throw_if_param_not_zero(10) });
-assertWasmThrows([], function() { test_throw.exports.throw_if_param_not_zero(-1) });
+assertWasmThrows(0, [], function() { test_throw.exports.throw_if_param_not_zero(10) });
+assertWasmThrows(0, [], function() { test_throw.exports.throw_if_param_not_zero(-1) });
// Now that we know throwing works, we test catching the exceptions we raise.
var test_catch = (function () {
@@ -72,32 +72,315 @@ assertEquals("function", typeof test_catch.exports.simple_throw_catch_to_0_1);
assertEquals(0, test_catch.exports.simple_throw_catch_to_0_1(0));
assertEquals(1, test_catch.exports.simple_throw_catch_to_0_1(1));
+// Test that we can distinguish which exception was thrown.
+var test_catch_2 = (function () {
+ var builder = new WasmModuleBuilder();
+
+ builder.addException(kSig_v_v);
+ builder.addException(kSig_v_v);
+ builder.addException(kSig_v_v);
+ builder.addFunction("catch_different_exceptions", kSig_i_i)
+ .addBody([
+ kExprTry, kWasmI32,
+ kExprTry, kWasmI32,
+ kExprGetLocal, 0,
+ kExprI32Eqz,
+ kExprIf, kWasmStmt,
+ kExprThrow, 0,
+ kExprElse,
+ kExprGetLocal, 0,
+ kExprI32Const, 1,
+ kExprI32Eq,
+ kExprIf, kWasmStmt,
+ kExprThrow, 1,
+ kExprElse,
+ kExprThrow, 2,
+ kExprEnd,
+ kExprEnd,
+ kExprI32Const, 2,
+ kExprCatch, 0,
+ kExprI32Const, 3,
+ kExprEnd,
+ kExprCatch, 1,
+ kExprI32Const, 4,
+ kExprEnd
+ ]).exportFunc();
+ return builder.instantiate();
+})();
+
+assertFalse(test_catch_2 === undefined);
+assertFalse(test_catch_2 === null);
+assertFalse(test_catch_2 === 0);
+assertEquals("object", typeof test_catch_2.exports);
+assertEquals("function", typeof test_catch_2.exports.catch_different_exceptions);
+
+assertEquals(3, test_catch_2.exports.catch_different_exceptions(0));
+assertEquals(4, test_catch_2.exports.catch_different_exceptions(1));
+assertWasmThrows(2, [], function() { test_catch_2.exports.catch_different_exceptions(2) });
+
+// Test throwing an exception with multiple values.
+var test_throw_1_2 = (function() {
+ var builder = new WasmModuleBuilder();
+ builder.addException(kSig_v_ii);
+ builder.addFunction("throw_1_2", kSig_v_v)
+ .addBody([
+ kExprI32Const, 1,
+ kExprI32Const, 2,
+ kExprThrow, 0,
+ ]).exportFunc();
+
+ return builder.instantiate();
+})();
+
+assertFalse(test_throw_1_2 === undefined);
+assertFalse(test_throw_1_2 === null);
+assertFalse(test_throw_1_2 === 0);
+assertEquals("object", typeof test_throw_1_2.exports);
+assertEquals("function", typeof test_throw_1_2.exports.throw_1_2);
+
+assertWasmThrows(0, [0, 1, 0, 2], function() { test_throw_1_2.exports.throw_1_2(); });
+
+// Test throwing/catching the i32 parameter value.
+var test_throw_catch_param_i = (function () {
+ var builder = new WasmModuleBuilder();
+ builder.addException(kSig_v_i);
+ builder.addFunction("throw_catch_param", kSig_i_i)
+ .addBody([
+ kExprTry, kWasmI32,
+ kExprGetLocal, 0,
+ kExprThrow, 0,
+ kExprI32Const, 2,
+ kExprCatch, 0,
+ kExprReturn,
+ kExprEnd,
+ ]).exportFunc();
+
+ return builder.instantiate();
+})();
+
+assertFalse(test_throw_catch_param_i === undefined);
+assertFalse(test_throw_catch_param_i === null);
+assertFalse(test_throw_catch_param_i === 0);
+assertEquals("object", typeof test_throw_catch_param_i.exports);
+assertEquals("function",
+ typeof test_throw_catch_param_i.exports.throw_catch_param);
+
+assertEquals(0, test_throw_catch_param_i.exports.throw_catch_param(0));
+assertEquals(1, test_throw_catch_param_i.exports.throw_catch_param(1));
+assertEquals(10, test_throw_catch_param_i.exports.throw_catch_param(10));
+
+// Test the encoding of a thrown exception with an integer exception.
+
+var test_throw_param_i = (function () {
+ var builder = new WasmModuleBuilder();
+ builder.addException(kSig_v_i);
+ builder.addFunction("throw_param", kSig_v_i)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprThrow, 0,
+ ]).exportFunc();
+
+ return builder.instantiate();
+})();
+
+assertFalse(test_throw_param_i === undefined);
+assertFalse(test_throw_param_i === null);
+assertFalse(test_throw_param_i === 0);
+assertEquals("object", typeof test_throw_param_i.exports);
+assertEquals("function",
+ typeof test_throw_param_i.exports.throw_param);
+
+assertWasmThrows(0, [0, 5], function() { test_throw_param_i.exports.throw_param(5); });
+assertWasmThrows(0, [6, 31026],
+ function() { test_throw_param_i.exports.throw_param(424242); });
+
+// Test throwing/catching the f32 parameter value.
+var test_throw_catch_param_f = (function () {
+ var builder = new WasmModuleBuilder();
+ builder.addException(kSig_v_f);
+ builder.addFunction("throw_catch_param", kSig_f_f)
+ .addBody([
+ kExprTry, kWasmF32,
+ kExprGetLocal, 0,
+ kExprThrow, 0,
+ kExprF32Const, 0, 0, 0, 0,
+ kExprCatch, 0,
+ kExprReturn,
+ kExprEnd,
+ ]).exportFunc();
+
+ return builder.instantiate();
+})();
+
+assertFalse(test_throw_catch_param_f === undefined);
+assertFalse(test_throw_catch_param_f === null);
+assertFalse(test_throw_catch_param_f === 0);
+assertEquals("object", typeof test_throw_catch_param_f.exports);
+assertEquals("function",
+ typeof test_throw_catch_param_f.exports.throw_catch_param);
+
+assertEquals(5.0, test_throw_catch_param_f.exports.throw_catch_param(5.0));
+assertEquals(10.5, test_throw_catch_param_f.exports.throw_catch_param(10.5));
+
+// Test the encoding of a thrown exception with a float value.
+
+var test_throw_param_f = (function () {
+ var builder = new WasmModuleBuilder();
+ builder.addException(kSig_v_f);
+ builder.addFunction("throw_param", kSig_v_f)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprThrow, 0,
+ ]).exportFunc();
+
+ return builder.instantiate();
+})();
+
+assertFalse(test_throw_param_f === undefined);
+assertFalse(test_throw_param_f === null);
+assertFalse(test_throw_param_f === 0);
+assertEquals("object", typeof test_throw_param_f.exports);
+assertEquals("function",
+ typeof test_throw_param_f.exports.throw_param);
+
+assertWasmThrows(0, [16544, 0],
+ function() { test_throw_param_f.exports.throw_param(5.0); });
+assertWasmThrows(0, [16680, 0],
+ function() { test_throw_param_f.exports.throw_param(10.5); });
+
+// Test throwing/catching an I64 value
+var test_throw_catch_param_l = (function () {
+ var builder = new WasmModuleBuilder();
+ builder.addException(kSig_v_l);
+ builder.addFunction("throw_catch_param", kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprI64UConvertI32,
+ kExprSetLocal, 1,
+ kExprTry, kWasmI32,
+ kExprGetLocal, 1,
+ kExprThrow, 0,
+ kExprI32Const, 2,
+ kExprCatch, 0,
+ kExprGetLocal, 1,
+ kExprI64Eq,
+ kExprIf, kWasmI32,
+ kExprI32Const, 1,
+ kExprElse,
+ kExprI32Const, 0,
+ kExprEnd,
+ // TODO(kschimpf): Why is this return necessary?
+ kExprReturn,
+ kExprEnd,
+ ]).addLocals({i64_count: 1}).exportFunc();
+
+ return builder.instantiate();
+})();
+
+assertFalse(test_throw_catch_param_l === undefined);
+assertFalse(test_throw_catch_param_l === null);
+assertFalse(test_throw_catch_param_l === 0);
+assertEquals("object", typeof test_throw_catch_param_l.exports);
+assertEquals("function",
+ typeof test_throw_catch_param_l.exports.throw_catch_param);
+
+assertEquals(1, test_throw_catch_param_l.exports.throw_catch_param(5));
+assertEquals(1, test_throw_catch_param_l.exports.throw_catch_param(0));
+assertEquals(1, test_throw_catch_param_l.exports.throw_catch_param(-1));
+
+// Test the encoding of a thrown exception with an I64 value.
+
+var test_throw_param_l = (function () {
+ var builder = new WasmModuleBuilder();
+ builder.addException(kSig_v_l);
+ builder.addFunction("throw_param", kSig_v_ii)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprI64UConvertI32,
+ kExprI64Const, 32,
+ kExprI64Shl,
+ kExprGetLocal, 1,
+ kExprI64UConvertI32,
+ kExprI64Ior,
+ kExprThrow, 0
+ ]).exportFunc();
+
+ return builder.instantiate();
+})();
+
+assertFalse(test_throw_param_l === undefined);
+assertFalse(test_throw_param_l === null);
+assertFalse(test_throw_param_l === 0);
+assertEquals("object", typeof test_throw_param_l.exports);
+assertEquals("function",
+ typeof test_throw_param_l.exports.throw_param);
+
+assertWasmThrows(0, [0, 10, 0, 5],
+ function() { test_throw_param_l.exports.throw_param(10, 5); });
+assertWasmThrows(0, [65535, 65535, 0, 13],
+ function() { test_throw_param_l.exports.throw_param(-1, 13); });
+
+// Test throwing/catching the F64 parameter value
+var test_throw_catch_param_d = (function () {
+ var builder = new WasmModuleBuilder();
+ builder.addException(kSig_v_d);
+ builder.addFunction("throw_catch_param", kSig_d_d)
+ .addBody([
+ kExprTry, kWasmF64,
+ kExprGetLocal, 0,
+ kExprThrow, 0,
+ kExprF64Const, 0, 0, 0, 0, 0, 0, 0, 0,
+ kExprCatch, 0,
+ kExprReturn,
+ kExprEnd,
+ ]).exportFunc();
+
+ return builder.instantiate();
+})();
+
+assertFalse(test_throw_catch_param_d === undefined);
+assertFalse(test_throw_catch_param_d === null);
+assertFalse(test_throw_catch_param_d === 0);
+assertEquals("object", typeof test_throw_catch_param_d.exports);
+assertEquals("function",
+ typeof test_throw_catch_param_d.exports.throw_catch_param);
+
+assertEquals(5.0, test_throw_catch_param_d.exports.throw_catch_param(5.0));
+assertEquals(10.5, test_throw_catch_param_d.exports.throw_catch_param(10.5));
+
+// Test the encoding of a thrown exception with an f64 value.
+
+var test_throw_param_d = (function () {
+ var builder = new WasmModuleBuilder();
+ builder.addException(kSig_v_d);
+ builder.addFunction("throw_param", kSig_v_f)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprF64ConvertF32,
+ kExprThrow, 0
+ ]).exportFunc();
+
+ return builder.instantiate();
+})();
+
+assertFalse(test_throw_param_d === undefined);
+assertFalse(test_throw_param_d === null);
+assertFalse(test_throw_param_d === 0);
+assertEquals("object", typeof test_throw_param_d.exports);
+assertEquals("function",
+ typeof test_throw_param_d.exports.throw_param);
+
+assertWasmThrows(0, [16404, 0, 0, 0],
+ function() { test_throw_param_d.exports.throw_param(5.0); });
+assertWasmThrows(0, [16739, 4816, 0, 0],
+ function() { test_throw_param_d.exports.throw_param(10000000.5); });
+
/* TODO(kschimpf) Convert these tests to work for the proposed exceptions.
// The following methods do not attempt to catch the exception they raise.
var test_throw = (function () {
var builder = new WasmModuleBuilder();
- builder.addFunction("throw_param_if_not_zero", kSig_i_i)
- .addBody([
- kExprGetLocal, 0,
- kExprI32Const, 0,
- kExprI32Ne,
- kExprIf, kWasmStmt,
- kExprGetLocal, 0,
- kExprThrow,
- kExprEnd,
- kExprI32Const, 1
- ])
- .exportFunc()
-
- builder.addFunction("throw_20", kSig_v_v)
- .addBody([
- kExprI32Const, 20,
- kExprThrow,
- ])
- .exportFunc()
-
builder.addFunction("throw_expr_with_params", kSig_v_ddi)
.addBody([
// p2 * (p0 + min(p0, p1))|0 - 20
@@ -123,14 +406,9 @@ assertFalse(test_throw === undefined);
assertFalse(test_throw === null);
assertFalse(test_throw === 0);
assertEquals("object", typeof test_throw.exports);
-assertEquals("function", typeof test_throw.exports.throw_param_if_not_zero);
-assertEquals("function", typeof test_throw.exports.throw_20);
assertEquals("function", typeof test_throw.exports.throw_expr_with_params);
assertEquals(1, test_throw.exports.throw_param_if_not_zero(0));
-assertWasmThrows(10, function() { test_throw.exports.throw_param_if_not_zero(10) });
-assertWasmThrows(-1, function() { test_throw.exports.throw_param_if_not_zero(-1) });
-assertWasmThrows(20, test_throw.exports.throw_20);
assertWasmThrows(
-8, function() { test_throw.exports.throw_expr_with_params(1.5, 2.5, 4); });
assertWasmThrows(
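
The expected-value arrays passed to assertWasmThrows in the new tests follow one scheme: every thrown value is flattened into 16-bit chunks, most significant chunk first (an i32 becomes two entries, an i64 or f64 four). A short sketch (expectedI32Values and expectedF32Values are hypothetical helpers, assuming that encoding) reproduces the literals asserted above:

function expectedI32Values(value) {
  return [(value >>> 16) & 0xffff, value & 0xffff];  // [high 16, low 16]
}
function expectedF32Values(value) {
  // Reinterpret the f32 bit pattern as a u32, then split the same way.
  let bits = new Uint32Array(new Float32Array([value]).buffer)[0];
  return expectedI32Values(bits);
}
expectedI32Values(424242);  // [6, 31026]
expectedF32Values(5.0);     // [16544, 0]
expectedF32Values(10.5);    // [16680, 0]
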
diff --git a/deps/v8/test/mjsunit/wasm/expose-wasm.js b/deps/v8/test/mjsunit/wasm/expose-wasm.js
new file mode 100644
index 0000000000..836c619025
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/expose-wasm.js
@@ -0,0 +1,7 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --noexpose-wasm
+
+assertThrows(() => { let x = WebAssembly.compile; });
diff --git a/deps/v8/test/mjsunit/wasm/js-api.js b/deps/v8/test/mjsunit/wasm/js-api.js
index 352f7caefa..952c6296cd 100644
--- a/deps/v8/test/mjsunit/wasm/js-api.js
+++ b/deps/v8/test/mjsunit/wasm/js-api.js
@@ -16,7 +16,7 @@ function unexpectedFail(error) {
}
function assertEq(val, expected) {
- assertEquals(expected, val);
+ assertSame(expected, val);
}
function assertArrayBuffer(val, expected) {
assertTrue(val instanceof ArrayBuffer);
@@ -512,13 +512,40 @@ assertTrue(buf !== mem.buffer);
assertEq(buf.byteLength, 0);
buf = mem.buffer;
assertEq(buf.byteLength, kPageSize);
-assertEq(mem.grow(1), 1);
+assertEq(mem.grow(1, 23), 1);
+assertTrue(buf !== mem.buffer);
+assertEq(buf.byteLength, 0);
+buf = mem.buffer;
+assertEq(buf.byteLength, 2 * kPageSize);
+assertEq(mem.grow(), 2);
assertTrue(buf !== mem.buffer);
assertEq(buf.byteLength, 0);
buf = mem.buffer;
assertEq(buf.byteLength, 2 * kPageSize);
assertErrorMessage(() => mem.grow(1), Error, /failed to grow memory/);
+assertErrorMessage(() => mem.grow(Infinity), Error, /failed to grow memory/);
+assertErrorMessage(() => mem.grow(-Infinity), Error, /failed to grow memory/);
assertEq(buf, mem.buffer);
+let throwOnValueOf = {
+ valueOf: function() {
+ throw Error('throwOnValueOf')
+ }
+};
+assertErrorMessage(() => mem.grow(throwOnValueOf), Error, /throwOnValueOf/);
+assertEq(buf, mem.buffer);
+let zero_wrapper = {
+ valueOf: function() {
+ ++this.call_counter;
+ return 0;
+ },
+ call_counter: 0
+};
+assertEq(mem.grow(zero_wrapper), 2);
+assertEq(zero_wrapper.call_counter, 1);
+assertTrue(buf !== mem.buffer);
+assertEq(buf.byteLength, 0);
+buf = mem.buffer;
+assertEq(buf.byteLength, 2 * kPageSize);
let empty_mem = new Memory({initial: 0, maximum: 5});
let empty_buf = empty_mem.buffer;
@@ -571,8 +598,9 @@ assertTrue(new Table({initial: 1, element: 'anyfunc'}) instanceof Table);
assertTrue(new Table({initial: 1.5, element: 'anyfunc'}) instanceof Table);
assertTrue(
new Table({initial: 1, maximum: 1.5, element: 'anyfunc'}) instanceof Table);
-// TODO:maximum assertTrue(new Table({initial:1, maximum:Math.pow(2,32)-1,
-// element:"anyfunc"}) instanceof Table);
+assertTrue(
+ new Table({initial: 1, maximum: Math.pow(2, 32) - 1, element: 'anyfunc'})
+ instanceof Table);
// 'WebAssembly.Table.prototype' data property
let tableProtoDesc = Object.getOwnPropertyDescriptor(Table, 'prototype');
@@ -623,15 +651,17 @@ assertErrorMessage(
() => get.call(), TypeError, /called on incompatible undefined/);
assertErrorMessage(
() => get.call({}), TypeError, /called on incompatible Object/);
+assertEq(get.call(tbl1), null);
assertEq(get.call(tbl1, 0), null);
+assertEq(get.call(tbl1, 0, Infinity), null);
assertEq(get.call(tbl1, 1), null);
assertEq(get.call(tbl1, 1.5), null);
assertErrorMessage(() => get.call(tbl1, 2), RangeError, /bad Table get index/);
assertErrorMessage(
() => get.call(tbl1, 2.5), RangeError, /bad Table get index/);
assertErrorMessage(() => get.call(tbl1, -1), RangeError, /bad Table get index/);
-// TODO assertErrorMessage(() => get.call(tbl1, Math.pow(2,33)), RangeError,
-// /bad Table get index/);
+assertErrorMessage(
+ () => get.call(tbl1, Math.pow(2, 33)), RangeError, /bad Table get index/);
assertErrorMessage(
() => get.call(tbl1, {valueOf() { throw new Error('hi') }}), Error, 'hi');
@@ -651,15 +681,26 @@ assertErrorMessage(
assertErrorMessage(
() => set.call(tbl1, 0), TypeError, /requires more than 1 argument/);
assertErrorMessage(
+ () => set.call(tbl1, undefined), TypeError,
+ /requires more than 1 argument/);
+assertErrorMessage(
() => set.call(tbl1, 2, null), RangeError, /bad Table set index/);
assertErrorMessage(
() => set.call(tbl1, -1, null), RangeError, /bad Table set index/);
-// TODO assertErrorMessage(() => set.call(tbl1, Math.pow(2,33), null),
-// RangeError, /bad Table set index/);
+assertErrorMessage(
+ () => set.call(tbl1, Math.pow(2, 33), null), RangeError,
+ /bad Table set index/);
+assertErrorMessage(
+ () => set.call(tbl1, Infinity, null), RangeError, /bad Table set index/);
+assertErrorMessage(
+ () => set.call(tbl1, -Infinity, null), RangeError, /bad Table set index/);
assertErrorMessage(
() => set.call(tbl1, 0, undefined), TypeError,
/can only assign WebAssembly exported functions to Table/);
assertErrorMessage(
+ () => set.call(tbl1, undefined, undefined), TypeError,
+ /can only assign WebAssembly exported functions to Table/);
+assertErrorMessage(
() => set.call(tbl1, 0, {}), TypeError,
/can only assign WebAssembly exported functions to Table/);
assertErrorMessage(() => set.call(tbl1, 0, function() {
@@ -672,6 +713,7 @@ assertErrorMessage(
'hai');
assertEq(set.call(tbl1, 0, null), undefined);
assertEq(set.call(tbl1, 1, null), undefined);
+assertEq(set.call(tbl1, undefined, null), undefined);
// 'WebAssembly.Table.prototype.grow' data property
let tblGrowDesc = Object.getOwnPropertyDescriptor(tableProto, 'grow');
@@ -693,11 +735,21 @@ assertErrorMessage(
/bad Table grow delta/);
var tbl = new Table({element: 'anyfunc', initial: 1, maximum: 2});
assertEq(tbl.length, 1);
+assertErrorMessage(
+ () => tbl.grow(Infinity), RangeError, /failed to grow table/);
+assertErrorMessage(
+ () => tbl.grow(-Infinity), RangeError, /failed to grow table/);
assertEq(tbl.grow(0), 1);
assertEq(tbl.length, 1);
-assertEq(tbl.grow(1), 1);
+assertEq(tbl.grow(1, 4), 1);
+assertEq(tbl.length, 2);
+assertEq(tbl.grow(), 2);
assertEq(tbl.length, 2);
assertErrorMessage(() => tbl.grow(1), Error, /failed to grow table/);
+assertErrorMessage(
+ () => tbl.grow(Infinity), RangeError, /failed to grow table/);
+assertErrorMessage(
+ () => tbl.grow(-Infinity), RangeError, /failed to grow table/);
// 'WebAssembly.validate' function
assertErrorMessage(() => WebAssembly.validate(), TypeError);
@@ -841,3 +893,17 @@ assertInstantiateSuccess(importingModuleBinary, {'': {f: () => {}}});
assertInstantiateSuccess(importingModuleBinary.buffer, {'': {f: () => {}}});
assertInstantiateSuccess(
memoryImportingModuleBinary, {'': {'my_memory': scratch_memory}});
+
+(function TestSubclassing() {
+ class M extends WebAssembly.Module { }
+ assertThrows(() => new M());
+
+ class I extends WebAssembly.Instance { }
+ assertThrows(() => new I());
+
+ class T extends WebAssembly.Table { }
+ assertThrows(() => new T());
+
+ class Y extends WebAssembly.Memory { }
+ assertThrows(() => new Y());
+})();
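
For reference while reading the grow assertions above: Memory.prototype.grow takes a delta in pages, returns the previous size in pages, and detaches the old ArrayBuffer, which is why the test re-fetches mem.buffer after every successful grow. A standalone sketch reusing this file's Memory alias, assertEq, and kPageSize:

let sketch_mem = new Memory({initial: 1, maximum: 2});
let old_buf = sketch_mem.buffer;
assertEq(sketch_mem.grow(1), 1);          // returns the previous size in pages
assertEq(old_buf.byteLength, 0);          // the old buffer is now detached
assertEq(sketch_mem.buffer.byteLength, 2 * kPageSize);  // fresh, larger buffer
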
diff --git a/deps/v8/test/mjsunit/wasm/memory-external-call.js b/deps/v8/test/mjsunit/wasm/memory-external-call.js
new file mode 100644
index 0000000000..0095ba1e7d
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/memory-external-call.js
@@ -0,0 +1,149 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+let initialMemoryPages = 1;
+let maximumMemoryPages = 5;
+let other_fn_idx = 0;
+
+// This builder can be used to generate a module with memory + load/store
+// functions and/or an additional imported function.
+function generateBuilder(add_memory, import_sig) {
+ let builder = new WasmModuleBuilder();
+ if (import_sig) {
+ // Add the import if we expect a module builder with imported functions.
+ let idx = builder.addImport('import_module', 'other_module_fn', import_sig);
+ // The imported function should always have index 0. With this assertion we
+ // verify that we can use other_fn_idx to refer to this function.
+ assertEquals(other_fn_idx, idx);
+ }
+ if (add_memory) {
+ // Add the memory if we expect a module builder with memory and load/store.
+ builder.addMemory(initialMemoryPages, maximumMemoryPages, true);
+ builder.addFunction('load', kSig_i_i)
+ .addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
+ .exportFunc();
+ builder.addFunction('store', kSig_i_ii)
+ .addBody([
+ kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem, 0, 0,
+ kExprGetLocal, 1
+ ])
+ .exportFunc();
+ }
+ return builder;
+}
+
+// This test verifies that when a Wasm module without memory invokes a function
+// imported from another module that has memory, the second module reads its own
+// memory and returns the expected value.
+(function TestExternalCallBetweenTwoWasmModulesWithoutAndWithMemory() {
+ print('TestExternalCallBetweenTwoWasmModulesWithoutAndWithMemory');
+
+ let first_module = generateBuilder(add_memory = false, import_sig = kSig_i_i);
+ // Function to invoke the imported function and add 1 to the result.
+ first_module.addFunction('plus_one', kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0, // -
+ kExprCallFunction, other_fn_idx, // call the imported function
+ kExprI32Const, 1, // -
+ kExprI32Add, // add 1 to the result
+ kExprReturn // -
+ ])
+ .exportFunc();
+ let second_module =
+ generateBuilder(add_memory = true, import_sig = undefined);
+
+ let index = kPageSize - 4;
+ let second_value = 2222;
+ // Instantiate the instances.
+ let second_instance = second_module.instantiate();
+ let first_instance = first_module.instantiate(
+ {import_module: {other_module_fn: second_instance.exports.load}});
+ // Write the values in the second instance.
+ second_instance.exports.store(index, second_value);
+ assertEquals(second_value, second_instance.exports.load(index));
+ // Verify that the value is correct when passing from the imported function.
+ assertEquals(second_value + 1, first_instance.exports.plus_one(index));
+})();
+
+// This test verifies that when a Wasm module with memory invokes a function
+// imported from another module that also has memory, the second module reads
+// its own memory and returns the expected value.
+(function TestExternalCallBetweenTwoWasmModulesWithMemory() {
+ print('TestExternalCallBetweenTwoWasmModulesWithMemory');
+
+ let first_module = generateBuilder(add_memory = true, import_sig = kSig_i_i);
+ // Function to invoke the imported function and add 1 to the result.
+ first_module.addFunction('plus_one', kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0, // -
+ kExprCallFunction, other_fn_idx, // call the imported function
+ kExprI32Const, 1, // -
+ kExprI32Add, // add 1 to the result
+ kExprReturn // -
+ ])
+ .exportFunc();
+ let second_module =
+ generateBuilder(add_memory = true, import_sig = undefined);
+
+ let index = kPageSize - 4;
+ let first_value = 1111;
+ let second_value = 2222;
+ // Instantiate the instances.
+ let second_instance = second_module.instantiate();
+ let first_instance = first_module.instantiate(
+ {import_module: {other_module_fn: second_instance.exports.load}});
+ // Write the values in the two instances.
+ first_instance.exports.store(index, first_value);
+ second_instance.exports.store(index, second_value);
+ // Verify that the values were stored to memory.
+ assertEquals(first_value, first_instance.exports.load(index));
+ assertEquals(second_value, second_instance.exports.load(index));
+ // Verify that the value is correct when passing from the imported function.
+ assertEquals(second_value + 1, first_instance.exports.plus_one(index));
+})();
+
+// This test verifies that the correct memory is accessed after returning
+// from a function imported from another module that also has memory.
+(function TestCorrectMemoryAccessedAfterReturningFromExternalCall() {
+ print('TestCorrectMemoryAccessedAfterReturningFromExternalCall');
+
+ let first_module = generateBuilder(add_memory = true, import_sig = kSig_i_ii);
+ // Function to invoke the imported function and add 1 to the result.
+ first_module.addFunction('sandwich', kSig_i_iii)
+ .addBody([
+ kExprGetLocal, 0, // param0 (index)
+ kExprGetLocal, 1, // param1 (first_value)
+ kExprI32StoreMem, 0, 0, // store value in first_instance
+ kExprGetLocal, 0, // param0 (index)
+ kExprGetLocal, 2, // param2 (second_value)
+ kExprCallFunction, other_fn_idx, // call the imported function
+ kExprDrop, // drop the return value
+ kExprGetLocal, 0, // param0 (index)
+ kExprI32LoadMem, 0, 0, // load from first_instance
+ kExprReturn // -
+ ])
+ .exportFunc();
+ let second_module =
+ generateBuilder(add_memory = true, import_sig = undefined);
+
+ let index = kPageSize - 4;
+ let first_value = 1111;
+ let second_value = 2222;
+ // Instantiate the instances.
+ let second_instance = second_module.instantiate();
+ let first_instance = first_module.instantiate(
+ {import_module: {other_module_fn: second_instance.exports.store}});
+ // Call the sandwich function and check that it returns the correct value.
+ assertEquals(
+ first_value,
+ first_instance.exports.sandwich(index, first_value, second_value));
+ // Verify that the values are correct in both memories.
+ assertEquals(first_value, first_instance.exports.load(index));
+ assertEquals(second_value, second_instance.exports.load(index));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/module-memory.js b/deps/v8/test/mjsunit/wasm/module-memory.js
index fdab182665..f5b5981436 100644
--- a/deps/v8/test/mjsunit/wasm/module-memory.js
+++ b/deps/v8/test/mjsunit/wasm/module-memory.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --expose-gc --stress-compaction
+// Flags: --expose-wasm --expose-gc --stress-compaction --allow-natives-syntax
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
@@ -162,8 +162,12 @@ function testOOBThrows() {
for (offset = 65534; offset < 66536; offset++) {
+ const trap_count = %GetWasmRecoveredTrapCount();
assertTraps(kTrapMemOutOfBounds, read);
assertTraps(kTrapMemOutOfBounds, write);
+ if (%IsWasmTrapHandlerEnabled()) {
+ assertEquals(trap_count + 2, %GetWasmRecoveredTrapCount());
+ }
}
}
diff --git a/deps/v8/test/mjsunit/wasm/shared-memory.js b/deps/v8/test/mjsunit/wasm/shared-memory.js
index 98ebd57e79..fa51a8307f 100644
--- a/deps/v8/test/mjsunit/wasm/shared-memory.js
+++ b/deps/v8/test/mjsunit/wasm/shared-memory.js
@@ -4,6 +4,9 @@
// Flags: --experimental-wasm-threads
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
function assertMemoryIsValid(memory) {
assertSame(WebAssembly.Memory.prototype, memory.__proto__);
assertSame(WebAssembly.Memory, memory.constructor);
@@ -50,3 +53,51 @@ function assertMemoryIsValid(memory) {
assertThrows(() => new WebAssembly.Memory({initial: 0, shared: true}),
TypeError);
})();
+
+(function TestCompileWithUndefinedShared() {
+ print("TestCompileWithUndefinedShared");
+ let memory = new WebAssembly.Memory({
+ initial: 0, maximum: 10, shared: true});
+ let builder = new WasmModuleBuilder();
+ builder.addImportedMemory("m", "imported_mem", 0, undefined, "shared");
+ assertThrows(() => new WebAssembly.Module(builder.toBuffer()),
+ WebAssembly.CompileError);
+})();
+
+(function TestCompileAtomicOpUndefinedShared() {
+ print("TestCompileAtomicOpUndefinedShared");
+ let memory = new WebAssembly.Memory({
+ initial: 0, maximum: 10, shared: true});
+ let builder = new WasmModuleBuilder();
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kAtomicPrefix,
+ kExprI32AtomicAdd]);
+ builder.addImportedMemory("m", "imported_mem");
+ assertThrows(() => new WebAssembly.Module(builder.toBuffer()),
+ WebAssembly.CompileError);
+})();
+
+(function TestInstantiateWithUndefinedShared() {
+ print("TestInstantiateWithUndefinedShared");
+ let memory = new WebAssembly.Memory({
+ initial: 0, maximum: 10, shared: true});
+ let builder = new WasmModuleBuilder();
+ builder.addImportedMemory("m", "imported_mem");
+ let module = new WebAssembly.Module(builder.toBuffer());
+ assertThrows(() => new WebAssembly.Instance(module,
+ {m: {imported_mem: memory}}), WebAssembly.LinkError);
+})();
+
+(function TestInstantiateWithImportNotSharedDefined() {
+ print("TestInstantiateWithImportNotSharedDefined");
+ let memory = new WebAssembly.Memory({
+ initial: 0, maximum: 10, shared: false});
+ let builder = new WasmModuleBuilder();
+ builder.addImportedMemory("m", "imported_mem", 0, 10, "shared");
+ let module = new WebAssembly.Module(builder.toBuffer());
+ assertThrows(() => new WebAssembly.Instance(module,
+ {m: {imported_mem: memory}}), WebAssembly.LinkError);
+})();
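
The two LinkError cases above exercise a single matching rule: an import declared shared only links against a shared memory, and a non-shared import only against a non-shared one. Sharedness is also observable from JavaScript, since a shared memory exposes a SharedArrayBuffer (a sketch, assuming the --experimental-wasm-threads behavior):

let shared_memory = new WebAssembly.Memory({initial: 0, maximum: 10, shared: true});
print(shared_memory.buffer instanceof SharedArrayBuffer);  // true
let plain_memory = new WebAssembly.Memory({initial: 0, maximum: 10});
print(plain_memory.buffer instanceof SharedArrayBuffer);   // false
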
diff --git a/deps/v8/test/mjsunit/wasm/stack.js b/deps/v8/test/mjsunit/wasm/stack.js
index 57e57d6467..f49dca3585 100644
--- a/deps/v8/test/mjsunit/wasm/stack.js
+++ b/deps/v8/test/mjsunit/wasm/stack.js
@@ -128,7 +128,6 @@ Error.prepareStackTrace = function(error, frames) {
}
})();
-
(function testStackOverflow() {
print("testStackOverflow");
var builder = new WasmModuleBuilder();
@@ -139,7 +138,7 @@ Error.prepareStackTrace = function(error, frames) {
kExprI32Const, 0,
kExprCallIndirect, sig_index, kTableZero
])
- .exportFunc()
+ .exportFunc();
builder.appendToTable([0]);
try {
@@ -157,3 +156,29 @@ Error.prepareStackTrace = function(error, frames) {
]);
}
})();
+
+(function testBigOffset() {
+ print('testBigOffset');
+ var builder = new WasmModuleBuilder();
+
+ let body = [kExprI32Const, 0, kExprI32Add];
+ while (body.length <= 65536) body = body.concat(body);
+ body.unshift(kExprI32Const, 0);
+ body.push(kExprUnreachable);
+ let unreachable_pos = body.length - 1;
+
+ builder.addFunction('main', kSig_v_v).addBody(body).exportFunc();
+
+ try {
+ builder.instantiate().exports.main();
+ fail('expected wasm exception');
+ } catch (e) {
+ assertEquals('unreachable', e.message, 'trap reason');
+ verifyStack(e.stack, [
+ // isWasm, function, line, pos, file
+ [true, 'main', 0, unreachable_pos + 1, null], // -
+ [false, 'testBigOffset', 173, 0, 'stack.js'], //-
+ [false, null, 184, 0, 'stack.js']
+ ]);
+ }
+})();
diff --git a/deps/v8/test/mjsunit/wasm/streaming-compile.js b/deps/v8/test/mjsunit/wasm/streaming-compile.js
new file mode 100644
index 0000000000..5f2ca6b9fa
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/streaming-compile.js
@@ -0,0 +1,7 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-test-streaming --wasm-async-compilation --expose-wasm --allow-natives-syntax
+
+load("test/mjsunit/wasm/async-compile.js");
diff --git a/deps/v8/test/mjsunit/wasm/streaming-error-position.js b/deps/v8/test/mjsunit/wasm/streaming-error-position.js
new file mode 100644
index 0000000000..c2b86c03fa
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/streaming-error-position.js
@@ -0,0 +1,374 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-test-streaming --wasm-async-compilation --expose-wasm --allow-natives-syntax
+
+'use strict';
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+function module(bytes) {
+ let buffer = bytes;
+ if (typeof buffer === 'string') {
+ buffer = new ArrayBuffer(bytes.length);
+ let view = new Uint8Array(buffer);
+ for (let i = 0; i < bytes.length; ++i) {
+ view[i] = bytes.charCodeAt(i);
+ }
+ }
+ return new WebAssembly.Module(buffer);
+}
+
+function toBuffer(binary) {
+ let buffer = new ArrayBuffer(binary.length);
+ let view = new Uint8Array(buffer);
+ for (let i = 0; i < binary.length; i++) {
+ let val = binary[i];
+ if ((typeof val) == 'string') val = val.charCodeAt(0);
+ view[i] = val | 0;
+ }
+ return buffer;
+}
+
+function testErrorPosition(bytes, pos, test_name) {
+ assertPromiseResult(
+ WebAssembly.compile(toBuffer(bytes)), assertUnreachable, e => {
+ print(test_name);
+ assertInstanceof(e, WebAssembly.CompileError);
+ let regex = new RegExp('@\\+' + pos);
+ print(e.message);
+ assertMatches(regex, e.message, 'Error Position');
+ });
+}
+
+(function testInvalidMagic() {
+ let bytes = new Binary;
+ bytes.push(
+ kWasmH0, kWasmH1 + 1, kWasmH2, kWasmH3, kWasmV0, kWasmV1, kWasmV2,
+ kWasmV3);
+ // Error at pos==0 because that's where the magic word is.
+ testErrorPosition(bytes, 0, 'testInvalidMagic');
+})();
+
+(function testInvalidVersion() {
+ let bytes = new Binary;
+ bytes.push(
+ kWasmH0, kWasmH1, kWasmH2, kWasmH3, kWasmV0, kWasmV1 + 1, kWasmV2,
+ kWasmV3);
+ // Error at pos==4 because that's where the version word is.
+ testErrorPosition(bytes, 4, 'testInvalidVersion');
+})();
+
+(function testSectionLengthInvalidVarint() {
+ let bytes = new Binary;
+ bytes.emit_header();
+ bytes.emit_u8(kTypeSectionCode);
+ bytes.push(0x80, 0x80, 0x80, 0x80, 0x80, 0x00);
+ let pos = bytes.length - 1 - 1;
+ testErrorPosition(bytes, pos, 'testSectionLengthInvalidVarint');
+})();
+
+(function testSectionLengthTooBig() {
+ let bytes = new Binary;
+ bytes.emit_header();
+ bytes.emit_u8(kTypeSectionCode);
+ bytes.emit_u32v(0xffffff23);
+ let pos = bytes.length - 1;
+ testErrorPosition(bytes, pos, 'testSectionLengthTooBig');
+})();
+
+(function testFunctionsCountInvalidVarint() {
+ let bytes = new Binary;
+ bytes.emit_header();
+ bytes.push(
+ kTypeSectionCode, // section id
+ 1, // section length
+ 0 // number of types
+ );
+ bytes.push(
+ kFunctionSectionCode, // section id
+ 1, // section length
+ 0 // number of functions
+ );
+ bytes.push(
+ kCodeSectionCode, // section id
+ 20, // section length (arbitrary value > 6)
+ );
+ // Functions count
+ bytes.push(0x80, 0x80, 0x80, 0x80, 0x80, 0x00);
+
+ let pos = bytes.length - 1 - 1;
+ testErrorPosition(bytes, pos, 'testFunctionsCountInvalidVarint');
+})();
+
+(function testFunctionsCountTooBig() {
+ let bytes = new Binary;
+ bytes.emit_header();
+ bytes.push(
+ kTypeSectionCode, // section id
+ 1, // section length
+ 0 // number of types
+ );
+ bytes.push(
+ kFunctionSectionCode, // section id
+ 1, // section length
+ 0 // number of functions
+ );
+ bytes.push(
+ kCodeSectionCode, // section id
+ 20, // section length (arbitrary value > 6)
+ );
+ // Functions count
+ bytes.emit_u32v(0xffffff23);
+
+ let pos = bytes.length - 1;
+ testErrorPosition(bytes, pos, 'testFunctionsCountTooBig');
+})();
+
+(function testFunctionsCountDoesNotMatch() {
+ let bytes = new Binary;
+ bytes.emit_header();
+ bytes.push(
+ kTypeSectionCode, // section id
+ 1, // section length
+ 0 // number of types
+ );
+ bytes.push(
+ kFunctionSectionCode, // section id
+ 1, // section length
+ 0 // number of functions
+ );
+ bytes.push(
+ kCodeSectionCode, // section id
+ 20, // section length (arbitrary value > 6)
+ );
+ // Functions count (different from the count in the function section).
+ bytes.emit_u32v(5);
+
+ let pos = bytes.length - 1;
+ testErrorPosition(bytes, pos, 'testFunctionsCountDoesNotMatch');
+})();
+
+(function testBodySizeInvalidVarint() {
+ let bytes = new Binary;
+ bytes.emit_header();
+ bytes.push(
+ kTypeSectionCode, // section id
+ 4, // section length
+ 1, // number of types
+ kWasmFunctionTypeForm, // type
+ 0, // number of parameters
+ 0 // number of returns
+ );
+ bytes.push(
+ kFunctionSectionCode, // section id
+ 2, // section length
+ 1, // number of functions
+ 0 // signature index
+ );
+ bytes.push(
+ kCodeSectionCode, // section id
+ 20, // section length (arbitrary value > 6)
+ 1 // functions count
+ );
+ // Invalid function body size.
+ bytes.push(0x80, 0x80, 0x80, 0x80, 0x80, 0x00);
+
+ let pos = bytes.length - 1 - 1;
+ testErrorPosition(bytes, pos, 'testBodySizeInvalidVarint');
+})();
+
+(function testBodySizeTooBig() {
+ let bytes = new Binary;
+ bytes.emit_header();
+ bytes.push(
+ kTypeSectionCode, // section id
+ 4, // section length
+ 1, // number of types
+ kWasmFunctionTypeForm, // type
+ 0, // number of parameters
+ 0 // number of returns
+ );
+ bytes.push(
+ kFunctionSectionCode, // section id
+ 2, // section length
+ 1, // number of functions
+ 0 // signature index
+ );
+ bytes.push(
+ kCodeSectionCode, // section id
+ 20, // section length (arbitrary value > 6)
+ 1 // functions count
+ );
+ // Invalid function body size.
+ bytes.emit_u32v(0xffffff23);
+
+ let pos = bytes.length - 1;
+ testErrorPosition(bytes, pos, 'testBodySizeTooBig');
+})();
+
+(function testBodySizeDoesNotFit() {
+ let bytes = new Binary;
+ bytes.emit_header();
+ bytes.push(
+ kTypeSectionCode, // section id
+ 4, // section length
+ 1, // number of types
+ kWasmFunctionTypeForm, // type
+ 0, // number of parameters
+ 0 // number of returns
+ );
+ bytes.push(
+ kFunctionSectionCode, // section id
+ 2, // section length
+ 1, // number of functions
+ 0 // signature index
+ );
+ bytes.push(
+ kCodeSectionCode, // section id
+ 20, // section length (arbitrary value > 6)
+ 1 // functions count
+ );
+ // Invalid function body size (does not fit into the code section).
+ bytes.emit_u32v(20);
+
+ let pos = bytes.length - 1;
+ testErrorPosition(bytes, pos, 'testBodySizeDoesNotFit');
+})();
+
+(function testBodySizeIsZero() {
+ let bytes = new Binary;
+ bytes.emit_header();
+ bytes.push(
+ kTypeSectionCode, // section id
+ 4, // section length
+ 1, // number of types
+ kWasmFunctionTypeForm, // type
+ 0, // number of parameters
+ 0 // number of returns
+ );
+ bytes.push(
+ kFunctionSectionCode, // section id
+ 2, // section length
+ 1, // number of functions
+ 0 // signature index
+ );
+ bytes.push(
+ kCodeSectionCode, // section id
+ 20, // section length (arbitrary value > 6)
+ 1 // functions count
+ );
+ // Invalid function body size (body size of 0 is invalid).
+ bytes.emit_u32v(0);
+
+ let pos = bytes.length - 1;
+ testErrorPosition(bytes, pos, 'testBodySizeIsZero');
+})();
+
+(function testStaleCodeSectionBytes() {
+ let bytes = new Binary;
+ bytes.emit_header();
+ bytes.push(
+ kTypeSectionCode, // section id
+ 4, // section length
+ 1, // number of types
+ kWasmFunctionTypeForm, // type
+ 0, // number of parameters
+ 0 // number of returns
+ );
+ bytes.push(
+ kFunctionSectionCode, // section id
+ 2, // section length
+ 1, // number of functions
+ 0 // signature index
+ );
+ bytes.push(
+ kCodeSectionCode, // section id
+ 20, // section length (too big)
+ 1, // functions count
+ 2, // body size
+ 0, // locals count
+ kExprEnd // body
+ );
+
+ let pos = bytes.length - 1;
+ testErrorPosition(bytes, pos, 'testStaleCodeSectionBytes');
+})();
+
+(function testInvalidCode() {
+ let bytes = new Binary;
+ bytes.emit_header();
+ bytes.push(
+ kTypeSectionCode, // section id
+ 4, // section length
+ 1, // number of types
+ kWasmFunctionTypeForm, // type
+ 0, // number of parameters
+ 0 // number of returns
+ );
+ bytes.push(
+ kFunctionSectionCode, // section id
+ 2, // section length
+ 1, // number of functions
+ 0 // signature index
+ );
+ bytes.push(
+ kCodeSectionCode, // section id
+ 6, // section length
+ 1, // functions count
+ 4, // body size
+ 0, // locals count
+ kExprGetLocal, 0, // Access a non-existing local
+ kExprEnd // --
+ );
+
+ // Find error at the index of kExprGetLocal.
+ let pos = bytes.length - 1 - 1;
+ testErrorPosition(bytes, pos, 'testInvalidCode');
+})();
+
+(function testCodeSectionSizeZero() {
+ let bytes = new Binary;
+ bytes.emit_header();
+ bytes.push(
+ kTypeSectionCode, // section id
+ 4, // section length
+ 1, // number of types
+ kWasmFunctionTypeForm, // type
+ 0, // number of parameters
+ 0 // number of returns
+ );
+ bytes.push(
+ kFunctionSectionCode, // section id
+ 2, // section length
+ 1, // number of functions
+ 0 // signature index
+ );
+ bytes.push(
+ kCodeSectionCode, // section id
+ 0, // section length (zero, though a function body is expected)
+ );
+
+ // Find the error at the section length byte.
+ let pos = bytes.length - 1;
+ testErrorPosition(bytes, pos, 'testCodeSectionSizeZero');
+})();
+
+(function testInvalidSection() {
+ let bytes = new Binary;
+ bytes.emit_header();
+ bytes.push(
+ kTypeSectionCode, // section id
+ 5, // section length
+ 1, // number of types
+ kWasmFunctionTypeForm, // type
+ 1, // number of parameters
+ 0x7b, // invalid type
+ 0 // number of returns
+ );
+
+ let pos = bytes.length - 1 - 1;
+ testErrorPosition(bytes, pos, 'testInvalidSection');
+})();
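
The pos arithmetic throughout this file follows from LEB128: a u32 varint carries 7 payload bits per byte, so it may span at most five bytes. The six-byte sequences [0x80, 0x80, 0x80, 0x80, 0x80, 0x00] are rejected at their fifth byte, hence bytes.length - 1 - 1, while an overlarge value such as 0xffffff23 encodes to exactly five bytes and is rejected at its last byte, hence bytes.length - 1. A sketch of the encoder (emitU32v is a hypothetical stand-in for the Binary helper's emit_u32v):

function emitU32v(bytes, val) {
  while (true) {
    let b = val & 0x7f;           // low 7 payload bits
    val = Math.floor(val / 128);
    if (val !== 0) b |= 0x80;     // set the continuation bit
    bytes.push(b);
    if (val === 0) break;
  }
}
let enc = [];
emitU32v(enc, 0xffffff23);
print(enc.length);  // 5
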
diff --git a/deps/v8/test/mjsunit/wasm/streaming-trap-location.js b/deps/v8/test/mjsunit/wasm/streaming-trap-location.js
new file mode 100644
index 0000000000..1607ca76f2
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/streaming-trap-location.js
@@ -0,0 +1,7 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-test-streaming --wasm-async-compilation --expose-wasm
+
+load("test/mjsunit/wasm/trap-location.js");
diff --git a/deps/v8/test/mjsunit/wasm/trap-location.js b/deps/v8/test/mjsunit/wasm/trap-location.js
index 390ad74d8d..0c646c92cd 100644
--- a/deps/v8/test/mjsunit/wasm/trap-location.js
+++ b/deps/v8/test/mjsunit/wasm/trap-location.js
@@ -14,12 +14,16 @@ Error.prepareStackTrace = function(error, frames) {
function testTrapLocations(instance, expected_stack_length) {
function testWasmTrap(value, reason, position) {
+ let function_name = arguments.callee.name;
try {
instance.exports.main(value);
fail('expected wasm exception');
} catch (e) {
assertEquals(kTrapMsgs[reason], e.message, 'trap reason');
- assertEquals(expected_stack_length, e.stack.length, 'number of frames');
+ // Check that the trapping function is the one which was called from this
+ // function.
+ assertTrue(
+ e.stack[1].toString().startsWith(function_name), 'stack depth');
assertEquals(0, e.stack[0].getLineNumber(), 'wasmFunctionIndex');
assertEquals(position, e.stack[0].getPosition(), 'position');
}
diff --git a/deps/v8/test/mjsunit/wasm/unreachable-validation.js b/deps/v8/test/mjsunit/wasm/unreachable-validation.js
index 113bcc3102..5b98b1713b 100644
--- a/deps/v8/test/mjsunit/wasm/unreachable-validation.js
+++ b/deps/v8/test/mjsunit/wasm/unreachable-validation.js
@@ -128,3 +128,5 @@ run(I, "U (block (iblock 0 0 brt01) drop)", [unr, ...block, ...iblock, ...zero,
run(V, "(iblock (iblock U 0 brt01)) drop", [...iblock, ...iblock, unr, ...zero, ...brt01, end, end, drop]);
run(I, "(block (fblock U 0 brt01) drop)", [...iblock, ...fblock, unr, ...zero, ...brt01, end, drop, end]);
run(I, "(iblock (fblock U 0 brt01) drop 0) drop", [...iblock, ...fblock, unr, ...zero, ...brt01, end, drop, ...zero, end, drop]);
+
+run(I, "(iblock (block (U brif 1))", [...iblock, ...block, unr, kExprBrIf, 0, end, end, kExprDrop]);
diff --git a/deps/v8/test/mjsunit/wasm/wasm-constants.js b/deps/v8/test/mjsunit/wasm/wasm-constants.js
index c556b109a1..cc5f1e9c4a 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-constants.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-constants.js
@@ -92,6 +92,7 @@ let kWasmI32 = 0x7f;
let kWasmI64 = 0x7e;
let kWasmF32 = 0x7d;
let kWasmF64 = 0x7c;
+let kWasmS128 = 0x7b;
let kExternalFunction = 0;
let kExternalTable = 1;
@@ -123,6 +124,10 @@ let kSig_v_d = makeSig([kWasmF64], []);
let kSig_v_dd = makeSig([kWasmF64, kWasmF64], []);
let kSig_v_ddi = makeSig([kWasmF64, kWasmF64, kWasmI32], []);
+let kSig_v_f = makeSig([kWasmF32], []);
+let kSig_f_f = makeSig([kWasmF32], [kWasmF32]);
+let kSig_d_d = makeSig([kWasmF64], [kWasmF64]);
+
function makeSig(params, results) {
return {params: params, results: results};
}
@@ -327,6 +332,12 @@ let kExprF64ReinterpretI64 = 0xbf;
// Prefix opcodes
let kAtomicPrefix = 0xfe;
+let kExprI32AtomicLoad = 0x10;
+let kExprI32AtomicLoad8U = 0x12;
+let kExprI32AtomicLoad16U = 0x13;
+let kExprI32AtomicStore = 0x17;
+let kExprI32AtomicStore8U = 0x19;
+let kExprI32AtomicStore16U = 0x1a;
let kExprI32AtomicAdd = 0x1e;
let kExprI32AtomicAdd8U = 0x20;
let kExprI32AtomicAdd16U = 0x21;
@@ -387,7 +398,7 @@ function assertTraps(trap, code) {
throw new MjsUnitAssertionError('Did not trap, expected: ' + kTrapMsgs[trap]);
}
-function assertWasmThrows(values, code) {
+function assertWasmThrows(runtime_id, values, code) {
try {
if (typeof code === 'function') {
code();
@@ -396,15 +407,18 @@ function assertWasmThrows(values, code) {
}
} catch (e) {
assertTrue(e instanceof WebAssembly.RuntimeError);
- assertNotEquals(e['WasmExceptionTag'], undefined);
- assertTrue(Number.isInteger(e['WasmExceptionTag']));
- // TODO(kschimpf): Extract values from the exception.
- let e_values = [];
- assertEquals(values, e_values);
+ var e_runtime_id = e['WasmExceptionRuntimeId'];
+ assertEquals(runtime_id, e_runtime_id);
+ assertTrue(Number.isInteger(e_runtime_id));
+ var e_values = e['WasmExceptionValues'];
+ assertEquals(values.length, e_values.length);
+ for (let i = 0; i < values.length; ++i) {
+ assertEquals(values[i], e_values[i]);
+ }
// Success.
return;
}
- throw new MjsUnitAssertionError('Did not throw, expected: ' + values);
+ throw new MjsUnitAssertionError('Did not throw, expected: ' + runtime_id + ' ' + values);
}
function wasmI32Const(val) {
diff --git a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
index 78e3d6bde8..d21067b36e 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
@@ -171,8 +171,8 @@ class WasmModuleBuilder {
return this;
}
- addMemory(min, max, exp) {
- this.memory = {min: min, max: max, exp: exp};
+ addMemory(min, max, exp, shared) {
+ this.memory = {min: min, max: max, exp: exp, shared: shared};
return this;
}
@@ -240,9 +240,9 @@ class WasmModuleBuilder {
return this.num_imported_globals++;
}
- addImportedMemory(module = "", name, initial = 0, maximum) {
+ addImportedMemory(module = "", name, initial = 0, maximum, shared) {
let o = {module: module, name: name, kind: kExternalMemory,
- initial: initial, maximum: maximum};
+ initial: initial, maximum: maximum, shared: shared};
this.imports.push(o);
return this;
}
@@ -348,7 +348,12 @@ class WasmModuleBuilder {
section.emit_u8(imp.mutable);
} else if (imp.kind == kExternalMemory) {
var has_max = (typeof imp.maximum) != "undefined";
- section.emit_u8(has_max ? 1 : 0); // flags
+ var is_shared = (typeof imp.shared) != "undefined";
+ if (is_shared) {
+ section.emit_u8(has_max ? 3 : 2); // flags
+ } else {
+ section.emit_u8(has_max ? 1 : 0); // flags
+ }
section.emit_u32v(imp.initial); // initial
if (has_max) section.emit_u32v(imp.maximum); // maximum
} else if (imp.kind == kExternalTable) {
@@ -395,9 +400,16 @@ class WasmModuleBuilder {
binary.emit_section(kMemorySectionCode, section => {
section.emit_u8(1); // one memory entry
const has_max = wasm.memory.max !== undefined;
- section.emit_u32v(has_max ? kResizableMaximumFlag : 0);
+ const is_shared = wasm.memory.shared !== undefined;
+ // Emit flags (bit 0: resizable maximum, bit 1: shared memory).
+ if (is_shared) {
+ section.emit_u8(has_max ? 3 : 2);
+ } else {
+ section.emit_u8(has_max ? 1 : 0);
+ }
section.emit_u32v(wasm.memory.min);
if (has_max) section.emit_u32v(wasm.memory.max);
});
}
@@ -511,7 +523,7 @@ class WasmModuleBuilder {
for (let type of wasm.exceptions) {
section.emit_u32v(type.params.length);
for (let param of type.params) {
- section.enit_u8(param);
+ section.emit_u8(param);
}
}
});
@@ -541,6 +553,9 @@ class WasmModuleBuilder {
if (l.f64_count > 0) {
local_decls.push({count: l.f64_count, type: kWasmF64});
}
+ if (l.s128_count > 0) {
+ local_decls.push({count: l.s128_count, type: kWasmS128});
+ }
}
let header = new Binary;
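
Both flag emissions above encode the same two-bit limits byte: bit 0 says a maximum follows, bit 1 marks the memory as shared. Spelled out (limitsFlags is only an illustration of the hunks above, not additional builder code):

function limitsFlags(has_max, is_shared) {
  return (has_max ? 1 : 0) | (is_shared ? 2 : 0);
}
limitsFlags(false, false);  // 0: {min}
limitsFlags(true, false);   // 1: {min, max}
limitsFlags(false, true);   // 2: shared {min} -- rejected, shared memory requires a maximum
limitsFlags(true, true);    // 3: shared {min, max}
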
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index 7ba7b7bc52..686018c007 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -297,6 +297,10 @@
# This section is for tests that fail in both V8 and JSC. Thus they
# have been determined to be incompatible between Mozilla and V8/JSC.
+ # d8 does not implement a window object.
+ 'js1_5/Regress/regress-317476': [FAIL],
+ 'js1_5/Regress/regress-314401': [FAIL],
+
# Any local 'arguments' variable should not be allowed to shadow the value
# returned via the indirect 'arguments' property accessor.
'js1_4/Functions/function-001': [FAIL_OK],
diff --git a/deps/v8/test/test262/local-tests/test/intl402/NumberFormat/prototype/formatToParts/default-parameter.js b/deps/v8/test/test262/local-tests/test/intl402/NumberFormat/prototype/formatToParts/default-parameter.js
new file mode 100644
index 0000000000..408694c48c
--- /dev/null
+++ b/deps/v8/test/test262/local-tests/test/intl402/NumberFormat/prototype/formatToParts/default-parameter.js
@@ -0,0 +1,30 @@
+// Copyright (C) 2017 Josh Wolfe. All rights reserved.
+// This code is governed by the BSD license found in the LICENSE file.
+/*---
+esid: #sec-intl.numberformat.prototype.formattoparts
+description: Intl.NumberFormat.prototype.formatToParts called with no parameters
+info: >
+ Intl.NumberFormat.prototype.formatToParts ([ value ])
+
+ 3. If value is not provided, let value be undefined.
+---*/
+
+var nf = new Intl.NumberFormat();
+
+// Example value: [{"type":"nan","value":"NaN"}]
+var implicit = nf.formatToParts();
+var explicit = nf.formatToParts(undefined);
+
+assert(partsEquals(implicit, explicit),
+ "formatToParts() should be equivalent to formatToParts(undefined)");
+
+function partsEquals(parts1, parts2) {
+ if (parts1.length !== parts2.length) return false;
+ for (var i = 0; i < parts1.length; i++) {
+ var part1 = parts1[i];
+ var part2 = parts2[i];
+ if (part1.type !== part2.type) return false;
+ if (part1.value !== part2.value) return false;
+ }
+ return true;
+}
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index eec9d869b7..8e716309ee 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -109,11 +109,6 @@
'built-ins/TypedArray/prototype/map/callbackfn-detachbuffer': [FAIL],
'built-ins/TypedArray/prototype/reduce/callbackfn-detachbuffer': [FAIL],
'built-ins/TypedArray/prototype/reduceRight/callbackfn-detachbuffer': [FAIL],
- 'built-ins/TypedArray/prototype/set/array-arg-targetbuffer-detached-on-get-src-value-throws': [FAIL],
- 'built-ins/TypedArray/prototype/set/array-arg-targetbuffer-detached-on-tointeger-offset-throws': [FAIL],
- 'built-ins/TypedArray/prototype/set/array-arg-targetbuffer-detached-throws': [FAIL],
- 'built-ins/TypedArray/prototype/set/typedarray-arg-srcbuffer-detached-during-tointeger-offset-throws': [FAIL],
- 'built-ins/TypedArray/prototype/set/typedarray-arg-targetbuffer-detached-during-tointeger-offset-throws': [FAIL],
'built-ins/TypedArray/prototype/slice/detached-buffer-custom-ctor-other-targettype': [FAIL],
'built-ins/TypedArray/prototype/slice/detached-buffer-custom-ctor-same-targettype': [FAIL],
'built-ins/TypedArray/prototype/slice/detached-buffer-get-ctor': [FAIL],
@@ -203,6 +198,7 @@
'language/statements/for-of/dstr-array-rest-iter-thrw-close': [FAIL],
'language/statements/for-of/dstr-array-rest-iter-thrw-close-err': [FAIL],
'language/statements/for-of/dstr-array-rest-lref-err': [FAIL],
+ 'language/statements/for-await-of/async-gen-decl-dstr-array-elem-iter-rtrn-close-null': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=896
'built-ins/RegExp/property-escapes/binary-properties-with-value': [FAIL],
@@ -364,11 +360,6 @@
'language/expressions/object/method-definition/name-param-redecl': [FAIL],
'language/statements/async-function/early-errors-declaration-formals-body-duplicate': [FAIL],
- # Module-related tests
- # https://github.com/tc39/ecma262/pull/858
- 'language/module-code/namespace/internals/set': [FAIL],
- 'language/module-code/namespace/internals/define-own-property': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=5601
'intl402/PluralRules/*': ['--harmony-plural-rules'],
@@ -458,6 +449,13 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=4743#c28
'built-ins/RegExp/property-escapes/generated/Emoji_Component': [FAIL],
+ # ICU 59 uses Unicode 9 data; property escape tests were generated for Unicode 10
+ 'built-ins/RegExp/property-escapes/generated/*': [SKIP],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=6776
+ 'built-ins/Proxy/ownKeys/return-duplicate-entries-throws': [FAIL],
+ 'built-ins/Proxy/ownKeys/return-duplicate-symbol-entries-throws': [FAIL],
+
######################## NEEDS INVESTIGATION ###########################
# These test failures are specific to the intl402 suite and need investigation
@@ -467,15 +465,12 @@
'intl402/6.2.3': [FAIL],
'intl402/Collator/10.1.2_a': [PASS, FAIL],
'intl402/Collator/10.2.3_b': [PASS, FAIL],
- 'intl402/Collator/prototype/10.3_a': [FAIL],
'intl402/DateTimeFormat/12.1.2': [PASS, FAIL],
'intl402/DateTimeFormat/12.2.3_b': [FAIL],
- 'intl402/DateTimeFormat/prototype/12.3_a': [FAIL],
'intl402/Number/prototype/toLocaleString/13.2.1_5': [PASS, FAIL],
'intl402/NumberFormat/11.1.1_20_c': [FAIL],
'intl402/NumberFormat/11.1.2': [PASS, FAIL],
'intl402/NumberFormat/11.2.3_b': [FAIL],
- 'intl402/NumberFormat/prototype/11.3_a': [FAIL],
'intl402/String/prototype/localeCompare/13.1.1_7': [PASS, FAIL],
##################### DELIBERATE INCOMPATIBILITIES #####################
@@ -513,14 +508,14 @@
'language/statements/for-await-of/escaped-of': [FAIL],
'language/statements/for-of/escaped-of': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=6542
- 'built-ins/Array/prototype/sort/comparefn-nonfunction-call-throws': [FAIL],
- 'built-ins/TypedArray/prototype/sort/comparefn-nonfunction-call-throws': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=6543
'language/statements/labeled/value-await-non-module-escaped': [FAIL],
'language/statements/labeled/value-yield-non-strict-escaped': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=6912
+ 'built-ins/RegExp/named-groups/string-replace-missing': [FAIL],
+ 'built-ins/RegExp/named-groups/string-replace-unclosed': [FAIL],
+
############################ INVALID TESTS #############################
# Test makes unjustified assumptions about the number of calls to SortCompare.
@@ -539,81 +534,9 @@
'built-ins/Date/prototype/setFullYear/new-value-time-clip': [PASS, FAIL],
'built-ins/Date/prototype/setMonth/new-value-time-clip': [PASS, FAIL],
- # https://github.com/tc39/test262/issues/1085
- 'language/directive-prologue/10.1.1-5gs': [PASS, FAIL_SLOPPY],
- 'language/directive-prologue/10.1.1-2gs': [PASS, FAIL_SLOPPY],
- 'language/directive-prologue/14.1-4gs': [PASS, FAIL_SLOPPY],
- 'language/directive-prologue/10.1.1-8gs': [PASS, FAIL_SLOPPY],
- 'language/directive-prologue/14.1-5gs': [PASS, FAIL_SLOPPY],
-
# Test against internals of harness; we plug in differently
'harness/detachArrayBuffer': [SKIP],
- 'harness/detachArrayBuffer-$262.detachArrayBuffer': [SKIP],
-
- # Invalid {Assignment,Binding}RestPattern
- # https://bugs.chromium.org/p/v8/issues/detail?id=6500
- 'language/expressions/assignment/dstr-obj-rest-obj-own-property': [FAIL],
- 'language/statements/for-of/dstr-obj-rest-obj-own-property': [FAIL],
- 'language/statements/for-await-of/async-gen-dstr-const-async-obj-ptrn-rest-obj-nested-rest': [FAIL],
- 'language/statements/for-await-of/async-gen-dstr-let-obj-ptrn-rest-obj-own-property': [FAIL],
- 'language/statements/for-await-of/async-gen-decl-dstr-obj-rest-nested-obj': [FAIL],
- 'language/statements/for-await-of/async-gen-dstr-let-async-obj-ptrn-rest-obj-nested-rest': [FAIL],
- 'language/expressions/assignment/dstr-obj-rest-nested-obj-nested-rest': [FAIL],
- 'language/statements/for-await-of/async-func-dstr-let-async-obj-ptrn-rest-obj-own-property': [FAIL],
- 'language/statements/for-await-of/async-gen-dstr-var-obj-ptrn-rest-obj-nested-rest': [FAIL],
- 'language/statements/for-await-of/async-func-dstr-const-obj-ptrn-rest-nested-obj': [FAIL],
- 'language/statements/for-await-of/async-func-dstr-const-async-obj-ptrn-rest-obj-own-property': [FAIL],
- 'language/statements/for-await-of/async-func-dstr-let-obj-ptrn-rest-obj-nested-rest': [FAIL],
- 'language/statements/for-await-of/async-gen-dstr-var-obj-ptrn-rest-obj-own-property': [FAIL],
- 'language/expressions/assignment/dstr-obj-rest-nested-obj': [FAIL],
- 'language/statements/for-await-of/async-gen-dstr-let-obj-ptrn-rest-nested-obj': [FAIL],
- 'language/statements/for-await-of/async-func-dstr-var-async-obj-ptrn-rest-obj-nested-rest': [FAIL],
- 'language/statements/for-await-of/async-gen-dstr-let-obj-ptrn-rest-obj-nested-rest': [FAIL],
- 'language/statements/for-await-of/async-gen-dstr-var-async-obj-ptrn-rest-obj-nested-rest': [FAIL],
- 'language/statements/for-of/dstr-obj-rest-nested-obj': [FAIL],
- 'language/statements/for-await-of/async-gen-dstr-const-obj-ptrn-rest-obj-nested-rest': [FAIL],
- 'language/statements/for-await-of/async-func-dstr-var-async-obj-ptrn-rest-nested-obj': [FAIL],
- 'language/statements/for-await-of/async-gen-dstr-const-async-obj-ptrn-rest-obj-own-property': [FAIL],
- 'language/statements/for-await-of/async-gen-decl-dstr-obj-rest-nested-obj-nested-rest': [FAIL],
- 'language/statements/for-await-of/async-gen-dstr-var-async-obj-ptrn-rest-obj-own-property': [FAIL],
- 'language/statements/for-await-of/async-gen-dstr-var-obj-ptrn-rest-nested-obj': [FAIL],
- 'language/statements/for-await-of/async-gen-dstr-const-obj-ptrn-rest-obj-own-property': [FAIL],
- 'language/statements/for-await-of/async-func-dstr-let-async-obj-ptrn-rest-obj-nested-rest': [FAIL],
- 'language/statements/for-await-of/async-func-decl-dstr-obj-rest-nested-obj-nested-rest': [FAIL],
- 'language/statements/for-await-of/async-func-decl-dstr-obj-rest-obj-own-property': [FAIL],
- 'language/statements/for-await-of/async-func-dstr-const-async-obj-ptrn-rest-nested-obj': [FAIL],
- 'language/statements/for-await-of/async-gen-dstr-let-async-obj-ptrn-rest-nested-obj': [FAIL],
- 'language/statements/for-await-of/async-func-dstr-var-obj-ptrn-rest-nested-obj': [FAIL],
- 'language/statements/for-await-of/async-gen-dstr-const-async-obj-ptrn-rest-nested-obj': [FAIL],
- 'language/statements/for-await-of/async-func-decl-dstr-obj-rest-nested-obj': [FAIL],
- 'language/statements/for-await-of/async-func-dstr-var-obj-ptrn-rest-obj-nested-rest': [FAIL],
- 'language/statements/for-await-of/async-gen-decl-dstr-obj-rest-obj-own-property': [FAIL],
- 'language/statements/for-await-of/async-func-dstr-const-async-obj-ptrn-rest-obj-nested-rest': [FAIL],
- 'language/statements/for-await-of/async-func-dstr-let-obj-ptrn-rest-obj-own-property': [FAIL],
- 'language/statements/for-await-of/async-gen-dstr-var-async-obj-ptrn-rest-nested-obj': [FAIL],
- 'language/statements/for-of/dstr-obj-rest-nested-obj-nested-rest': [FAIL],
- 'language/statements/for-await-of/async-func-dstr-const-obj-ptrn-rest-obj-nested-rest': [FAIL],
- 'language/statements/for-await-of/async-func-dstr-let-obj-ptrn-rest-nested-obj': [FAIL],
- 'language/statements/for-await-of/async-func-dstr-let-async-obj-ptrn-rest-nested-obj': [FAIL],
- 'language/statements/for-await-of/async-func-dstr-var-obj-ptrn-rest-obj-own-property': [FAIL],
- 'language/statements/for-await-of/async-func-dstr-var-async-obj-ptrn-rest-obj-own-property': [FAIL],
- 'language/statements/for-await-of/async-gen-dstr-let-async-obj-ptrn-rest-obj-own-property': [FAIL],
- 'language/statements/for-await-of/async-gen-dstr-const-obj-ptrn-rest-nested-obj': [FAIL],
- 'language/statements/for-await-of/async-func-dstr-const-obj-ptrn-rest-obj-own-property': [FAIL],
-
- # Async Iteration https://github.com/tc39/test262/issues/1154
- 'language/statements/for-await-of/async-func-decl-dstr-array-elem-init-let': [FAIL],
- 'language/statements/for-await-of/async-func-decl-dstr-array-elem-put-let': [FAIL],
- 'language/statements/for-await-of/async-gen-decl-dstr-array-elem-init-let': [FAIL],
- 'language/statements/for-await-of/async-gen-decl-dstr-array-elem-put-let': [FAIL],
- 'language/statements/for-await-of/async-gen-decl-dstr-array-elem-trlg-iter-elision-iter-abpt': [FAIL],
- 'language/statements/for-await-of/async-gen-decl-dstr-array-elem-trlg-iter-get-err': [FAIL],
- 'language/statements/for-await-of/async-gen-decl-dstr-array-elem-trlg-iter-list-nrml-close-null': [FAIL],
- 'language/statements/for-await-of/async-gen-decl-dstr-array-elem-trlg-iter-list-rtrn-close-err': [FAIL],
- 'language/statements/for-await-of/async-gen-decl-dstr-array-elem-trlg-iter-list-rtrn-close-null': [FAIL],
- 'language/statements/for-await-of/async-gen-decl-dstr-array-elem-trlg-iter-rest-rtrn-close-null': [FAIL],
- 'language/statements/for-await-of/async-gen-decl-dstr-array-elem-trlg-iter-list-rtrn-close': [FAIL],
- 'language/statements/for-await-of/async-gen-decl-dstr-array-elem-trlg-iter-rest-rtrn-close-err': [PASS, FAIL],
+ 'harness/detachArrayBuffer-host-detachArrayBuffer': [SKIP],
############################ SKIPPED TESTS #############################
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index 59650f3d25..849a3036cd 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -47,8 +47,11 @@ FEATURE_FLAGS = {
'regexp-named-groups': '--harmony-regexp-named-captures',
'regexp-unicode-property-escapes': '--harmony-regexp-property',
'regexp-lookbehind': '--harmony-regexp-lookbehind',
+ 'Promise.prototype.finally': '--harmony-promise-finally',
}
+SKIPPED_FEATURES = set(['BigInt', 'class-fields', 'optional-catch-binding'])
+
DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
ARCHIVE = DATA + ".tar"
@@ -130,7 +133,6 @@ class Test262TestSuite(testsuite.TestSuite):
self.ParseTestRecord = None
def ListTests(self, context):
- tests = []
testnames = set()
for dirname, dirs, files in itertools.chain(os.walk(self.testroot),
os.walk(self.localtestroot)):
@@ -148,7 +150,10 @@ class Test262TestSuite(testsuite.TestSuite):
fullpath = os.path.join(dirname, filename)
relpath = re.match(TEST_262_RELPATH_REGEXP, fullpath).group(1)
testnames.add(relpath.replace(os.path.sep, "/"))
- return [testcase.TestCase(self, testname) for testname in testnames]
+ cases = [testcase.TestCase(self, testname) for testname in testnames]
+ return [case for case in cases if len(
+ SKIPPED_FEATURES.intersection(
+ self.GetTestRecord(case).get("features", []))) == 0]
def GetFlagsForTestCase(self, testcase, context):
return (testcase.flags + context.mode_flags + self.harness +
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index 37b9e46f8c..0f8d8c6cfc 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -7,6 +7,30 @@ import("../../gni/v8.gni")
v8_executable("unittests") {
testonly = true
+ # TODO(machenbach): Translate from gyp.
+ #['OS=="aix"', {
+ # 'ldflags': [ '-Wl,-bbigtoc' ],
+ #}],
+
+ deps = [
+ ":unittests_sources",
+ "../..:v8_for_testing",
+ "../..:v8_libbase",
+ "../..:v8_libplatform",
+ "//build/config:exe_and_shlib_deps",
+ "//build/win:default_exe_manifest",
+ "//testing/gmock",
+ "//testing/gtest",
+ ]
+
+ configs = [
+ "../..:external_config",
+ "../..:internal_config_base",
+ ]
+}
+v8_source_set("unittests_sources") {
+ testonly = true
+
sources = [
"../../test/common/wasm/wasm-macro-gen.h",
"../../testing/gmock-support.h",
@@ -41,6 +65,8 @@ v8_executable("unittests") {
"base/utils/random-number-generator-unittest.cc",
"cancelable-tasks-unittest.cc",
"char-predicates-unittest.cc",
+ "code-stub-assembler-unittest.cc",
+ "code-stub-assembler-unittest.h",
"compiler-dispatcher/compiler-dispatcher-tracer-unittest.cc",
"compiler-dispatcher/compiler-dispatcher-unittest.cc",
"compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc",
@@ -48,6 +74,8 @@ v8_executable("unittests") {
"compiler/branch-elimination-unittest.cc",
"compiler/bytecode-analysis-unittest.cc",
"compiler/checkpoint-elimination-unittest.cc",
+ "compiler/code-assembler-unittest.cc",
+ "compiler/code-assembler-unittest.h",
"compiler/common-operator-reducer-unittest.cc",
"compiler/common-operator-unittest.cc",
"compiler/compiler-test-utils.h",
@@ -56,7 +84,6 @@ v8_executable("unittests") {
"compiler/dead-code-elimination-unittest.cc",
"compiler/diamond-unittest.cc",
"compiler/effect-control-linearizer-unittest.cc",
- "compiler/escape-analysis-unittest.cc",
"compiler/graph-reducer-unittest.cc",
"compiler/graph-reducer-unittest.h",
"compiler/graph-trimmer-unittest.cc",
@@ -101,8 +128,10 @@ v8_executable("unittests") {
"compiler/value-numbering-reducer-unittest.cc",
"compiler/zone-stats-unittest.cc",
"counters-unittest.cc",
+ "detachable-vector-unittest.cc",
"eh-frame-iterator-unittest.cc",
"eh-frame-writer-unittest.cc",
+ "heap/barrier-unittest.cc",
"heap/bitmap-unittest.cc",
"heap/embedder-tracing-unittest.cc",
"heap/gc-idle-time-handler-unittest.cc",
@@ -153,6 +182,7 @@ v8_executable("unittests") {
"wasm/loop-assignment-analysis-unittest.cc",
"wasm/module-decoder-unittest.cc",
"wasm/streaming-decoder-unittest.cc",
+ "wasm/wasm-heap-unittest.cc",
"wasm/wasm-macro-gen-unittest.cc",
"wasm/wasm-module-builder-unittest.cc",
"wasm/wasm-opcodes-unittest.cc",
@@ -162,6 +192,15 @@ v8_executable("unittests") {
"zone/zone-unittest.cc",
]
+ if (use_jumbo_build) {
+ jumbo_excluded_sources = [
+ # TODO(mostynb@opera.com): figure out how to make this build in both
+ # modes. Template issues on windows if we add a functional_unittest
+ # namespace as with the other files.
+ "base/functional-unittest.cc",
+ ]
+ }
+
if (v8_current_cpu == "arm") {
sources += [ "compiler/arm/instruction-selector-arm-unittest.cc" ]
} else if (v8_current_cpu == "arm64") {
@@ -185,11 +224,6 @@ v8_executable("unittests") {
"../..:internal_config_base",
]
- # TODO(machenbach): Translate from gyp.
- #['OS=="aix"', {
- # 'ldflags': [ '-Wl,-bbigtoc' ],
- #}],
-
deps = [
"../..:v8_for_testing",
"../..:v8_libbase",
diff --git a/deps/v8/test/unittests/api/remote-object-unittest.cc b/deps/v8/test/unittests/api/remote-object-unittest.cc
index 27612fae6f..40754d50f4 100644
--- a/deps/v8/test/unittests/api/remote-object-unittest.cc
+++ b/deps/v8/test/unittests/api/remote-object-unittest.cc
@@ -11,6 +11,7 @@
#include "test/unittests/test-utils.h"
namespace v8 {
+namespace remote_object_unittest {
typedef TestWithIsolate RemoteObjectTest;
@@ -115,4 +116,5 @@ TEST_F(RemoteObjectTest, ClassOf) {
EXPECT_STREQ("test_class", *result);
}
+} // namespace remote_object_unittest
} // namespace v8
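
Note: the remote_object_unittest namespace added above is the first of many per-file namespaces introduced in this commit; the jumbo-build handling added to BUILD.gn earlier (see the functional-unittest TODO there) refers to the same pattern. A jumbo build compiles many .cc files as a single translation unit, so helper types defined at namespace scope in two test files would otherwise collide. A minimal sketch of the collision the per-file namespace avoids (file names here are hypothetical):

    // Conceptual jumbo translation unit: two test files concatenated into one.
    namespace v8 {
    namespace file_a_unittest {
    class Helper {};  // file A's local helper
    }  // namespace file_a_unittest
    namespace file_b_unittest {
    class Helper {};  // file B's helper; no ODR clash thanks to the namespace
    }  // namespace file_b_unittest
    }  // namespace v8

    int main() { return 0; }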
diff --git a/deps/v8/test/unittests/base/logging-unittest.cc b/deps/v8/test/unittests/base/logging-unittest.cc
index 35da8a33db..fd334ec49d 100644
--- a/deps/v8/test/unittests/base/logging-unittest.cc
+++ b/deps/v8/test/unittests/base/logging-unittest.cc
@@ -10,6 +10,7 @@
namespace v8 {
namespace base {
+namespace logging_unittest {
namespace {
@@ -67,6 +68,23 @@ TEST(LoggingTest, CompareAgainstStaticConstPointer) {
CHECK_##name(lhs, rhs); \
DCHECK_##name(lhs, rhs)
+namespace {
+std::string FailureMessage(const char* msg, const char* debug_msg) {
+ std::string regexp(msg);
+#ifdef DEBUG
+ regexp.append(" (").append(debug_msg).append(")");
+#endif
+ size_t last_pos = 0;
+ do {
+ size_t pos = regexp.find_first_of("(){}+*", last_pos);
+ if (pos == std::string::npos) break;
+ regexp.insert(pos, "\\");
+ last_pos = pos + 2;
+ } while (true);
+ return regexp;
+}
+} // namespace
+
TEST(LoggingTest, CompareWithDifferentSignedness) {
int32_t i32 = 10;
uint32_t u32 = 20;
@@ -80,6 +98,11 @@ TEST(LoggingTest, CompareWithDifferentSignedness) {
CHECK_BOTH(IMPLIES, i32, i64);
CHECK_BOTH(IMPLIES, u32, i64);
CHECK_BOTH(IMPLIES, !u32, !i64);
+
+ // Check that the values are output correctly on error.
+ ASSERT_DEATH_IF_SUPPORTED(
+ ([&] { CHECK_GT(i32, u64); })(),
+ FailureMessage("Check failed: i32 > u64", "10 vs. 40"));
}
TEST(LoggingTest, CompareWithReferenceType) {
@@ -93,7 +116,134 @@ TEST(LoggingTest, CompareWithReferenceType) {
CHECK_BOTH(LT, *&i32, u64);
CHECK_BOTH(IMPLIES, *&i32, i64);
CHECK_BOTH(IMPLIES, *&i32, u64);
+
+ // Check that the values are output correctly on error.
+ ASSERT_DEATH_IF_SUPPORTED(
+ ([&] { CHECK_GT(*&i32, u64); })(),
+ FailureMessage("Check failed: *&i32 > u64", "10 vs. 40"));
+}
+
+enum TestEnum1 { ONE, TWO };
+enum TestEnum2 : uint16_t { FOO = 14, BAR = 5 };
+enum class TestEnum3 { A, B };
+enum class TestEnum4 : uint8_t { FIRST, SECOND };
+
+TEST(LoggingTest, CompareEnumTypes) {
+ // All these checks should compile (!) and succeed.
+ CHECK_BOTH(EQ, ONE, ONE);
+ CHECK_BOTH(LT, ONE, TWO);
+ CHECK_BOTH(EQ, BAR, 5);
+ CHECK_BOTH(LT, BAR, FOO);
+ CHECK_BOTH(EQ, TestEnum3::A, TestEnum3::A);
+ CHECK_BOTH(LT, TestEnum3::A, TestEnum3::B);
+ CHECK_BOTH(EQ, TestEnum4::FIRST, TestEnum4::FIRST);
+ CHECK_BOTH(LT, TestEnum4::FIRST, TestEnum4::SECOND);
+}
+
+class TestClass1 {
+ public:
+ bool operator==(const TestClass1&) const { return true; }
+ bool operator!=(const TestClass1&) const { return false; }
+};
+class TestClass2 {
+ public:
+ explicit TestClass2(int val) : val_(val) {}
+ bool operator<(const TestClass2& other) const { return val_ < other.val_; }
+ int val() const { return val_; }
+
+ private:
+ int val_;
+};
+std::ostream& operator<<(std::ostream& str, const TestClass2& val) {
+ return str << "TestClass2(" << val.val() << ")";
+}
+
+TEST(LoggingTest, CompareClassTypes) {
+ // All these checks should compile (!) and succeed.
+ CHECK_BOTH(EQ, TestClass1{}, TestClass1{});
+ CHECK_BOTH(LT, TestClass2{2}, TestClass2{7});
+
+ // Check that the values are output correctly on error.
+ ASSERT_DEATH_IF_SUPPORTED(
+ ([&] { CHECK_NE(TestClass1{}, TestClass1{}); })(),
+ FailureMessage("Check failed: TestClass1{} != TestClass1{}",
+ "<unprintable> vs. <unprintable>"));
+ ASSERT_DEATH_IF_SUPPORTED(
+ ([&] { CHECK_LT(TestClass2{4}, TestClass2{3}); })(),
+ FailureMessage("Check failed: TestClass2{4} < TestClass2{3}",
+ "TestClass2(4) vs. TestClass2(3)"));
+}
+
+TEST(LoggingDeathTest, OutputEnumValues) {
+ ASSERT_DEATH_IF_SUPPORTED(
+ ([&] { CHECK_EQ(ONE, TWO); })(),
+ FailureMessage("Check failed: ONE == TWO", "0 vs. 1"));
+ ASSERT_DEATH_IF_SUPPORTED(
+ ([&] { CHECK_NE(BAR, 2 + 3); })(),
+ FailureMessage("Check failed: BAR != 2 + 3", "5 vs. 5"));
+ ASSERT_DEATH_IF_SUPPORTED(
+ ([&] { CHECK_EQ(TestEnum3::A, TestEnum3::B); })(),
+ FailureMessage("Check failed: TestEnum3::A == TestEnum3::B", "0 vs. 1"));
+ ASSERT_DEATH_IF_SUPPORTED(
+ ([&] { CHECK_GE(TestEnum4::FIRST, TestEnum4::SECOND); })(),
+ FailureMessage("Check failed: TestEnum4::FIRST >= TestEnum4::SECOND",
+ "0 vs. 1"));
+}
+
+enum TestEnum5 { TEST_A, TEST_B };
+enum class TestEnum6 { TEST_C, TEST_D };
+std::ostream& operator<<(std::ostream& str, TestEnum5 val) {
+ return str << (val == TEST_A ? "A" : "B");
+}
+void operator<<(std::ostream& str, TestEnum6 val) {
+ str << (val == TestEnum6::TEST_C ? "C" : "D");
+}
+
+TEST(LoggingDeathTest, OutputEnumWithOutputOperator) {
+ ASSERT_DEATH_IF_SUPPORTED(
+ ([&] { CHECK_EQ(TEST_A, TEST_B); })(),
+ FailureMessage("Check failed: TEST_A == TEST_B", "A vs. B"));
+ ASSERT_DEATH_IF_SUPPORTED(
+ ([&] { CHECK_GE(TestEnum6::TEST_C, TestEnum6::TEST_D); })(),
+ FailureMessage("Check failed: TestEnum6::TEST_C >= TestEnum6::TEST_D",
+ "C vs. D"));
+}
+
+TEST(LoggingDeathTest, FatalKills) {
+ ASSERT_DEATH_IF_SUPPORTED(FATAL("Dread pirate"), "Dread pirate");
+}
+
+TEST(LoggingDeathTest, DcheckIsOnlyFatalInDebug) {
+#ifdef DEBUG
+ ASSERT_DEATH_IF_SUPPORTED(DCHECK(false && "Dread pirate"), "Dread pirate");
+#else
+ // DCHECK should be non-fatal if DEBUG is undefined.
+ DCHECK(false && "I'm a benign teapot");
+#endif
+}
+
+namespace {
+void DcheckOverrideFunction(const char*, int, const char*) {}
+} // namespace
+
+TEST(LoggingDeathTest, V8_DcheckCanBeOverridden) {
+ // Default DCHECK state should be fatal.
+ ASSERT_DEATH_IF_SUPPORTED(V8_Dcheck(__FILE__, __LINE__, "Dread pirate"),
+ "Dread pirate");
+
+ ASSERT_DEATH_IF_SUPPORTED(
+ {
+ v8::base::SetDcheckFunction(&DcheckOverrideFunction);
+ // This should be non-fatal.
+ V8_Dcheck(__FILE__, __LINE__, "I'm a benign teapot.");
+
+ // Restore default behavior, and assert on lethality.
+ v8::base::SetDcheckFunction(nullptr);
+ V8_Dcheck(__FILE__, __LINE__, "Dread pirate");
+ },
+ "Dread pirate");
}
+} // namespace logging_unittest
} // namespace base
} // namespace v8
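
Note: FailureMessage above builds the pattern that ASSERT_DEATH_IF_SUPPORTED matches as a regular expression, which is why the '(' and ')' around the value dump must be backslash-escaped. A standalone sketch, with the helper copied verbatim from the diff and only the scaffolding added, shows what the death tests expect:

    #include <iostream>
    #include <string>

    // Copy of the helper from logging-unittest.cc: appends the value dump in
    // DEBUG builds, then backslash-escapes the regex metacharacters (){}+*.
    std::string FailureMessage(const char* msg, const char* debug_msg) {
      std::string regexp(msg);
    #ifdef DEBUG
      regexp.append(" (").append(debug_msg).append(")");
    #endif
      size_t last_pos = 0;
      do {
        size_t pos = regexp.find_first_of("(){}+*", last_pos);
        if (pos == std::string::npos) break;
        regexp.insert(pos, "\\");
        last_pos = pos + 2;  // skip past the backslash and the escaped char
      } while (true);
      return regexp;
    }

    int main() {
      // DEBUG build prints:   Check failed: i32 > u64 \(10 vs. 40\)
      // release build prints: Check failed: i32 > u64
      std::cout << FailureMessage("Check failed: i32 > u64", "10 vs. 40")
                << "\n";
    }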
diff --git a/deps/v8/test/unittests/base/template-utils-unittest.cc b/deps/v8/test/unittests/base/template-utils-unittest.cc
index 1f434faee8..ea8796a123 100644
--- a/deps/v8/test/unittests/base/template-utils-unittest.cc
+++ b/deps/v8/test/unittests/base/template-utils-unittest.cc
@@ -8,6 +8,7 @@
namespace v8 {
namespace base {
+namespace template_utils_unittest {
////////////////////////////
// Test make_array.
@@ -80,5 +81,26 @@ TEST_PASS_VALUE_OR_REF0(false, const std::string&, const std::string&);
TEST_PASS_VALUE_OR_REF0(false, int, const int);
TEST_PASS_VALUE_OR_REF0(false, int, const int&);
+//////////////////////////////
+// Test has_output_operator.
+//////////////////////////////
+
+// Intrinsic types:
+static_assert(has_output_operator<int>::value, "int can be output");
+static_assert(has_output_operator<void*>::value, "void* can be output");
+static_assert(has_output_operator<uint64_t>::value, "uint64_t can be output");
+
+// Classes:
+class TestClass1 {};
+class TestClass2 {};
+extern std::ostream& operator<<(std::ostream& str, TestClass2&);
+static_assert(!has_output_operator<TestClass1>::value,
+ "TestClass1 can not be output");
+static_assert(has_output_operator<TestClass2>::value,
+ "non-const TestClass2 can be output");
+static_assert(!has_output_operator<const TestClass2>::value,
+ "const TestClass2 can not be output");
+
+} // namespace template_utils_unittest
} // namespace base
} // namespace v8
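
Note: the static_asserts above exercise a has_output_operator trait, presumably defined in src/base/template-utils.h alongside the other utilities this file tests; the diff does not show its definition. One conventional SFINAE formulation that satisfies exactly these asserts looks like this (a sketch, not V8's actual code):

    #include <ostream>
    #include <type_traits>
    #include <utility>

    template <typename T, typename = void>
    struct has_output_operator : std::false_type {};

    // Chosen when `ostream << T&` is well-formed. Testing against T& rather
    // than const T& is what makes const TestClass2 report false when
    // operator<< takes a non-const reference, matching the asserts above.
    template <typename T>
    struct has_output_operator<
        T, decltype(void(std::declval<std::ostream&>() << std::declval<T&>()))>
        : std::true_type {};

    class NoOutput {};
    static_assert(has_output_operator<int>::value, "int can be output");
    static_assert(!has_output_operator<NoOutput>::value, "no operator<<");

    int main() { return 0; }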
diff --git a/deps/v8/test/unittests/code-stub-assembler-unittest.cc b/deps/v8/test/unittests/code-stub-assembler-unittest.cc
new file mode 100644
index 0000000000..0ef06c71ff
--- /dev/null
+++ b/deps/v8/test/unittests/code-stub-assembler-unittest.cc
@@ -0,0 +1,72 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/code-stub-assembler-unittest.h"
+
+#include "src/code-factory.h"
+#include "src/compiler/node.h"
+#include "src/interface-descriptors.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "test/unittests/compiler/compiler-test-utils.h"
+#include "test/unittests/compiler/node-test-utils.h"
+
+using ::testing::_;
+using v8::internal::compiler::Node;
+
+namespace c = v8::internal::compiler;
+
+namespace v8 {
+namespace internal {
+
+#ifdef ENABLE_VERIFY_CSA
+#define IS_BITCAST_WORD_TO_TAGGED_SIGNED(x) IsBitcastWordToTaggedSigned(x)
+#define IS_BITCAST_TAGGED_TO_WORD(x) IsBitcastTaggedToWord(x)
+#else
+#define IS_BITCAST_WORD_TO_TAGGED_SIGNED(x) (x)
+#define IS_BITCAST_TAGGED_TO_WORD(x) (x)
+#endif
+
+CodeStubAssemblerTestState::CodeStubAssemblerTestState(
+ CodeStubAssemblerTest* test)
+ : compiler::CodeAssemblerState(test->isolate(), test->zone(),
+ VoidDescriptor(test->isolate()), Code::STUB,
+ "test") {}
+
+TARGET_TEST_F(CodeStubAssemblerTest, SmiTag) {
+ CodeStubAssemblerTestState state(this);
+ CodeStubAssemblerForTest m(&state);
+ Node* value = m.Int32Constant(44);
+ EXPECT_THAT(m.SmiTag(value),
+ IS_BITCAST_WORD_TO_TAGGED_SIGNED(c::IsIntPtrConstant(
+ static_cast<intptr_t>(44) << (kSmiShiftSize + kSmiTagSize))));
+ EXPECT_THAT(m.SmiUntag(value),
+ c::IsIntPtrConstant(static_cast<intptr_t>(44) >>
+ (kSmiShiftSize + kSmiTagSize)));
+}
+
+TARGET_TEST_F(CodeStubAssemblerTest, IntPtrMax) {
+ CodeStubAssemblerTestState state(this);
+ CodeStubAssemblerForTest m(&state);
+ {
+ Node* a = m.IntPtrConstant(100);
+ Node* b = m.IntPtrConstant(1);
+ Node* z = m.IntPtrMax(a, b);
+ EXPECT_THAT(z, c::IsIntPtrConstant(100));
+ }
+}
+
+TARGET_TEST_F(CodeStubAssemblerTest, IntPtrMin) {
+ CodeStubAssemblerTestState state(this);
+ CodeStubAssemblerForTest m(&state);
+ {
+ Node* a = m.IntPtrConstant(100);
+ Node* b = m.IntPtrConstant(1);
+ Node* z = m.IntPtrMin(a, b);
+ EXPECT_THAT(z, c::IsIntPtrConstant(1));
+ }
+}
+
+} // namespace internal
+} // namespace v8
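
Note: the SmiTag test above checks pure shift arithmetic: a small integer (Smi) is stored shifted left by kSmiShiftSize + kSmiTagSize so the tag bits end up zero, and the IS_BITCAST_* macros only account for the extra bitcast node emitted when ENABLE_VERIFY_CSA is defined. A plain-C++ sketch of the arithmetic, assuming the usual constants (kSmiTagSize is 1; kSmiShiftSize is 31 on typical 64-bit builds and 0 on 32-bit):

    #include <cstdint>
    #include <iostream>

    constexpr int kSmiTagSize = 1;
    constexpr int kSmiShiftSize = sizeof(void*) == 8 ? 31 : 0;

    intptr_t SmiTag(intptr_t value) {
      return value << (kSmiShiftSize + kSmiTagSize);  // tag bits become 0
    }

    intptr_t SmiUntag(intptr_t tagged) {
      return tagged >> (kSmiShiftSize + kSmiTagSize);  // arithmetic shift back
    }

    int main() {
      std::cout << SmiUntag(SmiTag(44)) << "\n";  // prints 44
    }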
diff --git a/deps/v8/test/unittests/code-stub-assembler-unittest.h b/deps/v8/test/unittests/code-stub-assembler-unittest.h
new file mode 100644
index 0000000000..2c32e0f9b7
--- /dev/null
+++ b/deps/v8/test/unittests/code-stub-assembler-unittest.h
@@ -0,0 +1,35 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_UNITTESTS_CODE_STUB_ASSEMBLER_UNITTEST_H_
+#define V8_UNITTESTS_CODE_STUB_ASSEMBLER_UNITTEST_H_
+
+#include "src/code-stub-assembler.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock-support.h"
+
+namespace v8 {
+namespace internal {
+
+class CodeStubAssemblerTest : public TestWithIsolateAndZone {
+ public:
+ CodeStubAssemblerTest() {}
+ ~CodeStubAssemblerTest() override {}
+};
+
+class CodeStubAssemblerTestState : public compiler::CodeAssemblerState {
+ public:
+ explicit CodeStubAssemblerTestState(CodeStubAssemblerTest* test);
+};
+
+class CodeStubAssemblerForTest : public CodeStubAssembler {
+ public:
+ explicit CodeStubAssemblerForTest(CodeStubAssemblerTestState* state)
+ : CodeStubAssembler(state) {}
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_UNITTESTS_CODE_STUB_ASSEMBLER_UNITTEST_H_
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
index 317feaab1c..5d776d318b 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
@@ -160,6 +160,10 @@ class MockPlatform : public v8::Platform {
return time_;
}
+ double CurrentClockTimeMillis() override {
+ return time_ * base::Time::kMillisecondsPerSecond;
+ }
+
v8::TracingController* GetTracingController() override {
return tracing_controller_;
}
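
Note: CurrentClockTimeMillis is derived from the same fake time_ field that MonotonicallyIncreasingTime returns, so a test that advances mock time sees consistent monotonic and wall-clock readings; base::Time::kMillisecondsPerSecond is 1000. A minimal sketch of the pattern outside the V8 API:

    #include <cstdio>

    struct MockClock {
      double time_ = 0;  // seconds, advanced explicitly by the test
      double MonotonicallyIncreasingTime() const { return time_; }
      double CurrentClockTimeMillis() const { return time_ * 1000; }
    };

    int main() {
      MockClock clock;
      clock.time_ = 2.5;
      std::printf("%g s, %g ms\n", clock.MonotonicallyIncreasingTime(),
                  clock.CurrentClockTimeMillis());  // 2.5 s, 2500 ms
    }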
diff --git a/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
index c78b817c69..8280810293 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
@@ -30,8 +30,7 @@ class BlockingCompilationJob : public CompilationJob {
State::kReadyToExecute),
shared_(function->shared()),
parse_info_(shared_),
- info_(parse_info_.zone(), function->GetIsolate(), parse_info_.script(),
- shared_, function),
+ info_(parse_info_.zone(), function->GetIsolate(), shared_, function),
blocking_(false),
semaphore_(0) {}
~BlockingCompilationJob() override = default;
diff --git a/deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc
index d852a19026..36bafcf006 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc
@@ -351,7 +351,7 @@ TEST_F(UnoptimizedCompileJobTest, LazyInnerFunctions) {
Handle<JSFunction> e =
Handle<JSFunction>::cast(test::RunJS(isolate(), "f();"));
- ASSERT_FALSE(e->shared()->HasBaselineCode());
+ ASSERT_FALSE(e->shared()->is_compiled());
job->ResetOnMainThread(i_isolate());
ASSERT_JOB_STATUS(UnoptimizedCompileJob::Status::kInitial, job);
diff --git a/deps/v8/test/unittests/compiler/code-assembler-unittest.cc b/deps/v8/test/unittests/compiler/code-assembler-unittest.cc
new file mode 100644
index 0000000000..604e1baf86
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/code-assembler-unittest.cc
@@ -0,0 +1,254 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/compiler/code-assembler-unittest.h"
+
+#include "src/code-factory.h"
+#include "src/compiler/node.h"
+#include "src/interface-descriptors.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "test/unittests/compiler/compiler-test-utils.h"
+#include "test/unittests/compiler/node-test-utils.h"
+
+using ::testing::_;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+CodeAssemblerTestState::CodeAssemblerTestState(CodeAssemblerTest* test)
+ : CodeAssemblerState(test->isolate(), test->zone(),
+ VoidDescriptor(test->isolate()), Code::STUB, "test") {}
+
+TARGET_TEST_F(CodeAssemblerTest, IntPtrAdd) {
+ CodeAssemblerTestState state(this);
+ CodeAssemblerForTest m(&state);
+ {
+ Node* a = m.Parameter(0);
+ Node* b = m.Int32Constant(1);
+ Node* add = m.IntPtrAdd(a, b);
+ EXPECT_THAT(add, IsIntPtrAdd(a, b));
+ }
+ // x + 0 => x
+ {
+ Node* a = m.Parameter(0);
+ Node* b = m.Int32Constant(0);
+ Node* add = m.IntPtrAdd(a, b);
+ EXPECT_THAT(add, a);
+ }
+ // 0 + x => x
+ {
+ Node* a = m.Parameter(0);
+ Node* b = m.Int32Constant(0);
+ Node* add = m.IntPtrAdd(b, a);
+ EXPECT_THAT(add, a);
+ }
+ // CONST_a + CONST_b => CONST_c
+ {
+ Node* a = m.IntPtrConstant(22);
+ Node* b = m.IntPtrConstant(33);
+ Node* c = m.IntPtrAdd(a, b);
+ EXPECT_THAT(c, IsIntPtrConstant(55));
+ }
+}
+
+TARGET_TEST_F(CodeAssemblerTest, IntPtrSub) {
+ CodeAssemblerTestState state(this);
+ CodeAssemblerForTest m(&state);
+ {
+ Node* a = m.Parameter(0);
+ Node* b = m.Int32Constant(1);
+ Node* sub = m.IntPtrSub(a, b);
+ EXPECT_THAT(sub, IsIntPtrSub(a, b));
+ }
+ // x - 0 => x
+ {
+ Node* a = m.Parameter(0);
+ Node* b = m.Int32Constant(0);
+ Node* c = m.IntPtrSub(a, b);
+ EXPECT_THAT(c, a);
+ }
+ // CONST_a - CONST_b => CONST_c
+ {
+ Node* a = m.IntPtrConstant(100);
+ Node* b = m.IntPtrConstant(1);
+ Node* c = m.IntPtrSub(a, b);
+ EXPECT_THAT(c, IsIntPtrConstant(99));
+ }
+}
+
+TARGET_TEST_F(CodeAssemblerTest, IntPtrMul) {
+ CodeAssemblerTestState state(this);
+ CodeAssemblerForTest m(&state);
+ {
+ Node* a = m.Parameter(0);
+ Node* b = m.Int32Constant(100);
+ Node* mul = m.IntPtrMul(a, b);
+ EXPECT_THAT(mul, IsIntPtrMul(a, b));
+ }
+ // x * 1 => x
+ {
+ Node* a = m.Parameter(0);
+ Node* b = m.Int32Constant(1);
+ Node* mul = m.IntPtrMul(a, b);
+ EXPECT_THAT(mul, a);
+ }
+ // 1 * x => x
+ {
+ Node* a = m.Parameter(0);
+ Node* b = m.Int32Constant(1);
+ Node* mul = m.IntPtrMul(b, a);
+ EXPECT_THAT(mul, a);
+ }
+ // CONST_a * CONST_b => CONST_c
+ {
+ Node* a = m.IntPtrConstant(100);
+ Node* b = m.IntPtrConstant(5);
+ Node* c = m.IntPtrMul(a, b);
+ EXPECT_THAT(c, IsIntPtrConstant(500));
+ }
+}
+
+TARGET_TEST_F(CodeAssemblerTest, WordShl) {
+ CodeAssemblerTestState state(this);
+ CodeAssemblerForTest m(&state);
+ {
+ Node* a = m.Parameter(0);
+ Node* add = m.WordShl(a, 10);
+ EXPECT_THAT(add, IsWordShl(a, IsIntPtrConstant(10)));
+ }
+ // x << 0 => x
+ {
+ Node* a = m.Parameter(0);
+ Node* add = m.WordShl(a, 0);
+ EXPECT_THAT(add, a);
+ }
+ // CONST_a << CONST_b => CONST_c
+ {
+ Node* a = m.IntPtrConstant(1024);
+ Node* shl = m.WordShl(a, 2);
+ EXPECT_THAT(shl, IsIntPtrConstant(4096));
+ }
+}
+
+TARGET_TEST_F(CodeAssemblerTest, WordShr) {
+ CodeAssemblerTestState state(this);
+ CodeAssemblerForTest m(&state);
+ {
+ Node* a = m.Parameter(0);
+ Node* shr = m.WordShr(a, 10);
+ EXPECT_THAT(shr, IsWordShr(a, IsIntPtrConstant(10)));
+ }
+ // x >> 0 => x
+ {
+ Node* a = m.Parameter(0);
+ Node* shr = m.WordShr(a, 0);
+ EXPECT_THAT(shr, a);
+ }
+ // +CONST_a >> CONST_b => CONST_c
+ {
+ Node* a = m.IntPtrConstant(4096);
+ Node* shr = m.WordShr(a, 2);
+ EXPECT_THAT(shr, IsIntPtrConstant(1024));
+ }
+ // -CONST_a >> CONST_b => CONST_c
+ {
+ Node* a = m.IntPtrConstant(-1234);
+ Node* shr = m.WordShr(a, 2);
+ EXPECT_THAT(shr, IsIntPtrConstant(static_cast<uintptr_t>(-1234) >> 2));
+ }
+}
+
+TARGET_TEST_F(CodeAssemblerTest, WordSar) {
+ CodeAssemblerTestState state(this);
+ CodeAssemblerForTest m(&state);
+ {
+ Node* a = m.Parameter(0);
+ Node* sar = m.WordSar(a, m.IntPtrConstant(10));
+ EXPECT_THAT(sar, IsWordSar(a, IsIntPtrConstant(10)));
+ }
+ // x >>> 0 => x
+ {
+ Node* a = m.Parameter(0);
+ Node* sar = m.WordSar(a, m.IntPtrConstant(0));
+ EXPECT_THAT(sar, a);
+ }
+ // +CONST_a >>> CONST_b => CONST_c
+ {
+ Node* a = m.IntPtrConstant(4096);
+ Node* sar = m.WordSar(a, m.IntPtrConstant(2));
+ EXPECT_THAT(sar, IsIntPtrConstant(1024));
+ }
+ // -CONST_a >>> CONST_b => CONST_c
+ {
+ Node* a = m.IntPtrConstant(-1234);
+ Node* sar = m.WordSar(a, m.IntPtrConstant(2));
+ EXPECT_THAT(sar, IsIntPtrConstant(static_cast<intptr_t>(-1234) >> 2));
+ }
+}
+
+TARGET_TEST_F(CodeAssemblerTest, WordOr) {
+ CodeAssemblerTestState state(this);
+ CodeAssemblerForTest m(&state);
+ {
+ Node* a = m.Parameter(0);
+ Node* z = m.WordOr(a, m.IntPtrConstant(8));
+ EXPECT_THAT(z, IsWordOr(a, IsIntPtrConstant(8)));
+ }
+ // x | 0 => x
+ {
+ Node* a = m.Parameter(0);
+ Node* z = m.WordOr(a, m.IntPtrConstant(0));
+ EXPECT_THAT(z, a);
+ }
+ // 0 | x => x
+ {
+ Node* a = m.Parameter(0);
+ Node* z = m.WordOr(m.IntPtrConstant(0), a);
+ EXPECT_THAT(z, a);
+ }
+ // CONST_a | CONST_b => CONST_c
+ {
+ Node* a = m.IntPtrConstant(3);
+ Node* b = m.WordOr(a, m.IntPtrConstant(7));
+ EXPECT_THAT(b, IsIntPtrConstant(7));
+ }
+}
+
+TARGET_TEST_F(CodeAssemblerTest, WordAnd) {
+ CodeAssemblerTestState state(this);
+ CodeAssemblerForTest m(&state);
+ {
+ Node* a = m.Parameter(0);
+ Node* z = m.WordAnd(a, m.IntPtrConstant(8));
+ EXPECT_THAT(z, IsWordAnd(a, IsIntPtrConstant(8)));
+ }
+ // CONST_a & CONST_b => CONST_c
+ {
+ Node* a = m.IntPtrConstant(3);
+ Node* b = m.WordAnd(a, m.IntPtrConstant(7));
+ EXPECT_THAT(b, IsIntPtrConstant(3));
+ }
+}
+
+TARGET_TEST_F(CodeAssemblerTest, WordXor) {
+ CodeAssemblerTestState state(this);
+ CodeAssemblerForTest m(&state);
+ {
+ Node* a = m.Parameter(0);
+ Node* z = m.WordXor(a, m.IntPtrConstant(8));
+ EXPECT_THAT(z, IsWordXor(a, IsIntPtrConstant(8)));
+ }
+ // CONST_a ^ CONST_b => CONST_c
+ {
+ Node* a = m.IntPtrConstant(3);
+ Node* b = m.WordXor(a, m.IntPtrConstant(7));
+ EXPECT_THAT(b, IsIntPtrConstant(4));
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
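
Note: the casts in the WordShr and WordSar tests above encode the difference between the two operators: Shr is a logical shift that zero-fills the vacated high bits, while Sar is an arithmetic shift that replicates the sign bit. In plain C++ (assuming the usual compiler behavior for >> on negative signed values, which C++20 finally guarantees):

    #include <cstdint>
    #include <iostream>

    int main() {
      intptr_t value = -1234;
      // Logical shift: reinterpret as unsigned first; high bits become zero.
      uintptr_t shr = static_cast<uintptr_t>(value) >> 2;
      // Arithmetic shift: the sign bit is replicated; the result stays
      // negative.
      intptr_t sar = value >> 2;  // -309
      std::cout << shr << " " << sar << "\n";
    }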
diff --git a/deps/v8/test/unittests/compiler/code-assembler-unittest.h b/deps/v8/test/unittests/compiler/code-assembler-unittest.h
new file mode 100644
index 0000000000..21f3df5f4b
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/code-assembler-unittest.h
@@ -0,0 +1,37 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_UNITTESTS_COMPILER_CODE_ASSEMBLER_UNITTEST_H_
+#define V8_UNITTESTS_COMPILER_CODE_ASSEMBLER_UNITTEST_H_
+
+#include "src/compiler/code-assembler.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock-support.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class CodeAssemblerTest : public TestWithIsolateAndZone {
+ public:
+ CodeAssemblerTest() {}
+ ~CodeAssemblerTest() override {}
+};
+
+class CodeAssemblerTestState : public CodeAssemblerState {
+ public:
+ explicit CodeAssemblerTestState(CodeAssemblerTest* test);
+};
+
+class CodeAssemblerForTest : public CodeAssembler {
+ public:
+ explicit CodeAssemblerForTest(CodeAssemblerTestState* state)
+ : CodeAssembler(state) {}
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_UNITTESTS_COMPILER_CODE_ASSEMBLER_UNITTEST_H_
diff --git a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
index ecc3070785..6618dfb452 100644
--- a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
@@ -17,6 +17,7 @@ using testing::StrictMock;
namespace v8 {
namespace internal {
namespace compiler {
+namespace common_operator_reducer_unittest {
class CommonOperatorReducerTest : public GraphTest {
public:
@@ -491,6 +492,7 @@ TEST_F(CommonOperatorReducerTest, SelectToFloat64Abs) {
EXPECT_THAT(r.replacement(), IsFloat64Abs(p0));
}
+} // namespace common_operator_reducer_unittest
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/common-operator-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-unittest.cc
index 761ce70a01..b51d5f3cf6 100644
--- a/deps/v8/test/unittests/compiler/common-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-unittest.cc
@@ -13,7 +13,7 @@
namespace v8 {
namespace internal {
namespace compiler {
-
+namespace common_operator_unittest {
// -----------------------------------------------------------------------------
// Shared operators.
@@ -387,6 +387,7 @@ TEST_F(CommonOperatorTest, Projection) {
}
}
+} // namespace common_operator_unittest
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc b/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc
index d0351bf5f5..d1f914c68e 100644
--- a/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc
@@ -14,6 +14,7 @@ using testing::StrictMock;
namespace v8 {
namespace internal {
namespace compiler {
+namespace dead_code_elimination_unittest {
class DeadCodeEliminationTest : public GraphTest {
public:
@@ -368,6 +369,7 @@ TEST_F(DeadCodeEliminationTest, TerminateWithDeadControlInput) {
EXPECT_THAT(r.replacement(), IsDead());
}
+} // namespace dead_code_elimination_unittest
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/diamond-unittest.cc b/deps/v8/test/unittests/compiler/diamond-unittest.cc
index 5b28a001ef..17a45a5894 100644
--- a/deps/v8/test/unittests/compiler/diamond-unittest.cc
+++ b/deps/v8/test/unittests/compiler/diamond-unittest.cc
@@ -131,13 +131,13 @@ TEST_F(DiamondTest, DiamondPhis) {
TEST_F(DiamondTest, BranchHint) {
Diamond dn(graph(), common(), Parameter(0));
- CHECK(BranchHint::kNone == BranchHintOf(dn.branch->op()));
+ CHECK_EQ(BranchHint::kNone, BranchHintOf(dn.branch->op()));
Diamond dt(graph(), common(), Parameter(0), BranchHint::kTrue);
- CHECK(BranchHint::kTrue == BranchHintOf(dt.branch->op()));
+ CHECK_EQ(BranchHint::kTrue, BranchHintOf(dt.branch->op()));
Diamond df(graph(), common(), Parameter(0), BranchHint::kFalse);
- CHECK(BranchHint::kFalse == BranchHintOf(df.branch->op()));
+ CHECK_EQ(BranchHint::kFalse, BranchHintOf(df.branch->op()));
}
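
Note: switching CHECK(a == b) to CHECK_EQ(a, b) here is not cosmetic: on failure the _EQ form also prints both operand values, and the enum-output support added in logging-unittest.cc above is what makes that work for enum class operands such as BranchHint. A homemade sketch of the idea (not V8's actual macro):

    #include <cstdio>
    #include <cstdlib>

    #define MY_CHECK_EQ(a, b)                                              \
      do {                                                                 \
        if (!((a) == (b))) {                                               \
          std::fprintf(stderr, "Check failed: %s == %s (%d vs. %d)\n", #a, \
                       #b, static_cast<int>(a), static_cast<int>(b));      \
          std::abort();                                                    \
        }                                                                  \
      } while (false)

    enum class BranchHint { kNone, kTrue, kFalse };

    int main() {
      MY_CHECK_EQ(BranchHint::kNone, BranchHint::kNone);  // passes silently
      // MY_CHECK_EQ(BranchHint::kNone, BranchHint::kTrue) would abort with
      // "Check failed: ... (0 vs. 1)".
    }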
diff --git a/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc b/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc
deleted file mode 100644
index 55c0f42fe1..0000000000
--- a/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc
+++ /dev/null
@@ -1,523 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/escape-analysis.h"
-#include "src/bit-vector.h"
-#include "src/compiler/escape-analysis-reducer.h"
-#include "src/compiler/graph-visualizer.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/node-properties.h"
-#include "src/compiler/simplified-operator.h"
-#include "src/compiler/types.h"
-#include "src/zone/zone-containers.h"
-#include "test/unittests/compiler/graph-unittest.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class EscapeAnalysisTest : public TypedGraphTest {
- public:
- EscapeAnalysisTest()
- : simplified_(zone()),
- jsgraph_(isolate(), graph(), common(), nullptr, nullptr, nullptr),
- escape_analysis_(graph(), common(), zone()),
- effect_(graph()->start()),
- control_(graph()->start()) {}
-
- ~EscapeAnalysisTest() {}
-
- EscapeAnalysis* escape_analysis() { return &escape_analysis_; }
-
- protected:
- void Analysis() { escape_analysis_.Run(); }
-
- void Transformation() {
- GraphReducer graph_reducer(zone(), graph());
- EscapeAnalysisReducer escape_reducer(&graph_reducer, &jsgraph_,
- &escape_analysis_, zone());
- graph_reducer.AddReducer(&escape_reducer);
- graph_reducer.ReduceGraph();
- }
-
- // ---------------------------------Node Creation Helper----------------------
-
- Node* BeginRegion(Node* effect = nullptr) {
- if (!effect) {
- effect = effect_;
- }
-
- return effect_ = graph()->NewNode(
- common()->BeginRegion(RegionObservability::kObservable), effect);
- }
-
- Node* FinishRegion(Node* value, Node* effect = nullptr) {
- if (!effect) {
- effect = effect_;
- }
- return effect_ = graph()->NewNode(common()->FinishRegion(), value, effect);
- }
-
- Node* Allocate(Node* size, Node* effect = nullptr, Node* control = nullptr) {
- if (!effect) {
- effect = effect_;
- }
- if (!control) {
- control = control_;
- }
- return effect_ = graph()->NewNode(simplified()->Allocate(Type::Any()), size,
- effect, control);
- }
-
- Node* Constant(int num) {
- return graph()->NewNode(common()->NumberConstant(num));
- }
-
- Node* Store(const FieldAccess& access, Node* allocation, Node* value,
- Node* effect = nullptr, Node* control = nullptr) {
- if (!effect) {
- effect = effect_;
- }
- if (!control) {
- control = control_;
- }
- return effect_ = graph()->NewNode(simplified()->StoreField(access),
- allocation, value, effect, control);
- }
-
- Node* StoreElement(const ElementAccess& access, Node* allocation, Node* index,
- Node* value, Node* effect = nullptr,
- Node* control = nullptr) {
- if (!effect) {
- effect = effect_;
- }
- if (!control) {
- control = control_;
- }
- return effect_ =
- graph()->NewNode(simplified()->StoreElement(access), allocation,
- index, value, effect, control);
- }
-
- Node* Load(const FieldAccess& access, Node* from, Node* effect = nullptr,
- Node* control = nullptr) {
- if (!effect) {
- effect = effect_;
- }
- if (!control) {
- control = control_;
- }
- return graph()->NewNode(simplified()->LoadField(access), from, effect,
- control);
- }
-
- Node* Return(Node* value, Node* effect = nullptr, Node* control = nullptr) {
- if (!effect) {
- effect = effect_;
- }
- if (!control) {
- control = control_;
- }
- Node* zero = graph()->NewNode(common()->NumberConstant(0));
- return control_ = graph()->NewNode(common()->Return(), zero, value, effect,
- control);
- }
-
- void EndGraph() {
- for (Edge edge : graph()->end()->input_edges()) {
- if (NodeProperties::IsControlEdge(edge)) {
- edge.UpdateTo(control_);
- }
- }
- }
-
- Node* Branch() {
- return control_ =
- graph()->NewNode(common()->Branch(), Constant(0), control_);
- }
-
- Node* IfTrue() {
- return control_ = graph()->NewNode(common()->IfTrue(), control_);
- }
-
- Node* IfFalse() { return graph()->NewNode(common()->IfFalse(), control_); }
-
- Node* Merge2(Node* control1, Node* control2) {
- return control_ = graph()->NewNode(common()->Merge(2), control1, control2);
- }
-
- FieldAccess FieldAccessAtIndex(int offset) {
- FieldAccess access = {kTaggedBase, offset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::AnyTagged(),
- kFullWriteBarrier};
- return access;
- }
-
- ElementAccess MakeElementAccess(int header_size) {
- ElementAccess access = {kTaggedBase, header_size, Type::Any(),
- MachineType::AnyTagged(), kFullWriteBarrier};
- return access;
- }
-
- // ---------------------------------Assertion Helper--------------------------
-
- void ExpectReplacement(Node* node, Node* rep) {
- EXPECT_EQ(rep, escape_analysis()->GetReplacement(node));
- }
-
- void ExpectReplacementPhi(Node* node, Node* left, Node* right) {
- Node* rep = escape_analysis()->GetReplacement(node);
- ASSERT_NE(nullptr, rep);
- ASSERT_EQ(IrOpcode::kPhi, rep->opcode());
- EXPECT_EQ(left, NodeProperties::GetValueInput(rep, 0));
- EXPECT_EQ(right, NodeProperties::GetValueInput(rep, 1));
- }
-
- void ExpectVirtual(Node* node) {
- EXPECT_TRUE(node->opcode() == IrOpcode::kAllocate ||
- node->opcode() == IrOpcode::kFinishRegion);
- EXPECT_TRUE(escape_analysis()->IsVirtual(node));
- }
-
- void ExpectEscaped(Node* node) {
- EXPECT_TRUE(node->opcode() == IrOpcode::kAllocate ||
- node->opcode() == IrOpcode::kFinishRegion);
- EXPECT_TRUE(escape_analysis()->IsEscaped(node));
- }
-
- SimplifiedOperatorBuilder* simplified() { return &simplified_; }
-
- Node* effect() { return effect_; }
- Node* control() { return control_; }
-
- private:
- SimplifiedOperatorBuilder simplified_;
- JSGraph jsgraph_;
- EscapeAnalysis escape_analysis_;
-
- Node* effect_;
- Node* control_;
-};
-
-
-// -----------------------------------------------------------------------------
-// Test cases.
-
-
-TEST_F(EscapeAnalysisTest, StraightNonEscape) {
- Node* object1 = Constant(1);
- BeginRegion();
- Node* allocation = Allocate(Constant(kPointerSize));
- Store(FieldAccessAtIndex(0), allocation, object1);
- Node* finish = FinishRegion(allocation);
- Node* load = Load(FieldAccessAtIndex(0), finish);
- Node* result = Return(load);
- EndGraph();
-
- Analysis();
-
- ExpectVirtual(allocation);
- ExpectReplacement(load, object1);
-
- Transformation();
-
- ASSERT_EQ(object1, NodeProperties::GetValueInput(result, 1));
-}
-
-
-TEST_F(EscapeAnalysisTest, StraightNonEscapeNonConstStore) {
- Node* object1 = Constant(1);
- Node* object2 = Constant(2);
- BeginRegion();
- Node* allocation = Allocate(Constant(kPointerSize));
- Store(FieldAccessAtIndex(0), allocation, object1);
- Node* index =
- graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
- object1, object2, control());
- StoreElement(MakeElementAccess(0), allocation, index, object1);
- Node* finish = FinishRegion(allocation);
- Node* load = Load(FieldAccessAtIndex(0), finish);
- Node* result = Return(load);
- EndGraph();
-
- Analysis();
-
- ExpectEscaped(allocation);
- ExpectReplacement(load, nullptr);
-
- Transformation();
-
- ASSERT_EQ(load, NodeProperties::GetValueInput(result, 1));
-}
-
-
-TEST_F(EscapeAnalysisTest, StraightEscape) {
- Node* object1 = Constant(1);
- BeginRegion();
- Node* allocation = Allocate(Constant(kPointerSize));
- Store(FieldAccessAtIndex(0), allocation, object1);
- Node* finish = FinishRegion(allocation);
- Node* load = Load(FieldAccessAtIndex(0), finish);
- Node* result = Return(allocation);
- EndGraph();
- graph()->end()->AppendInput(zone(), load);
-
- Analysis();
-
- ExpectEscaped(allocation);
- ExpectReplacement(load, object1);
-
- Transformation();
-
- ASSERT_EQ(allocation, NodeProperties::GetValueInput(result, 1));
-}
-
-
-TEST_F(EscapeAnalysisTest, StoreLoadEscape) {
- Node* object1 = Constant(1);
-
- BeginRegion();
- Node* allocation1 = Allocate(Constant(kPointerSize));
- Store(FieldAccessAtIndex(0), allocation1, object1);
- Node* finish1 = FinishRegion(allocation1);
-
- BeginRegion();
- Node* allocation2 = Allocate(Constant(kPointerSize));
- Store(FieldAccessAtIndex(0), allocation2, finish1);
- Node* finish2 = FinishRegion(allocation2);
-
- Node* load = Load(FieldAccessAtIndex(0), finish2);
- Node* result = Return(load);
- EndGraph();
- Analysis();
-
- ExpectEscaped(allocation1);
- ExpectVirtual(allocation2);
- ExpectReplacement(load, finish1);
-
- Transformation();
-
- ASSERT_EQ(finish1, NodeProperties::GetValueInput(result, 1));
-}
-
-
-TEST_F(EscapeAnalysisTest, BranchNonEscape) {
- Node* object1 = Constant(1);
- Node* object2 = Constant(2);
- BeginRegion();
- Node* allocation = Allocate(Constant(kPointerSize));
- Store(FieldAccessAtIndex(0), allocation, object1);
- Node* finish = FinishRegion(allocation);
- Branch();
- Node* ifFalse = IfFalse();
- Node* ifTrue = IfTrue();
- Node* effect1 =
- Store(FieldAccessAtIndex(0), allocation, object1, finish, ifFalse);
- Node* effect2 =
- Store(FieldAccessAtIndex(0), allocation, object2, finish, ifTrue);
- Node* merge = Merge2(ifFalse, ifTrue);
- Node* phi = graph()->NewNode(common()->EffectPhi(2), effect1, effect2, merge);
- Node* load = Load(FieldAccessAtIndex(0), finish, phi, merge);
- Node* result = Return(load, phi);
- EndGraph();
- graph()->end()->AppendInput(zone(), result);
-
- Analysis();
-
- ExpectVirtual(allocation);
- ExpectReplacementPhi(load, object1, object2);
- Node* replacement_phi = escape_analysis()->GetReplacement(load);
-
- Transformation();
-
- ASSERT_EQ(replacement_phi, NodeProperties::GetValueInput(result, 1));
-}
-
-
-TEST_F(EscapeAnalysisTest, BranchEscapeOne) {
- Node* object1 = Constant(1);
- Node* object2 = Constant(2);
- Node* index = graph()->NewNode(common()->Parameter(0), start());
- BeginRegion();
- Node* allocation = Allocate(Constant(kPointerSize));
- Store(FieldAccessAtIndex(0), allocation, object1);
- Node* finish = FinishRegion(allocation);
- Branch();
- Node* ifFalse = IfFalse();
- Node* ifTrue = IfTrue();
- Node* effect1 =
- Store(FieldAccessAtIndex(0), allocation, object1, finish, ifFalse);
- Node* effect2 = StoreElement(MakeElementAccess(0), allocation, index, object2,
- finish, ifTrue);
- Node* merge = Merge2(ifFalse, ifTrue);
- Node* phi = graph()->NewNode(common()->EffectPhi(2), effect1, effect2, merge);
- Node* load = Load(FieldAccessAtIndex(0), finish, phi, merge);
- Node* result = Return(load, phi);
- EndGraph();
-
- Analysis();
-
- ExpectEscaped(allocation);
- ExpectReplacement(load, nullptr);
-
- Transformation();
-
- ASSERT_EQ(load, NodeProperties::GetValueInput(result, 1));
-}
-
-
-TEST_F(EscapeAnalysisTest, BranchEscapeThroughStore) {
- Node* object1 = Constant(1);
- Node* object2 = Constant(2);
- BeginRegion();
- Node* allocation = Allocate(Constant(kPointerSize));
- Store(FieldAccessAtIndex(0), allocation, object1);
- FinishRegion(allocation);
- BeginRegion();
- Node* allocation2 = Allocate(Constant(kPointerSize));
- Store(FieldAccessAtIndex(0), allocation, object2);
- Node* finish2 = FinishRegion(allocation2);
- Branch();
- Node* ifFalse = IfFalse();
- Node* ifTrue = IfTrue();
- Node* effect1 =
- Store(FieldAccessAtIndex(0), allocation, allocation2, finish2, ifFalse);
- Node* merge = Merge2(ifFalse, ifTrue);
- Node* phi = graph()->NewNode(common()->EffectPhi(2), effect1, finish2, merge);
- Node* load = Load(FieldAccessAtIndex(0), finish2, phi, merge);
- Node* result = Return(allocation, phi);
- EndGraph();
- graph()->end()->AppendInput(zone(), load);
-
- Analysis();
-
- ExpectEscaped(allocation);
- ExpectEscaped(allocation2);
- ExpectReplacement(load, nullptr);
-
- Transformation();
-
- ASSERT_EQ(allocation, NodeProperties::GetValueInput(result, 1));
-}
-
-
-TEST_F(EscapeAnalysisTest, DanglingLoadOrder) {
- Node* object1 = Constant(1);
- Node* object2 = Constant(2);
- Node* allocation = Allocate(Constant(kPointerSize));
- Node* store1 = Store(FieldAccessAtIndex(0), allocation, object1);
- Node* load1 = Load(FieldAccessAtIndex(0), allocation);
- Node* store2 = Store(FieldAccessAtIndex(0), allocation, object2);
- Node* load2 = Load(FieldAccessAtIndex(0), allocation, store1);
- Node* result = Return(load2);
- EndGraph();
- graph()->end()->AppendInput(zone(), store2);
- graph()->end()->AppendInput(zone(), load1);
-
- Analysis();
-
- ExpectVirtual(allocation);
- ExpectReplacement(load1, object1);
- ExpectReplacement(load2, object1);
-
- Transformation();
-
- ASSERT_EQ(object1, NodeProperties::GetValueInput(result, 1));
-}
-
-
-TEST_F(EscapeAnalysisTest, DeoptReplacement) {
- Node* object1 = Constant(1);
- BeginRegion();
- Node* allocation = Allocate(Constant(kPointerSize));
- Store(FieldAccessAtIndex(0), allocation, object1);
- Node* finish = FinishRegion(allocation);
- Node* effect1 = Store(FieldAccessAtIndex(0), allocation, object1, finish);
- Branch();
- Node* ifFalse = IfFalse();
- Node* state_values1 = graph()->NewNode(
- common()->StateValues(1, SparseInputMask::Dense()), finish);
- Node* state_values2 =
- graph()->NewNode(common()->StateValues(0, SparseInputMask::Dense()));
- Node* state_values3 =
- graph()->NewNode(common()->StateValues(0, SparseInputMask::Dense()));
- Node* frame_state = graph()->NewNode(
- common()->FrameState(BailoutId::None(), OutputFrameStateCombine::Ignore(),
- nullptr),
- state_values1, state_values2, state_values3, UndefinedConstant(),
- graph()->start(), graph()->start());
- Node* deopt = graph()->NewNode(
- common()->Deoptimize(DeoptimizeKind::kEager, DeoptimizeReason::kNoReason),
- frame_state, effect1, ifFalse);
- Node* ifTrue = IfTrue();
- Node* load = Load(FieldAccessAtIndex(0), finish, effect1, ifTrue);
- Node* result = Return(load, effect1, ifTrue);
- EndGraph();
- graph()->end()->AppendInput(zone(), deopt);
- Analysis();
-
- ExpectVirtual(allocation);
- ExpectReplacement(load, object1);
-
- Transformation();
-
- ASSERT_EQ(object1, NodeProperties::GetValueInput(result, 1));
- Node* object_state = NodeProperties::GetValueInput(state_values1, 0);
- ASSERT_EQ(object_state->opcode(), IrOpcode::kObjectState);
- ASSERT_EQ(1, object_state->op()->ValueInputCount());
- ASSERT_EQ(object1, NodeProperties::GetValueInput(object_state, 0));
-}
-
-TEST_F(EscapeAnalysisTest, DISABLED_DeoptReplacementIdentity) {
- Node* object1 = Constant(1);
- BeginRegion();
- Node* allocation = Allocate(Constant(kPointerSize * 2));
- Store(FieldAccessAtIndex(0), allocation, object1);
- Store(FieldAccessAtIndex(kPointerSize), allocation, allocation);
- Node* finish = FinishRegion(allocation);
- Node* effect1 = Store(FieldAccessAtIndex(0), allocation, object1, finish);
- Branch();
- Node* ifFalse = IfFalse();
- Node* state_values1 = graph()->NewNode(
- common()->StateValues(1, SparseInputMask::Dense()), finish);
- Node* state_values2 = graph()->NewNode(
- common()->StateValues(1, SparseInputMask::Dense()), finish);
- Node* state_values3 =
- graph()->NewNode(common()->StateValues(0, SparseInputMask::Dense()));
- Node* frame_state = graph()->NewNode(
- common()->FrameState(BailoutId::None(), OutputFrameStateCombine::Ignore(),
- nullptr),
- state_values1, state_values2, state_values3, UndefinedConstant(),
- graph()->start(), graph()->start());
- Node* deopt = graph()->NewNode(
- common()->Deoptimize(DeoptimizeKind::kEager, DeoptimizeReason::kNoReason),
- frame_state, effect1, ifFalse);
- Node* ifTrue = IfTrue();
- Node* load = Load(FieldAccessAtIndex(0), finish, effect1, ifTrue);
- Node* result = Return(load, effect1, ifTrue);
- EndGraph();
- graph()->end()->AppendInput(zone(), deopt);
- Analysis();
-
- ExpectVirtual(allocation);
- ExpectReplacement(load, object1);
-
- Transformation();
-
- ASSERT_EQ(object1, NodeProperties::GetValueInput(result, 1));
-
- Node* object_state = NodeProperties::GetValueInput(state_values1, 0);
- ASSERT_EQ(object_state->opcode(), IrOpcode::kObjectState);
- ASSERT_EQ(2, object_state->op()->ValueInputCount());
- ASSERT_EQ(object1, NodeProperties::GetValueInput(object_state, 0));
- ASSERT_EQ(object_state, NodeProperties::GetValueInput(object_state, 1));
-
- Node* object_state2 = NodeProperties::GetValueInput(state_values1, 0);
- ASSERT_EQ(object_state, object_state2);
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc b/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
index f6f5994932..dc2f2189d1 100644
--- a/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
@@ -21,6 +21,7 @@ using testing::UnorderedElementsAre;
namespace v8 {
namespace internal {
namespace compiler {
+namespace graph_reducer_unittest {
namespace {
@@ -872,6 +873,7 @@ TEST_F(GraphReducerTest, Order) {
}
}
+} // namespace graph_reducer_unittest
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
index 8f2161a303..b9f5fc4b9f 100644
--- a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
+++ b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
@@ -4,6 +4,8 @@
#include "test/unittests/compiler/instruction-selector-unittest.h"
+#include "src/objects-inl.h"
+
namespace v8 {
namespace internal {
namespace compiler {
diff --git a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
index ee9f7914a6..d629639c49 100644
--- a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
+++ b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
@@ -113,7 +113,7 @@ InstructionSequence* InstructionSequenceTest::sequence() {
void InstructionSequenceTest::StartLoop(int loop_blocks) {
- CHECK(current_block_ == nullptr);
+ CHECK_NULL(current_block_);
if (!loop_blocks_.empty()) {
CHECK(!loop_blocks_.back().loop_header_.IsValid());
}
@@ -123,7 +123,7 @@ void InstructionSequenceTest::StartLoop(int loop_blocks) {
void InstructionSequenceTest::EndLoop() {
- CHECK(current_block_ == nullptr);
+ CHECK_NULL(current_block_);
CHECK(!loop_blocks_.empty());
CHECK_EQ(0, loop_blocks_.back().expected_blocks_);
loop_blocks_.pop_back();
@@ -158,7 +158,7 @@ Instruction* InstructionSequenceTest::EndBlock(BlockCompletion completion) {
break;
}
completions_.push_back(completion);
- CHECK(current_block_ != nullptr);
+ CHECK_NOT_NULL(current_block_);
sequence()->EndBlock(current_block_->rpo_number());
current_block_ = nullptr;
return result;
@@ -195,7 +195,7 @@ PhiInstruction* InstructionSequenceTest::Phi(VReg incoming_vreg_0,
for (; input_count < arraysize(inputs); ++input_count) {
if (inputs[input_count].value_ == kNoValue) break;
}
- CHECK(input_count > 0);
+ CHECK_LT(0, input_count);
auto phi = new (zone()) PhiInstruction(zone(), NewReg().value_, input_count);
for (size_t i = 0; i < input_count; ++i) {
SetInput(phi, i, inputs[i]);
@@ -216,7 +216,7 @@ PhiInstruction* InstructionSequenceTest::Phi(VReg incoming_vreg_0,
void InstructionSequenceTest::SetInput(PhiInstruction* phi, size_t input,
VReg vreg) {
- CHECK(vreg.value_ != kNoValue);
+ CHECK_NE(kNoValue, vreg.value_);
phi->SetInput(input, vreg.value_);
}
@@ -474,7 +474,7 @@ InstructionOperand InstructionSequenceTest::ConvertOutputOp(VReg vreg,
InstructionBlock* InstructionSequenceTest::NewBlock(bool deferred) {
- CHECK(current_block_ == nullptr);
+ CHECK_NULL(current_block_);
Rpo rpo = Rpo::FromInt(static_cast<int>(instruction_blocks_.size()));
Rpo loop_header = Rpo::Invalid();
Rpo loop_end = Rpo::Invalid();
diff --git a/deps/v8/test/unittests/compiler/instruction-unittest.cc b/deps/v8/test/unittests/compiler/instruction-unittest.cc
index 443c42b62a..96add7fdd8 100644
--- a/deps/v8/test/unittests/compiler/instruction-unittest.cc
+++ b/deps/v8/test/unittests/compiler/instruction-unittest.cc
@@ -10,6 +10,7 @@
namespace v8 {
namespace internal {
namespace compiler {
+namespace instruction_unittest {
namespace {
@@ -170,6 +171,7 @@ TEST_F(InstructionTest, PrepareInsertAfter) {
}
}
+} // namespace instruction_unittest
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
index 35f296442e..a6bd1f2dad 100644
--- a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
@@ -298,11 +298,11 @@ TEST_F(Int64LoweringTest, TruncateInt64ToInt32) {
}
TEST_F(Int64LoweringTest, Parameter) {
- LowerGraph(Parameter(0), MachineRepresentation::kWord64,
+ LowerGraph(Parameter(1), MachineRepresentation::kWord64,
MachineRepresentation::kWord64, 1);
EXPECT_THAT(graph()->end()->InputAt(1),
- IsReturn2(IsParameter(0), IsParameter(1), start(), start()));
+ IsReturn2(IsParameter(1), IsParameter(2), start(), start()));
}
TEST_F(Int64LoweringTest, Parameter2) {
@@ -316,17 +316,23 @@ TEST_F(Int64LoweringTest, Parameter2) {
sig_builder.AddParam(MachineRepresentation::kWord32);
int start_parameter = start()->op()->ValueOutputCount();
- LowerGraph(Parameter(4), sig_builder.Build());
+ LowerGraph(Parameter(5), sig_builder.Build());
EXPECT_THAT(graph()->end()->InputAt(1),
- IsReturn(IsParameter(6), start(), start()));
+ IsReturn(IsParameter(7), start(), start()));
// The parameter count of the start node should increase by 2, because we
// lowered two parameter nodes.
EXPECT_THAT(start()->op()->ValueOutputCount(), start_parameter + 2);
}
+// The following tests assume that pointers are 32-bit and therefore do not
+// get lowered. This assumption does not hold on 64-bit platforms, which
+// invalidates these tests.
+// TODO(wasm): Find an alternative way to re-activate these tests.
+#if V8_TARGET_ARCH_32_BIT
TEST_F(Int64LoweringTest, CallI64Return) {
int32_t function = 0x9999;
+ Node* context_address = Int32Constant(0);
Signature<MachineRepresentation>::Builder sig_builder(zone(), 1, 0);
sig_builder.AddReturn(MachineRepresentation::kWord64);
@@ -335,7 +341,7 @@ TEST_F(Int64LoweringTest, CallI64Return) {
compiler::GetWasmCallDescriptor(zone(), sig_builder.Build());
LowerGraph(graph()->NewNode(common()->Call(desc), Int32Constant(function),
- start(), start()),
+ context_address, start(), start()),
MachineRepresentation::kWord64);
Capture<Node*> call;
@@ -355,6 +361,7 @@ TEST_F(Int64LoweringTest, CallI64Return) {
TEST_F(Int64LoweringTest, CallI64Parameter) {
int32_t function = 0x9999;
+ Node* context_address = Int32Constant(0);
Signature<MachineRepresentation>::Builder sig_builder(zone(), 1, 3);
sig_builder.AddReturn(MachineRepresentation::kWord32);
@@ -366,14 +373,14 @@ TEST_F(Int64LoweringTest, CallI64Parameter) {
compiler::GetWasmCallDescriptor(zone(), sig_builder.Build());
LowerGraph(graph()->NewNode(common()->Call(desc), Int32Constant(function),
- Int64Constant(value(0)),
+ context_address, Int64Constant(value(0)),
Int32Constant(low_word_value(1)),
Int64Constant(value(2)), start(), start()),
MachineRepresentation::kWord32);
EXPECT_THAT(
graph()->end()->InputAt(1),
- IsReturn(IsCall(testing::_, IsInt32Constant(function),
+ IsReturn(IsCall(testing::_, IsInt32Constant(function), context_address,
IsInt32Constant(low_word_value(0)),
IsInt32Constant(high_word_value(0)),
IsInt32Constant(low_word_value(1)),
@@ -401,6 +408,7 @@ TEST_F(Int64LoweringTest, Int64Add) {
IsProjection(1, AllOf(CaptureEq(&add), add_matcher)),
start(), start()));
}
+#endif
TEST_F(Int64LoweringTest, Int64Sub) {
LowerGraph(graph()->NewNode(machine()->Int64Sub(), Int64Constant(value(0)),
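
The shifted indices and the new context_address inputs in this file follow one pattern: every lowered call now carries an extra leading argument, and on 32-bit targets each i64 parameter is split into a low/high pair, so later parameter indices move accordingly. The arithmetic below is a sketch of that reading, not code from the change itself; treating the extra slot as a reserved context parameter is an assumption based on the added Int32Constant(0) context_address nodes:

    #include <cstdio>

    // Assumed rule: one reserved leading (context) parameter, plus one
    // extra slot per preceding i64 parameter, since each i64 parameter
    // lowers to a (low, high) pair on 32-bit targets.
    int LoweredLowWordIndex(int old_index, int i64_params_before) {
      const int kContextParams = 1;
      return old_index + kContextParams + i64_params_before;
    }

    int main() {
      // The first i64 parameter: low word at index 1, high word at index 2,
      // matching IsReturn2(IsParameter(1), IsParameter(2), ...).
      int low = LoweredLowWordIndex(0, 0);
      std::printf("low=%d high=%d\n", low, low + 1);
      return 0;
    }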
diff --git a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
index 7fac0e6bc5..e185c64795 100644
--- a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
@@ -40,8 +40,7 @@ class JSCreateLoweringTest : public TypedGraphTest {
&machine);
// TODO(titzer): mock the GraphReducer here for better unit testing.
GraphReducer graph_reducer(zone(), graph());
- JSCreateLowering reducer(&graph_reducer, &deps_, &jsgraph,
- MaybeHandle<FeedbackVector>(), native_context(),
+ JSCreateLowering reducer(&graph_reducer, &deps_, &jsgraph, native_context(),
zone());
return reducer.Reduce(node);
}
@@ -93,7 +92,7 @@ TEST_F(JSCreateLoweringTest, JSCreateArgumentsInlinedMapped) {
Node* const closure = Parameter(Type::Any());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
- Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
+ Handle<SharedFunctionInfo> shared(isolate()->script_function()->shared());
Node* const frame_state_outer = FrameState(shared, graph()->start());
Node* const frame_state_inner = FrameState(shared, frame_state_outer);
Reduction r = Reduce(graph()->NewNode(
@@ -111,7 +110,7 @@ TEST_F(JSCreateLoweringTest, JSCreateArgumentsInlinedUnmapped) {
Node* const closure = Parameter(Type::Any());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
- Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
+ Handle<SharedFunctionInfo> shared(isolate()->script_function()->shared());
Node* const frame_state_outer = FrameState(shared, graph()->start());
Node* const frame_state_inner = FrameState(shared, frame_state_outer);
Reduction r = Reduce(graph()->NewNode(
@@ -129,7 +128,7 @@ TEST_F(JSCreateLoweringTest, JSCreateArgumentsInlinedRestArray) {
Node* const closure = Parameter(Type::Any());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
- Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
+ Handle<SharedFunctionInfo> shared(isolate()->script_function()->shared());
Node* const frame_state_outer = FrameState(shared, graph()->start());
Node* const frame_state_inner = FrameState(shared, frame_state_outer);
Reduction r = Reduce(graph()->NewNode(
diff --git a/deps/v8/test/unittests/compiler/js-operator-unittest.cc b/deps/v8/test/unittests/compiler/js-operator-unittest.cc
index 886fbe02ce..8a458a736f 100644
--- a/deps/v8/test/unittests/compiler/js-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-operator-unittest.cc
@@ -11,6 +11,7 @@
namespace v8 {
namespace internal {
namespace compiler {
+namespace js_operator_unittest {
// -----------------------------------------------------------------------------
// Shared operators.
@@ -112,6 +113,7 @@ TEST_P(JSSharedOperatorTest, Properties) {
INSTANTIATE_TEST_CASE_P(JSOperatorTest, JSSharedOperatorTest,
::testing::ValuesIn(kSharedOperators));
+} // namespace js_operator_unittest
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
index b041597ccd..2f1e7e8be2 100644
--- a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
@@ -4,6 +4,8 @@
#include "test/unittests/compiler/instruction-selector-unittest.h"
+#include "src/objects-inl.h"
+
namespace v8 {
namespace internal {
namespace compiler {
diff --git a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
index 5c57e5ce9f..74af374379 100644
--- a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
@@ -4,6 +4,8 @@
#include "test/unittests/compiler/instruction-selector-unittest.h"
+#include "src/objects-inl.h"
+
namespace v8 {
namespace internal {
namespace compiler {
diff --git a/deps/v8/test/unittests/compiler/node-cache-unittest.cc b/deps/v8/test/unittests/compiler/node-cache-unittest.cc
index 3c92876abd..f77377deda 100644
--- a/deps/v8/test/unittests/compiler/node-cache-unittest.cc
+++ b/deps/v8/test/unittests/compiler/node-cache-unittest.cc
@@ -12,6 +12,7 @@ using testing::Contains;
namespace v8 {
namespace internal {
namespace compiler {
+namespace node_cache_unittest {
typedef GraphTest NodeCacheTest;
@@ -154,6 +155,7 @@ TEST_F(NodeCacheTest, GetCachedNodes_int64) {
}
}
+} // namespace node_cache_unittest
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/node-properties-unittest.cc b/deps/v8/test/unittests/compiler/node-properties-unittest.cc
index a18f2032f5..b3b76cef79 100644
--- a/deps/v8/test/unittests/compiler/node-properties-unittest.cc
+++ b/deps/v8/test/unittests/compiler/node-properties-unittest.cc
@@ -14,6 +14,7 @@ using testing::IsNull;
namespace v8 {
namespace internal {
namespace compiler {
+namespace node_properties_unittest {
class NodePropertiesTest : public TestWithZone {
public:
@@ -118,6 +119,7 @@ TEST_F(NodePropertiesTest, CollectControlProjections_Switch) {
EXPECT_EQ(if_default, result[2]);
}
+} // namespace node_properties_unittest
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index 0e903f9c76..52fd02b0a6 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -47,10 +47,9 @@ bool PrintMatchAndExplain(const T& value, const std::string& value_name,
return true;
}
-
-class NodeMatcher : public MatcherInterface<Node*> {
+class TestNodeMatcher : public MatcherInterface<Node*> {
public:
- explicit NodeMatcher(IrOpcode::Value opcode) : opcode_(opcode) {}
+ explicit TestNodeMatcher(IrOpcode::Value opcode) : opcode_(opcode) {}
void DescribeTo(std::ostream* os) const override {
*os << "is a " << IrOpcode::Mnemonic(opcode_) << " node";
@@ -74,17 +73,16 @@ class NodeMatcher : public MatcherInterface<Node*> {
const IrOpcode::Value opcode_;
};
-
-class IsBranchMatcher final : public NodeMatcher {
+class IsBranchMatcher final : public TestNodeMatcher {
public:
IsBranchMatcher(const Matcher<Node*>& value_matcher,
const Matcher<Node*>& control_matcher)
- : NodeMatcher(IrOpcode::kBranch),
+ : TestNodeMatcher(IrOpcode::kBranch),
value_matcher_(value_matcher),
control_matcher_(control_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << " whose value (";
value_matcher_.DescribeTo(os);
*os << ") and control (";
@@ -93,7 +91,7 @@ class IsBranchMatcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
"value", value_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetControlInput(node),
@@ -105,17 +103,16 @@ class IsBranchMatcher final : public NodeMatcher {
const Matcher<Node*> control_matcher_;
};
-
-class IsSwitchMatcher final : public NodeMatcher {
+class IsSwitchMatcher final : public TestNodeMatcher {
public:
IsSwitchMatcher(const Matcher<Node*>& value_matcher,
const Matcher<Node*>& control_matcher)
- : NodeMatcher(IrOpcode::kSwitch),
+ : TestNodeMatcher(IrOpcode::kSwitch),
value_matcher_(value_matcher),
control_matcher_(control_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << " whose value (";
value_matcher_.DescribeTo(os);
*os << ") and control (";
@@ -124,7 +121,7 @@ class IsSwitchMatcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
"value", value_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetControlInput(node),
@@ -136,17 +133,16 @@ class IsSwitchMatcher final : public NodeMatcher {
const Matcher<Node*> control_matcher_;
};
-
-class IsIfValueMatcher final : public NodeMatcher {
+class IsIfValueMatcher final : public TestNodeMatcher {
public:
IsIfValueMatcher(const Matcher<int32_t>& value_matcher,
const Matcher<Node*>& control_matcher)
- : NodeMatcher(IrOpcode::kIfValue),
+ : TestNodeMatcher(IrOpcode::kIfValue),
value_matcher_(value_matcher),
control_matcher_(control_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << " whose value (";
value_matcher_.DescribeTo(os);
*os << ") and control (";
@@ -155,7 +151,7 @@ class IsIfValueMatcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(OpParameter<int32_t>(node->op()), "value",
value_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetControlInput(node),
@@ -167,22 +163,21 @@ class IsIfValueMatcher final : public NodeMatcher {
const Matcher<Node*> control_matcher_;
};
-
-class IsControl1Matcher final : public NodeMatcher {
+class IsControl1Matcher final : public TestNodeMatcher {
public:
IsControl1Matcher(IrOpcode::Value opcode,
const Matcher<Node*>& control_matcher)
- : NodeMatcher(opcode), control_matcher_(control_matcher) {}
+ : TestNodeMatcher(opcode), control_matcher_(control_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << " whose control (";
control_matcher_.DescribeTo(os);
*os << ")";
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetControlInput(node),
"control", control_matcher_, listener));
}
@@ -191,18 +186,17 @@ class IsControl1Matcher final : public NodeMatcher {
const Matcher<Node*> control_matcher_;
};
-
-class IsControl2Matcher final : public NodeMatcher {
+class IsControl2Matcher final : public TestNodeMatcher {
public:
IsControl2Matcher(IrOpcode::Value opcode,
const Matcher<Node*>& control0_matcher,
const Matcher<Node*>& control1_matcher)
- : NodeMatcher(opcode),
+ : TestNodeMatcher(opcode),
control0_matcher_(control0_matcher),
control1_matcher_(control1_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << " whose control0 (";
control0_matcher_.DescribeTo(os);
*os << ") and control1 (";
@@ -211,7 +205,7 @@ class IsControl2Matcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetControlInput(node, 0),
"control0", control0_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetControlInput(node, 1),
@@ -223,20 +217,19 @@ class IsControl2Matcher final : public NodeMatcher {
const Matcher<Node*> control1_matcher_;
};
-
-class IsControl3Matcher final : public NodeMatcher {
+class IsControl3Matcher final : public TestNodeMatcher {
public:
IsControl3Matcher(IrOpcode::Value opcode,
const Matcher<Node*>& control0_matcher,
const Matcher<Node*>& control1_matcher,
const Matcher<Node*>& control2_matcher)
- : NodeMatcher(opcode),
+ : TestNodeMatcher(opcode),
control0_matcher_(control0_matcher),
control1_matcher_(control1_matcher),
control2_matcher_(control2_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << " whose control0 (";
control0_matcher_.DescribeTo(os);
*os << ") and control1 (";
@@ -247,7 +240,7 @@ class IsControl3Matcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetControlInput(node, 0),
"control0", control0_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetControlInput(node, 1),
@@ -262,21 +255,21 @@ class IsControl3Matcher final : public NodeMatcher {
const Matcher<Node*> control2_matcher_;
};
-
-class IsBeginRegionMatcher final : public NodeMatcher {
+class IsBeginRegionMatcher final : public TestNodeMatcher {
public:
explicit IsBeginRegionMatcher(const Matcher<Node*>& effect_matcher)
- : NodeMatcher(IrOpcode::kBeginRegion), effect_matcher_(effect_matcher) {}
+ : TestNodeMatcher(IrOpcode::kBeginRegion),
+ effect_matcher_(effect_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << " whose effect (";
effect_matcher_.DescribeTo(os);
*os << ")";
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
effect_matcher_, listener));
}
@@ -285,17 +278,16 @@ class IsBeginRegionMatcher final : public NodeMatcher {
const Matcher<Node*> effect_matcher_;
};
-
-class IsFinishRegionMatcher final : public NodeMatcher {
+class IsFinishRegionMatcher final : public TestNodeMatcher {
public:
IsFinishRegionMatcher(const Matcher<Node*>& value_matcher,
const Matcher<Node*>& effect_matcher)
- : NodeMatcher(IrOpcode::kFinishRegion),
+ : TestNodeMatcher(IrOpcode::kFinishRegion),
value_matcher_(value_matcher),
effect_matcher_(effect_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << " whose value (";
value_matcher_.DescribeTo(os);
*os << ") and effect (";
@@ -304,7 +296,7 @@ class IsFinishRegionMatcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
"value", value_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
@@ -316,13 +308,12 @@ class IsFinishRegionMatcher final : public NodeMatcher {
const Matcher<Node*> effect_matcher_;
};
-
-class IsReturnMatcher final : public NodeMatcher {
+class IsReturnMatcher final : public TestNodeMatcher {
public:
IsReturnMatcher(const Matcher<Node*>& value_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher)
- : NodeMatcher(IrOpcode::kReturn),
+ : TestNodeMatcher(IrOpcode::kReturn),
value_matcher_(value_matcher),
value2_matcher_(_),
effect_matcher_(effect_matcher),
@@ -333,7 +324,7 @@ class IsReturnMatcher final : public NodeMatcher {
const Matcher<Node*>& value2_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher)
- : NodeMatcher(IrOpcode::kReturn),
+ : TestNodeMatcher(IrOpcode::kReturn),
value_matcher_(value_matcher),
value2_matcher_(value2_matcher),
effect_matcher_(effect_matcher),
@@ -341,7 +332,7 @@ class IsReturnMatcher final : public NodeMatcher {
has_second_return_value_(true) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << " whose value (";
value_matcher_.DescribeTo(os);
if (has_second_return_value_) {
@@ -356,7 +347,7 @@ class IsReturnMatcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
"value", value_matcher_, listener) &&
(!has_second_return_value_ ||
@@ -376,17 +367,16 @@ class IsReturnMatcher final : public NodeMatcher {
bool has_second_return_value_;
};
-
-class IsTerminateMatcher final : public NodeMatcher {
+class IsTerminateMatcher final : public TestNodeMatcher {
public:
IsTerminateMatcher(const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher)
- : NodeMatcher(IrOpcode::kTerminate),
+ : TestNodeMatcher(IrOpcode::kTerminate),
effect_matcher_(effect_matcher),
control_matcher_(control_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << " whose effect (";
effect_matcher_.DescribeTo(os);
*os << ") and control (";
@@ -395,7 +385,7 @@ class IsTerminateMatcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
effect_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetControlInput(node),
@@ -407,16 +397,16 @@ class IsTerminateMatcher final : public NodeMatcher {
const Matcher<Node*> control_matcher_;
};
-class IsTypeGuardMatcher final : public NodeMatcher {
+class IsTypeGuardMatcher final : public TestNodeMatcher {
public:
IsTypeGuardMatcher(const Matcher<Node*>& value_matcher,
const Matcher<Node*>& control_matcher)
- : NodeMatcher(IrOpcode::kTypeGuard),
+ : TestNodeMatcher(IrOpcode::kTypeGuard),
value_matcher_(value_matcher),
control_matcher_(control_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << " whose value (";
value_matcher_.DescribeTo(os);
*os << ") and control (";
@@ -425,7 +415,7 @@ class IsTypeGuardMatcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
"value", value_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetControlInput(node),
@@ -438,20 +428,20 @@ class IsTypeGuardMatcher final : public NodeMatcher {
};
template <typename T>
-class IsConstantMatcher final : public NodeMatcher {
+class IsConstantMatcher final : public TestNodeMatcher {
public:
IsConstantMatcher(IrOpcode::Value opcode, const Matcher<T>& value_matcher)
- : NodeMatcher(opcode), value_matcher_(value_matcher) {}
+ : TestNodeMatcher(opcode), value_matcher_(value_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << " whose value (";
value_matcher_.DescribeTo(os);
*os << ")";
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(OpParameter<T>(node), "value", value_matcher_,
listener));
}
@@ -460,21 +450,20 @@ class IsConstantMatcher final : public NodeMatcher {
const Matcher<T> value_matcher_;
};
-
-class IsSelectMatcher final : public NodeMatcher {
+class IsSelectMatcher final : public TestNodeMatcher {
public:
IsSelectMatcher(const Matcher<MachineRepresentation>& type_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& value2_matcher)
- : NodeMatcher(IrOpcode::kSelect),
+ : TestNodeMatcher(IrOpcode::kSelect),
type_matcher_(type_matcher),
value0_matcher_(value0_matcher),
value1_matcher_(value1_matcher),
value2_matcher_(value2_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << " whose representation (";
type_matcher_.DescribeTo(os);
*os << "), value0 (";
@@ -488,7 +477,7 @@ class IsSelectMatcher final : public NodeMatcher {
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
return (
- NodeMatcher::MatchAndExplain(node, listener) &&
+ TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(SelectParametersOf(node->op()).representation(),
"representation", type_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "value0",
@@ -506,21 +495,20 @@ class IsSelectMatcher final : public NodeMatcher {
const Matcher<Node*> value2_matcher_;
};
-
-class IsPhiMatcher final : public NodeMatcher {
+class IsPhiMatcher final : public TestNodeMatcher {
public:
IsPhiMatcher(const Matcher<MachineRepresentation>& type_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& control_matcher)
- : NodeMatcher(IrOpcode::kPhi),
+ : TestNodeMatcher(IrOpcode::kPhi),
type_matcher_(type_matcher),
value0_matcher_(value0_matcher),
value1_matcher_(value1_matcher),
control_matcher_(control_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << " whose representation (";
type_matcher_.DescribeTo(os);
*os << "), value0 (";
@@ -533,7 +521,7 @@ class IsPhiMatcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(PhiRepresentationOf(node->op()),
"representation", type_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
@@ -551,15 +539,14 @@ class IsPhiMatcher final : public NodeMatcher {
const Matcher<Node*> control_matcher_;
};
-
-class IsPhi2Matcher final : public NodeMatcher {
+class IsPhi2Matcher final : public TestNodeMatcher {
public:
IsPhi2Matcher(const Matcher<MachineRepresentation>& type_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& value2_matcher,
const Matcher<Node*>& control_matcher)
- : NodeMatcher(IrOpcode::kPhi),
+ : TestNodeMatcher(IrOpcode::kPhi),
type_matcher_(type_matcher),
value0_matcher_(value0_matcher),
value1_matcher_(value1_matcher),
@@ -567,7 +554,7 @@ class IsPhi2Matcher final : public NodeMatcher {
control_matcher_(control_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << " whose representation (";
type_matcher_.DescribeTo(os);
*os << "), value0 (";
@@ -582,7 +569,7 @@ class IsPhi2Matcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(PhiRepresentationOf(node->op()),
"representation", type_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
@@ -603,19 +590,18 @@ class IsPhi2Matcher final : public NodeMatcher {
const Matcher<Node*> control_matcher_;
};
-
-class IsEffectPhiMatcher final : public NodeMatcher {
+class IsEffectPhiMatcher final : public TestNodeMatcher {
public:
IsEffectPhiMatcher(const Matcher<Node*>& effect0_matcher,
const Matcher<Node*>& effect1_matcher,
const Matcher<Node*>& control_matcher)
- : NodeMatcher(IrOpcode::kEffectPhi),
+ : TestNodeMatcher(IrOpcode::kEffectPhi),
effect0_matcher_(effect0_matcher),
effect1_matcher_(effect1_matcher),
control_matcher_(control_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << "), effect0 (";
effect0_matcher_.DescribeTo(os);
*os << "), effect1 (";
@@ -626,7 +612,7 @@ class IsEffectPhiMatcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetEffectInput(node, 0),
"effect0", effect0_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetEffectInput(node, 1),
@@ -641,17 +627,16 @@ class IsEffectPhiMatcher final : public NodeMatcher {
const Matcher<Node*> control_matcher_;
};
-
-class IsProjectionMatcher final : public NodeMatcher {
+class IsProjectionMatcher final : public TestNodeMatcher {
public:
IsProjectionMatcher(const Matcher<size_t>& index_matcher,
const Matcher<Node*>& base_matcher)
- : NodeMatcher(IrOpcode::kProjection),
+ : TestNodeMatcher(IrOpcode::kProjection),
index_matcher_(index_matcher),
base_matcher_(base_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << " whose index (";
index_matcher_.DescribeTo(os);
*os << ") and base (";
@@ -660,7 +645,7 @@ class IsProjectionMatcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(OpParameter<size_t>(node), "index",
index_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
@@ -672,21 +657,20 @@ class IsProjectionMatcher final : public NodeMatcher {
const Matcher<Node*> base_matcher_;
};
-
-class IsCallMatcher final : public NodeMatcher {
+class IsCallMatcher final : public TestNodeMatcher {
public:
IsCallMatcher(const Matcher<const CallDescriptor*>& descriptor_matcher,
const std::vector<Matcher<Node*>>& value_matchers,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher)
- : NodeMatcher(IrOpcode::kCall),
+ : TestNodeMatcher(IrOpcode::kCall),
descriptor_matcher_(descriptor_matcher),
value_matchers_(value_matchers),
effect_matcher_(effect_matcher),
control_matcher_(control_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
for (size_t i = 0; i < value_matchers_.size(); ++i) {
if (i == 0) {
*os << " whose value0 (";
@@ -703,7 +687,7 @@ class IsCallMatcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- if (!NodeMatcher::MatchAndExplain(node, listener) ||
+ if (!TestNodeMatcher::MatchAndExplain(node, listener) ||
!PrintMatchAndExplain(OpParameter<const CallDescriptor*>(node),
"descriptor", descriptor_matcher_, listener)) {
return false;
@@ -738,21 +722,20 @@ class IsCallMatcher final : public NodeMatcher {
const Matcher<Node*> control_matcher_;
};
-
-class IsTailCallMatcher final : public NodeMatcher {
+class IsTailCallMatcher final : public TestNodeMatcher {
public:
IsTailCallMatcher(const Matcher<CallDescriptor const*>& descriptor_matcher,
const std::vector<Matcher<Node*>>& value_matchers,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher)
- : NodeMatcher(IrOpcode::kTailCall),
+ : TestNodeMatcher(IrOpcode::kTailCall),
descriptor_matcher_(descriptor_matcher),
value_matchers_(value_matchers),
effect_matcher_(effect_matcher),
control_matcher_(control_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
for (size_t i = 0; i < value_matchers_.size(); ++i) {
if (i == 0) {
*os << " whose value0 (";
@@ -769,7 +752,7 @@ class IsTailCallMatcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- if (!NodeMatcher::MatchAndExplain(node, listener) ||
+ if (!TestNodeMatcher::MatchAndExplain(node, listener) ||
!PrintMatchAndExplain(OpParameter<CallDescriptor const*>(node),
"descriptor", descriptor_matcher_, listener)) {
return false;
@@ -804,7 +787,7 @@ class IsTailCallMatcher final : public NodeMatcher {
const Matcher<Node*> control_matcher_;
};
-class IsSpeculativeBinopMatcher final : public NodeMatcher {
+class IsSpeculativeBinopMatcher final : public TestNodeMatcher {
public:
IsSpeculativeBinopMatcher(IrOpcode::Value opcode,
const Matcher<NumberOperationHint>& hint_matcher,
@@ -812,7 +795,7 @@ class IsSpeculativeBinopMatcher final : public NodeMatcher {
const Matcher<Node*>& rhs_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher)
- : NodeMatcher(opcode),
+ : TestNodeMatcher(opcode),
hint_matcher_(hint_matcher),
lhs_matcher_(lhs_matcher),
rhs_matcher_(rhs_matcher),
@@ -820,7 +803,7 @@ class IsSpeculativeBinopMatcher final : public NodeMatcher {
control_matcher_(control_matcher) {}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
// TODO(bmeurer): The type parameter is currently ignored.
PrintMatchAndExplain(OpParameter<NumberOperationHint>(node->op()),
"hints", hint_matcher_, listener) &&
@@ -843,18 +826,18 @@ class IsSpeculativeBinopMatcher final : public NodeMatcher {
const Matcher<Node*> control_matcher_;
};
-class IsAllocateMatcher final : public NodeMatcher {
+class IsAllocateMatcher final : public TestNodeMatcher {
public:
IsAllocateMatcher(const Matcher<Node*>& size_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher)
- : NodeMatcher(IrOpcode::kAllocate),
+ : TestNodeMatcher(IrOpcode::kAllocate),
size_matcher_(size_matcher),
effect_matcher_(effect_matcher),
control_matcher_(control_matcher) {}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "size",
size_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
@@ -869,21 +852,20 @@ class IsAllocateMatcher final : public NodeMatcher {
const Matcher<Node*> control_matcher_;
};
-
-class IsLoadFieldMatcher final : public NodeMatcher {
+class IsLoadFieldMatcher final : public TestNodeMatcher {
public:
IsLoadFieldMatcher(const Matcher<FieldAccess>& access_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher)
- : NodeMatcher(IrOpcode::kLoadField),
+ : TestNodeMatcher(IrOpcode::kLoadField),
access_matcher_(access_matcher),
base_matcher_(base_matcher),
effect_matcher_(effect_matcher),
control_matcher_(control_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << " whose access (";
access_matcher_.DescribeTo(os);
*os << "), base (";
@@ -896,7 +878,7 @@ class IsLoadFieldMatcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(OpParameter<FieldAccess>(node), "access",
access_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
@@ -914,15 +896,14 @@ class IsLoadFieldMatcher final : public NodeMatcher {
const Matcher<Node*> control_matcher_;
};
-
-class IsStoreFieldMatcher final : public NodeMatcher {
+class IsStoreFieldMatcher final : public TestNodeMatcher {
public:
IsStoreFieldMatcher(const Matcher<FieldAccess>& access_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& value_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher)
- : NodeMatcher(IrOpcode::kStoreField),
+ : TestNodeMatcher(IrOpcode::kStoreField),
access_matcher_(access_matcher),
base_matcher_(base_matcher),
value_matcher_(value_matcher),
@@ -930,7 +911,7 @@ class IsStoreFieldMatcher final : public NodeMatcher {
control_matcher_(control_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << " whose access (";
access_matcher_.DescribeTo(os);
*os << "), base (";
@@ -945,7 +926,7 @@ class IsStoreFieldMatcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(OpParameter<FieldAccess>(node), "access",
access_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
@@ -966,14 +947,14 @@ class IsStoreFieldMatcher final : public NodeMatcher {
const Matcher<Node*> control_matcher_;
};
-class IsLoadElementMatcher final : public NodeMatcher {
+class IsLoadElementMatcher final : public TestNodeMatcher {
public:
IsLoadElementMatcher(const Matcher<ElementAccess>& access_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher)
- : NodeMatcher(IrOpcode::kLoadElement),
+ : TestNodeMatcher(IrOpcode::kLoadElement),
access_matcher_(access_matcher),
base_matcher_(base_matcher),
index_matcher_(index_matcher),
@@ -981,7 +962,7 @@ class IsLoadElementMatcher final : public NodeMatcher {
control_matcher_(control_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << " whose access (";
access_matcher_.DescribeTo(os);
*os << "), base (";
@@ -996,7 +977,7 @@ class IsLoadElementMatcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(OpParameter<ElementAccess>(node), "access",
access_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
@@ -1017,8 +998,7 @@ class IsLoadElementMatcher final : public NodeMatcher {
const Matcher<Node*> control_matcher_;
};
-
-class IsStoreElementMatcher final : public NodeMatcher {
+class IsStoreElementMatcher final : public TestNodeMatcher {
public:
IsStoreElementMatcher(const Matcher<ElementAccess>& access_matcher,
const Matcher<Node*>& base_matcher,
@@ -1026,7 +1006,7 @@ class IsStoreElementMatcher final : public NodeMatcher {
const Matcher<Node*>& value_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher)
- : NodeMatcher(IrOpcode::kStoreElement),
+ : TestNodeMatcher(IrOpcode::kStoreElement),
access_matcher_(access_matcher),
base_matcher_(base_matcher),
index_matcher_(index_matcher),
@@ -1035,7 +1015,7 @@ class IsStoreElementMatcher final : public NodeMatcher {
control_matcher_(control_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << " whose access (";
access_matcher_.DescribeTo(os);
*os << "), base (";
@@ -1052,7 +1032,7 @@ class IsStoreElementMatcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(OpParameter<ElementAccess>(node), "access",
access_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
@@ -1077,14 +1057,14 @@ class IsStoreElementMatcher final : public NodeMatcher {
};
#define LOAD_MATCHER(kLoad) \
- class Is##kLoad##Matcher final : public NodeMatcher { \
+ class Is##kLoad##Matcher final : public TestNodeMatcher { \
public: \
Is##kLoad##Matcher(const Matcher<kLoad##Representation>& rep_matcher, \
const Matcher<Node*>& base_matcher, \
const Matcher<Node*>& index_matcher, \
const Matcher<Node*>& effect_matcher, \
const Matcher<Node*>& control_matcher) \
- : NodeMatcher(IrOpcode::k##kLoad), \
+ : TestNodeMatcher(IrOpcode::k##kLoad), \
rep_matcher_(rep_matcher), \
base_matcher_(base_matcher), \
index_matcher_(index_matcher), \
@@ -1092,7 +1072,7 @@ class IsStoreElementMatcher final : public NodeMatcher {
control_matcher_(control_matcher) {} \
\
void DescribeTo(std::ostream* os) const final { \
- NodeMatcher::DescribeTo(os); \
+ TestNodeMatcher::DescribeTo(os); \
*os << " whose rep ("; \
rep_matcher_.DescribeTo(os); \
*os << "), base ("; \
@@ -1116,7 +1096,7 @@ class IsStoreElementMatcher final : public NodeMatcher {
if (NodeProperties::FirstControlIndex(node) < node->InputCount()) { \
control_node = NodeProperties::GetControlInput(node); \
} \
- return (NodeMatcher::MatchAndExplain(node, listener) && \
+ return (TestNodeMatcher::MatchAndExplain(node, listener) && \
PrintMatchAndExplain(OpParameter<kLoad##Representation>(node), \
"rep", rep_matcher_, listener) && \
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), \
@@ -1141,7 +1121,7 @@ LOAD_MATCHER(Load)
LOAD_MATCHER(UnalignedLoad)
#define STORE_MATCHER(kStore) \
- class Is##kStore##Matcher final : public NodeMatcher { \
+ class Is##kStore##Matcher final : public TestNodeMatcher { \
public: \
Is##kStore##Matcher(const Matcher<kStore##Representation>& rep_matcher, \
const Matcher<Node*>& base_matcher, \
@@ -1149,7 +1129,7 @@ LOAD_MATCHER(UnalignedLoad)
const Matcher<Node*>& value_matcher, \
const Matcher<Node*>& effect_matcher, \
const Matcher<Node*>& control_matcher) \
- : NodeMatcher(IrOpcode::k##kStore), \
+ : TestNodeMatcher(IrOpcode::k##kStore), \
rep_matcher_(rep_matcher), \
base_matcher_(base_matcher), \
index_matcher_(index_matcher), \
@@ -1158,7 +1138,7 @@ LOAD_MATCHER(UnalignedLoad)
control_matcher_(control_matcher) {} \
\
void DescribeTo(std::ostream* os) const final { \
- NodeMatcher::DescribeTo(os); \
+ TestNodeMatcher::DescribeTo(os); \
*os << " whose rep ("; \
rep_matcher_.DescribeTo(os); \
*os << "), base ("; \
@@ -1184,7 +1164,7 @@ LOAD_MATCHER(UnalignedLoad)
if (NodeProperties::FirstControlIndex(node) < node->InputCount()) { \
control_node = NodeProperties::GetControlInput(node); \
} \
- return (NodeMatcher::MatchAndExplain(node, listener) && \
+ return (TestNodeMatcher::MatchAndExplain(node, listener) && \
PrintMatchAndExplain(OpParameter<kStore##Representation>(node), \
"rep", rep_matcher_, listener) && \
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), \
@@ -1211,21 +1191,21 @@ LOAD_MATCHER(UnalignedLoad)
STORE_MATCHER(Store)
STORE_MATCHER(UnalignedStore)
-class IsStackSlotMatcher final : public NodeMatcher {
+class IsStackSlotMatcher final : public TestNodeMatcher {
public:
explicit IsStackSlotMatcher(
const Matcher<StackSlotRepresentation>& rep_matcher)
- : NodeMatcher(IrOpcode::kStackSlot), rep_matcher_(rep_matcher) {}
+ : TestNodeMatcher(IrOpcode::kStackSlot), rep_matcher_(rep_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << " whose rep (";
rep_matcher_.DescribeTo(os);
*os << ")";
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(OpParameter<StackSlotRepresentation>(node),
"rep", rep_matcher_, listener));
}
@@ -1234,20 +1214,20 @@ class IsStackSlotMatcher final : public NodeMatcher {
const Matcher<StackSlotRepresentation> rep_matcher_;
};
-class IsToNumberMatcher final : public NodeMatcher {
+class IsToNumberMatcher final : public TestNodeMatcher {
public:
IsToNumberMatcher(const Matcher<Node*>& base_matcher,
const Matcher<Node*>& context_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher)
- : NodeMatcher(IrOpcode::kJSToNumber),
+ : TestNodeMatcher(IrOpcode::kJSToNumber),
base_matcher_(base_matcher),
context_matcher_(context_matcher),
effect_matcher_(effect_matcher),
control_matcher_(control_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << " whose base (";
base_matcher_.DescribeTo(os);
*os << "), context (";
@@ -1260,7 +1240,7 @@ class IsToNumberMatcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
base_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetContextInput(node),
@@ -1278,17 +1258,16 @@ class IsToNumberMatcher final : public NodeMatcher {
const Matcher<Node*> control_matcher_;
};
-
-class IsLoadContextMatcher final : public NodeMatcher {
+class IsLoadContextMatcher final : public TestNodeMatcher {
public:
IsLoadContextMatcher(const Matcher<ContextAccess>& access_matcher,
const Matcher<Node*>& context_matcher)
- : NodeMatcher(IrOpcode::kJSLoadContext),
+ : TestNodeMatcher(IrOpcode::kJSLoadContext),
access_matcher_(access_matcher),
context_matcher_(context_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << " whose access (";
access_matcher_.DescribeTo(os);
*os << ") and context (";
@@ -1297,7 +1276,7 @@ class IsLoadContextMatcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(OpParameter<ContextAccess>(node), "access",
access_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetContextInput(node),
@@ -1309,20 +1288,20 @@ class IsLoadContextMatcher final : public NodeMatcher {
const Matcher<Node*> context_matcher_;
};
-class IsQuadopMatcher final : public NodeMatcher {
+class IsQuadopMatcher final : public TestNodeMatcher {
public:
IsQuadopMatcher(IrOpcode::Value opcode, const Matcher<Node*>& a_matcher,
const Matcher<Node*>& b_matcher,
const Matcher<Node*>& c_matcher,
const Matcher<Node*>& d_matcher)
- : NodeMatcher(opcode),
+ : TestNodeMatcher(opcode),
a_matcher_(a_matcher),
b_matcher_(b_matcher),
c_matcher_(c_matcher),
d_matcher_(d_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << " whose a (";
a_matcher_.DescribeTo(os);
*os << ") and b (";
@@ -1335,7 +1314,7 @@ class IsQuadopMatcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "a",
a_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), "b",
@@ -1353,18 +1332,18 @@ class IsQuadopMatcher final : public NodeMatcher {
const Matcher<Node*> d_matcher_;
};
-class IsTernopMatcher final : public NodeMatcher {
+class IsTernopMatcher final : public TestNodeMatcher {
public:
IsTernopMatcher(IrOpcode::Value opcode, const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& mid_matcher,
const Matcher<Node*>& rhs_matcher)
- : NodeMatcher(opcode),
+ : TestNodeMatcher(opcode),
lhs_matcher_(lhs_matcher),
mid_matcher_(mid_matcher),
rhs_matcher_(rhs_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << " whose lhs (";
lhs_matcher_.DescribeTo(os);
*os << ") and mid (";
@@ -1375,7 +1354,7 @@ class IsTernopMatcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "lhs",
lhs_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), "mid",
@@ -1390,16 +1369,16 @@ class IsTernopMatcher final : public NodeMatcher {
const Matcher<Node*> rhs_matcher_;
};
-class IsBinopMatcher final : public NodeMatcher {
+class IsBinopMatcher final : public TestNodeMatcher {
public:
IsBinopMatcher(IrOpcode::Value opcode, const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher)
- : NodeMatcher(opcode),
+ : TestNodeMatcher(opcode),
lhs_matcher_(lhs_matcher),
rhs_matcher_(rhs_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << " whose lhs (";
lhs_matcher_.DescribeTo(os);
*os << ") and rhs (";
@@ -1408,7 +1387,7 @@ class IsBinopMatcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "lhs",
lhs_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), "rhs",
@@ -1420,21 +1399,20 @@ class IsBinopMatcher final : public NodeMatcher {
const Matcher<Node*> rhs_matcher_;
};
-
-class IsUnopMatcher final : public NodeMatcher {
+class IsUnopMatcher final : public TestNodeMatcher {
public:
IsUnopMatcher(IrOpcode::Value opcode, const Matcher<Node*>& input_matcher)
- : NodeMatcher(opcode), input_matcher_(input_matcher) {}
+ : TestNodeMatcher(opcode), input_matcher_(input_matcher) {}
void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
+ TestNodeMatcher::DescribeTo(os);
*os << " whose input (";
input_matcher_.DescribeTo(os);
*os << ")";
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
"input", input_matcher_, listener));
}
@@ -1443,11 +1421,10 @@ class IsUnopMatcher final : public NodeMatcher {
const Matcher<Node*> input_matcher_;
};
-
-class IsParameterMatcher final : public NodeMatcher {
+class IsParameterMatcher final : public TestNodeMatcher {
public:
explicit IsParameterMatcher(const Matcher<int>& index_matcher)
- : NodeMatcher(IrOpcode::kParameter), index_matcher_(index_matcher) {}
+ : TestNodeMatcher(IrOpcode::kParameter), index_matcher_(index_matcher) {}
void DescribeTo(std::ostream* os) const override {
*os << "is a Parameter node with index(";
@@ -1456,7 +1433,7 @@ class IsParameterMatcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(ParameterIndexOf(node->op()), "index",
index_matcher_, listener));
}
@@ -1468,7 +1445,7 @@ class IsParameterMatcher final : public NodeMatcher {
} // namespace
Matcher<Node*> IsDead() {
- return MakeMatcher(new NodeMatcher(IrOpcode::kDead));
+ return MakeMatcher(new TestNodeMatcher(IrOpcode::kDead));
}
Matcher<Node*> IsEnd(const Matcher<Node*>& control0_matcher) {
@@ -2050,11 +2027,11 @@ Matcher<Node*> IsParameter(const Matcher<int> index_matcher) {
Matcher<Node*> IsLoadFramePointer() {
- return MakeMatcher(new NodeMatcher(IrOpcode::kLoadFramePointer));
+ return MakeMatcher(new TestNodeMatcher(IrOpcode::kLoadFramePointer));
}
Matcher<Node*> IsLoadParentFramePointer() {
- return MakeMatcher(new NodeMatcher(IrOpcode::kLoadParentFramePointer));
+ return MakeMatcher(new TestNodeMatcher(IrOpcode::kLoadParentFramePointer));
}
#define IS_QUADOP_MATCHER(Name) \
@@ -2110,8 +2087,10 @@ IS_BINOP_MATCHER(Word32Ror)
IS_BINOP_MATCHER(Word32Equal)
IS_BINOP_MATCHER(Word64And)
IS_BINOP_MATCHER(Word64Or)
+IS_BINOP_MATCHER(Word64Xor)
IS_BINOP_MATCHER(Word64Sar)
IS_BINOP_MATCHER(Word64Shl)
+IS_BINOP_MATCHER(Word64Shr)
IS_BINOP_MATCHER(Word64Equal)
IS_BINOP_MATCHER(Int32AddWithOverflow)
IS_BINOP_MATCHER(Int32SubWithOverflow)
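
The blanket NodeMatcher to TestNodeMatcher rename has a concrete C++ reason: these matchers live in an anonymous namespace inside namespace compiler, and anonymous-namespace members are injected into the enclosing namespace. Once the production compiler::NodeMatcher (from src/compiler/node-matchers.h) is visible in the same scope, the unqualified name becomes ambiguous. A self-contained sketch, with compiler_sketch standing in for the real namespace:

    namespace compiler_sketch {
    struct NodeMatcher {};      // stands in for the production class

    namespace {
    struct TestNodeMatcher {};  // the renamed test-local matcher; with the
    // struct NodeMatcher {};   // old name, `NodeMatcher n;` below would be
    }  // namespace             // ambiguous between the two declarations

    inline int Use() {
      NodeMatcher n;            // unambiguous after the rename
      TestNodeMatcher t;
      (void)n;
      (void)t;
      return 0;
    }
    }  // namespace compiler_sketch

    int main() { return compiler_sketch::Use(); }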
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index 683ee2c964..81e471f30f 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -358,8 +358,12 @@ Matcher<Node*> IsWord64And(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsWord64Or(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord64Xor(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsWord64Shl(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord64Shr(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsWord64Sar(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsWord64Equal(const Matcher<Node*>& lhs_matcher,
@@ -473,6 +477,81 @@ Matcher<Node*> IsWord32ReverseBytes(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsStackSlot();
+// Helpers
+static inline Matcher<Node*> IsIntPtrConstant(const intptr_t value) {
+ return kPointerSize == 8 ? IsInt64Constant(static_cast<int64_t>(value))
+ : IsInt32Constant(static_cast<int32_t>(value));
+}
+
+static inline Matcher<Node*> IsIntPtrAdd(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher) {
+ return kPointerSize == 8 ? IsInt64Add(lhs_matcher, rhs_matcher)
+ : IsInt32Add(lhs_matcher, rhs_matcher);
+}
+
+static inline Matcher<Node*> IsIntPtrSub(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher) {
+ return kPointerSize == 8 ? IsInt64Sub(lhs_matcher, rhs_matcher)
+ : IsInt32Sub(lhs_matcher, rhs_matcher);
+}
+
+static inline Matcher<Node*> IsIntPtrMul(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher) {
+ return kPointerSize == 8 ? IsInt64Mul(lhs_matcher, rhs_matcher)
+ : IsInt32Mul(lhs_matcher, rhs_matcher);
+}
+
+static inline Matcher<Node*> IsWordShl(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher) {
+ return kPointerSize == 8 ? IsWord64Shl(lhs_matcher, rhs_matcher)
+ : IsWord32Shl(lhs_matcher, rhs_matcher);
+}
+
+static inline Matcher<Node*> IsWordShr(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher) {
+ return kPointerSize == 8 ? IsWord64Shr(lhs_matcher, rhs_matcher)
+ : IsWord32Shr(lhs_matcher, rhs_matcher);
+}
+
+static inline Matcher<Node*> IsWordSar(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher) {
+ return kPointerSize == 8 ? IsWord64Sar(lhs_matcher, rhs_matcher)
+ : IsWord32Sar(lhs_matcher, rhs_matcher);
+}
+
+static inline Matcher<Node*> IsWordAnd(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher) {
+ return kPointerSize == 8 ? IsWord64And(lhs_matcher, rhs_matcher)
+ : IsWord32And(lhs_matcher, rhs_matcher);
+}
+
+static inline Matcher<Node*> IsWordOr(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher) {
+ return kPointerSize == 8 ? IsWord64Or(lhs_matcher, rhs_matcher)
+ : IsWord32Or(lhs_matcher, rhs_matcher);
+}
+
+static inline Matcher<Node*> IsWordXor(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher) {
+ return kPointerSize == 8 ? IsWord64Xor(lhs_matcher, rhs_matcher)
+ : IsWord32Xor(lhs_matcher, rhs_matcher);
+}
+
+static inline Matcher<Node*> IsChangeInt32ToIntPtr(
+ const Matcher<Node*>& matcher) {
+ return kPointerSize == 8 ? IsChangeInt32ToInt64(matcher) : matcher;
+}
+
+static inline Matcher<Node*> IsChangeUint32ToWord(
+ const Matcher<Node*>& matcher) {
+ return kPointerSize == 8 ? IsChangeUint32ToUint64(matcher) : matcher;
+}
+
+static inline Matcher<Node*> IsTruncateWordToWord32(
+ const Matcher<Node*>& matcher) {
+ return kPointerSize == 8 ? IsTruncateInt64ToInt32(matcher) : matcher;
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
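
The new IsIntPtr*/IsWord*/IsChange* helpers above all apply one dispatch rule: when kPointerSize is 8, forward to the 64-bit matcher, otherwise to the 32-bit one, so tests can assert on pointer-width operations once instead of per architecture. A runnable reduction of that rule (kPointerSize here is computed from sizeof(void*) on the host, standing in for V8's constant of the same name):

    #include <cstdio>

    const int kPointerSize = static_cast<int>(sizeof(void*));

    // Pick the matcher flavor the helpers would forward to on this target.
    const char* WordShl() {
      return kPointerSize == 8 ? "Word64Shl" : "Word32Shl";
    }
    const char* IntPtrAdd() {
      return kPointerSize == 8 ? "Int64Add" : "Int32Add";
    }

    int main() {
      std::printf("IsWordShl -> %s, IsIntPtrAdd -> %s\n",
                  WordShl(), IntPtrAdd());
      return 0;
    }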
diff --git a/deps/v8/test/unittests/compiler/ppc/instruction-selector-ppc-unittest.cc b/deps/v8/test/unittests/compiler/ppc/instruction-selector-ppc-unittest.cc
index 5fe72eec40..86f7d69ec9 100644
--- a/deps/v8/test/unittests/compiler/ppc/instruction-selector-ppc-unittest.cc
+++ b/deps/v8/test/unittests/compiler/ppc/instruction-selector-ppc-unittest.cc
@@ -4,6 +4,8 @@
#include "test/unittests/compiler/instruction-selector-unittest.h"
+#include "src/assembler-inl.h"
+
namespace v8 {
namespace internal {
namespace compiler {} // namespace compiler
diff --git a/deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc b/deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc
index 2dc161b150..0ad114241f 100644
--- a/deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc
@@ -296,7 +296,7 @@ TEST_F(MoveOptimizerTest, GapsCanMoveOverInstruction) {
last->GetParallelMove(Instruction::GapPosition::START);
CHECK(inst1_start == nullptr || NonRedundantSize(inst1_start) == 0);
CHECK(inst1_end == nullptr || NonRedundantSize(inst1_end) == 0);
- CHECK(last_start->size() == 2);
+ CHECK_EQ(2, last_start->size());
int redundants = 0;
int assignment = 0;
for (MoveOperands* move : *last_start) {
diff --git a/deps/v8/test/unittests/compiler/s390/instruction-selector-s390-unittest.cc b/deps/v8/test/unittests/compiler/s390/instruction-selector-s390-unittest.cc
index 5fe72eec40..86f7d69ec9 100644
--- a/deps/v8/test/unittests/compiler/s390/instruction-selector-s390-unittest.cc
+++ b/deps/v8/test/unittests/compiler/s390/instruction-selector-s390-unittest.cc
@@ -4,6 +4,8 @@
#include "test/unittests/compiler/instruction-selector-unittest.h"
+#include "src/assembler-inl.h"
+
namespace v8 {
namespace internal {
namespace compiler {} // namespace compiler
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
index cde85133f3..2e67c0d4df 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
@@ -19,6 +19,7 @@ using testing::BitEq;
namespace v8 {
namespace internal {
namespace compiler {
+namespace simplified_operator_reducer_unittest {
class SimplifiedOperatorReducerTest : public GraphTest {
public:
@@ -498,6 +499,7 @@ TEST_F(SimplifiedOperatorReducerTest, ObjectIsSmiWithNumberConstant) {
}
}
+} // namespace simplified_operator_reducer_unittest
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc b/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
index 820ef56e60..b527a36c55 100644
--- a/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
@@ -22,6 +22,7 @@ using testing::IsNaN;
namespace v8 {
namespace internal {
namespace compiler {
+namespace typed_optimization_unittest {
namespace {
@@ -219,6 +220,7 @@ TEST_F(TypedOptimizationTest, JSToBooleanWithNonZeroPlainNumber) {
EXPECT_THAT(r.replacement(), IsTrueConstant());
}
+} // namespace typed_optimization_unittest
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/detachable-vector-unittest.cc b/deps/v8/test/unittests/detachable-vector-unittest.cc
new file mode 100644
index 0000000000..f9c846df22
--- /dev/null
+++ b/deps/v8/test/unittests/detachable-vector-unittest.cc
@@ -0,0 +1,67 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/detachable-vector.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+TEST(DetachableVector, ConstructIsEmpty) {
+ DetachableVector<int> v;
+
+ size_t empty_size = 0;
+ EXPECT_EQ(empty_size, v.size());
+ EXPECT_TRUE(v.empty());
+}
+
+TEST(DetachableVector, PushAddsElement) {
+ DetachableVector<int> v;
+
+ v.push_back(1);
+
+ EXPECT_EQ(1, v.front());
+ EXPECT_EQ(1, v.back());
+ EXPECT_EQ(1, v.at(0));
+ size_t one_size = 1;
+ EXPECT_EQ(one_size, v.size());
+ EXPECT_FALSE(v.empty());
+}
+
+TEST(DetachableVector, AfterFreeIsEmpty) {
+ DetachableVector<int> v;
+
+ v.push_back(1);
+ v.free();
+
+ size_t empty_size = 0;
+ EXPECT_EQ(empty_size, v.size());
+ EXPECT_TRUE(v.empty());
+}
+
+// This test relies on ASAN to detect leaks and double-frees.
+TEST(DetachableVector, DetachLeaksBackingStore) {
+ DetachableVector<int> v;
+ DetachableVector<int> v2;
+
+ size_t one_size = 1;
+ EXPECT_TRUE(v2.empty());
+
+ // Force allocation of the backing store.
+ v.push_back(1);
+ // Bit-copy the data structure.
+ memcpy(&v2, &v, sizeof(DetachableVector<int>));
+  // The backing store is intentionally leaked here, since free() was not called.
+ v.detach();
+
+ // We have transferred the backing store to the second vector.
+ EXPECT_EQ(one_size, v2.size());
+ EXPECT_TRUE(v.empty());
+
+ // The destructor of v2 will release the backing store.
+}
+
+} // namespace internal
+} // namespace v8
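The new test bit-copies the vector and then detaches the original, so exactly one copy owns the backing store. A toy analog of that ownership hand-off (hypothetical type, not V8's DetachableVector):

#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <cstring>

// A trivially copyable vector-like holder; detach() drops ownership
// without freeing, so a bit-copy can take over the backing store.
struct TinyVec {
  int* data = nullptr;
  size_t size = 0;
  void push_back(int v) {
    data = static_cast<int*>(realloc(data, (size + 1) * sizeof(int)));
    data[size++] = v;
  }
  void detach() { data = nullptr; size = 0; }
  void release() { free(data); detach(); }
};

int main() {
  TinyVec v, v2;
  v.push_back(1);
  memcpy(&v2, &v, sizeof(TinyVec));  // bit-copy, as in the test
  v.detach();                        // v no longer owns the store
  assert(v.size == 0 && v2.size == 1 && v2.data[0] == 1);
  v2.release();                      // single free; no double-free
  return 0;
}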
diff --git a/deps/v8/test/unittests/eh-frame-iterator-unittest.cc b/deps/v8/test/unittests/eh-frame-iterator-unittest.cc
index 1b8adddaee..b228cc9caf 100644
--- a/deps/v8/test/unittests/eh-frame-iterator-unittest.cc
+++ b/deps/v8/test/unittests/eh-frame-iterator-unittest.cc
@@ -5,12 +5,13 @@
#include "src/eh-frame.h"
#include "testing/gtest/include/gtest/gtest.h"
+namespace v8 {
+namespace internal {
+
// Test enabled only on supported architectures.
#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM) || \
defined(V8_TARGET_ARCH_ARM64)
-using namespace v8::internal;
-
namespace {
class EhFrameIteratorTest : public testing::Test {};
@@ -59,3 +60,6 @@ TEST_F(EhFrameIteratorTest, SLEB128DecodingNegative) {
}
#endif
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/eh-frame-writer-unittest.cc b/deps/v8/test/unittests/eh-frame-writer-unittest.cc
index 13b970f448..0213835e9f 100644
--- a/deps/v8/test/unittests/eh-frame-writer-unittest.cc
+++ b/deps/v8/test/unittests/eh-frame-writer-unittest.cc
@@ -5,12 +5,13 @@
#include "src/eh-frame.h"
#include "test/unittests/test-utils.h"
+namespace v8 {
+namespace internal {
+
// Test enabled only on supported architectures.
#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM) || \
defined(V8_TARGET_ARCH_ARM64)
-using namespace v8::internal;
-
namespace {
class EhFrameWriterTest : public TestWithZone {
@@ -467,3 +468,6 @@ TEST_F(EhFrameWriterTest, EhFrameHdrLayout) {
}
#endif
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/heap/barrier-unittest.cc b/deps/v8/test/unittests/heap/barrier-unittest.cc
new file mode 100644
index 0000000000..1d42f97a4f
--- /dev/null
+++ b/deps/v8/test/unittests/heap/barrier-unittest.cc
@@ -0,0 +1,145 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/barrier.h"
+#include "src/base/platform/platform.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+namespace heap {
+
+TEST(OneshotBarrier, InitializeNotDone) {
+ OneshotBarrier barrier;
+ EXPECT_FALSE(barrier.DoneForTesting());
+}
+
+TEST(OneshotBarrier, DoneAfterWait_Sequential) {
+ OneshotBarrier barrier;
+ barrier.Start();
+ barrier.Wait();
+ EXPECT_TRUE(barrier.DoneForTesting());
+}
+
+namespace {
+
+class ThreadWaitingOnBarrier final : public base::Thread {
+ public:
+ ThreadWaitingOnBarrier()
+ : base::Thread(Options("ThreadWaitingOnBarrier")), barrier_(nullptr) {}
+
+ void Initialize(OneshotBarrier* barrier) { barrier_ = barrier; }
+
+ void Run() final { barrier_->Wait(); }
+
+ private:
+ OneshotBarrier* barrier_;
+};
+
+} // namespace
+
+TEST(OneshotBarrier, DoneAfterWait_Concurrent) {
+ const int kThreadCount = 2;
+ OneshotBarrier barrier;
+ ThreadWaitingOnBarrier threads[kThreadCount];
+ for (int i = 0; i < kThreadCount; i++) {
+ threads[i].Initialize(&barrier);
+ // All threads need to call Wait() to be done.
+ barrier.Start();
+ }
+ for (int i = 0; i < kThreadCount; i++) {
+ threads[i].Start();
+ }
+ for (int i = 0; i < kThreadCount; i++) {
+ threads[i].Join();
+ }
+ EXPECT_TRUE(barrier.DoneForTesting());
+}
+
+TEST(OneshotBarrier, EarlyFinish_Concurrent) {
+ const int kThreadCount = 2;
+ OneshotBarrier barrier;
+ ThreadWaitingOnBarrier threads[kThreadCount];
+  // Test that a thread that finishes processing its work before the other
+  // threads call Start() moves the barrier into the Done state.
+ barrier.Start();
+ barrier.Wait();
+ EXPECT_TRUE(barrier.DoneForTesting());
+ for (int i = 0; i < kThreadCount; i++) {
+ threads[i].Initialize(&barrier);
+ // All threads need to call Wait() to be done.
+ barrier.Start();
+ }
+ for (int i = 0; i < kThreadCount; i++) {
+ threads[i].Start();
+ }
+ for (int i = 0; i < kThreadCount; i++) {
+ threads[i].Join();
+ }
+ EXPECT_TRUE(barrier.DoneForTesting());
+}
+
+namespace {
+
+class CountingThread final : public base::Thread {
+ public:
+ CountingThread(OneshotBarrier* barrier, base::Mutex* mutex, size_t* work)
+ : base::Thread(Options("CountingThread")),
+ barrier_(barrier),
+ mutex_(mutex),
+ work_(work),
+ processed_work_(0) {}
+
+ void Run() final {
+ do {
+ ProcessWork();
+ } while (!barrier_->Wait());
+    // The main thread does not process work, so we need one final step.
+ ProcessWork();
+ }
+
+ size_t processed_work() const { return processed_work_; }
+
+ private:
+ void ProcessWork() {
+ base::LockGuard<base::Mutex> guard(mutex_);
+ processed_work_ += *work_;
+ *work_ = 0;
+ }
+
+ OneshotBarrier* const barrier_;
+ base::Mutex* const mutex_;
+ size_t* const work_;
+ size_t processed_work_;
+};
+
+} // namespace
+
+TEST(OneshotBarrier, Processing_Concurrent) {
+ const size_t kWorkCounter = 173173;
+ OneshotBarrier barrier;
+ base::Mutex mutex;
+ size_t work = 0;
+ CountingThread counting_thread(&barrier, &mutex, &work);
+ barrier.Start();
+ barrier.Start();
+ EXPECT_FALSE(barrier.DoneForTesting());
+ counting_thread.Start();
+
+ for (size_t i = 0; i < kWorkCounter; i++) {
+ {
+ base::LockGuard<base::Mutex> guard(&mutex);
+ work++;
+ }
+ barrier.NotifyAll();
+ }
+ barrier.Wait();
+ counting_thread.Join();
+ EXPECT_TRUE(barrier.DoneForTesting());
+ EXPECT_EQ(kWorkCounter, counting_thread.processed_work());
+}
+
+} // namespace heap
+} // namespace internal
+} // namespace v8
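For readers unfamiliar with the protocol these tests exercise: Start() registers a participant, Wait() blocks until either new work arrives (returning false) or every registered participant is waiting (returning true), and NotifyAll() wakes waiters to look for work. A simplified condition-variable analog, assuming these semantics (not V8's src/heap/barrier.h implementation):

#include <condition_variable>
#include <mutex>

class OneshotBarrierSketch {
 public:
  void Start() {  // register one participant
    std::lock_guard<std::mutex> lock(mutex_);
    ++participants_;
  }
  void NotifyAll() { cv_.notify_all(); }  // new work is available
  bool Wait() {  // returns true once all participants are waiting
    std::unique_lock<std::mutex> lock(mutex_);
    ++waiting_;
    if (waiting_ == participants_) {
      done_ = true;
      cv_.notify_all();
      return true;
    }
    cv_.wait(lock);  // woken by NotifyAll() or by the last waiter
    --waiting_;
    return done_;
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  int participants_ = 0;
  int waiting_ = 0;
  bool done_ = false;
};

int main() {
  OneshotBarrierSketch barrier;
  barrier.Start();
  return barrier.Wait() ? 0 : 1;  // single participant: done immediately
}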
diff --git a/deps/v8/test/unittests/heap/heap-unittest.cc b/deps/v8/test/unittests/heap/heap-unittest.cc
index 3b7b610c8c..1f2aab06d6 100644
--- a/deps/v8/test/unittests/heap/heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/heap-unittest.cc
@@ -99,7 +99,7 @@ TEST_F(HeapTest, ASLR) {
}
if (hints.size() == 1) {
EXPECT_TRUE((*hints.begin()) == nullptr);
- EXPECT_TRUE(base::OS::GetRandomMmapAddr() == nullptr);
+ EXPECT_TRUE(v8::internal::GetRandomMmapAddr() == nullptr);
} else {
// It is unlikely that 1000 random samples will collide to fewer than 500
// distinct values.
diff --git a/deps/v8/test/unittests/heap/marking-unittest.cc b/deps/v8/test/unittests/heap/marking-unittest.cc
index 073105f494..9dd432c175 100644
--- a/deps/v8/test/unittests/heap/marking-unittest.cc
+++ b/deps/v8/test/unittests/heap/marking-unittest.cc
@@ -32,7 +32,7 @@ TEST(Marking, TransitionWhiteBlackWhite) {
free(bitmap);
}
-TEST(Marking, TransitionWhiteGreyBlackGrey) {
+TEST(Marking, TransitionWhiteGreyBlack) {
Bitmap* bitmap = reinterpret_cast<Bitmap*>(
calloc(Bitmap::kSize / kPointerSize, kPointerSize));
const int kLocationsSize = 3;
@@ -51,10 +51,6 @@ TEST(Marking, TransitionWhiteGreyBlackGrey) {
CHECK(Marking::IsBlack(mark_bit));
CHECK(Marking::IsBlackOrGrey(mark_bit));
CHECK(!Marking::IsImpossible(mark_bit));
- Marking::BlackToGrey(mark_bit);
- CHECK(Marking::IsGrey(mark_bit));
- CHECK(Marking::IsBlackOrGrey(mark_bit));
- CHECK(!Marking::IsImpossible(mark_bit));
Marking::MarkWhite(mark_bit);
CHECK(Marking::IsWhite(mark_bit));
CHECK(!Marking::IsImpossible(mark_bit));
diff --git a/deps/v8/test/unittests/heap/worklist-unittest.cc b/deps/v8/test/unittests/heap/worklist-unittest.cc
index 1a16ac60c0..49423dbe77 100644
--- a/deps/v8/test/unittests/heap/worklist-unittest.cc
+++ b/deps/v8/test/unittests/heap/worklist-unittest.cc
@@ -301,5 +301,31 @@ TEST(WorkListTest, MultipleSegmentsStolen) {
EXPECT_TRUE(worklist.IsGlobalEmpty());
}
+TEST(WorkListTest, MergeGlobalPool) {
+ TestWorklist worklist1;
+ TestWorklist::View worklist_view1(&worklist1, 0);
+ SomeObject dummy;
+ for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
+ EXPECT_TRUE(worklist_view1.Push(&dummy));
+ }
+ SomeObject* retrieved = nullptr;
+ // One more push/pop to publish the full segment.
+ EXPECT_TRUE(worklist_view1.Push(nullptr));
+ EXPECT_TRUE(worklist_view1.Pop(&retrieved));
+ EXPECT_EQ(nullptr, retrieved);
+  // Merge the global pool into a new worklist.
+ TestWorklist worklist2;
+ TestWorklist::View worklist_view2(&worklist2, 0);
+ worklist2.MergeGlobalPool(&worklist1);
+ EXPECT_FALSE(worklist2.IsGlobalEmpty());
+ for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
+ EXPECT_TRUE(worklist_view2.Pop(&retrieved));
+ EXPECT_EQ(&dummy, retrieved);
+ EXPECT_FALSE(worklist_view1.Pop(&retrieved));
+ }
+ EXPECT_TRUE(worklist1.IsGlobalEmpty());
+ EXPECT_TRUE(worklist2.IsGlobalEmpty());
+}
+
} // namespace internal
} // namespace v8
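MergeGlobalPool moves published segments wholesale from one worklist's global pool into another's. A toy segmented worklist showing the publish-then-merge idea (hypothetical analog of TestWorklist):

#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

struct ToyWorklist {
  static constexpr size_t kSegmentCapacity = 4;
  std::vector<std::vector<int>> global;  // published (full) segments
  std::vector<int> local;                // the view's private segment

  void Push(int v) {
    local.push_back(v);
    if (local.size() == kSegmentCapacity) {  // segment full: publish it
      global.push_back(std::move(local));
      local.clear();
    }
  }
  void MergeGlobalPool(ToyWorklist* other) {
    for (auto& segment : other->global) global.push_back(std::move(segment));
    other->global.clear();
  }
};

int main() {
  ToyWorklist w1, w2;
  for (size_t i = 0; i < ToyWorklist::kSegmentCapacity; ++i) w1.Push(1);
  w2.MergeGlobalPool(&w1);  // w1's published segment now belongs to w2
  assert(w1.global.empty() && w2.global.size() == 1);
  return 0;
}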
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index fa93cb3f9e..5cdce7fc00 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -145,6 +145,9 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
builder.CreateArrayLiteral(0, 0, 0);
builder.CreateObjectLiteral(0, 0, 0, reg);
+ // Emit tagged template operations.
+ builder.GetTemplateObject(0);
+
// Call operations.
builder.CallAnyReceiver(reg, reg_list, 1)
.CallProperty(reg, reg_list, 1)
@@ -190,9 +193,12 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.BinaryOperationSmiLiteral(Token::Value::SAR, Smi::FromInt(42), 2)
.BinaryOperationSmiLiteral(Token::Value::SHR, Smi::FromInt(42), 2);
- // Emit count operatior invocations
- builder.CountOperation(Token::Value::ADD, 1)
- .CountOperation(Token::Value::SUB, 1);
+ // Emit unary and count operator invocations.
+ builder.UnaryOperation(Token::Value::INC, 1)
+ .UnaryOperation(Token::Value::DEC, 1)
+ .UnaryOperation(Token::Value::ADD, 1)
+ .UnaryOperation(Token::Value::SUB, 1)
+ .UnaryOperation(Token::Value::BIT_NOT, 1);
// Emit unary operator invocations.
builder.LogicalNot(ToBooleanMode::kConvertToBoolean)
@@ -221,7 +227,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.CompareNull();
// Emit conversion operator invocations.
- builder.ToNumber(reg, 1).ToObject(reg).ToName(reg);
+ builder.ToNumber(1).ToObject(reg).ToName(reg);
// Emit GetSuperConstructor.
builder.GetSuperConstructor(reg);
@@ -295,7 +301,8 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
BytecodeLabel after_rethrow;
builder.ReThrow().Bind(&after_rethrow);
- builder.ForInPrepare(reg, triple)
+ builder.ForInEnumerate(reg)
+ .ForInPrepare(triple, 1)
.ForInContinue(reg, reg)
.ForInNext(reg, reg, pair, 1)
.ForInStep(reg);
@@ -374,6 +381,12 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
// Emit debugger bytecode.
builder.Debugger();
+ // Emit abort bytecode.
+ {
+ BytecodeLabel after;
+ builder.Abort(kGenerator).Bind(&after);
+ }
+
// Insert dummy ops to force longer jumps.
for (int i = 0; i < 256; i++) {
builder.Debugger();
@@ -420,11 +433,9 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
// Insert entry for illegal bytecode as this is never willingly emitted.
scorecard[Bytecodes::ToByte(Bytecode::kIllegal)] = 1;
- if (!FLAG_type_profile) {
- // Bytecode for CollectTypeProfile is only emitted when
- // Type Information for DevTools is turned on.
- scorecard[Bytecodes::ToByte(Bytecode::kCollectTypeProfile)] = 1;
- }
+ // Bytecode for CollectTypeProfile is only emitted when
+ // Type Information for DevTools is turned on.
+ scorecard[Bytecodes::ToByte(Bytecode::kCollectTypeProfile)] = 1;
// Check return occurs at the end and only once in the BytecodeArray.
CHECK_EQ(final_bytecode, Bytecode::kReturn);
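The scorecard logic above tallies one entry per decoded bytecode and pre-marks entries that are never willingly emitted. A toy version of that bookkeeping (invented opcodes, not V8's):

#include <cstdio>

enum Bytecode : unsigned char { kIllegal, kLdaZero, kReturn, kCount };

int main() {
  int scorecard[kCount] = {0};
  const Bytecode emitted[] = {kLdaZero, kReturn};  // pretend decoded stream
  for (Bytecode b : emitted) scorecard[b]++;
  scorecard[kIllegal] = 1;  // never willingly emitted; mark as covered
  for (int i = 0; i < kCount; ++i) {
    if (scorecard[i] == 0) std::printf("bytecode %d not generated\n", i);
  }
  return 0;
}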
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
index 2befb103d6..aefef108bf 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
@@ -58,7 +58,7 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
.BinaryOperation(Token::Value::ADD, reg_0, 3)
.StoreAccumulatorInRegister(param)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
- .ForInPrepare(reg_0, triple)
+ .ForInPrepare(triple, feedback_slot)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0)
.Debugger()
.LoadGlobal(name, 0x10000000, TypeofMode::NOT_INSIDE_TYPEOF)
@@ -70,217 +70,216 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
const int kPrefixByteSize = 1;
int offset = 0;
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
- CHECK_EQ(iterator.current_offset(), offset);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
+ EXPECT_EQ(iterator.current_offset(), offset);
+ EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK(iterator.GetConstantForIndexOperand(0).is_identical_to(
heap_num_0->value()));
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
- CHECK_EQ(iterator.current_offset(), offset);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
- CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ EXPECT_EQ(iterator.current_offset(), offset);
+ EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
+ EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
- CHECK_EQ(iterator.current_offset(), offset);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
+ EXPECT_EQ(iterator.current_offset(), offset);
+ EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK(iterator.GetConstantForIndexOperand(0).is_identical_to(
heap_num_1->value()));
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
- CHECK_EQ(iterator.current_offset(), offset);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
- CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ EXPECT_EQ(iterator.current_offset(), offset);
+ EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
+ EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaZero);
- CHECK_EQ(iterator.current_offset(), offset);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaZero);
+ EXPECT_EQ(iterator.current_offset(), offset);
+ EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kLdaZero, OperandScale::kSingle);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
- CHECK_EQ(iterator.current_offset(), offset);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
- CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ EXPECT_EQ(iterator.current_offset(), offset);
+ EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
+ EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi);
- CHECK_EQ(iterator.current_offset(), offset);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(Smi::FromInt(iterator.GetImmediateOperand(0)), smi_0);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi);
+ EXPECT_EQ(iterator.current_offset(), offset);
+ EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ EXPECT_EQ(Smi::FromInt(iterator.GetImmediateOperand(0)), smi_0);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kSingle);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kStackCheck);
- CHECK_EQ(iterator.current_offset(), offset);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(Bytecodes::NumberOfOperands(iterator.current_bytecode()), 0);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStackCheck);
+ EXPECT_EQ(iterator.current_offset(), offset);
+ EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ EXPECT_EQ(Bytecodes::NumberOfOperands(iterator.current_bytecode()), 0);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
- CHECK_EQ(iterator.current_offset(), offset);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
- CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ EXPECT_EQ(iterator.current_offset(), offset);
+ EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
+ EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi);
- CHECK_EQ(iterator.current_offset(), offset);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kQuadruple);
- CHECK_EQ(Smi::FromInt(iterator.GetImmediateOperand(0)), smi_1);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi);
+ EXPECT_EQ(iterator.current_offset(), offset);
+ EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kQuadruple);
+ EXPECT_EQ(Smi::FromInt(iterator.GetImmediateOperand(0)), smi_1);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kQuadruple) +
kPrefixByteSize;
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kStackCheck);
- CHECK_EQ(iterator.current_offset(), offset);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(Bytecodes::NumberOfOperands(iterator.current_bytecode()), 0);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStackCheck);
+ EXPECT_EQ(iterator.current_offset(), offset);
+ EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ EXPECT_EQ(Bytecodes::NumberOfOperands(iterator.current_bytecode()), 0);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
- CHECK_EQ(iterator.current_offset(), offset);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
- CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ EXPECT_EQ(iterator.current_offset(), offset);
+ EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
+ EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdar);
- CHECK_EQ(iterator.current_offset(), offset);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdar);
+ EXPECT_EQ(iterator.current_offset(), offset);
+ EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kLdar, OperandScale::kSingle);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kAdd);
- CHECK_EQ(iterator.current_offset(), offset);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
- CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kAdd);
+ EXPECT_EQ(iterator.current_offset(), offset);
+ EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
+ EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
- CHECK_EQ(iterator.current_offset(), offset);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
- CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ EXPECT_EQ(iterator.current_offset(), offset);
+ EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
+ EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaNamedProperty);
- CHECK_EQ(iterator.current_offset(), offset);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
- CHECK_EQ(iterator.GetIndexOperand(1), name_index);
- CHECK_EQ(iterator.GetIndexOperand(2), feedback_slot);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaNamedProperty);
+ EXPECT_EQ(iterator.current_offset(), offset);
+ EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
+ EXPECT_EQ(iterator.GetIndexOperand(1), name_index);
+ EXPECT_EQ(iterator.GetIndexOperand(2), feedback_slot);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kLdaNamedProperty, OperandScale::kSingle);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kAdd);
- CHECK_EQ(iterator.current_offset(), offset);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
- CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kAdd);
+ EXPECT_EQ(iterator.current_offset(), offset);
+ EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
+ EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
- CHECK_EQ(iterator.current_offset(), offset);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(iterator.GetRegisterOperand(0).index(), param.index());
- CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ EXPECT_EQ(iterator.current_offset(), offset);
+ EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ EXPECT_EQ(iterator.GetRegisterOperand(0).index(), param.index());
+ EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kCallRuntimeForPair);
- CHECK_EQ(iterator.current_offset(), offset);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(iterator.GetRuntimeIdOperand(0), Runtime::kLoadLookupSlotForCall);
- CHECK_EQ(iterator.GetRegisterOperand(1).index(), param.index());
- CHECK_EQ(iterator.GetRegisterOperandRange(1), 1);
- CHECK_EQ(iterator.GetRegisterCountOperand(2), 1u);
- CHECK_EQ(iterator.GetRegisterOperand(3).index(), reg_0.index());
- CHECK_EQ(iterator.GetRegisterOperandRange(3), 2);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kCallRuntimeForPair);
+ EXPECT_EQ(iterator.current_offset(), offset);
+ EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ EXPECT_EQ(iterator.GetRuntimeIdOperand(0), Runtime::kLoadLookupSlotForCall);
+ EXPECT_EQ(iterator.GetRegisterOperand(1).index(), param.index());
+ EXPECT_EQ(iterator.GetRegisterOperandRange(1), 1);
+ EXPECT_EQ(iterator.GetRegisterCountOperand(2), 1u);
+ EXPECT_EQ(iterator.GetRegisterOperand(3).index(), reg_0.index());
+ EXPECT_EQ(iterator.GetRegisterOperandRange(3), 2);
CHECK(!iterator.done());
offset +=
Bytecodes::Size(Bytecode::kCallRuntimeForPair, OperandScale::kSingle);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kForInPrepare);
- CHECK_EQ(iterator.current_offset(), offset);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
- CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
- CHECK_EQ(iterator.GetRegisterOperand(1).index(), reg_0.index());
- CHECK_EQ(iterator.GetRegisterOperandRange(1), 3);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kForInPrepare);
+ EXPECT_EQ(iterator.current_offset(), offset);
+ EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
+ EXPECT_EQ(iterator.GetRegisterOperandRange(0), 3);
+ EXPECT_EQ(iterator.GetIndexOperand(1), feedback_slot);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kForInPrepare, OperandScale::kSingle);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kCallRuntime);
- CHECK_EQ(iterator.current_offset(), offset);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(iterator.GetRuntimeIdOperand(0), Runtime::kLoadIC_Miss);
- CHECK_EQ(iterator.GetRegisterOperand(1).index(), reg_0.index());
- CHECK_EQ(iterator.GetRegisterCountOperand(2), 1u);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kCallRuntime);
+ EXPECT_EQ(iterator.current_offset(), offset);
+ EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ EXPECT_EQ(iterator.GetRuntimeIdOperand(0), Runtime::kLoadIC_Miss);
+ EXPECT_EQ(iterator.GetRegisterOperand(1).index(), reg_0.index());
+ EXPECT_EQ(iterator.GetRegisterCountOperand(2), 1u);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kCallRuntime, OperandScale::kSingle);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kDebugger);
- CHECK_EQ(iterator.current_offset(), offset);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kDebugger);
+ EXPECT_EQ(iterator.current_offset(), offset);
+ EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kDebugger, OperandScale::kSingle);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaGlobal);
- CHECK_EQ(iterator.current_offset(), offset);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kQuadruple);
- CHECK_EQ(iterator.current_bytecode_size(), 10);
- CHECK_EQ(iterator.GetIndexOperand(1), 0x10000000u);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaGlobal);
+ EXPECT_EQ(iterator.current_offset(), offset);
+ EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kQuadruple);
+ EXPECT_EQ(iterator.current_bytecode_size(), 10);
+ EXPECT_EQ(iterator.GetIndexOperand(1), 0x10000000u);
offset += Bytecodes::Size(Bytecode::kLdaGlobal, OperandScale::kQuadruple) +
kPrefixByteSize;
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kReturn);
- CHECK_EQ(iterator.current_offset(), offset);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kReturn);
+ EXPECT_EQ(iterator.current_offset(), offset);
+ EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK(!iterator.done());
iterator.Advance();
CHECK(iterator.done());
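The long expectation chain above advances a byte offset by each bytecode's size, exactly what the iterator does internally when Advance() is called. A standalone sketch of that bookkeeping (toy sizes, not V8's encoding):

#include <cstdio>
#include <vector>

struct Insn { const char* name; int size; };  // size stands in for Bytecodes::Size(...)

int main() {
  const std::vector<Insn> stream = {
      {"LdaConstant", 2}, {"Star", 2}, {"LdaSmi.ExtraWide", 6}, {"Return", 1}};
  int offset = 0;
  for (const Insn& insn : stream) {
    std::printf("%3d: %s\n", offset, insn.name);
    offset += insn.size;  // mirrors offset += Bytecodes::Size(...)
  }
  return 0;
}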
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
index 294adf711f..7d9bcd09c0 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
@@ -57,7 +57,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, InvalidBeforeStart) {
.BinaryOperation(Token::Value::ADD, reg_0, 3)
.StoreAccumulatorInRegister(param)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
- .ForInPrepare(reg_0, triple)
+ .ForInPrepare(triple, feedback_slot)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0)
.Debugger()
.LoadGlobal(name, 0x10000000, TypeofMode::NOT_INSIDE_TYPEOF)
@@ -111,7 +111,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, InvalidAfterEnd) {
.BinaryOperation(Token::Value::ADD, reg_0, 3)
.StoreAccumulatorInRegister(param)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
- .ForInPrepare(reg_0, triple)
+ .ForInPrepare(triple, feedback_slot)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0)
.Debugger()
.LoadGlobal(name, 0x10000000, TypeofMode::NOT_INSIDE_TYPEOF)
@@ -165,7 +165,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, AccessesFirst) {
.BinaryOperation(Token::Value::ADD, reg_0, 3)
.StoreAccumulatorInRegister(param)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
- .ForInPrepare(reg_0, triple)
+ .ForInPrepare(triple, feedback_slot)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0)
.Debugger()
.LoadGlobal(name, 0x10000000, TypeofMode::NOT_INSIDE_TYPEOF)
@@ -224,7 +224,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, AccessesLast) {
.BinaryOperation(Token::Value::ADD, reg_0, 3)
.StoreAccumulatorInRegister(param)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
- .ForInPrepare(reg_0, triple)
+ .ForInPrepare(triple, feedback_slot)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0)
.Debugger()
.LoadGlobal(name, 0x10000000, TypeofMode::NOT_INSIDE_TYPEOF)
@@ -284,7 +284,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
.BinaryOperation(Token::Value::ADD, reg_0, 3)
.StoreAccumulatorInRegister(param)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
- .ForInPrepare(reg_0, triple)
+ .ForInPrepare(triple, feedback_slot)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0)
.Debugger()
.LoadGlobal(name, 0x10000000, TypeofMode::NOT_INSIDE_TYPEOF)
@@ -472,7 +472,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
.BinaryOperation(Token::Value::ADD, reg_0, 3)
.StoreAccumulatorInRegister(param)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
- .ForInPrepare(reg_0, triple)
+ .ForInPrepare(triple, feedback_slot)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0)
.Debugger()
.LoadGlobal(name, 0x10000000, TypeofMode::NOT_INSIDE_TYPEOF)
@@ -680,9 +680,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
- EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
- EXPECT_EQ(iterator.GetRegisterOperand(1).index(), reg_0.index());
- EXPECT_EQ(iterator.GetRegisterOperandRange(1), 3);
+ EXPECT_EQ(iterator.GetRegisterOperandRange(0), 3);
+ EXPECT_EQ(iterator.GetIndexOperand(1), feedback_slot);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kForInPrepare, OperandScale::kSingle);
++iterator;
@@ -764,7 +763,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
.BinaryOperation(Token::Value::ADD, reg_0, 3)
.StoreAccumulatorInRegister(param)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
- .ForInPrepare(reg_0, triple)
+ .ForInPrepare(triple, feedback_slot)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0)
.Debugger()
.LoadGlobal(name, 0x10000000, TypeofMode::NOT_INSIDE_TYPEOF)
@@ -822,9 +821,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
- EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
- EXPECT_EQ(iterator.GetRegisterOperand(1).index(), reg_0.index());
- EXPECT_EQ(iterator.GetRegisterOperandRange(1), 3);
+ EXPECT_EQ(iterator.GetRegisterOperandRange(0), 3);
+ EXPECT_EQ(iterator.GetIndexOperand(1), feedback_slot);
ASSERT_TRUE(iterator.IsValid());
--iterator;
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
index 680d8197b2..bd8f702d5f 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
@@ -169,7 +169,7 @@ TEST_F(BytecodeArrayWriterUnittest, ComplexExample) {
/* 5 68 S> */ B(JumpIfUndefined), U8(39),
/* 7 */ B(JumpIfNull), U8(37),
/* 9 */ B(ToObject), R8(3),
- /* 11 */ B(ForInPrepare), R8(3), R8(4),
+ /* 11 */ B(ForInPrepare), R8(3), U8(4),
/* 14 */ B(LdaZero),
/* 15 */ B(Star), R8(7),
/* 17 63 S> */ B(ForInContinue), R8(7), R8(6),
@@ -201,7 +201,7 @@ TEST_F(BytecodeArrayWriterUnittest, ComplexExample) {
WriteJump(Bytecode::kJumpIfUndefined, &jump_end_1, {68, true});
WriteJump(Bytecode::kJumpIfNull, &jump_end_2);
Write(Bytecode::kToObject, R(3));
- Write(Bytecode::kForInPrepare, R(3), R(4));
+ Write(Bytecode::kForInPrepare, R(3), U8(4));
Write(Bytecode::kLdaZero);
Write(Bytecode::kStar, R(7));
writer()->BindLabel(&back_jump);
diff --git a/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
index f9c0877664..612b96e32c 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
@@ -6,6 +6,7 @@
#include "src/v8.h"
+#include "src/contexts.h"
#include "src/interpreter/bytecode-decoder.h"
#include "src/runtime/runtime.h"
#include "test/unittests/interpreter/bytecode-utils.h"
@@ -42,10 +43,10 @@ TEST(BytecodeDecoder, DecodeBytecodeAndOperands) {
10,
0,
"CallAnyReceiver.Wide r134, r135-r144, [177]"},
- {{B(ForInPrepare), R8(10), R8(11)},
+ {{B(ForInPrepare), R8(10), U8(11)},
3,
0,
- " ForInPrepare r10, r11-r13"},
+ " ForInPrepare r10-r12, [11]"},
{{B(CallRuntime), U16(Runtime::FunctionId::kIsDate), R8(0), U8(0)},
5,
0,
@@ -64,7 +65,10 @@ TEST(BytecodeDecoder, DecodeBytecodeAndOperands) {
6,
0,
"JumpIfNull.ExtraWide [123456789]"},
- };
+ {{B(CallJSRuntime), U8(Context::BOOLEAN_FUNCTION_INDEX), R8(0), U8(0)},
+ 4,
+ 0,
+ " CallJSRuntime [boolean_function], r0-r0"}};
for (size_t i = 0; i < arraysize(cases); ++i) {
// Generate reference string by prepending formatted bytes.
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
index d06b5987e5..2c3f182395 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
@@ -13,20 +13,12 @@
#include "test/unittests/compiler/node-test-utils.h"
using ::testing::_;
+using v8::internal::compiler::Node;
+
+namespace c = v8::internal::compiler;
namespace v8 {
namespace internal {
-
-using namespace compiler;
-
-#ifdef ENABLE_VERIFY_CSA
-#define IS_BITCAST_WORD_TO_TAGGED_SIGNED(x) IsBitcastWordToTaggedSigned(x)
-#define IS_BITCAST_TAGGED_TO_WORD(x) IsBitcastTaggedToWord(x)
-#else
-#define IS_BITCAST_WORD_TO_TAGGED_SIGNED(x) (x)
-#define IS_BITCAST_TAGGED_TO_WORD(x) (x)
-#endif
-
namespace interpreter {
InterpreterAssemblerTestState::InterpreterAssemblerTestState(
@@ -34,8 +26,8 @@ InterpreterAssemblerTestState::InterpreterAssemblerTestState(
: compiler::CodeAssemblerState(
test->isolate(), test->zone(),
InterpreterDispatchDescriptor(test->isolate()),
- Code::ComputeFlags(Code::BYTECODE_HANDLER),
- Bytecodes::ToString(bytecode), Bytecodes::ReturnCount(bytecode)) {}
+ Code::BYTECODE_HANDLER, Bytecodes::ToString(bytecode),
+ Bytecodes::ReturnCount(bytecode)) {}
const interpreter::Bytecode kBytecodes[] = {
#define DEFINE_BYTECODE(Name, ...) interpreter::Bytecode::k##Name,
@@ -43,58 +35,6 @@ const interpreter::Bytecode kBytecodes[] = {
#undef DEFINE_BYTECODE
};
-Matcher<Node*> IsIntPtrConstant(const intptr_t value) {
- return kPointerSize == 8 ? IsInt64Constant(static_cast<int64_t>(value))
- : IsInt32Constant(static_cast<int32_t>(value));
-}
-
-Matcher<Node*> IsIntPtrAdd(const Matcher<Node*>& lhs_matcher,
- const Matcher<Node*>& rhs_matcher) {
- return kPointerSize == 8 ? IsInt64Add(lhs_matcher, rhs_matcher)
- : IsInt32Add(lhs_matcher, rhs_matcher);
-}
-
-Matcher<Node*> IsIntPtrSub(const Matcher<Node*>& lhs_matcher,
- const Matcher<Node*>& rhs_matcher) {
- return kPointerSize == 8 ? IsInt64Sub(lhs_matcher, rhs_matcher)
- : IsInt32Sub(lhs_matcher, rhs_matcher);
-}
-
-Matcher<Node*> IsIntPtrMul(const Matcher<Node*>& lhs_matcher,
- const Matcher<Node*>& rhs_matcher) {
- return kPointerSize == 8 ? IsInt64Mul(lhs_matcher, rhs_matcher)
- : IsInt32Mul(lhs_matcher, rhs_matcher);
-}
-
-Matcher<Node*> IsWordShl(const Matcher<Node*>& lhs_matcher,
- const Matcher<Node*>& rhs_matcher) {
- return kPointerSize == 8 ? IsWord64Shl(lhs_matcher, rhs_matcher)
- : IsWord32Shl(lhs_matcher, rhs_matcher);
-}
-
-Matcher<Node*> IsWordSar(const Matcher<Node*>& lhs_matcher,
- const Matcher<Node*>& rhs_matcher) {
- return kPointerSize == 8 ? IsWord64Sar(lhs_matcher, rhs_matcher)
- : IsWord32Sar(lhs_matcher, rhs_matcher);
-}
-
-Matcher<Node*> IsWordOr(const Matcher<Node*>& lhs_matcher,
- const Matcher<Node*>& rhs_matcher) {
- return kPointerSize == 8 ? IsWord64Or(lhs_matcher, rhs_matcher)
- : IsWord32Or(lhs_matcher, rhs_matcher);
-}
-
-Matcher<Node*> IsChangeInt32ToIntPtr(const Matcher<Node*>& matcher) {
- return kPointerSize == 8 ? IsChangeInt32ToInt64(matcher) : matcher;
-}
-
-Matcher<Node*> IsChangeUint32ToWord(const Matcher<Node*>& matcher) {
- return kPointerSize == 8 ? IsChangeUint32ToUint64(matcher) : matcher;
-}
-
-Matcher<Node*> IsTruncateWordToWord32(const Matcher<Node*>& matcher) {
- return kPointerSize == 8 ? IsTruncateInt64ToInt32(matcher) : matcher;
-}
InterpreterAssemblerTest::InterpreterAssemblerForTest::
~InterpreterAssemblerForTest() {
@@ -109,13 +49,13 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::
}
Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoad(
- const Matcher<LoadRepresentation>& rep_matcher,
+ const Matcher<c::LoadRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher) {
return ::i::compiler::IsLoad(rep_matcher, base_matcher, index_matcher, _, _);
}
Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsStore(
- const Matcher<StoreRepresentation>& rep_matcher,
+ const Matcher<c::StoreRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
const Matcher<Node*>& value_matcher) {
return ::i::compiler::IsStore(rep_matcher, base_matcher, index_matcher,
@@ -127,9 +67,10 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedByteOperand(
int offset) {
return IsLoad(
MachineType::Uint8(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
- IsIntPtrAdd(IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
- IsIntPtrConstant(offset)));
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ c::IsIntPtrAdd(
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
+ c::IsIntPtrConstant(offset)));
}
Matcher<Node*>
@@ -137,9 +78,10 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedByteOperand(
int offset) {
return IsLoad(
MachineType::Int8(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
- IsIntPtrAdd(IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
- IsIntPtrConstant(offset)));
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ c::IsIntPtrAdd(
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
+ c::IsIntPtrConstant(offset)));
}
Matcher<Node*>
@@ -148,9 +90,10 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedShortOperand(
if (TargetSupportsUnalignedAccess()) {
return IsLoad(
MachineType::Uint16(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
- IsIntPtrAdd(IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
- IsIntPtrConstant(offset)));
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ c::IsIntPtrAdd(
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
+ c::IsIntPtrConstant(offset)));
} else {
#if V8_TARGET_LITTLE_ENDIAN
const int kStep = -1;
@@ -165,13 +108,13 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedShortOperand(
for (int i = 0; i < static_cast<int>(arraysize(bytes)); i++) {
bytes[i] = IsLoad(
MachineType::Uint8(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
- IsIntPtrAdd(
- IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
- IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ c::IsIntPtrAdd(
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
+ c::IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
}
- return IsWord32Or(IsWord32Shl(bytes[0], IsInt32Constant(kBitsPerByte)),
- bytes[1]);
+ return c::IsWord32Or(
+ c::IsWord32Shl(bytes[0], c::IsInt32Constant(kBitsPerByte)), bytes[1]);
}
}
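On targets without unaligned access, the matcher above expects a 16-bit operand to be assembled from two byte loads, most-significant byte first. A host-side sketch of the same index arithmetic (kStep and kMsbOffset as in the test; the endianness macro is a stand-in):

#include <cstdint>
#include <cstdio>

uint16_t LoadU16(const uint8_t* p) {
#if defined(TARGET_BIG_ENDIAN)
  const int kStep = 1, kMsbOffset = 0;
#else
  const int kStep = -1, kMsbOffset = 1;
#endif
  const uint8_t msb = p[kMsbOffset];          // loaded first
  const uint8_t lsb = p[kMsbOffset + kStep];  // then the low byte
  return static_cast<uint16_t>((msb << 8) | lsb);  // Word32Or(Word32Shl(msb, 8), lsb)
}

int main() {
  const uint8_t bytes[2] = {0x34, 0x12};  // little-endian encoding of 0x1234
  std::printf("0x%04x\n", LoadU16(bytes));
  return 0;
}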
@@ -181,9 +124,10 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedShortOperand(
if (TargetSupportsUnalignedAccess()) {
return IsLoad(
MachineType::Int16(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
- IsIntPtrAdd(IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
- IsIntPtrConstant(offset)));
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ c::IsIntPtrAdd(
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
+ c::IsIntPtrConstant(offset)));
} else {
#if V8_TARGET_LITTLE_ENDIAN
const int kStep = -1;
@@ -198,13 +142,13 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedShortOperand(
for (int i = 0; i < static_cast<int>(arraysize(bytes)); i++) {
bytes[i] = IsLoad(
(i == 0) ? MachineType::Int8() : MachineType::Uint8(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
- IsIntPtrAdd(
- IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
- IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ c::IsIntPtrAdd(
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
+ c::IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
}
- return IsWord32Or(IsWord32Shl(bytes[0], IsInt32Constant(kBitsPerByte)),
- bytes[1]);
+ return c::IsWord32Or(
+ c::IsWord32Shl(bytes[0], c::IsInt32Constant(kBitsPerByte)), bytes[1]);
}
}
@@ -214,9 +158,10 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedQuadOperand(
if (TargetSupportsUnalignedAccess()) {
return IsLoad(
MachineType::Uint32(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
- IsIntPtrAdd(IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
- IsIntPtrConstant(offset)));
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ c::IsIntPtrAdd(
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
+ c::IsIntPtrConstant(offset)));
} else {
#if V8_TARGET_LITTLE_ENDIAN
const int kStep = -1;
@@ -231,17 +176,18 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedQuadOperand(
for (int i = 0; i < static_cast<int>(arraysize(bytes)); i++) {
bytes[i] = IsLoad(
MachineType::Uint8(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
- IsIntPtrAdd(
- IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
- IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ c::IsIntPtrAdd(
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
+ c::IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
}
- return IsWord32Or(
- IsWord32Shl(bytes[0], IsInt32Constant(3 * kBitsPerByte)),
- IsWord32Or(
- IsWord32Shl(bytes[1], IsInt32Constant(2 * kBitsPerByte)),
- IsWord32Or(IsWord32Shl(bytes[2], IsInt32Constant(1 * kBitsPerByte)),
- bytes[3])));
+ return c::IsWord32Or(
+ c::IsWord32Shl(bytes[0], c::IsInt32Constant(3 * kBitsPerByte)),
+ c::IsWord32Or(
+ c::IsWord32Shl(bytes[1], c::IsInt32Constant(2 * kBitsPerByte)),
+ c::IsWord32Or(
+ c::IsWord32Shl(bytes[2], c::IsInt32Constant(1 * kBitsPerByte)),
+ bytes[3])));
}
}
@@ -251,9 +197,10 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedQuadOperand(
if (TargetSupportsUnalignedAccess()) {
return IsLoad(
MachineType::Int32(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
- IsIntPtrAdd(IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
- IsIntPtrConstant(offset)));
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ c::IsIntPtrAdd(
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
+ c::IsIntPtrConstant(offset)));
} else {
#if V8_TARGET_LITTLE_ENDIAN
const int kStep = -1;
@@ -268,17 +215,18 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedQuadOperand(
for (int i = 0; i < static_cast<int>(arraysize(bytes)); i++) {
bytes[i] = IsLoad(
(i == 0) ? MachineType::Int8() : MachineType::Uint8(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
- IsIntPtrAdd(
- IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
- IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ c::IsIntPtrAdd(
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
+ c::IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
}
- return IsWord32Or(
- IsWord32Shl(bytes[0], IsInt32Constant(3 * kBitsPerByte)),
- IsWord32Or(
- IsWord32Shl(bytes[1], IsInt32Constant(2 * kBitsPerByte)),
- IsWord32Or(IsWord32Shl(bytes[2], IsInt32Constant(1 * kBitsPerByte)),
- bytes[3])));
+ return c::IsWord32Or(
+ c::IsWord32Shl(bytes[0], c::IsInt32Constant(3 * kBitsPerByte)),
+ c::IsWord32Or(
+ c::IsWord32Shl(bytes[1], c::IsInt32Constant(2 * kBitsPerByte)),
+ c::IsWord32Or(
+ c::IsWord32Shl(bytes[2], c::IsInt32Constant(1 * kBitsPerByte)),
+ bytes[3])));
}
}
@@ -327,25 +275,27 @@ TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
InterpreterAssemblerForTest m(&state, bytecode);
Node* tail_call_node = m.Jump(m.IntPtrConstant(jump_offset));
- Matcher<Node*> next_bytecode_offset_matcher = IsIntPtrAdd(
- IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
- IsIntPtrConstant(jump_offset));
+ Matcher<Node*> next_bytecode_offset_matcher = c::IsIntPtrAdd(
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
+ c::IsIntPtrConstant(jump_offset));
Matcher<Node*> target_bytecode_matcher =
m.IsLoad(MachineType::Uint8(), _, next_bytecode_offset_matcher);
- target_bytecode_matcher = IsChangeUint32ToWord(target_bytecode_matcher);
- Matcher<Node*> code_target_matcher =
- m.IsLoad(MachineType::Pointer(),
- IsParameter(InterpreterDispatchDescriptor::kDispatchTable),
- IsWordShl(target_bytecode_matcher,
- IsIntPtrConstant(kPointerSizeLog2)));
+ target_bytecode_matcher =
+ c::IsChangeUint32ToWord(target_bytecode_matcher);
+ Matcher<Node*> code_target_matcher = m.IsLoad(
+ MachineType::Pointer(),
+ c::IsParameter(InterpreterDispatchDescriptor::kDispatchTable),
+ c::IsWordShl(target_bytecode_matcher,
+ c::IsIntPtrConstant(kPointerSizeLog2)));
EXPECT_THAT(
tail_call_node,
- IsTailCall(_, code_target_matcher,
- IsParameter(InterpreterDispatchDescriptor::kAccumulator),
- next_bytecode_offset_matcher, _,
- IsParameter(InterpreterDispatchDescriptor::kDispatchTable),
- _, _));
+ c::IsTailCall(
+ _, code_target_matcher,
+ c::IsParameter(InterpreterDispatchDescriptor::kAccumulator),
+ next_bytecode_offset_matcher, _,
+ c::IsParameter(InterpreterDispatchDescriptor::kDispatchTable), _,
+ _));
}
}
}
@@ -377,7 +327,12 @@ TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
break;
case interpreter::OperandType::kIdx:
EXPECT_THAT(m.BytecodeOperandIdx(i),
- IsChangeUint32ToWord(
+ c::IsChangeUint32ToWord(
+ m.IsUnsignedOperand(offset, operand_size)));
+ break;
+ case interpreter::OperandType::kNativeContextIndex:
+ EXPECT_THAT(m.BytecodeOperandNativeContextIndex(i),
+ c::IsChangeUint32ToWord(
m.IsUnsignedOperand(offset, operand_size)));
break;
case interpreter::OperandType::kUImm:
@@ -396,9 +351,9 @@ TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
case interpreter::OperandType::kRegOutPair:
case interpreter::OperandType::kRegOutTriple:
case interpreter::OperandType::kRegPair:
- EXPECT_THAT(
- m.BytecodeOperandReg(i),
- IsChangeInt32ToIntPtr(m.IsSignedOperand(offset, operand_size)));
+ EXPECT_THAT(m.BytecodeOperandReg(i),
+ c::IsChangeInt32ToIntPtr(
+ m.IsSignedOperand(offset, operand_size)));
break;
case interpreter::OperandType::kRuntimeId:
EXPECT_THAT(m.BytecodeOperandRuntimeId(i),
@@ -423,9 +378,9 @@ TARGET_TEST_F(InterpreterAssemblerTest, GetContext) {
InterpreterAssemblerForTest m(&state, bytecode);
EXPECT_THAT(
m.GetContext(),
- m.IsLoad(MachineType::AnyTagged(), IsLoadParentFramePointer(),
- IsIntPtrConstant(Register::current_context().ToOperand()
- << kPointerSizeLog2)));
+ m.IsLoad(MachineType::AnyTagged(), c::IsLoadParentFramePointer(),
+ c::IsIntPtrConstant(Register::current_context().ToOperand()
+ << kPointerSizeLog2)));
}
}
@@ -433,12 +388,13 @@ TARGET_TEST_F(InterpreterAssemblerTest, RegisterLocation) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerTestState state(this, bytecode);
InterpreterAssemblerForTest m(&state, bytecode);
- Node* reg_index_node = m.IntPtrConstant(44);
+ Node* reg_index_node = m.Parameter(0);
Node* reg_location_node = m.RegisterLocation(reg_index_node);
- EXPECT_THAT(reg_location_node,
- IsIntPtrAdd(IsLoadParentFramePointer(),
- IsWordShl(reg_index_node,
- IsIntPtrConstant(kPointerSizeLog2))));
+ EXPECT_THAT(
+ reg_location_node,
+ c::IsIntPtrAdd(c::IsLoadParentFramePointer(),
+ c::IsWordShl(reg_index_node,
+ c::IsIntPtrConstant(kPointerSizeLog2))));
}
}
@@ -446,12 +402,13 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadRegister) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerTestState state(this, bytecode);
InterpreterAssemblerForTest m(&state, bytecode);
- Node* reg_index_node = m.IntPtrConstant(44);
+ Node* reg_index_node = m.Parameter(0);
Node* load_reg_node = m.LoadRegister(reg_index_node);
- EXPECT_THAT(load_reg_node,
- m.IsLoad(MachineType::AnyTagged(), IsLoadParentFramePointer(),
- IsWordShl(reg_index_node,
- IsIntPtrConstant(kPointerSizeLog2))));
+ EXPECT_THAT(
+ load_reg_node,
+ m.IsLoad(MachineType::AnyTagged(), c::IsLoadParentFramePointer(),
+ c::IsWordShl(reg_index_node,
+ c::IsIntPtrConstant(kPointerSizeLog2))));
}
}
@@ -460,62 +417,15 @@ TARGET_TEST_F(InterpreterAssemblerTest, StoreRegister) {
InterpreterAssemblerTestState state(this, bytecode);
InterpreterAssemblerForTest m(&state, bytecode);
Node* store_value = m.Int32Constant(0xdeadbeef);
- Node* reg_index_node = m.IntPtrConstant(44);
+ Node* reg_index_node = m.Parameter(0);
Node* store_reg_node = m.StoreRegister(store_value, reg_index_node);
- EXPECT_THAT(
- store_reg_node,
- m.IsStore(StoreRepresentation(MachineRepresentation::kTagged,
- kNoWriteBarrier),
- IsLoadParentFramePointer(),
- IsWordShl(reg_index_node, IsIntPtrConstant(kPointerSizeLog2)),
- store_value));
- }
-}
-
-TARGET_TEST_F(InterpreterAssemblerTest, SmiTag) {
- TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
- InterpreterAssemblerTestState state(this, bytecode);
- InterpreterAssemblerForTest m(&state, bytecode);
- Node* value = m.Int32Constant(44);
- EXPECT_THAT(
- m.SmiTag(value),
- IS_BITCAST_WORD_TO_TAGGED_SIGNED(IsIntPtrConstant(
- static_cast<intptr_t>(44) << (kSmiShiftSize + kSmiTagSize))));
- EXPECT_THAT(m.SmiUntag(value),
- IsWordSar(IS_BITCAST_TAGGED_TO_WORD(value),
- IsIntPtrConstant(kSmiShiftSize + kSmiTagSize)));
- }
-}
-
-TARGET_TEST_F(InterpreterAssemblerTest, IntPtrAdd) {
- TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
- InterpreterAssemblerTestState state(this, bytecode);
- InterpreterAssemblerForTest m(&state, bytecode);
- Node* a = m.Parameter(0);
- Node* b = m.Int32Constant(1);
- Node* add = m.IntPtrAdd(a, b);
- EXPECT_THAT(add, IsIntPtrAdd(a, b));
- }
-}
-
-TARGET_TEST_F(InterpreterAssemblerTest, IntPtrSub) {
- TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
- InterpreterAssemblerTestState state(this, bytecode);
- InterpreterAssemblerForTest m(&state, bytecode);
- Node* a = m.Parameter(0);
- Node* b = m.Int32Constant(1);
- Node* add = m.IntPtrSub(a, b);
- EXPECT_THAT(add, IsIntPtrSub(a, b));
- }
-}
-
-TARGET_TEST_F(InterpreterAssemblerTest, WordShl) {
- TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
- InterpreterAssemblerTestState state(this, bytecode);
- InterpreterAssemblerForTest m(&state, bytecode);
- Node* a = m.IntPtrConstant(0);
- Node* add = m.WordShl(a, 10);
- EXPECT_THAT(add, IsWordShl(a, IsIntPtrConstant(10)));
+ EXPECT_THAT(store_reg_node,
+ m.IsStore(c::StoreRepresentation(MachineRepresentation::kTagged,
+ kNoWriteBarrier),
+ c::IsLoadParentFramePointer(),
+ c::IsWordShl(reg_index_node,
+ c::IsIntPtrConstant(kPointerSizeLog2)),
+ store_value));
}
}
@@ -526,31 +436,32 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
{
Node* index = m.IntPtrConstant(2);
Node* load_constant = m.LoadConstantPoolEntry(index);
- Matcher<Node*> constant_pool_matcher =
- m.IsLoad(MachineType::AnyTagged(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
- IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
- kHeapObjectTag));
- EXPECT_THAT(load_constant,
- m.IsLoad(MachineType::AnyTagged(), constant_pool_matcher,
- IsIntPtrConstant(FixedArray::OffsetOfElementAt(2) -
- kHeapObjectTag)));
+ Matcher<Node*> constant_pool_matcher = m.IsLoad(
+ MachineType::AnyTagged(),
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
+ kHeapObjectTag));
+ EXPECT_THAT(
+ load_constant,
+ m.IsLoad(MachineType::AnyTagged(), constant_pool_matcher,
+ c::IsIntPtrConstant(FixedArray::OffsetOfElementAt(2) -
+ kHeapObjectTag)));
}
{
Node* index = m.Parameter(2);
Node* load_constant = m.LoadConstantPoolEntry(index);
- Matcher<Node*> constant_pool_matcher =
- m.IsLoad(MachineType::AnyTagged(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
- IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
- kHeapObjectTag));
+ Matcher<Node*> constant_pool_matcher = m.IsLoad(
+ MachineType::AnyTagged(),
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
+ kHeapObjectTag));
EXPECT_THAT(
load_constant,
m.IsLoad(
MachineType::AnyTagged(), constant_pool_matcher,
- IsIntPtrAdd(
- IsIntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
- IsWordShl(index, IsIntPtrConstant(kPointerSizeLog2)))));
+ c::IsIntPtrAdd(
+ c::IsIntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
+ c::IsWordShl(index, c::IsIntPtrConstant(kPointerSizeLog2)))));
}
}
}
@@ -564,7 +475,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadObjectField) {
Node* load_field = m.LoadObjectField(object, offset);
EXPECT_THAT(load_field,
m.IsLoad(MachineType::AnyTagged(), object,
- IsIntPtrConstant(offset - kHeapObjectTag)));
+ c::IsIntPtrConstant(offset - kHeapObjectTag)));
}
}
@@ -576,8 +487,8 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime2) {
Node* arg2 = m.Int32Constant(3);
Node* context = m.Int32Constant(4);
Node* call_runtime = m.CallRuntime(Runtime::kAdd, context, arg1, arg2);
- EXPECT_THAT(call_runtime,
- IsCall(_, _, arg1, arg2, _, IsInt32Constant(2), context, _, _));
+ EXPECT_THAT(call_runtime, c::IsCall(_, _, arg1, arg2, _,
+ c::IsInt32Constant(2), context, _, _));
}
}
@@ -596,21 +507,21 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime) {
Node* arg_count = m.Int32Constant(2);
Node* context = m.IntPtrConstant(4);
- Matcher<Node*> function_table = IsExternalConstant(
+ Matcher<Node*> function_table = c::IsExternalConstant(
ExternalReference::runtime_function_table_address(isolate()));
- Matcher<Node*> function = IsIntPtrAdd(
+ Matcher<Node*> function = c::IsIntPtrAdd(
function_table,
- IsChangeUint32ToWord(IsInt32Mul(
- function_id, IsInt32Constant(sizeof(Runtime::Function)))));
+ c::IsChangeUint32ToWord(c::IsInt32Mul(
+ function_id, c::IsInt32Constant(sizeof(Runtime::Function)))));
Matcher<Node*> function_entry =
m.IsLoad(MachineType::Pointer(), function,
- IsIntPtrConstant(offsetof(Runtime::Function, entry)));
+ c::IsIntPtrConstant(offsetof(Runtime::Function, entry)));
Node* call_runtime = m.CallRuntimeN(function_id, context, first_arg,
arg_count, result_size);
EXPECT_THAT(call_runtime,
- IsCall(_, IsHeapConstant(builtin.code()), arg_count,
- first_arg, function_entry, context, _, _));
+ c::IsCall(_, c::IsHeapConstant(builtin.code()), arg_count,
+ first_arg, function_entry, context, _, _));
}
}
}
@@ -623,16 +534,17 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadFeedbackVector) {
Node* feedback_vector = m.LoadFeedbackVector();
Matcher<Node*> load_function_matcher =
- m.IsLoad(MachineType::AnyTagged(), IsLoadParentFramePointer(),
- IsIntPtrConstant(Register::function_closure().ToOperand()
- << kPointerSizeLog2));
- Matcher<Node*> load_vector_cell_matcher = m.IsLoad(
- MachineType::AnyTagged(), load_function_matcher,
- IsIntPtrConstant(JSFunction::kFeedbackVectorOffset - kHeapObjectTag));
+ m.IsLoad(MachineType::AnyTagged(), c::IsLoadParentFramePointer(),
+ c::IsIntPtrConstant(Register::function_closure().ToOperand()
+ << kPointerSizeLog2));
+ Matcher<Node*> load_vector_cell_matcher =
+ m.IsLoad(MachineType::AnyTagged(), load_function_matcher,
+ c::IsIntPtrConstant(JSFunction::kFeedbackVectorOffset -
+ kHeapObjectTag));
EXPECT_THAT(
feedback_vector,
m.IsLoad(MachineType::AnyTagged(), load_vector_cell_matcher,
- IsIntPtrConstant(Cell::kValueOffset - kHeapObjectTag)));
+ c::IsIntPtrConstant(Cell::kValueOffset - kHeapObjectTag)));
}
}
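Context for the c:: prefixes introduced throughout this hunk: the Is* matcher factories (IsIntPtrConstant, IsLoadParentFramePointer, and friends) live in v8::internal::compiler, and the explicit qualification replaces the old using-directives. The alias declaration itself sits near the top of interpreter-assembler-unittest.cc, outside this diff, so its exact spelling here is an assumption. A minimal standalone sketch of the pattern, with stand-in names:

    // Stand-in for v8::internal::compiler; the real alias spelling is an
    // assumption, since the declaration is outside this diff.
    namespace compiler_stub {
    inline int IsIntPtrConstant(int v) { return v; }  // stand-in matcher factory
    }  // namespace compiler_stub

    namespace c = compiler_stub;  // short alias behind the c:: call sites

    int main() { return c::IsIntPtrConstant(0); }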
diff --git a/deps/v8/test/unittests/libplatform/default-platform-unittest.cc b/deps/v8/test/unittests/libplatform/default-platform-unittest.cc
index 73816596cf..f9b3e0b98f 100644
--- a/deps/v8/test/unittests/libplatform/default-platform-unittest.cc
+++ b/deps/v8/test/unittests/libplatform/default-platform-unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/libplatform/default-platform.h"
+#include "src/base/platform/time.h"
#include "testing/gmock/include/gmock/gmock.h"
using testing::InSequence;
@@ -10,6 +11,7 @@ using testing::StrictMock;
namespace v8 {
namespace platform {
+namespace default_platform_unittest {
namespace {
@@ -30,6 +32,9 @@ class DefaultPlatformWithMockTime : public DefaultPlatform {
DefaultPlatformWithMockTime()
: DefaultPlatform(IdleTaskSupport::kEnabled), time_(0) {}
double MonotonicallyIncreasingTime() override { return time_; }
+ double CurrentClockTimeMillis() override {
+ return time_ * base::Time::kMillisecondsPerSecond;
+ }
void IncreaseTime(double seconds) { time_ += seconds; }
private:
@@ -162,5 +167,6 @@ TEST(DefaultPlatformTest, PendingIdleTasksAreDestroyedOnShutdown) {
}
}
+} // namespace default_platform_unittest
} // namespace platform
} // namespace v8
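The new CurrentClockTimeMillis override derives the wall-clock value from the same time_ counter as MonotonicallyIncreasingTime, so the two clocks cannot drift apart under IncreaseTime. A standalone sketch of that pattern (stand-in names, not the V8 classes):

    #include <cassert>

    struct MockClock {
      double seconds = 0;
      double MonotonicallyIncreasingTime() const { return seconds; }
      // 1000.0 mirrors base::Time::kMillisecondsPerSecond.
      double CurrentClockTimeMillis() const { return seconds * 1000.0; }
      void IncreaseTime(double s) { seconds += s; }
    };

    int main() {
      MockClock clock;
      clock.IncreaseTime(2.5);
      assert(clock.CurrentClockTimeMillis() == 2500.0);
      return 0;
    }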
diff --git a/deps/v8/test/unittests/libplatform/task-queue-unittest.cc b/deps/v8/test/unittests/libplatform/task-queue-unittest.cc
index 2de104b9c0..9bb160dd31 100644
--- a/deps/v8/test/unittests/libplatform/task-queue-unittest.cc
+++ b/deps/v8/test/unittests/libplatform/task-queue-unittest.cc
@@ -13,6 +13,7 @@ using testing::StrictMock;
namespace v8 {
namespace platform {
+namespace task_queue_unittest {
namespace {
@@ -56,5 +57,6 @@ TEST(TaskQueueTest, TerminateMultipleReaders) {
thread2.Join();
}
+} // namespace task_queue_unittest
} // namespace platform
} // namespace v8
diff --git a/deps/v8/test/unittests/libplatform/worker-thread-unittest.cc b/deps/v8/test/unittests/libplatform/worker-thread-unittest.cc
index f0b41e78dd..7079d22eb6 100644
--- a/deps/v8/test/unittests/libplatform/worker-thread-unittest.cc
+++ b/deps/v8/test/unittests/libplatform/worker-thread-unittest.cc
@@ -24,6 +24,25 @@ struct MockTask : public Task {
} // namespace
+// Needs to be in v8::platform due to BlockUntilQueueEmptyForTesting
+// being private.
+TEST(WorkerThreadTest, PostSingleTask) {
+ TaskQueue queue;
+ WorkerThread thread1(&queue);
+ WorkerThread thread2(&queue);
+
+ InSequence s;
+ StrictMock<MockTask>* task = new StrictMock<MockTask>;
+ EXPECT_CALL(*task, Run());
+ EXPECT_CALL(*task, Die());
+ queue.Append(task);
+
+ // The next call should not time out.
+ queue.BlockUntilQueueEmptyForTesting();
+ queue.Terminate();
+}
+
+namespace worker_thread_unittest {
TEST(WorkerThreadTest, Basic) {
static const size_t kNumTasks = 10;
@@ -44,21 +63,6 @@ TEST(WorkerThreadTest, Basic) {
queue.Terminate();
}
-TEST(WorkerThreadTest, PostSingleTask) {
- TaskQueue queue;
- WorkerThread thread1(&queue);
- WorkerThread thread2(&queue);
-
- InSequence s;
- StrictMock<MockTask>* task = new StrictMock<MockTask>;
- EXPECT_CALL(*task, Run());
- EXPECT_CALL(*task, Die());
- queue.Append(task);
-
- // The next call should not time out.
- queue.BlockUntilQueueEmptyForTesting();
- queue.Terminate();
-}
-
+} // namespace worker_thread_unittest
} // namespace platform
} // namespace v8
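The relocated PostSingleTask test relies on a common gmock ordering idiom: while an InSequence guard is in scope, EXPECT_CALLs must be satisfied in declaration order, so Run() is required to happen before Die(). A minimal self-contained sketch of that idiom (assumes gtest/gmock are available and uses the older MOCK_METHOD0 macros to match the era; MockTask here is a stand-in, not the one above):

    #include "gmock/gmock.h"
    #include "gtest/gtest.h"

    struct Task {
      virtual ~Task() = default;
      virtual void Run() = 0;
      virtual void Die() = 0;
    };

    struct MockTask : Task {
      MOCK_METHOD0(Run, void());
      MOCK_METHOD0(Die, void());
    };

    TEST(InSequenceSketch, RunThenDie) {
      testing::InSequence s;  // expectations below must match in this order
      MockTask task;
      EXPECT_CALL(task, Run());
      EXPECT_CALL(task, Die());
      task.Run();
      task.Die();  // calling Die() before Run() would fail the test
    }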
diff --git a/deps/v8/test/unittests/test-utils.cc b/deps/v8/test/unittests/test-utils.cc
index 3a58bbb569..f433926e53 100644
--- a/deps/v8/test/unittests/test-utils.cc
+++ b/deps/v8/test/unittests/test-utils.cc
@@ -8,7 +8,6 @@
#include "src/base/platform/time.h"
#include "src/flags.h"
#include "src/isolate.h"
-#include "src/list-inl.h"
#include "src/objects-inl.h"
#include "src/v8.h"
@@ -101,9 +100,9 @@ SaveFlags::SaveFlags() { non_default_flags_ = FlagList::argv(); }
SaveFlags::~SaveFlags() {
FlagList::ResetAllFlags();
- int argc = non_default_flags_->length();
+ int argc = static_cast<int>(non_default_flags_->size());
FlagList::SetFlagsFromCommandLine(
- &argc, const_cast<char**>(non_default_flags_->begin()),
+ &argc, const_cast<char**>(non_default_flags_->data()),
false /* remove_flags */);
for (auto flag = non_default_flags_->begin();
flag != non_default_flags_->end(); ++flag) {
diff --git a/deps/v8/test/unittests/test-utils.h b/deps/v8/test/unittests/test-utils.h
index a5a4e5969b..3d832e6500 100644
--- a/deps/v8/test/unittests/test-utils.h
+++ b/deps/v8/test/unittests/test-utils.h
@@ -5,10 +5,11 @@
#ifndef V8_UNITTESTS_TEST_UTILS_H_
#define V8_UNITTESTS_TEST_UTILS_H_
+#include <vector>
+
#include "include/v8.h"
#include "src/base/macros.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/list.h"
#include "src/zone/accounting-allocator.h"
#include "src/zone/zone.h"
#include "testing/gtest-support.h"
@@ -142,7 +143,7 @@ class SaveFlags {
~SaveFlags();
private:
- List<const char*>* non_default_flags_;
+ std::vector<const char*>* non_default_flags_;
DISALLOW_COPY_AND_ASSIGN(SaveFlags);
};
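The two test-utils hunks are a mechanical migration from V8's in-house List<T> to std::vector<T>: length() becomes size() (with a narrowing cast for the int argc), and begin() as a raw-pointer accessor becomes data(). A standalone sketch of the mapping (names are illustrative):

    #include <cassert>
    #include <vector>

    int main() {
      std::vector<const char*> flags{"--foo", "--bar"};
      int argc = static_cast<int>(flags.size());  // was: non_default_flags_->length()
      const char** argv = flags.data();           // was: non_default_flags_->begin()
      assert(argc == 2 && argv[0] == flags[0]);
      return 0;
    }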
diff --git a/deps/v8/test/unittests/unicode-unittest.cc b/deps/v8/test/unittests/unicode-unittest.cc
index 67edfb7331..c4df42c1c6 100644
--- a/deps/v8/test/unittests/unicode-unittest.cc
+++ b/deps/v8/test/unittests/unicode-unittest.cc
@@ -4,8 +4,10 @@
#include <memory>
#include <string>
+#include <vector>
#include "src/unicode-decoder.h"
+#include "src/unicode-inl.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
@@ -23,6 +25,30 @@ void Decode(Utf8Decoder* decoder, const std::string& str) {
decoder->Reset(buffer.get(), str.length());
}
+void DecodeNormally(const std::vector<byte>& bytes,
+ std::vector<unibrow::uchar>* output) {
+ size_t cursor = 0;
+ while (cursor < bytes.size()) {
+ output->push_back(
+ unibrow::Utf8::ValueOf(bytes.data() + cursor, bytes.size(), &cursor));
+ }
+}
+
+void DecodeIncrementally(const std::vector<byte>& bytes,
+ std::vector<unibrow::uchar>* output) {
+ unibrow::Utf8::Utf8IncrementalBuffer buffer = 0;
+ for (auto b : bytes) {
+ unibrow::uchar result = unibrow::Utf8::ValueOfIncremental(b, &buffer);
+ if (result != unibrow::Utf8::kIncomplete) {
+ output->push_back(result);
+ }
+ }
+ unibrow::uchar result = unibrow::Utf8::ValueOfIncrementalFinish(&buffer);
+ if (result != unibrow::Utf8::kBufferEmpty) {
+ output->push_back(result);
+ }
+}
+
} // namespace
TEST(UnicodeTest, ReadOffEndOfUtf8String) {
@@ -35,5 +61,382 @@ TEST(UnicodeTest, ReadOffEndOfUtf8String) {
Decode(&decoder, "\xF4");
}
+TEST(UnicodeTest, IncrementalUTF8DecodingVsNonIncrementalUtf8Decoding) {
+ // Unfortunately, V8 has two UTF-8 decoders. This test checks that they
+ // produce the same result. This test was inspired by
+ // https://www.cl.cam.ac.uk/~mgk25/ucs/examples/UTF-8-test.txt .
+ typedef struct {
+ std::vector<byte> bytes;
+ std::vector<unibrow::uchar> unicode_expected;
+ } TestCase;
+
+ TestCase data[] = {
+ // Correct UTF-8 text.
+ {{0xce, 0xba, 0xe1, 0xbd, 0xb9, 0xcf, 0x83, 0xce, 0xbc, 0xce, 0xb5},
+ {0x3ba, 0x1f79, 0x3c3, 0x3bc, 0x3b5}},
+
+ // First possible sequence of a certain length:
+ // 1 byte
+ {{0x00}, {0x0}},
+ // 2 bytes
+ {{0xc2, 0x80}, {0x80}},
+ // 3 bytes
+ {{0xe0, 0xa0, 0x80}, {0x800}},
+ // 4 bytes
+ {{0xf0, 0x90, 0x80, 0x80}, {0x10000}},
+ // 5 bytes (not supported)
+ {{0xf8, 0x88, 0x80, 0x80, 0x80},
+ {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ // 6 bytes (not supported)
+ {{0xfc, 0x84, 0x80, 0x80, 0x80, 0x80},
+ {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+
+ // Last possible sequence of a certain length:
+ // 1 byte
+ {{0x7f}, {0x7f}},
+ // 2 bytes
+ {{0xdf, 0xbf}, {0x7ff}},
+ // 3 bytes
+ {{0xef, 0xbf, 0xbf}, {0xffff}},
+ // 4 bytes (this sequence is not a valid code point)
+ {{0xf7, 0xbf, 0xbf, 0xbf}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ // 5 bytes (not supported)
+ {{0xfb, 0xbf, 0xbf, 0xbf, 0xbf},
+ {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ // 6 bytes (not supported)
+ {{0xfd, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf},
+ {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ // Other boundary conditions:
+ {{0xed, 0x9f, 0xbf}, {0xd7ff}},
+ {{0xee, 0x80, 0x80}, {0xe000}},
+ // U+fffd (the replacement character, itself a valid code point)
+ {{0xef, 0xbf, 0xbd}, {0xfffd}},
+ // U+10ffff (last valid code point)
+ {{0xf4, 0x8f, 0xbf, 0xbf}, {0x10ffff}},
+ // First invalid (too large) code point
+ {{0xf4, 0x90, 0x80, 0x80}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+
+ // Malformed sequences:
+ // Unexpected continuation bytes:
+ // First continuation byte
+ {{0x80}, {0xfffd}},
+ // Last continuation byte
+ {{0xbf}, {0xfffd}},
+ // 2 continuation bytes
+ {{0x80, 0xbf}, {0xfffd, 0xfffd}},
+ // 3 continuation bytes
+ {{0x80, 0xbf, 0x80}, {0xfffd, 0xfffd, 0xfffd}},
+ // 4 continuation bytes
+ {{0x80, 0xbf, 0x80, 0xbf}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ // 5 continuation bytes
+ {{0x80, 0xbf, 0x80, 0xbf, 0x80},
+ {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ // 6 continuation bytes
+ {{0x80, 0xbf, 0x80, 0xbf, 0x80, 0xbf},
+ {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ // 7 continuation bytes
+ {{0x80, 0xbf, 0x80, 0xbf, 0x80, 0xbf, 0xbf},
+ {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ // Sequence of all 64 possible continuation bytes
+ {{0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a,
+ 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95,
+ 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0,
+ 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab,
+ 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
+ 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf},
+ {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd,
+ 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd,
+ 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd,
+ 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd,
+ 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd,
+ 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd,
+ 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd,
+ 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ // Using each possible continuation byte in a two-byte sequence:
+ {{0xd0, 0x80, 0xd0, 0x81, 0xd0, 0x82, 0xd0, 0x83, 0xd0, 0x84, 0xd0, 0x85,
+ 0xd0, 0x86, 0xd0, 0x87, 0xd0, 0x88, 0xd0, 0x89, 0xd0, 0x8a, 0xd0, 0x8b,
+ 0xd0, 0x8c, 0xd0, 0x8d, 0xd0, 0x8e, 0xd0, 0x8f, 0xd0, 0x90, 0xd0, 0x91,
+ 0xd0, 0x92, 0xd0, 0x93, 0xd0, 0x94, 0xd0, 0x95, 0xd0, 0x96, 0xd0, 0x97,
+ 0xd0, 0x98, 0xd0, 0x99, 0xd0, 0x9a, 0xd0, 0x9b, 0xd0, 0x9c, 0xd0, 0x9d,
+ 0xd0, 0x9e, 0xd0, 0x9f, 0xd0, 0xa0, 0xd0, 0xa1, 0xd0, 0xa2, 0xd0, 0xa3,
+ 0xd0, 0xa4, 0xd0, 0xa5, 0xd0, 0xa6, 0xd0, 0xa7, 0xd0, 0xa8, 0xd0, 0xa9,
+ 0xd0, 0xaa, 0xd0, 0xab, 0xd0, 0xac, 0xd0, 0xad, 0xd0, 0xae, 0xd0, 0xaf,
+ 0xd0, 0xb0, 0xd0, 0xb1, 0xd0, 0xb2, 0xd0, 0xb3, 0xd0, 0xb4, 0xd0, 0xb5,
+ 0xd0, 0xb6, 0xd0, 0xb7, 0xd0, 0xb8, 0xd0, 0xb9, 0xd0, 0xba, 0xd0, 0xbb,
+ 0xd0, 0xbc, 0xd0, 0xbd, 0xd0, 0xbe, 0xd0, 0xbf},
+ {0x400, 0x401, 0x402, 0x403, 0x404, 0x405, 0x406, 0x407, 0x408, 0x409,
+ 0x40a, 0x40b, 0x40c, 0x40d, 0x40e, 0x40f, 0x410, 0x411, 0x412, 0x413,
+ 0x414, 0x415, 0x416, 0x417, 0x418, 0x419, 0x41a, 0x41b, 0x41c, 0x41d,
+ 0x41e, 0x41f, 0x420, 0x421, 0x422, 0x423, 0x424, 0x425, 0x426, 0x427,
+ 0x428, 0x429, 0x42a, 0x42b, 0x42c, 0x42d, 0x42e, 0x42f, 0x430, 0x431,
+ 0x432, 0x433, 0x434, 0x435, 0x436, 0x437, 0x438, 0x439, 0x43a, 0x43b,
+ 0x43c, 0x43d, 0x43e, 0x43f}},
+
+ // Lonely first bytes:
+ // All 32 first bytes of 2-byte sequences, each followed by a space
+ // (generates 32 invalid char + space sequences):
+ {{0xc0, 0x20, 0xc1, 0x20, 0xc2, 0x20, 0xc3, 0x20, 0xc4, 0x20, 0xc5,
+ 0x20, 0xc6, 0x20, 0xc7, 0x20, 0xc8, 0x20, 0xc9, 0x20, 0xca, 0x20,
+ 0xcb, 0x20, 0xcc, 0x20, 0xcd, 0x20, 0xce, 0x20, 0xcf, 0x20, 0xd0,
+ 0x20, 0xd1, 0x20, 0xd2, 0x20, 0xd3, 0x20, 0xd4, 0x20, 0xd5, 0x20,
+ 0xd6, 0x20, 0xd7, 0x20, 0xd8, 0x20, 0xd9, 0x20, 0xda, 0x20, 0xdb,
+ 0x20, 0xdc, 0x20, 0xdd, 0x20, 0xde, 0x20, 0xdf, 0x20},
+ {0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20,
+ 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20,
+ 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20,
+ 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20,
+ 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20,
+ 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20,
+ 0xfffd, 0x20, 0xfffd, 0x20}},
+ // All 16 first bytes of 3-byte sequences, each followed by a space
+ // (generates 16 invalid char + space sequences):
+ {{0xe0, 0x20, 0xe1, 0x20, 0xe2, 0x20, 0xe3, 0x20, 0xe4, 0x20, 0xe5,
+ 0x20, 0xe6, 0x20, 0xe7, 0x20, 0xe8, 0x20, 0xe9, 0x20, 0xea, 0x20,
+ 0xeb, 0x20, 0xec, 0x20, 0xed, 0x20, 0xee, 0x20, 0xef, 0x20},
+ {0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20,
+ 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20,
+ 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20,
+ 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20}},
+ // All 8 first bytes of 4-byte sequences, each followed by a space
+ // (generates 8 invalid char + space sequences):
+ {{0xf0, 0x20, 0xf1, 0x20, 0xf2, 0x20, 0xf3, 0x20, 0xf4, 0x20, 0xf5, 0x20,
+ 0xf6, 0x20, 0xf7, 0x20},
+ {0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20,
+ 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20}},
+ // All 4 first bytes of 5-byte sequences (not supported), each followed by
+ // a space (generates 4 invalid char + space sequences):
+ {{0xf8, 0x20, 0xf9, 0x20, 0xfa, 0x20, 0xfb, 0x20},
+ {0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20, 0xfffd, 0x20}},
+ // All 2 first bytes of 6-byte sequences (not supported), each followed by
+ // a space (generates 2 invalid char + space sequences):
+ {{0xfc, 0x20, 0xfd, 0x20}, {0xfffd, 0x20, 0xfffd, 0x20}},
+
+ // Sequences with last continuation byte missing. Normally the whole
+ // incomplete sequence generates a single invalid character (exceptions
+ // explained below).
+
+ // 2-byte sequences with last byte missing
+ {{0xc0}, {0xfffd}},
+ {{0xdf}, {0xfffd}},
+ // 3-byte sequences with last byte missing.
+ {{0xe8, 0x80}, {0xfffd}},
+ {{0xe0, 0xbf}, {0xfffd}},
+ {{0xef, 0xbf}, {0xfffd}},
+ // Start of an overlong sequence. The first "maximal subpart" is the first
+ // byte; it creates an invalid character. Each following byte generates an
+ // invalid character too.
+ {{0xe0, 0x80}, {0xfffd, 0xfffd}},
+ // 4-byte sequences with last byte missing
+ {{0xf1, 0x80, 0x80}, {0xfffd}},
+ {{0xf4, 0x8f, 0xbf}, {0xfffd}},
+ // Start of an overlong sequence. The first "maximal subpart" is the first
+ // byte; it creates an invalid character. Each following byte generates an
+ // invalid character too.
+ {{0xf0, 0x80, 0x80}, {0xfffd, 0xfffd, 0xfffd}},
+ // 5-byte sequences (not supported) with last byte missing
+ {{0xf8, 0x80, 0x80, 0x80}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xfb, 0xbf, 0xbf, 0xbf}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ // 6-byte sequences (not supported) with last byte missing
+ {{0xfc, 0x80, 0x80, 0x80, 0x80},
+ {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xfd, 0xbf, 0xbf, 0xbf, 0xbf},
+ {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+
+ // Concatenation of incomplete sequences: all of the incomplete
+ // sequences above, concatenated.
+ {{0xc0, 0xdf, 0xe8, 0x80, 0xe0, 0xbf, 0xef, 0xbf, 0xe0, 0x80,
+ 0xf1, 0x80, 0x80, 0xf4, 0x8f, 0xbf, 0xf0, 0x80, 0x80, 0xf8,
+ 0x80, 0x80, 0x80, 0xfb, 0xbf, 0xbf, 0xbf, 0xfc, 0x80, 0x80,
+ 0x80, 0x80, 0xfd, 0xbf, 0xbf, 0xbf, 0xbf},
+ {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd,
+ 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd,
+ 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd,
+ 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+
+ // Incomplete sequence tests repeated with a space after the incomplete
+ // sequence.
+
+ // 2-byte sequences with last byte missing
+ {{0xc0, 0x20}, {0xfffd, 0x20}},
+ {{0xdf, 0x20}, {0xfffd, 0x20}},
+ // 3-byte sequences with last byte missing
+ {{0xe8, 0x80, 0x20}, {0xfffd, 0x20}},
+ {{0xe0, 0xbf, 0x20}, {0xfffd, 0x20}},
+ {{0xef, 0xbf, 0x20}, {0xfffd, 0x20}},
+ // Start of overlong 3-byte sequence with last byte missing
+ {{0xe0, 0x80, 0x20}, {0xfffd, 0xfffd, 0x20}},
+ // 4-byte sequences with last byte missing
+ {{0xf1, 0x80, 0x80, 0x20}, {0xfffd, 0x20}},
+ {{0xf4, 0x8f, 0xbf, 0x20}, {0xfffd, 0x20}},
+ // Start of overlong 4-byte sequence with last byte missing
+ {{0xf0, 0x80, 0x80, 0x20}, {0xfffd, 0xfffd, 0xfffd, 0x20}},
+ // 5-byte sequences (not supported) with last byte missing
+ {{0xf8, 0x80, 0x80, 0x80, 0x20}, {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0x20}},
+ {{0xfb, 0xbf, 0xbf, 0xbf, 0x20}, {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0x20}},
+ // 6-byte sequences (not supported) with last byte missing
+ {{0xfc, 0x80, 0x80, 0x80, 0x80, 0x20},
+ {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0x20}},
+ {{0xfd, 0xbf, 0xbf, 0xbf, 0xbf, 0x20},
+ {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0x20}},
+
+ // Impossible bytes
+ {{0xfe}, {0xfffd}},
+ {{0xff}, {0xfffd}},
+ {{0xfe, 0xfe, 0xff, 0xff}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ // Lead-byte-like bytes which aren't valid lead bytes.
+ {{0xc0}, {0xfffd}},
+ {{0xc0, 0xaa}, {0xfffd, 0xfffd}},
+ {{0xc1}, {0xfffd}},
+ {{0xc1, 0xaa}, {0xfffd, 0xfffd}},
+ {{0xf5}, {0xfffd}},
+ {{0xf5, 0xaa, 0xaa, 0xaa}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xf6}, {0xfffd}},
+ {{0xf6, 0xaa, 0xaa, 0xaa}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xf7}, {0xfffd}},
+ {{0xf7, 0xaa, 0xaa, 0xaa}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xf8}, {0xfffd}},
+ {{0xf8, 0xaa, 0xaa, 0xaa}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xf9}, {0xfffd}},
+ {{0xf9, 0xaa, 0xaa, 0xaa}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xfa}, {0xfffd}},
+ {{0xfa, 0xaa, 0xaa, 0xaa}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xfb}, {0xfffd}},
+ {{0xfb, 0xaa, 0xaa, 0xaa}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xfc}, {0xfffd}},
+ {{0xfc, 0xaa, 0xaa, 0xaa}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xfd}, {0xfffd}},
+ {{0xfd, 0xaa, 0xaa, 0xaa}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xfe}, {0xfffd}},
+ {{0xfe, 0xaa, 0xaa, 0xaa}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xff}, {0xfffd}},
+ {{0xff, 0xaa, 0xaa, 0xaa}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+
+ // Overlong sequences:
+
+ // Overlong encodings for "/"
+ {{0xc0, 0xaf}, {0xfffd, 0xfffd}},
+ {{0xe0, 0x80, 0xaf}, {0xfffd, 0xfffd, 0xfffd}},
+ {{0xf0, 0x80, 0x80, 0xaf}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ // 5-byte sequence (not supported anyway)
+ {{0xf8, 0x80, 0x80, 0x80, 0xaf},
+ {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ // 6-byte sequence (not supported anyway)
+ {{0xfc, 0x80, 0x80, 0x80, 0x80, 0xaf},
+ {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+
+ // Maximum overlong sequences
+ {{0xc1, 0xbf}, {0xfffd, 0xfffd}},
+ {{0xe0, 0x9f, 0xbf}, {0xfffd, 0xfffd, 0xfffd}},
+ {{0xf0, 0x8f, 0xbf, 0xbf}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ // 5-byte sequence (not supported anyway)
+ {{0xf8, 0x87, 0xbf, 0xbf, 0xbf},
+ {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ // 6-byte sequence (not supported anyway)
+ {{0xfc, 0x83, 0xbf, 0xbf, 0xbf, 0xbf},
+ {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+
+ // Overlong encodings for 0
+ {{0xc0, 0x80}, {0xfffd, 0xfffd}},
+ {{0xe0, 0x80, 0x80}, {0xfffd, 0xfffd, 0xfffd}},
+ {{0xf0, 0x80, 0x80, 0x80}, {0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ // 5-byte sequence (not supported anyway)
+ {{0xf8, 0x80, 0x80, 0x80, 0x80},
+ {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ // 6-byte sequence (not supported anyway)
+ {{0xfc, 0x80, 0x80, 0x80, 0x80, 0x80},
+ {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+
+ // Illegal code positions:
+
+ // Single UTF-16 surrogates
+ {{0xed, 0xa0, 0x80}, {0xfffd, 0xfffd, 0xfffd}},
+ {{0xed, 0xa0, 0x80}, {0xfffd, 0xfffd, 0xfffd}},
+ {{0xed, 0xad, 0xbf}, {0xfffd, 0xfffd, 0xfffd}},
+ {{0xed, 0xae, 0x80}, {0xfffd, 0xfffd, 0xfffd}},
+ {{0xed, 0xaf, 0xbf}, {0xfffd, 0xfffd, 0xfffd}},
+ {{0xed, 0xb0, 0x80}, {0xfffd, 0xfffd, 0xfffd}},
+ {{0xed, 0xbe, 0x80}, {0xfffd, 0xfffd, 0xfffd}},
+ {{0xed, 0xbf, 0xbf}, {0xfffd, 0xfffd, 0xfffd}},
+
+ // Paired surrogates
+ {{0xed, 0xa0, 0x80, 0xed, 0xb0, 0x80},
+ {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xed, 0xa0, 0x80, 0xed, 0xbf, 0xbf},
+ {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xed, 0xad, 0xbf, 0xed, 0xb0, 0x80},
+ {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xed, 0xad, 0xbf, 0xed, 0xbf, 0xbf},
+ {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xed, 0xae, 0x80, 0xed, 0xb0, 0x80},
+ {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xed, 0xae, 0x80, 0xed, 0xbf, 0xbf},
+ {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xed, 0xaf, 0xbf, 0xed, 0xb0, 0x80},
+ {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+ {{0xed, 0xaf, 0xbf, 0xed, 0xbf, 0xbf},
+ {0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd, 0xfffd}},
+
+ // Surrogates with the last byte missing.
+ {{0xed, 0xa0}, {0xfffd, 0xfffd}},
+ {{0xed, 0xa0}, {0xfffd, 0xfffd}},
+ {{0xed, 0xad}, {0xfffd, 0xfffd}},
+ {{0xed, 0xae}, {0xfffd, 0xfffd}},
+ {{0xed, 0xaf}, {0xfffd, 0xfffd}},
+ {{0xed, 0xb0}, {0xfffd, 0xfffd}},
+ {{0xed, 0xbe}, {0xfffd, 0xfffd}},
+ {{0xed, 0xbf}, {0xfffd, 0xfffd}},
+
+ // Other non-characters
+ {{0xef, 0xbf, 0xbe}, {0xfffe}},
+ {{0xef, 0xbf, 0xbf}, {0xffff}},
+ {{0xef, 0xb7, 0x90, 0xef, 0xb7, 0x91, 0xef, 0xb7, 0x92, 0xef, 0xb7, 0x93,
+ 0xef, 0xb7, 0x94, 0xef, 0xb7, 0x95, 0xef, 0xb7, 0x96, 0xef, 0xb7, 0x97,
+ 0xef, 0xb7, 0x98, 0xef, 0xb7, 0x99, 0xef, 0xb7, 0x9a, 0xef, 0xb7, 0x9b,
+ 0xef, 0xb7, 0x9c, 0xef, 0xb7, 0x9d, 0xef, 0xb7, 0x9e, 0xef, 0xb7, 0x9f,
+ 0xef, 0xb7, 0xa0, 0xef, 0xb7, 0xa1, 0xef, 0xb7, 0xa2, 0xef, 0xb7, 0xa3,
+ 0xef, 0xb7, 0xa4, 0xef, 0xb7, 0xa5, 0xef, 0xb7, 0xa6, 0xef, 0xb7, 0xa7,
+ 0xef, 0xb7, 0xa8, 0xef, 0xb7, 0xa9, 0xef, 0xb7, 0xaa, 0xef, 0xb7, 0xab,
+ 0xef, 0xb7, 0xac, 0xef, 0xb7, 0xad, 0xef, 0xb7, 0xae, 0xef, 0xb7, 0xaf},
+ {0xfdd0, 0xfdd1, 0xfdd2, 0xfdd3, 0xfdd4, 0xfdd5, 0xfdd6, 0xfdd7,
+ 0xfdd8, 0xfdd9, 0xfdda, 0xfddb, 0xfddc, 0xfddd, 0xfdde, 0xfddf,
+ 0xfde0, 0xfde1, 0xfde2, 0xfde3, 0xfde4, 0xfde5, 0xfde6, 0xfde7,
+ 0xfde8, 0xfde9, 0xfdea, 0xfdeb, 0xfdec, 0xfded, 0xfdee, 0xfdef}},
+ {{0xf0, 0x9f, 0xbf, 0xbe, 0xf0, 0x9f, 0xbf, 0xbf, 0xf0, 0xaf, 0xbf,
+ 0xbe, 0xf0, 0xaf, 0xbf, 0xbf, 0xf0, 0xbf, 0xbf, 0xbe, 0xf0, 0xbf,
+ 0xbf, 0xbf, 0xf1, 0x8f, 0xbf, 0xbe, 0xf1, 0x8f, 0xbf, 0xbf, 0xf1,
+ 0x9f, 0xbf, 0xbe, 0xf1, 0x9f, 0xbf, 0xbf, 0xf1, 0xaf, 0xbf, 0xbe,
+ 0xf1, 0xaf, 0xbf, 0xbf, 0xf1, 0xbf, 0xbf, 0xbe, 0xf1, 0xbf, 0xbf,
+ 0xbf, 0xf2, 0x8f, 0xbf, 0xbe, 0xf2, 0x8f, 0xbf, 0xbf},
+ {0x1fffe, 0x1ffff, 0x2fffe, 0x2ffff, 0x3fffe, 0x3ffff, 0x4fffe, 0x4ffff,
+ 0x5fffe, 0x5ffff, 0x6fffe, 0x6ffff, 0x7fffe, 0x7ffff, 0x8fffe,
+ 0x8ffff}},
+ };
+
+ for (auto test : data) {
+ // For figuring out which test fails:
+ fprintf(stderr, "test: ");
+ for (auto b : test.bytes) {
+ fprintf(stderr, "%x ", b);
+ }
+ fprintf(stderr, "\n");
+
+ std::vector<unibrow::uchar> output_normal;
+ DecodeNormally(test.bytes, &output_normal);
+
+ CHECK_EQ(output_normal.size(), test.unicode_expected.size());
+ for (size_t i = 0; i < output_normal.size(); ++i) {
+ CHECK_EQ(output_normal[i], test.unicode_expected[i]);
+ }
+
+ std::vector<unibrow::uchar> output_incremental;
+ DecodeIncrementally(test.bytes, &output_incremental);
+
+ CHECK_EQ(output_incremental.size(), test.unicode_expected.size());
+ for (size_t i = 0; i < output_incremental.size(); ++i) {
+ CHECK_EQ(output_incremental[i], test.unicode_expected[i]);
+ }
+ }
+}
+
} // namespace internal
} // namespace v8
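For checking the table's expected values by hand: a 4-byte UTF-8 sequence carries 3 payload bits in the lead byte and 6 in each continuation byte. A standalone sketch of that standard bit layout, verified against two entries from the table above (an illustrative helper, not V8 code):

    #include <cassert>
    #include <cstdint>

    // Standard UTF-8 4-byte decode: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx.
    uint32_t DecodeFourByte(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3) {
      return ((b0 & 0x07u) << 18) | ((b1 & 0x3Fu) << 12) |
             ((b2 & 0x3Fu) << 6) | (b3 & 0x3Fu);
    }

    int main() {
      assert(DecodeFourByte(0xF0, 0x90, 0x80, 0x80) == 0x10000u);   // first 4-byte code point
      assert(DecodeFourByte(0xF4, 0x8F, 0xBF, 0xBF) == 0x10FFFFu);  // last valid code point
      return 0;
    }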
diff --git a/deps/v8/test/unittests/unittests.gyp b/deps/v8/test/unittests/unittests.gyp
index c3f8554c26..38d071d71e 100644
--- a/deps/v8/test/unittests/unittests.gyp
+++ b/deps/v8/test/unittests/unittests.gyp
@@ -38,9 +38,13 @@
'base/utils/random-number-generator-unittest.cc',
'cancelable-tasks-unittest.cc',
'char-predicates-unittest.cc',
+ "code-stub-assembler-unittest.cc",
+ "code-stub-assembler-unittest.h",
'compiler/branch-elimination-unittest.cc',
'compiler/bytecode-analysis-unittest.cc',
'compiler/checkpoint-elimination-unittest.cc',
+ "compiler/code-assembler-unittest.cc",
+ "compiler/code-assembler-unittest.h",
'compiler/common-operator-reducer-unittest.cc',
'compiler/common-operator-unittest.cc',
'compiler/compiler-test-utils.h',
@@ -49,7 +53,6 @@
'compiler/dead-code-elimination-unittest.cc',
'compiler/diamond-unittest.cc',
'compiler/effect-control-linearizer-unittest.cc',
- 'compiler/escape-analysis-unittest.cc',
'compiler/graph-reducer-unittest.cc',
'compiler/graph-reducer-unittest.h',
'compiler/graph-trimmer-unittest.cc',
@@ -98,8 +101,10 @@
'compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc',
'compiler-dispatcher/unoptimized-compile-job-unittest.cc',
'counters-unittest.cc',
+ 'detachable-vector-unittest.cc',
'eh-frame-iterator-unittest.cc',
'eh-frame-writer-unittest.cc',
+ 'heap/barrier-unittest.cc',
'heap/bitmap-unittest.cc',
'heap/embedder-tracing-unittest.cc',
'heap/gc-idle-time-handler-unittest.cc',
@@ -150,6 +155,7 @@
'wasm/control-transfer-unittest.cc',
'wasm/decoder-unittest.cc',
'wasm/function-body-decoder-unittest.cc',
+ 'wasm/wasm-heap-unittest.cc',
'wasm/leb-helper-unittest.cc',
'wasm/loop-assignment-analysis-unittest.cc',
'wasm/module-decoder-unittest.cc',
@@ -266,7 +272,7 @@
],
}],
['v8_use_snapshot=="true"', {
- 'dependencies': ['../../src/v8.gyp:v8_builtins_generators'],
+ 'dependencies': ['../../src/v8.gyp:v8_initializers'],
}],
],
},
diff --git a/deps/v8/test/unittests/wasm/control-transfer-unittest.cc b/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
index f49ba9c862..2b1a034179 100644
--- a/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
+++ b/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
@@ -20,13 +20,6 @@ namespace v8 {
namespace internal {
namespace wasm {
-#define B1(a) kExprBlock, a, kExprEnd
-#define B2(a, b) kExprBlock, a, b, kExprEnd
-#define B3(a, b, c) kExprBlock, a, b, c, kExprEnd
-
-#define TRANSFER_VOID 0
-#define TRANSFER_ONE 1
-
struct ExpectedControlTransfer {
pc_t pc;
pcdiff_t pc_diff;
diff --git a/deps/v8/test/unittests/wasm/decoder-unittest.cc b/deps/v8/test/unittests/wasm/decoder-unittest.cc
index e0c7908b90..0f11933383 100644
--- a/deps/v8/test/unittests/wasm/decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/decoder-unittest.cc
@@ -676,6 +676,11 @@ TEST_F(DecoderTest, FailOnNullData) {
EXPECT_FALSE(decoder.toResult(nullptr).ok());
}
+#undef CHECK_UINT32V_INLINE
+#undef CHECK_INT32V_INLINE
+#undef CHECK_UINT64V_INLINE
+#undef CHECK_INT64V_INLINE
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index 5dc5b46cc5..bda1073281 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -19,9 +19,10 @@
#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-macro-gen.h"
-using namespace v8::internal;
-using namespace v8::internal::wasm;
-using namespace v8::internal::wasm::testing;
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace function_body_decoder_unittest {
#define B1(a) WASM_BLOCK(a)
#define B2(a, b) WASM_BLOCK(a, b)
@@ -523,6 +524,8 @@ TEST_F(FunctionBodyDecoderTest, Block0_end) {
EXPECT_FAILURE(v_v, WASM_EMPTY_BLOCK, kExprEnd);
}
+#undef WASM_EMPTY_BLOCK
+
TEST_F(FunctionBodyDecoderTest, Block1) {
byte code[] = {WASM_BLOCK_I(WASM_GET_LOCAL(0))};
EXPECT_VERIFIES_C(i_i, code);
@@ -1341,6 +1344,16 @@ TEST_F(FunctionBodyDecoderTest, StoreMemOffset_varint) {
VARINT4(0x44444444));
}
+#undef BYTE0
+#undef BYTE1
+#undef BYTE2
+#undef BYTE3
+
+#undef VARINT1
+#undef VARINT2
+#undef VARINT3
+#undef VARINT4
+
TEST_F(FunctionBodyDecoderTest, AllLoadMemCombinations) {
TestModuleBuilder builder;
module = builder.module();
@@ -1846,6 +1859,14 @@ TEST_F(FunctionBodyDecoderTest, BreakIfBinop_fail) {
WASM_BLOCK_I(WASM_F32_ABS(WASM_BRV_IF(0, WASM_F32(0.0f), WASM_ZERO))));
}
+TEST_F(FunctionBodyDecoderTest, BreakIfUnrNarrow) {
+ EXPECT_FAILURE_S(
+ sigs.f_ff(),
+ WASM_BLOCK_I(WASM_BRV_IF(0, WASM_UNREACHABLE, WASM_UNREACHABLE),
+ WASM_RETURN0),
+ WASM_F32(0.0));
+}
+
TEST_F(FunctionBodyDecoderTest, BreakNesting1) {
for (int i = 0; i < 5; i++) {
// (block[2] (loop[2] (if (get p) break[N]) (set p 1)) p)
@@ -2145,6 +2166,69 @@ TEST_F(FunctionBodyDecoderTest, BrTable_invalid_br2) {
}
}
+TEST_F(FunctionBodyDecoderTest, BrTable_arity_mismatch1) {
+ EXPECT_FAILURE(
+ v_v,
+ WASM_BLOCK(WASM_BLOCK_I(
+ WASM_ONE, WASM_BR_TABLE(WASM_ONE, 1, BR_TARGET(0), BR_TARGET(1)))));
+}
+
+TEST_F(FunctionBodyDecoderTest, BrTable_arity_mismatch2) {
+ EXPECT_FAILURE(
+ v_v,
+ WASM_BLOCK_I(WASM_BLOCK(
+ WASM_ONE, WASM_BR_TABLE(WASM_ONE, 1, BR_TARGET(0), BR_TARGET(1)))));
+}
+
+TEST_F(FunctionBodyDecoderTest, BrTable_arity_mismatch_loop1) {
+ EXPECT_FAILURE(
+ v_v,
+ WASM_LOOP(WASM_BLOCK_I(
+ WASM_ONE, WASM_BR_TABLE(WASM_ONE, 1, BR_TARGET(0), BR_TARGET(1)))));
+}
+
+TEST_F(FunctionBodyDecoderTest, BrTable_arity_mismatch_loop2) {
+ EXPECT_FAILURE(
+ v_v,
+ WASM_BLOCK_I(WASM_LOOP(
+ WASM_ONE, WASM_BR_TABLE(WASM_ONE, 1, BR_TARGET(0), BR_TARGET(1)))));
+}
+
+TEST_F(FunctionBodyDecoderTest, BrTable_loop_block) {
+ EXPECT_VERIFIES(
+ v_v,
+ WASM_LOOP(WASM_BLOCK(
+ WASM_ONE, WASM_BR_TABLE(WASM_ONE, 1, BR_TARGET(0), BR_TARGET(1)))));
+}
+
+TEST_F(FunctionBodyDecoderTest, BrTable_block_loop) {
+ EXPECT_VERIFIES(
+ v_v,
+ WASM_BLOCK(WASM_LOOP(
+ WASM_ONE, WASM_BR_TABLE(WASM_ONE, 1, BR_TARGET(0), BR_TARGET(1)))));
+}
+
+TEST_F(FunctionBodyDecoderTest, BrTable_type_mismatch1) {
+ EXPECT_FAILURE(
+ v_v,
+ WASM_BLOCK_I(WASM_BLOCK_F(
+ WASM_ONE, WASM_BR_TABLE(WASM_ONE, 1, BR_TARGET(0), BR_TARGET(1)))));
+}
+
+TEST_F(FunctionBodyDecoderTest, BrTable_type_mismatch2) {
+ EXPECT_FAILURE(
+ v_v,
+ WASM_BLOCK_F(WASM_BLOCK_I(
+ WASM_ONE, WASM_BR_TABLE(WASM_ONE, 1, BR_TARGET(0), BR_TARGET(1)))));
+}
+
+TEST_F(FunctionBodyDecoderTest, BrTable_type_mismatch_unreachable) {
+ EXPECT_FAILURE(v_v,
+ WASM_BLOCK_F(WASM_BLOCK_I(
+ WASM_UNREACHABLE,
+ WASM_BR_TABLE(WASM_ONE, 1, BR_TARGET(0), BR_TARGET(1)))));
+}
+
TEST_F(FunctionBodyDecoderTest, BrUnreachable1) {
EXPECT_VERIFIES(v_i, WASM_GET_LOCAL(0), kExprBrTable, 0, BR_TARGET(0));
}
@@ -2253,8 +2337,7 @@ TEST_F(FunctionBodyDecoderTest, Throw) {
// exception index out of range.
EXPECT_FAILURE(v_v, kExprThrow, 2);
- // TODO(kschimpf): Fix when we can create exceptions with values.
- EXPECT_FAILURE(v_v, WASM_I32V(0), kExprThrow, 1);
+ EXPECT_VERIFIES(v_v, WASM_I32V(0), kExprThrow, 1);
// TODO(kschimpf): Add more tests.
}
@@ -2275,7 +2358,6 @@ TEST_F(FunctionBodyDecoderTest, ThrowUnreachable) {
}
#define WASM_TRY_OP kExprTry, kLocalVoid
-
#define WASM_CATCH(index) kExprCatch, static_cast<byte>(index)
TEST_F(FunctionBodyDecoderTest, TryCatch) {
@@ -2300,6 +2382,9 @@ TEST_F(FunctionBodyDecoderTest, TryCatch) {
EXPECT_FAILURE(v_i, WASM_TRY_OP, WASM_CATCH(0), WASM_CATCH(1), kExprEnd);
}
+#undef WASM_TRY_OP
+#undef WASM_CATCH
+
TEST_F(FunctionBodyDecoderTest, MultiValBlock1) {
EXPERIMENTAL_FLAG_SCOPE(mv);
EXPECT_VERIFIES(i_ii, WASM_BLOCK_TT(kWasmI32, kWasmI32, WASM_GET_LOCAL(0),
@@ -2421,6 +2506,9 @@ TEST_F(BranchTableIteratorTest, error0) {
CHECK_BR_TABLE_ERROR(1, U32V_1(33));
}
+#undef CHECK_BR_TABLE_LENGTH
+#undef CHECK_BR_TABLE_ERROR
+
class WasmOpcodeLengthTest : public TestWithZone {
public:
WasmOpcodeLengthTest() : TestWithZone() {}
@@ -2657,11 +2745,13 @@ TEST_F(WasmOpcodeLengthTest, SimdExpressions) {
FOREACH_SIMD_1_OPERAND_OPCODE(TEST_SIMD)
#undef TEST_SIMD
EXPECT_LENGTH_N(18, kSimdPrefix, static_cast<byte>(kExprS8x16Shuffle & 0xff));
-#undef TEST_SIMD
// Test for a bad SIMD opcode.
EXPECT_LENGTH_N(2, kSimdPrefix, 0xff);
}
+#undef EXPECT_LENGTH
+#undef EXPECT_LENGTH_N
+
typedef ZoneVector<ValueType> TypesOfLocals;
class LocalDeclDecoderTest : public TestWithZone {
@@ -2845,3 +2935,23 @@ TEST_F(BytecodeIteratorTest, WithLocalDecls) {
iter.next();
EXPECT_FALSE(iter.has_next());
}
+
+#undef B1
+#undef B2
+#undef B3
+#undef WASM_IF_OP
+#undef WASM_LOOP_OP
+#undef WASM_BRV_IF_ZERO
+#undef EXPECT_VERIFIES_C
+#undef EXPECT_FAILURE_C
+#undef EXPECT_VERIFIES_SC
+#undef EXPECT_FAILURE_SC
+#undef EXPECT_VERIFIES_S
+#undef EXPECT_FAILURE_S
+#undef EXPECT_VERIFIES
+#undef EXPECT_FAILURE
+
+} // namespace function_body_decoder_unittest
+} // namespace wasm
+} // namespace internal
+} // namespace v8
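A recurring theme of this commit, visible in the #undef blocks above: helper macros are now defined next to their first use and #undef-ed after their last use, and the tests move into per-file namespaces, so neither macros nor symbols leak into other translation units (relevant for jumbo/unity-style builds). A minimal standalone sketch of that convention (0x02 and 0x0b are the wasm block/end opcodes, 0x01 is nop):

    #include <cassert>

    #define B1(a) {0x02, (a), 0x0b}  // wrap one opcode in a block ... end pair

    int main() {
      unsigned char block[] = B1(0x01);  // {kExprBlock, kExprNop, kExprEnd}
      assert(sizeof(block) == 3 && block[1] == 0x01);
      return 0;
    }

    #undef B1  // end of the macro's intended scope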
diff --git a/deps/v8/test/unittests/wasm/leb-helper-unittest.cc b/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
index 7b380c5490..474d49c1c5 100644
--- a/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
+++ b/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
@@ -108,6 +108,8 @@ DECLARE_ENCODE_DECODE_CHECKER(uint32_t, u32v)
DECLARE_ENCODE_DECODE_CHECKER(int64_t, i64v)
DECLARE_ENCODE_DECODE_CHECKER(uint64_t, u64v)
+#undef DECLARE_ENCODE_DECODE_CHECKER
+
TEST_F(LEBHelperTest, WriteAndDecode_u32v) {
CheckEncodeDecode_u32v(0);
CheckEncodeDecode_u32v(1);
diff --git a/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc b/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
index e6507b9685..d089d94ca2 100644
--- a/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
+++ b/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
@@ -14,12 +14,12 @@
#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-macro-gen.h"
-#define WASM_SET_ZERO(i) WASM_SET_LOCAL(i, WASM_ZERO)
-
namespace v8 {
namespace internal {
namespace wasm {
+#define WASM_SET_ZERO(i) WASM_SET_LOCAL(i, WASM_ZERO)
+
class WasmLoopAssignmentAnalyzerTest : public TestWithZone {
public:
WasmLoopAssignmentAnalyzerTest() : num_locals(0) {}
@@ -191,6 +191,8 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, regress_642867) {
Analyze(code, code + arraysize(code));
}
+#undef WASM_SET_ZERO
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index 377e47db4c..2e76d374d3 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -15,6 +15,7 @@
namespace v8 {
namespace internal {
namespace wasm {
+namespace module_decoder_unittest {
#define WASM_INIT_EXPR_I32V_1(val) WASM_I32V_1(val), kExprEnd
#define WASM_INIT_EXPR_I32V_2(val) WASM_I32V_2(val), kExprEnd
@@ -1649,6 +1650,8 @@ TEST_F(WasmModuleVerifyTest, InitExpr_f64) {
EXPECT_INIT_EXPR(F64, f64, 77999.1, WASM_F64(77999.1));
}
+#undef EXPECT_INIT_EXPR
+
#define EXPECT_INIT_EXPR_FAIL(...) \
{ \
static const byte data[] = {__VA_ARGS__, kExprEnd}; \
@@ -1665,6 +1668,8 @@ TEST_F(WasmModuleVerifyTest, InitExpr_illegal) {
EXPECT_INIT_EXPR_FAIL(WASM_IF_ELSE(WASM_ZERO, WASM_ZERO, WASM_ZERO));
}
+#undef EXPECT_INIT_EXPR_FAIL
+
TEST_F(WasmModuleVerifyTest, Multiple_Named_Sections) {
static const byte data[] = {
SECTION(Unknown, 4), 1, 'X', 17, 18, // --
@@ -1760,6 +1765,55 @@ TEST_F(WasmModuleCustomSectionTest, TwoKnownTwoUnknownSections) {
CheckSections(data, data + sizeof(data), expected, arraysize(expected));
}
+#undef WASM_INIT_EXPR_I32V_1
+#undef WASM_INIT_EXPR_I32V_2
+#undef WASM_INIT_EXPR_I32V_3
+#undef WASM_INIT_EXPR_I32V_4
+#undef WASM_INIT_EXPR_I32V_5
+#undef WASM_INIT_EXPR_F32
+#undef WASM_INIT_EXPR_I64
+#undef WASM_INIT_EXPR_F64
+#undef WASM_INIT_EXPR_GLOBAL
+#undef SIZEOF_EMPTY_FUNCTION
+#undef EMPTY_BODY
+#undef SIZEOF_EMPTY_BODY
+#undef NOP_BODY
+#undef SIZEOF_NOP_BODY
+#undef SIG_ENTRY_i_i
+#undef UNKNOWN_SECTION
+#undef SECTION
+#undef SIGNATURES_SECTION
+#undef FUNCTION_SIGNATURES_SECTION
+#undef FOO_STRING
+#undef NO_LOCAL_NAMES
+#undef EMPTY_SIGNATURES_SECTION
+#undef EMPTY_FUNCTION_SIGNATURES_SECTION
+#undef EMPTY_FUNCTION_BODIES_SECTION
+#undef SECTION_NAMES
+#undef SECTION_EXCEPTIONS
+#undef EMPTY_NAMES_SECTION
+#undef FAIL_IF_NO_EXPERIMENTAL_EH
+#undef X1
+#undef X2
+#undef X3
+#undef X4
+#undef ONE_EMPTY_FUNCTION
+#undef TWO_EMPTY_FUNCTIONS
+#undef THREE_EMPTY_FUNCTIONS
+#undef FOUR_EMPTY_FUNCTIONS
+#undef ONE_EMPTY_BODY
+#undef TWO_EMPTY_BODIES
+#undef THREE_EMPTY_BODIES
+#undef FOUR_EMPTY_BODIES
+#undef SIGNATURES_SECTION_VOID_VOID
+#undef LINEAR_MEMORY_INDEX_0
+#undef EXPECT_VERIFIES
+#undef EXPECT_FAILURE_LEN
+#undef EXPECT_FAILURE
+#undef EXPECT_OFF_END_FAILURE
+#undef EXPECT_OK
+
+} // namespace module_decoder_unittest
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc b/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
index 98b3e25457..2ed28125f0 100644
--- a/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
@@ -17,38 +17,121 @@ namespace v8 {
namespace internal {
namespace wasm {
+class MockStreamingProcessor : public StreamingProcessor {
+ public:
+ bool ProcessModuleHeader(Vector<const uint8_t> bytes,
+ uint32_t offset) override {
+ // TODO(ahaas): Share code with the module-decoder.
+ Decoder decoder(bytes.begin(), bytes.end());
+ uint32_t magic_word = decoder.consume_u32("wasm magic");
+ if (decoder.failed() || magic_word != kWasmMagic) {
+ ok_ = false;
+ return false;
+ }
+ uint32_t magic_version = decoder.consume_u32("wasm version");
+ if (decoder.failed() || magic_version != kWasmVersion) {
+ ok_ = false;
+ return false;
+ }
+ return true;
+ }
+ // Process all sections but the code section.
+ bool ProcessSection(SectionCode section_code, Vector<const uint8_t> bytes,
+ uint32_t offset) override {
+ ++num_sections_;
+ return true;
+ }
+
+ bool ProcessCodeSectionHeader(size_t num_functions,
+ uint32_t offset) override {
+ return true;
+ }
+
+ // Process a function body.
+ bool ProcessFunctionBody(Vector<const uint8_t> bytes,
+ uint32_t offset) override {
+ ++num_functions_;
+ return true;
+ }
+
+ void OnFinishedChunk() override {}
+
+ // Finish the processing of the stream.
+ void OnFinishedStream(std::unique_ptr<uint8_t[]> bytes,
+ size_t length) override {
+ received_bytes_ = std::move(bytes);
+ length_ = length;
+ }
+
+ // Report an error detected in the StreamingDecoder.
+ void OnError(DecodeResult result) override { ok_ = false; }
+
+ void OnAbort() override {}
+
+ size_t num_sections() const { return num_sections_; }
+ size_t num_functions() const { return num_functions_; }
+ bool ok() const { return ok_; }
+ Vector<const uint8_t> received_bytes() {
+ return Vector<const uint8_t>(received_bytes_.get(), length_);
+ }
+
+ private:
+ size_t num_sections_ = 0;
+ size_t num_functions_ = 0;
+ bool ok_ = true;
+ std::unique_ptr<uint8_t[]> received_bytes_;
+ size_t length_;
+};
+
class WasmStreamingDecoderTest : public ::testing::Test {
public:
- void ExpectVerifies(Vector<const uint8_t> data) {
+ void ExpectVerifies(Vector<const uint8_t> data, size_t expected_sections,
+ size_t expected_functions) {
for (int split = 0; split <= data.length(); ++split) {
- StreamingDecoder stream(nullptr);
+ // Use a unique_ptr so that the StreamingDecoder can own the processor.
+ std::unique_ptr<MockStreamingProcessor> p(new MockStreamingProcessor());
+ MockStreamingProcessor* processor = p.get();
+ StreamingDecoder stream(std::move(p));
stream.OnBytesReceived(data.SubVector(0, split));
stream.OnBytesReceived(data.SubVector(split, data.length()));
- EXPECT_TRUE(stream.FinishForTesting());
+ stream.Finish();
+ EXPECT_TRUE(processor->ok());
+ EXPECT_EQ(expected_sections, processor->num_sections());
+ EXPECT_EQ(expected_functions, processor->num_functions());
+ EXPECT_EQ(data, processor->received_bytes());
}
}
void ExpectFailure(Vector<const uint8_t> data) {
for (int split = 0; split <= data.length(); ++split) {
- StreamingDecoder stream(nullptr);
+ std::unique_ptr<MockStreamingProcessor> p(new MockStreamingProcessor());
+ MockStreamingProcessor* processor = p.get();
+ StreamingDecoder stream(std::move(p));
stream.OnBytesReceived(data.SubVector(0, split));
stream.OnBytesReceived(data.SubVector(split, data.length()));
- EXPECT_FALSE(stream.FinishForTesting());
+ stream.Finish();
+ EXPECT_FALSE(processor->ok());
}
}
};
TEST_F(WasmStreamingDecoderTest, EmptyStream) {
- StreamingDecoder stream(nullptr);
- EXPECT_FALSE(stream.FinishForTesting());
+ std::unique_ptr<MockStreamingProcessor> p(new MockStreamingProcessor());
+ MockStreamingProcessor* processor = p.get();
+ StreamingDecoder stream(std::move(p));
+ stream.Finish();
+ EXPECT_FALSE(processor->ok());
}
TEST_F(WasmStreamingDecoderTest, IncompleteModuleHeader) {
const uint8_t data[] = {U32_LE(kWasmMagic), U32_LE(kWasmVersion)};
{
- StreamingDecoder stream(nullptr);
+ std::unique_ptr<MockStreamingProcessor> p(new MockStreamingProcessor());
+ MockStreamingProcessor* processor = p.get();
+ StreamingDecoder stream(std::move(p));
stream.OnBytesReceived(Vector<const uint8_t>(data, 1));
- EXPECT_FALSE(stream.FinishForTesting());
+ stream.Finish();
+ EXPECT_FALSE(processor->ok());
}
for (int length = 1; length < static_cast<int>(arraysize(data)); ++length) {
ExpectFailure(Vector<const uint8_t>(data, length));
@@ -57,7 +140,7 @@ TEST_F(WasmStreamingDecoderTest, IncompleteModuleHeader) {
TEST_F(WasmStreamingDecoderTest, MagicAndVersion) {
const uint8_t data[] = {U32_LE(kWasmMagic), U32_LE(kWasmVersion)};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 0, 0);
}
TEST_F(WasmStreamingDecoderTest, BadMagic) {
@@ -87,7 +170,7 @@ TEST_F(WasmStreamingDecoderTest, OneSection) {
0x0, // 5
0x0 // 6
};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 1, 0);
}
TEST_F(WasmStreamingDecoderTest, OneSection_b) {
@@ -104,7 +187,7 @@ TEST_F(WasmStreamingDecoderTest, OneSection_b) {
0x0, // 5
0x0 // 6
};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 1, 0);
}
TEST_F(WasmStreamingDecoderTest, OneShortSection) {
@@ -118,7 +201,7 @@ TEST_F(WasmStreamingDecoderTest, OneShortSection) {
0x0, // Payload
0x0 // 2
};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 1, 0);
}
TEST_F(WasmStreamingDecoderTest, OneShortSection_b) {
@@ -132,7 +215,7 @@ TEST_F(WasmStreamingDecoderTest, OneShortSection_b) {
0x0, // Payload
0x0 // 2
};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 1, 0);
}
TEST_F(WasmStreamingDecoderTest, OneEmptySection) {
@@ -142,7 +225,7 @@ TEST_F(WasmStreamingDecoderTest, OneEmptySection) {
0x1, // Section ID
0x0 // Section Length
};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 1, 0);
}
TEST_F(WasmStreamingDecoderTest, OneSectionNotEnoughPayload1) {
@@ -207,7 +290,7 @@ TEST_F(WasmStreamingDecoderTest, TwoLongSections) {
0x0, // 6
0x0 // 7
};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 2, 0);
}
TEST_F(WasmStreamingDecoderTest, TwoShortSections) {
@@ -222,7 +305,7 @@ TEST_F(WasmStreamingDecoderTest, TwoShortSections) {
0x0, // Payload
0x0, // 2
};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 2, 0);
}
TEST_F(WasmStreamingDecoderTest, TwoSectionsShortLong) {
@@ -242,7 +325,7 @@ TEST_F(WasmStreamingDecoderTest, TwoSectionsShortLong) {
0x0, // 6
0x0 // 7
};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 2, 0);
}
TEST_F(WasmStreamingDecoderTest, TwoEmptySections) {
@@ -254,19 +337,7 @@ TEST_F(WasmStreamingDecoderTest, TwoEmptySections) {
0x2, // Section ID
0x0 // Section Length
};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
-}
-
-TEST_F(WasmStreamingDecoderTest, EmptyCodeSection) {
- const uint8_t data[] = {
- U32_LE(kWasmMagic), // --
- U32_LE(kWasmVersion), // --
- kCodeSectionCode, // Section ID
- 0x0, // Section Length
- 0xb, // Section ID
- 0x0 // Section Length
- };
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 2, 0);
}
TEST_F(WasmStreamingDecoderTest, OneFunction) {
@@ -284,7 +355,7 @@ TEST_F(WasmStreamingDecoderTest, OneFunction) {
0x0, // 5
0x0, // 6
};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 0, 1);
}
TEST_F(WasmStreamingDecoderTest, OneShortFunction) {
@@ -297,7 +368,7 @@ TEST_F(WasmStreamingDecoderTest, OneShortFunction) {
0x1, // Function Length
0x0, // Function
};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 0, 1);
}
TEST_F(WasmStreamingDecoderTest, EmptyFunction) {
@@ -335,7 +406,7 @@ TEST_F(WasmStreamingDecoderTest, TwoFunctions) {
0x0, // 6
0x0, // 7
};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 0, 2);
}
TEST_F(WasmStreamingDecoderTest, TwoFunctions_b) {
@@ -356,7 +427,17 @@ TEST_F(WasmStreamingDecoderTest, TwoFunctions_b) {
0x0, // 6
0x0, // 7
};
- ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 0, 2);
+}
+
+TEST_F(WasmStreamingDecoderTest, CodeSectionLengthZero) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ kCodeSectionCode, // Section ID
+ 0x0, // Section Length
+ };
+ ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
}
TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooHigh) {
@@ -493,6 +574,66 @@ TEST_F(WasmStreamingDecoderTest, NumberOfFunctionsTooLow) {
};
ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
}
+
+TEST_F(WasmStreamingDecoderTest, TwoCodeSections) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ kCodeSectionCode, // Section ID
+ 0x3, // Section Length
+ 0x1, // Number of Functions
+ 0x1, // Function Length
+ 0x0, // Function
+ kCodeSectionCode, // Section ID
+ 0x3, // Section Length
+ 0x1, // Number of Functions
+ 0x1, // Function Length
+ 0x0, // Function
+ };
+ ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+}
+
+TEST_F(WasmStreamingDecoderTest, UnknownSection) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ kCodeSectionCode, // Section ID
+ 0x3, // Section Length
+ 0x1, // Number of Functions
+ 0x1, // Function Length
+ 0x0, // Function
+ kUnknownSectionCode, // Section ID
+ 0x3, // Section Length
+ 0x1, // Name Length
+ 0x1, // Name
+ 0x0, // Content
+ };
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)), 1, 1);
+}
+
+TEST_F(WasmStreamingDecoderTest, UnknownSectionSandwich) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ kCodeSectionCode, // Section ID
+ 0x3, // Section Length
+ 0x1, // Number of Functions
+ 0x1, // Function Length
+ 0x0, // Function
+ kUnknownSectionCode, // Section ID
+ 0x3, // Section Length
+ 0x1, // Name Length
+ 0x1, // Name
+ 0x0, // Content
+ kCodeSectionCode, // Section ID
+ 0x3, // Section Length
+ 0x1, // Number of Functions
+ 0x1, // Function Length
+ 0x0, // Function
+ };
+ ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
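Every test in this file now follows the same observe-after-transfer pattern noted in the "Use a unique_ptr" comment above: grab a raw pointer to the mock before moving ownership into the StreamingDecoder, then assert through the raw pointer once processing is done. A standalone sketch with stand-in types (not the V8 classes):

    #include <cassert>
    #include <memory>
    #include <utility>

    struct Processor { bool ok = true; };

    struct Decoder {
      explicit Decoder(std::unique_ptr<Processor> p) : processor_(std::move(p)) {}
      void Fail() { processor_->ok = false; }
      std::unique_ptr<Processor> processor_;  // the decoder owns the processor
    };

    int main() {
      auto owned = std::make_unique<Processor>();
      Processor* observer = owned.get();   // non-owning view, valid while Decoder lives
      Decoder decoder(std::move(owned));   // ownership transferred
      decoder.Fail();
      assert(!observer->ok);               // assertions go through the raw pointer
      return 0;
    }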
diff --git a/deps/v8/test/unittests/wasm/wasm-heap-unittest.cc b/deps/v8/test/unittests/wasm/wasm-heap-unittest.cc
new file mode 100644
index 0000000000..6e75e84b43
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/wasm-heap-unittest.cc
@@ -0,0 +1,157 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+#include "src/wasm/wasm-heap.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace wasm_heap_unittest {
+
+class DisjointAllocationPoolTest : public ::testing::Test {
+ public:
+ Address A(size_t n) { return reinterpret_cast<Address>(n); }
+ void CheckLooksLike(const DisjointAllocationPool& mem,
+ std::vector<std::pair<size_t, size_t>> expectation);
+ DisjointAllocationPool Make(std::vector<std::pair<size_t, size_t>> model);
+};
+
+void DisjointAllocationPoolTest::CheckLooksLike(
+ const DisjointAllocationPool& mem,
+ std::vector<std::pair<size_t, size_t>> expectation) {
+ const auto& ranges = mem.ranges();
+ CHECK_EQ(ranges.size(), expectation.size());
+ auto iter = expectation.begin();
+ for (auto it = ranges.begin(), e = ranges.end(); it != e; ++it, ++iter) {
+ CHECK_EQ(it->first, A(iter->first));
+ CHECK_EQ(it->second, A(iter->second));
+ }
+}
+
+DisjointAllocationPool DisjointAllocationPoolTest::Make(
+ std::vector<std::pair<size_t, size_t>> model) {
+ DisjointAllocationPool ret;
+ for (auto& pair : model) {
+ ret.Merge(DisjointAllocationPool(A(pair.first), A(pair.second)));
+ }
+ return ret;
+}
+
+TEST_F(DisjointAllocationPoolTest, Construct) {
+ DisjointAllocationPool a;
+ CHECK(a.IsEmpty());
+ CHECK_EQ(a.ranges().size(), 0);
+ DisjointAllocationPool b = Make({{1, 5}});
+ CHECK(!b.IsEmpty());
+ CHECK_EQ(b.ranges().size(), 1);
+ a.Merge(std::move(b));
+ CheckLooksLike(a, {{1, 5}});
+ DisjointAllocationPool c;
+ a.Merge(std::move(c));
+ CheckLooksLike(a, {{1, 5}});
+ DisjointAllocationPool e, f;
+ e.Merge(std::move(f));
+ CHECK(e.IsEmpty());
+}
+
+TEST_F(DisjointAllocationPoolTest, SimpleExtract) {
+ DisjointAllocationPool a = Make({{1, 5}});
+ DisjointAllocationPool b = a.AllocatePool(2);
+ CheckLooksLike(a, {{3, 5}});
+ CheckLooksLike(b, {{1, 3}});
+ a.Merge(std::move(b));
+ CheckLooksLike(a, {{1, 5}});
+ CHECK_EQ(a.ranges().size(), 1);
+ CHECK_EQ(a.ranges().front().first, A(1));
+ CHECK_EQ(a.ranges().front().second, A(5));
+}
+
+TEST_F(DisjointAllocationPoolTest, ExtractAll) {
+ DisjointAllocationPool a(A(1), A(5));
+ DisjointAllocationPool b = a.AllocatePool(4);
+ CheckLooksLike(b, {{1, 5}});
+ CHECK(a.IsEmpty());
+ a.Merge(std::move(b));
+ CheckLooksLike(a, {{1, 5}});
+}
+
+TEST_F(DisjointAllocationPoolTest, ExtractAcross) {
+ DisjointAllocationPool a = Make({{1, 5}, {10, 20}});
+ DisjointAllocationPool b = a.AllocatePool(5);
+ CheckLooksLike(a, {{11, 20}});
+ CheckLooksLike(b, {{1, 5}, {10, 11}});
+ a.Merge(std::move(b));
+ CheckLooksLike(a, {{1, 5}, {10, 20}});
+}
+
+TEST_F(DisjointAllocationPoolTest, ReassembleOutOfOrder) {
+ DisjointAllocationPool a = Make({{1, 5}, {10, 15}});
+ DisjointAllocationPool b = Make({{7, 8}, {20, 22}});
+ a.Merge(std::move(b));
+ CheckLooksLike(a, {{1, 5}, {7, 8}, {10, 15}, {20, 22}});
+
+ DisjointAllocationPool c = Make({{1, 5}, {10, 15}});
+ DisjointAllocationPool d = Make({{7, 8}, {20, 22}});
+ d.Merge(std::move(c));
+ CheckLooksLike(d, {{1, 5}, {7, 8}, {10, 15}, {20, 22}});
+}
+
+TEST_F(DisjointAllocationPoolTest, FailToExtract) {
+ DisjointAllocationPool a = Make({{1, 5}});
+ DisjointAllocationPool b = a.AllocatePool(5);
+ CheckLooksLike(a, {{1, 5}});
+ CHECK(b.IsEmpty());
+}
+
+TEST_F(DisjointAllocationPoolTest, FailToExtractExact) {
+ DisjointAllocationPool a = Make({{1, 5}, {10, 14}});
+ DisjointAllocationPool b = a.Allocate(5);
+ CheckLooksLike(a, {{1, 5}, {10, 14}});
+ CHECK(b.IsEmpty());
+}
+
+TEST_F(DisjointAllocationPoolTest, ExtractExact) {
+ DisjointAllocationPool a = Make({{1, 5}, {10, 15}});
+ DisjointAllocationPool b = a.Allocate(5);
+ CheckLooksLike(a, {{1, 5}});
+ CheckLooksLike(b, {{10, 15}});
+}
+
+TEST_F(DisjointAllocationPoolTest, Merging) {
+ DisjointAllocationPool a = Make({{10, 15}, {20, 25}});
+ a.Merge(Make({{15, 20}}));
+ CheckLooksLike(a, {{10, 25}});
+}
+
+TEST_F(DisjointAllocationPoolTest, MergingMore) {
+ DisjointAllocationPool a = Make({{10, 15}, {20, 25}, {30, 35}});
+ a.Merge(Make({{15, 20}, {25, 30}}));
+ CheckLooksLike(a, {{10, 35}});
+}
+
+TEST_F(DisjointAllocationPoolTest, MergingSkip) {
+ DisjointAllocationPool a = Make({{10, 15}, {20, 25}, {30, 35}});
+ a.Merge(Make({{25, 30}}));
+ CheckLooksLike(a, {{10, 15}, {20, 35}});
+}
+
+TEST_F(DisjointAllocationPoolTest, MergingSkipLargerSrc) {
+ DisjointAllocationPool a = Make({{10, 15}, {20, 25}, {30, 35}});
+ a.Merge(Make({{25, 30}, {35, 40}}));
+ CheckLooksLike(a, {{10, 15}, {20, 40}});
+}
+
+TEST_F(DisjointAllocationPoolTest, MergingSkipLargerSrcWithGap) {
+ DisjointAllocationPool a = Make({{10, 15}, {20, 25}, {30, 35}});
+ a.Merge(Make({{25, 30}, {36, 40}}));
+ CheckLooksLike(a, {{10, 15}, {20, 35}, {36, 40}});
+}
+
+} // namespace wasm_heap_unittest
+} // namespace wasm
+} // namespace internal
+} // namespace v8
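A reviewer's aside: the pool semantics these tests pin down fit in a few lines. Below is a toy Python model, inferred from the expectations above rather than from V8's C++ implementation: a pool is a sorted list of disjoint half-open [start, end) ranges, Merge coalesces touching neighbours, and AllocatePool carves ranges off the front.

    # Toy model of DisjointAllocationPool -- an illustration, not V8 code.
    def merge(ranges, extra):
        # Sort all [start, end) pairs, then coalesce touching neighbours.
        result = []
        for start, end in sorted(ranges + extra):
            if result and result[-1][1] == start:
                result[-1][1] = end
            else:
                result.append([start, end])
        return [tuple(r) for r in result]

    def allocate_pool(ranges, size):
        # Carve `size` units off the front, possibly spanning several
        # ranges; on failure the pool is left untouched (cf. FailToExtract).
        taken, rest, need = [], list(ranges), size
        while need > 0 and rest:
            start, end = rest.pop(0)
            chunk = min(need, end - start)
            taken.append((start, start + chunk))
            if start + chunk < end:
                rest.insert(0, (start + chunk, end))
            need -= chunk
        return (taken, rest) if need == 0 else ([], list(ranges))

    assert merge([(10, 15), (20, 25)], [(15, 20)]) == [(10, 25)]
    assert allocate_pool([(1, 5), (10, 20)], 5) == \
        ([(1, 5), (10, 11)], [(11, 20)])  # cf. ExtractAcross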
diff --git a/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc b/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
index 63309db078..45a4c5a59b 100644
--- a/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
@@ -309,6 +309,9 @@ TEST_F(WasmMacroGenTest, LoadsAndStoresWithOffset) {
WASM_GET_LOCAL(0)));
}
}
+
+#undef EXPECT_SIZE
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/wasm-opcodes-unittest.cc b/deps/v8/test/unittests/wasm/wasm-opcodes-unittest.cc
index 4bb04c7152..12739ff44f 100644
--- a/deps/v8/test/unittests/wasm/wasm-opcodes-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-opcodes-unittest.cc
@@ -19,6 +19,7 @@ TEST_F(WasmOpcodesTest, EveryOpcodeHasAName) {
} kValues[] = {
#define DECLARE_ELEMENT(name, opcode, sig) {kExpr##name, "kExpr" #name},
FOREACH_OPCODE(DECLARE_ELEMENT)};
+#undef DECLARE_ELEMENT
for (size_t i = 0; i < arraysize(kValues); i++) {
const char* result = WasmOpcodes::OpcodeName(kValues[i].opcode);
diff --git a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1 b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
index 76c4ddc21f..240b11b803 100644
--- a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
@@ -1 +1 @@
-ca6a2bd223a88c489d9f5fbfb21bf4eb8700e62e \ No newline at end of file
+2c29039a7935576bb6d01f9b4b6c96b8861b5bbe \ No newline at end of file
diff --git a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
index f0e4af7e79..c97ac33824 100644
--- a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
+++ b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
@@ -5,7 +5,6 @@
[
[ALWAYS, {
#TODO(ahaas): Add additional stack checks on mips.
- # Issue 6318: Stack checks for functions with huge stack frames fail on x64 and ia32
'tests/skip-stack-guard-page': [PASS, ['arch == mipsel or arch == mips64el or ((arch == ppc or arch == ppc64 or arch == s390 or arch == s390x) and simulator_run)', SKIP]],
}], # ALWAYS
diff --git a/deps/v8/test/webkit/js-continue-break-restrictions-expected.txt b/deps/v8/test/webkit/js-continue-break-restrictions-expected.txt
index e42cf3dc37..5cf59d2346 100644
--- a/deps/v8/test/webkit/js-continue-break-restrictions-expected.txt
+++ b/deps/v8/test/webkit/js-continue-break-restrictions-expected.txt
@@ -42,17 +42,17 @@ PASS if (0) { L:A:{ continue; } } threw exception SyntaxError: Illegal continue
PASS if(0){ L:for(;;) continue L; } is undefined.
PASS if(0){ L:A:for(;;) continue L; } is undefined.
PASS if(0){ A:L:for(;;) continue L; } is undefined.
-PASS if(0){ A:for(;;) L:continue L; } threw exception SyntaxError: Illegal continue statement: 'L' does not denote an iteration statement.
+PASS if(0){ A:for(;;) L:continue L; } threw exception SyntaxError: Undefined label 'L'.
PASS if(0){ L:for(;;) A:continue L; } is undefined.
PASS if(0){ L:do continue L; while(0); } is undefined.
PASS if(0){ L:A:do continue L; while(0); } is undefined.
PASS if(0){ A:L:do continue L; while(0);} is undefined.
-PASS if(0){ A:do L:continue L; while(0); } threw exception SyntaxError: Illegal continue statement: 'L' does not denote an iteration statement.
+PASS if(0){ A:do L:continue L; while(0); } threw exception SyntaxError: Undefined label 'L'.
PASS if(0){ L:do A:continue L; while(0); } is undefined.
PASS if(0){ L:while(0) continue L; } is undefined.
PASS if(0){ L:A:while(0) continue L; } is undefined.
PASS if(0){ A:L:while(0) continue L; } is undefined.
-PASS if(0){ A:while(0) L:continue L; } threw exception SyntaxError: Illegal continue statement: 'L' does not denote an iteration statement.
+PASS if(0){ A:while(0) L:continue L; } threw exception SyntaxError: Undefined label 'L'.
PASS if(0){ L:while(0) A:continue L; } is undefined.
PASS successfullyParsed is true
diff --git a/deps/v8/test/webkit/webkit.status b/deps/v8/test/webkit/webkit.status
index e8a24abd50..322454ba17 100644
--- a/deps/v8/test/webkit/webkit.status
+++ b/deps/v8/test/webkit/webkit.status
@@ -36,6 +36,10 @@
# Irregexp interpreter overflows stack. We should just not crash.
'fast/js/regexp-stack-overflow': [PASS, FAIL],
}], # ALWAYS
+['system == windows', {
+ # Exceeds call stack on windows after MSVS2017 switch.
+ 'fast/js/excessive-comma-usage': [SKIP],
+}], # system == windows
['mode == debug', {
# Too slow in debug mode.
'dfg-int-overflow-in-loop': [SKIP],
diff --git a/deps/v8/third_party/eu-strip/README.v8 b/deps/v8/third_party/eu-strip/README.v8
new file mode 100644
index 0000000000..e84974d92b
--- /dev/null
+++ b/deps/v8/third_party/eu-strip/README.v8
@@ -0,0 +1,24 @@
+Name: eu-strip
+URL: https://sourceware.org/elfutils/
+Version: 0.158
+Security Critical: no
+License: LGPL 3
+License File: NOT_SHIPPED
+
+Description:
+
+Patched eu-strip from elfutils.
+
+Build instructions (on Trusty; note that this will build the
+Ubuntu-patched version of elfutils):
+$ mkdir elfutils
+$ cd elfutils
+$ apt-get source elfutils
+$ cd elfutils-0.158
+[ Edit libelf/elf_end.c and remove the free() on line 164. ]
+$ ./configure
+$ make
+$ gcc -std=gnu99 -Wall -Wshadow -Wunused -Wextra -fgnu89-inline
+ -Wformat=2 -Werror -g -O2 -Wl,-rpath-link,libelf:libdw -o eu-strip
+ src/strip.o libebl/libebl.a libelf/libelf.a lib/libeu.a -ldl
+$ eu-strip ./eu-strip # Keep the binary small, please.
diff --git a/deps/v8/third_party/eu-strip/bin/eu-strip b/deps/v8/third_party/eu-strip/bin/eu-strip
new file mode 100755
index 0000000000..994e2263b9
--- /dev/null
+++ b/deps/v8/third_party/eu-strip/bin/eu-strip
Binary files differ
diff --git a/deps/v8/tools/check-inline-includes.sh b/deps/v8/tools/check-inline-includes.sh
index 536afb1dd4..6def974d04 100755
--- a/deps/v8/tools/check-inline-includes.sh
+++ b/deps/v8/tools/check-inline-includes.sh
@@ -4,16 +4,19 @@
# found in the LICENSE file.
v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../)
-headers=$(find "$v8_root/src" -name '*.h' -not -name '*-inl.h')
+directories="src test/cctest test/unittests"
-for header in $headers; do
- inline_header_include=$(grep '#include ".*-inl.h"' "$header")
- if [ -n "$inline_header_include" ]; then
- echo "The following non-inline header seems to include an inline header:"
- echo " Header : $header"
- echo " Include: $inline_header_include"
- echo
- fi
+for directory in $directories; do
+ headers=$(find "$v8_root/$directory" -name '*.h' -not -name '*-inl.h')
+ for header in $headers; do
+ inline_header_include=$(grep '#include ".*-inl.h"' "$header")
+ if [ -n "$inline_header_include" ]; then
+ echo "The following non-inline header seems to include an inline header:"
+ echo " Header : $header"
+ echo " Include: $inline_header_include"
+ echo
+ fi
+ done
done
echo "Kthxbye."
diff --git a/deps/v8/tools/dev/gm.py b/deps/v8/tools/dev/gm.py
index 21af4ff31c..1ce17e4f31 100755
--- a/deps/v8/tools/dev/gm.py
+++ b/deps/v8/tools/dev/gm.py
@@ -23,6 +23,7 @@ import errno
import multiprocessing
import os
import pty
+import re
import subprocess
import sys
@@ -41,7 +42,8 @@ MODES = ["release", "debug", "optdebug"]
# Modes that get built/run when you don't specify any.
DEFAULT_MODES = ["release", "debug"]
# Build targets that can be manually specified.
-TARGETS = ["d8", "cctest", "unittests", "v8_fuzzers", "mkgrokdump"]
+TARGETS = ["d8", "cctest", "unittests", "v8_fuzzers", "mkgrokdump",
+ "generate-bytecode-expectations"]
# Build targets that get built when you don't specify any (and specified tests
# don't imply any other targets).
DEFAULT_TARGETS = ["d8"]
@@ -253,12 +255,16 @@ class Config(object):
return_code, output = _CallWithOutput("ninja -C %s %s %s" %
(path, build_opts, targets))
if return_code != 0 and "FAILED: gen/snapshot.cc" in output:
+ csa_trap = re.compile("Specify option( --csa-trap-on-node=[^ ]*)")
+ match = csa_trap.search(output)
+ extra_opt = match.group(1) if match else ""
_Notify("V8 build requires your attention",
"Detected mksnapshot failure, re-running in GDB...")
_Call("gdb -args %(path)s/mksnapshot "
"--startup_src %(path)s/gen/snapshot.cc "
"--random-seed 314159265 "
- "--startup-blob %(path)s/snapshot_blob.bin" % {"path": path})
+ "--startup-blob %(path)s/snapshot_blob.bin"
+ "%(extra)s"% {"path": path, "extra": extra_opt})
return return_code
def RunTests(self):
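The new recovery path above is easy to exercise in isolation. A minimal sketch follows; the sample failure text is an assumption based on the regex the patch introduces, not verbatim mksnapshot output:

    import re

    output = "FAILED: gen/snapshot.cc ... Specify option --csa-trap-on-node=14 ..."
    csa_trap = re.compile("Specify option( --csa-trap-on-node=[^ ]*)")
    match = csa_trap.search(output)
    extra_opt = match.group(1) if match else ""
    # The captured group keeps its leading space, so plain concatenation works:
    print("gdb -args out/x64.release/mksnapshot --random-seed 314159265"
          + extra_opt)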
diff --git a/deps/v8/tools/grokdump.py b/deps/v8/tools/grokdump.py
index 4d22333d7b..bb784ce806 100755
--- a/deps/v8/tools/grokdump.py
+++ b/deps/v8/tools/grokdump.py
@@ -35,6 +35,7 @@ import codecs
import ctypes
import datetime
import disasm
+import inspect
import mmap
import optparse
import os
@@ -1965,7 +1966,9 @@ class InspectionPadawan(object):
# Frame markers only occur directly after a frame pointer and only on the
# stack.
if not self.reader.IsExceptionStackAddress(slot): return False
- next_address = self.reader.ReadUIntPtr(slot + self.reader.PointerSize())
+ next_slot = slot + self.reader.PointerSize()
+ if not self.reader.IsValidAddress(next_slot): return False
+ next_address = self.reader.ReadUIntPtr(next_slot)
return self.reader.IsExceptionStackAddress(next_address)
def FormatSmi(self, address):
@@ -2057,10 +2060,12 @@ class InspectionPadawan(object):
ptr_size = self.reader.PointerSize()
if start is None:
start = self.reader.ExceptionSP()
- end = start + ptr_size * 1024
+ if not self.reader.IsValidAddress(start): return start
+ end = start + ptr_size * 1024 * 4
message_start = 0
magic1 = None
for slot in xrange(start, end, ptr_size):
+ if not self.reader.IsValidAddress(slot + ptr_size): break
magic1 = self.reader.ReadUIntPtr(slot)
magic2 = self.reader.ReadUIntPtr(slot + ptr_size)
pair = (magic1 & 0xFFFFFFFF, magic2 & 0xFFFFFFFF)
@@ -2073,7 +2078,9 @@ class InspectionPadawan(object):
On Mac we don't always get proper magic markers, so just try printing
the first long ascii string found on the stack.
"""
- message_start, message = self.FindFirstAsciiString(start)
+ magic1 = None
+ magic2 = None
+ message_start, message = self.FindFirstAsciiString(start, end, 128)
if message_start is None: return start
else:
message = self.reader.ReadAsciiString(message_start)
@@ -3323,6 +3330,23 @@ class InspectionShell(cmd.Cmd):
expr, result , e))
return address
+ def do_help(self, cmd=None):
+ if len(cmd) == 0:
+ print "Available commands"
+ print "=" * 79
+ prefix = "do_"
+ methods = inspect.getmembers(InspectionShell, predicate=inspect.ismethod)
+ for name, method in methods:
+ if not name.startswith(prefix): continue
+ doc = inspect.getdoc(method)
+ if not doc: continue
+ name = prefix.join(name.split(prefix)[1:])
+ description = doc.splitlines()[0]
+ print (name + ": ").ljust(16) + description
+ print "=" * 79
+ else:
+ return super(InspectionShell, self).do_help(cmd)
+
def do_p(self, cmd):
""" see print """
return self.do_print(cmd)
@@ -3371,9 +3395,11 @@ class InspectionShell(cmd.Cmd):
def do_dd(self, args):
"""
Interpret memory in the given region [address, address + num * word_size)
(if available) as a sequence of words. Automatic alignment is not performed.
+
If the num is not specified, a default value of 16 words is used.
If no address is given, dd continues printing at the next word.
+
Synopsis: dd 0x<address>|$register [0x<num>]
"""
if len(args) != 0:
@@ -3393,9 +3419,10 @@ class InspectionShell(cmd.Cmd):
def do_display_object(self, address):
"""
- Interpret memory at the given address as a V8 object. Automatic
- alignment makes sure that you can pass tagged as well as un-tagged
- addresses.
+ Interpret memory at the given address as a V8 object.
+
+ Automatic alignment makes sure that you can pass tagged as well as
+ un-tagged addresses.
"""
address = self.ParseAddressExpr(address)
if self.reader.IsAlignedAddress(address):
@@ -3415,8 +3442,11 @@ class InspectionShell(cmd.Cmd):
def do_display_stack_objects(self, args):
"""
+ Find and print object pointers in the given range.
+
Print all possible object pointers that are on the stack or in the given
address range.
+
Usage: dso [START_ADDR,[END_ADDR]]
"""
start = self.reader.StackTop()
@@ -3443,7 +3473,7 @@ class InspectionShell(cmd.Cmd):
def do_do_map(self, address):
"""
- Print a descriptor array in a readable format.
+ Print a Map in a readable format.
"""
start = self.ParseAddressExpr(address)
if ((start & 1) == 1): start = start - 1
@@ -3463,6 +3493,8 @@ class InspectionShell(cmd.Cmd):
def do_display_page(self, address):
"""
+ Prints details about the V8 heap page of the given address.
+
Interpret memory at the given address as being on a V8 heap page
and print information about the page header (if available).
"""
@@ -3476,9 +3508,10 @@ class InspectionShell(cmd.Cmd):
def do_k(self, arguments):
"""
- Teach V8 heap layout information to the inspector. This increases
- the amount of annotations the inspector can produce while dumping
- data. The first page of each heap space is of particular interest
+ Teach V8 heap layout information to the inspector.
+
+ This increases the amount of annotations the inspector can produce while
+ dumping data. The first page of each heap space is of particular interest
because it contains known objects that do not move.
"""
self.padawan.PrintKnowledge()
@@ -3489,8 +3522,9 @@ class InspectionShell(cmd.Cmd):
def do_known_oldspace(self, address):
"""
- Teach V8 heap layout information to the inspector. Set the first
- old space page by passing any pointer into that page.
+ Teach V8 heap layout information to the inspector.
+
+ Set the first old space page by passing any pointer into that page.
"""
address = self.ParseAddressExpr(address)
page_address = address & ~self.heap.PageAlignmentMask()
@@ -3502,8 +3536,9 @@ class InspectionShell(cmd.Cmd):
def do_known_map(self, address):
"""
- Teach V8 heap layout information to the inspector. Set the first
- map-space page by passing any pointer into that page.
+ Teach V8 heap layout information to the inspector.
+
+ Set the first map-space page by passing any pointer into that page.
"""
address = self.ParseAddressExpr(address)
page_address = address & ~self.heap.PageAlignmentMask()
@@ -3526,9 +3561,10 @@ class InspectionShell(cmd.Cmd):
def do_list_modules(self, arg):
"""
- List details for all loaded modules in the minidump. An argument can
- be passed to limit the output to only those modules that contain the
- argument as a substring (case insensitive match).
+ List details for all loaded modules in the minidump.
+
+ An argument can be passed to limit the output to only those modules that
+ contain the argument as a substring (case insensitive match).
"""
for module in self.reader.module_list.modules:
if arg:
@@ -3545,9 +3581,10 @@ class InspectionShell(cmd.Cmd):
def do_search(self, word):
"""
- Search for a given word in available memory regions. The given word
- is expanded to full pointer size and searched at aligned as well as
- un-aligned memory locations. Use 'sa' to search aligned locations
+ Search for a given word in available memory regions.
+
+ The given word is expanded to full pointer size and searched at aligned
+ as well as un-aligned memory locations. Use 'sa' to search aligned locations
only.
"""
try:
@@ -3560,8 +3597,9 @@ class InspectionShell(cmd.Cmd):
def do_sh(self, none):
"""
- Search for the V8 Heap object in all available memory regions. You
- might get lucky and find this rare treasure full of invaluable
+ Search for the V8 Heap object in all available memory regions.
+
+ You might get lucky and find this rare treasure full of invaluable
information.
"""
print "**** Not Implemented"
@@ -3572,8 +3610,9 @@ class InspectionShell(cmd.Cmd):
def do_disassemble(self, args):
"""
- Unassemble memory in the region [address, address + size). If the
- size is not specified, a default value of 32 bytes is used.
+ Unassemble memory in the region [address, address + size).
+
+ If the size is not specified, a default value of 32 bytes is used.
Synopsis: u 0x<address> 0x<size>
"""
if len(args) != 0:
@@ -3590,6 +3629,12 @@ class InspectionShell(cmd.Cmd):
self.reader.FormatIntPtr(self.u_start))
return
lines = self.reader.GetDisasmLines(self.u_start, self.u_size)
+ if len(lines) == 0:
+ print "Address %s could not be disassembled!" % (
+ self.reader.FormatIntPtr(self.u_start))
+ print " Could not disassemble using %s." % OBJDUMP_BIN
+ print " Pass path to architecture specific objdump via --objdump?"
+ return
for line in lines:
if skip:
skip = False
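The do_help override above is plain cmd.Cmd plus inspect machinery. A self-contained sketch of the same pattern follows; the shell class and its commands are invented for illustration:

    import cmd
    import inspect

    class DemoShell(cmd.Cmd):
        def do_dd(self, args):
            """Interpret memory as a sequence of words."""
        def do_u(self, args):
            """Unassemble memory in a region."""

    # Accept functions or methods so this runs on both Python 2 and 3.
    is_command = lambda m: inspect.isfunction(m) or inspect.ismethod(m)
    for name, method in inspect.getmembers(DemoShell, is_command):
        if not name.startswith("do_"):
            continue
        doc = inspect.getdoc(method)
        if not doc:
            continue
        print((name[len("do_"):] + ": ").ljust(16) + doc.splitlines()[0])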
diff --git a/deps/v8/tools/luci-go/linux64/isolate.sha1 b/deps/v8/tools/luci-go/linux64/isolate.sha1
deleted file mode 100644
index acecc79c48..0000000000
--- a/deps/v8/tools/luci-go/linux64/isolate.sha1
+++ /dev/null
@@ -1 +0,0 @@
-21410c557b49620e8a44ec0f861f94605bdc6d5c
diff --git a/deps/v8/tools/luci-go/mac64/isolate.sha1 b/deps/v8/tools/luci-go/mac64/isolate.sha1
deleted file mode 100644
index 16a7dd6a07..0000000000
--- a/deps/v8/tools/luci-go/mac64/isolate.sha1
+++ /dev/null
@@ -1 +0,0 @@
-1966687828a068eee4c5da45bbb8afd91cddda6f
diff --git a/deps/v8/tools/luci-go/win64/isolate.exe.sha1 b/deps/v8/tools/luci-go/win64/isolate.exe.sha1
deleted file mode 100644
index 6f5491d9dd..0000000000
--- a/deps/v8/tools/luci-go/win64/isolate.exe.sha1
+++ /dev/null
@@ -1 +0,0 @@
-35482264cea0f9b9dd2efe0a01620557fc15b7c1
diff --git a/deps/v8/tools/perf-to-html.py b/deps/v8/tools/perf-to-html.py
index 7ec9c50f21..ac9f53f617 100755
--- a/deps/v8/tools/perf-to-html.py
+++ b/deps/v8/tools/perf-to-html.py
@@ -70,20 +70,24 @@ class Result:
self.notable_ = 0
self.percentage_string_ = ""
# compute notability and significance.
- if hasScoreUnits:
- compare_num = 100*self.result_/self.master_result_ - 100
- else:
- compare_num = 100*self.master_result_/self.result_ - 100
- if abs(compare_num) > 0.1:
- self.percentage_string_ = "%3.1f" % (compare_num)
- z = ComputeZ(self.master_result_, self.master_sigma_, self.result_, count)
- p = ComputeProbability(z)
- if p < PROBABILITY_CONSIDERED_SIGNIFICANT:
- self.significant_ = True
- if compare_num >= PERCENT_CONSIDERED_SIGNIFICANT:
- self.notable_ = 1
- elif compare_num <= -PERCENT_CONSIDERED_SIGNIFICANT:
- self.notable_ = -1
+ try:
+ if hasScoreUnits:
+ compare_num = 100*self.result_/self.master_result_ - 100
+ else:
+ compare_num = 100*self.master_result_/self.result_ - 100
+ if abs(compare_num) > 0.1:
+ self.percentage_string_ = "%3.1f" % (compare_num)
+ z = ComputeZ(self.master_result_, self.master_sigma_, self.result_, count)
+ p = ComputeProbability(z)
+ if p < PROBABILITY_CONSIDERED_SIGNIFICANT:
+ self.significant_ = True
+ if compare_num >= PERCENT_CONSIDERED_SIGNIFICANT:
+ self.notable_ = 1
+ elif compare_num <= -PERCENT_CONSIDERED_SIGNIFICANT:
+ self.notable_ = -1
+ except ZeroDivisionError:
+ self.percentage_string_ = "NaN"
+ self.significant_ = True
def result(self):
return self.result_
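The try/except above exists because a zero baseline score used to crash report generation with an unhandled ZeroDivisionError. The failure mode in isolation (names shortened for the sketch):

    master_result, result = 0.0, 42.0
    try:
        compare_num = 100 * result / master_result - 100
    except ZeroDivisionError:
        percentage_string = "NaN"  # patched behaviour: flag the row, keep going
        significant = True
    print(percentage_string)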
diff --git a/deps/v8/tools/presubmit.py b/deps/v8/tools/presubmit.py
index 99486cce84..c4ee310ce9 100755
--- a/deps/v8/tools/presubmit.py
+++ b/deps/v8/tools/presubmit.py
@@ -60,7 +60,6 @@ from testrunner.local import utils
LINT_RULES = """
-build/header_guard
-build/include_what_you_use
--build/namespaces
-readability/check
-readability/fn_size
+readability/streams
@@ -228,8 +227,9 @@ class CppLintProcessor(SourceFileProcessor):
or (name in CppLintProcessor.IGNORE_LINT))
def GetPathsToSearch(self):
- return ['src', 'include', 'samples', join('test', 'cctest'),
- join('test', 'unittests'), join('test', 'inspector')]
+ dirs = ['include', 'samples', 'src']
+ test_dirs = ['cctest', 'common', 'fuzzer', 'inspector', 'unittests']
+ return dirs + [join('test', dir) for dir in test_dirs]
def GetCpplintScript(self, prio_path):
for path in [prio_path] + os.environ["PATH"].split(os.pathsep):
@@ -366,7 +366,6 @@ class SourceProcessor(SourceFileProcessor):
'regexp-pcre.js',
'resources-123.js',
'rjsmin.py',
- 'script-breakpoint.h',
'sqlite.js',
'sqlite-change-heap.js',
'sqlite-pointer-masking.js',
diff --git a/deps/v8/tools/run-tests.py b/deps/v8/tools/run-tests.py
index 6dfbea2501..2dd3782ae5 100755
--- a/deps/v8/tools/run-tests.py
+++ b/deps/v8/tools/run-tests.py
@@ -113,6 +113,7 @@ VARIANTS = ["default"]
MORE_VARIANTS = [
"stress",
+ "stress_incremental_marking",
"nooptimization",
"stress_asm_wasm",
"wasm_traps",
@@ -276,10 +277,11 @@ def BuildOptions():
result.add_option("--no-i18n", "--noi18n",
help="Skip internationalization tests",
default=False, action="store_true")
+ result.add_option("--network", help="Distribute tests on the network",
+ default=False, dest="network", action="store_true")
result.add_option("--no-network", "--nonetwork",
help="Don't distribute tests on the network",
- default=(utils.GuessOS() != "linux"),
- dest="no_network", action="store_true")
+ dest="network", action="store_false")
result.add_option("--no-presubmit", "--nopresubmit",
help='Skip presubmit checks (deprecated)',
default=False, dest="no_presubmit", action="store_true")
@@ -563,11 +565,11 @@ def ProcessOptions(options):
# Special processing of other options, sorted alphabetically.
if options.buildbot:
- options.no_network = True
- if options.command_prefix:
+ options.network = False
+ if options.command_prefix and options.network:
print("Specifying --command-prefix disables network distribution, "
"running tests locally.")
- options.no_network = True
+ options.network = False
options.command_prefix = shlex.split(options.command_prefix)
options.extra_flags = sum(map(shlex.split, options.extra_flags), [])
@@ -641,7 +643,7 @@ def ProcessOptions(options):
options.shell_dir = os.path.dirname(options.shell)
if options.valgrind:
run_valgrind = os.path.join("tools", "run-valgrind.py")
- # This is OK for distributed running, so we don't need to set no_network.
+ # This is OK for distributed running, so we don't need to disable network.
options.command_prefix = (["python", "-u", run_valgrind] +
options.command_prefix)
def CheckTestMode(name, option):
@@ -912,7 +914,7 @@ def Execute(arch, mode, args, options, suites):
progress_indicator.Register(progress.FlakinessTestProgressIndicator(
options.flakiness_results))
- run_networked = not options.no_network
+ run_networked = options.network
if not run_networked:
if options.verbose:
print("Network distribution disabled, running tests locally.")
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
index 4d274abc56..9efa060bba 100644
--- a/deps/v8/tools/testrunner/local/variants.py
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -6,25 +6,27 @@
ALL_VARIANT_FLAGS = {
"default": [[]],
"stress": [["--stress-opt", "--always-opt"]],
+ "stress_incremental_marking": [["--stress-incremental-marking"]],
# No optimization means disable all optimizations. OptimizeFunctionOnNextCall
# would not force optimization too. It turns into a Nop. Please see
# https://chromium-review.googlesource.com/c/452620/ for more discussion.
"nooptimization": [["--noopt"]],
"stress_asm_wasm": [["--validate-asm", "--stress-validate-asm", "--suppress-asm-messages"]],
- "wasm_traps": [["--wasm_guard_pages", "--wasm_trap_handler", "--invoke-weak-callbacks"]],
+ "wasm_traps": [["--wasm_trap_handler", "--invoke-weak-callbacks"]],
}
# FAST_VARIANTS implies no --always-opt.
FAST_VARIANT_FLAGS = {
"default": [[]],
"stress": [["--stress-opt"]],
+ "stress_incremental_marking": [["--stress-incremental-marking"]],
# No optimization means disable all optimizations. OptimizeFunctionOnNextCall
# would not force optimization too. It turns into a Nop. Please see
# https://chromium-review.googlesource.com/c/452620/ for more discussion.
"nooptimization": [["--noopt"]],
"stress_asm_wasm": [["--validate-asm", "--stress-validate-asm", "--suppress-asm-messages"]],
- "wasm_traps": [["--wasm_guard_pages", "--wasm_trap_handler", "--invoke-weak-callbacks"]],
+ "wasm_traps": [["--wasm_trap_handler", "--invoke-weak-callbacks"]],
}
-ALL_VARIANTS = set(["default", "stress", "nooptimization", "stress_asm_wasm",
- "wasm_traps"])
+ALL_VARIANTS = set(["default", "stress", "stress_incremental_marking",
+ "nooptimization", "stress_asm_wasm", "wasm_traps"])
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index 54997ab3ff..915365c25d 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -31,272 +31,274 @@ INSTANCE_TYPES = {
114: "SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE",
128: "SYMBOL_TYPE",
129: "HEAP_NUMBER_TYPE",
- 130: "ODDBALL_TYPE",
- 131: "MAP_TYPE",
- 132: "CODE_TYPE",
- 133: "MUTABLE_HEAP_NUMBER_TYPE",
- 134: "FOREIGN_TYPE",
- 135: "BYTE_ARRAY_TYPE",
- 136: "BYTECODE_ARRAY_TYPE",
- 137: "FREE_SPACE_TYPE",
- 138: "FIXED_INT8_ARRAY_TYPE",
- 139: "FIXED_UINT8_ARRAY_TYPE",
- 140: "FIXED_INT16_ARRAY_TYPE",
- 141: "FIXED_UINT16_ARRAY_TYPE",
- 142: "FIXED_INT32_ARRAY_TYPE",
- 143: "FIXED_UINT32_ARRAY_TYPE",
- 144: "FIXED_FLOAT32_ARRAY_TYPE",
- 145: "FIXED_FLOAT64_ARRAY_TYPE",
- 146: "FIXED_UINT8_CLAMPED_ARRAY_TYPE",
- 147: "FIXED_DOUBLE_ARRAY_TYPE",
- 148: "FILLER_TYPE",
- 149: "ACCESSOR_INFO_TYPE",
- 150: "ACCESSOR_PAIR_TYPE",
- 151: "ACCESS_CHECK_INFO_TYPE",
- 152: "INTERCEPTOR_INFO_TYPE",
- 153: "FUNCTION_TEMPLATE_INFO_TYPE",
- 154: "OBJECT_TEMPLATE_INFO_TYPE",
- 155: "ALLOCATION_SITE_TYPE",
- 156: "ALLOCATION_MEMENTO_TYPE",
- 157: "SCRIPT_TYPE",
- 158: "ALIASED_ARGUMENTS_ENTRY_TYPE",
- 159: "PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE",
- 160: "PROMISE_REACTION_JOB_INFO_TYPE",
- 161: "DEBUG_INFO_TYPE",
- 162: "STACK_FRAME_INFO_TYPE",
- 163: "PROTOTYPE_INFO_TYPE",
- 164: "TUPLE2_TYPE",
- 165: "TUPLE3_TYPE",
- 166: "CONTEXT_EXTENSION_TYPE",
- 167: "MODULE_TYPE",
- 168: "MODULE_INFO_ENTRY_TYPE",
- 169: "ASYNC_GENERATOR_REQUEST_TYPE",
- 170: "FIXED_ARRAY_TYPE",
- 171: "HASH_TABLE_TYPE",
- 172: "FEEDBACK_VECTOR_TYPE",
- 173: "TRANSITION_ARRAY_TYPE",
- 174: "PROPERTY_ARRAY_TYPE",
- 175: "SHARED_FUNCTION_INFO_TYPE",
- 176: "CELL_TYPE",
- 177: "WEAK_CELL_TYPE",
- 178: "PROPERTY_CELL_TYPE",
- 179: "SMALL_ORDERED_HASH_MAP_TYPE",
- 180: "SMALL_ORDERED_HASH_SET_TYPE",
- 181: "JS_PROXY_TYPE",
- 182: "JS_GLOBAL_OBJECT_TYPE",
- 183: "JS_GLOBAL_PROXY_TYPE",
- 184: "JS_MODULE_NAMESPACE_TYPE",
- 185: "JS_SPECIAL_API_OBJECT_TYPE",
- 186: "JS_VALUE_TYPE",
- 187: "JS_MESSAGE_OBJECT_TYPE",
- 188: "JS_DATE_TYPE",
- 189: "JS_API_OBJECT_TYPE",
- 190: "JS_OBJECT_TYPE",
- 191: "JS_ARGUMENTS_TYPE",
- 192: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 193: "JS_GENERATOR_OBJECT_TYPE",
- 194: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
- 195: "JS_ARRAY_TYPE",
- 196: "JS_ARRAY_BUFFER_TYPE",
- 197: "JS_TYPED_ARRAY_TYPE",
- 198: "JS_DATA_VIEW_TYPE",
- 199: "JS_SET_TYPE",
- 200: "JS_MAP_TYPE",
- 201: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
- 202: "JS_SET_VALUE_ITERATOR_TYPE",
- 203: "JS_MAP_KEY_ITERATOR_TYPE",
- 204: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
- 205: "JS_MAP_VALUE_ITERATOR_TYPE",
- 206: "JS_WEAK_MAP_TYPE",
- 207: "JS_WEAK_SET_TYPE",
- 208: "JS_PROMISE_CAPABILITY_TYPE",
- 209: "JS_PROMISE_TYPE",
- 210: "JS_REGEXP_TYPE",
- 211: "JS_ERROR_TYPE",
- 212: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
- 213: "JS_STRING_ITERATOR_TYPE",
- 214: "JS_TYPED_ARRAY_KEY_ITERATOR_TYPE",
- 215: "JS_FAST_ARRAY_KEY_ITERATOR_TYPE",
- 216: "JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE",
- 217: "JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 218: "JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 219: "JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 220: "JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 221: "JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 222: "JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 223: "JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 224: "JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 225: "JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 226: "JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 227: "JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 228: "JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 229: "JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 230: "JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 231: "JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 232: "JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 233: "JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE",
- 234: "JS_INT8_ARRAY_VALUE_ITERATOR_TYPE",
- 235: "JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE",
- 236: "JS_INT16_ARRAY_VALUE_ITERATOR_TYPE",
- 237: "JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE",
- 238: "JS_INT32_ARRAY_VALUE_ITERATOR_TYPE",
- 239: "JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE",
- 240: "JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE",
- 241: "JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE",
- 242: "JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE",
- 243: "JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE",
- 244: "JS_FAST_ARRAY_VALUE_ITERATOR_TYPE",
- 245: "JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE",
- 246: "JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
- 247: "JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
- 248: "JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE",
- 249: "WASM_INSTANCE_TYPE",
- 250: "WASM_MEMORY_TYPE",
- 251: "WASM_MODULE_TYPE",
- 252: "WASM_TABLE_TYPE",
- 253: "JS_BOUND_FUNCTION_TYPE",
- 254: "JS_FUNCTION_TYPE",
+ 130: "BIGINT_TYPE",
+ 131: "ODDBALL_TYPE",
+ 132: "MAP_TYPE",
+ 133: "CODE_TYPE",
+ 134: "MUTABLE_HEAP_NUMBER_TYPE",
+ 135: "FOREIGN_TYPE",
+ 136: "BYTE_ARRAY_TYPE",
+ 137: "BYTECODE_ARRAY_TYPE",
+ 138: "FREE_SPACE_TYPE",
+ 139: "FIXED_INT8_ARRAY_TYPE",
+ 140: "FIXED_UINT8_ARRAY_TYPE",
+ 141: "FIXED_INT16_ARRAY_TYPE",
+ 142: "FIXED_UINT16_ARRAY_TYPE",
+ 143: "FIXED_INT32_ARRAY_TYPE",
+ 144: "FIXED_UINT32_ARRAY_TYPE",
+ 145: "FIXED_FLOAT32_ARRAY_TYPE",
+ 146: "FIXED_FLOAT64_ARRAY_TYPE",
+ 147: "FIXED_UINT8_CLAMPED_ARRAY_TYPE",
+ 148: "FIXED_DOUBLE_ARRAY_TYPE",
+ 149: "FILLER_TYPE",
+ 150: "ACCESSOR_INFO_TYPE",
+ 151: "ACCESSOR_PAIR_TYPE",
+ 152: "ACCESS_CHECK_INFO_TYPE",
+ 153: "INTERCEPTOR_INFO_TYPE",
+ 154: "FUNCTION_TEMPLATE_INFO_TYPE",
+ 155: "OBJECT_TEMPLATE_INFO_TYPE",
+ 156: "ALLOCATION_SITE_TYPE",
+ 157: "ALLOCATION_MEMENTO_TYPE",
+ 158: "SCRIPT_TYPE",
+ 159: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+ 160: "PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE",
+ 161: "PROMISE_REACTION_JOB_INFO_TYPE",
+ 162: "PROMISE_CAPABILITY_TYPE",
+ 163: "DEBUG_INFO_TYPE",
+ 164: "STACK_FRAME_INFO_TYPE",
+ 165: "PROTOTYPE_INFO_TYPE",
+ 166: "TUPLE2_TYPE",
+ 167: "TUPLE3_TYPE",
+ 168: "CONTEXT_EXTENSION_TYPE",
+ 169: "MODULE_TYPE",
+ 170: "MODULE_INFO_ENTRY_TYPE",
+ 171: "ASYNC_GENERATOR_REQUEST_TYPE",
+ 172: "FIXED_ARRAY_TYPE",
+ 173: "HASH_TABLE_TYPE",
+ 174: "FEEDBACK_VECTOR_TYPE",
+ 175: "TRANSITION_ARRAY_TYPE",
+ 176: "PROPERTY_ARRAY_TYPE",
+ 177: "SHARED_FUNCTION_INFO_TYPE",
+ 178: "CELL_TYPE",
+ 179: "WEAK_CELL_TYPE",
+ 180: "PROPERTY_CELL_TYPE",
+ 181: "SMALL_ORDERED_HASH_MAP_TYPE",
+ 182: "SMALL_ORDERED_HASH_SET_TYPE",
+ 183: "JS_PROXY_TYPE",
+ 184: "JS_GLOBAL_OBJECT_TYPE",
+ 185: "JS_GLOBAL_PROXY_TYPE",
+ 186: "JS_MODULE_NAMESPACE_TYPE",
+ 187: "JS_SPECIAL_API_OBJECT_TYPE",
+ 188: "JS_VALUE_TYPE",
+ 189: "JS_MESSAGE_OBJECT_TYPE",
+ 190: "JS_DATE_TYPE",
+ 191: "JS_API_OBJECT_TYPE",
+ 192: "JS_OBJECT_TYPE",
+ 193: "JS_ARGUMENTS_TYPE",
+ 194: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+ 195: "JS_GENERATOR_OBJECT_TYPE",
+ 196: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
+ 197: "JS_ARRAY_TYPE",
+ 198: "JS_ARRAY_BUFFER_TYPE",
+ 199: "JS_TYPED_ARRAY_TYPE",
+ 200: "JS_DATA_VIEW_TYPE",
+ 201: "JS_SET_TYPE",
+ 202: "JS_MAP_TYPE",
+ 203: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
+ 204: "JS_SET_VALUE_ITERATOR_TYPE",
+ 205: "JS_MAP_KEY_ITERATOR_TYPE",
+ 206: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
+ 207: "JS_MAP_VALUE_ITERATOR_TYPE",
+ 208: "JS_WEAK_MAP_TYPE",
+ 209: "JS_WEAK_SET_TYPE",
+ 210: "JS_PROMISE_TYPE",
+ 211: "JS_REGEXP_TYPE",
+ 212: "JS_ERROR_TYPE",
+ 213: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
+ 214: "JS_STRING_ITERATOR_TYPE",
+ 215: "JS_TYPED_ARRAY_KEY_ITERATOR_TYPE",
+ 216: "JS_FAST_ARRAY_KEY_ITERATOR_TYPE",
+ 217: "JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE",
+ 218: "JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 219: "JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 220: "JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 221: "JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 222: "JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 223: "JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 224: "JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 225: "JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 226: "JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 227: "JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 228: "JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 229: "JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 230: "JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 231: "JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 232: "JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 233: "JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 234: "JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE",
+ 235: "JS_INT8_ARRAY_VALUE_ITERATOR_TYPE",
+ 236: "JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE",
+ 237: "JS_INT16_ARRAY_VALUE_ITERATOR_TYPE",
+ 238: "JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE",
+ 239: "JS_INT32_ARRAY_VALUE_ITERATOR_TYPE",
+ 240: "JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE",
+ 241: "JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE",
+ 242: "JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE",
+ 243: "JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE",
+ 244: "JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE",
+ 245: "JS_FAST_ARRAY_VALUE_ITERATOR_TYPE",
+ 246: "JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE",
+ 247: "JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
+ 248: "JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
+ 249: "JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE",
+ 250: "WASM_INSTANCE_TYPE",
+ 251: "WASM_MEMORY_TYPE",
+ 252: "WASM_MODULE_TYPE",
+ 253: "WASM_TABLE_TYPE",
+ 254: "JS_BOUND_FUNCTION_TYPE",
+ 255: "JS_FUNCTION_TYPE",
}
# List of known V8 maps.
KNOWN_MAPS = {
- 0x02201: (137, "FreeSpaceMap"),
- 0x02259: (131, "MetaMap"),
- 0x022b1: (130, "NullMap"),
- 0x02309: (170, "FixedArrayMap"),
- 0x02361: (148, "OnePointerFillerMap"),
- 0x023b9: (148, "TwoPointerFillerMap"),
- 0x02411: (130, "UninitializedMap"),
- 0x02469: (8, "OneByteInternalizedStringMap"),
- 0x024c1: (130, "UndefinedMap"),
- 0x02519: (129, "HeapNumberMap"),
- 0x02571: (130, "TheHoleMap"),
- 0x025c9: (130, "BooleanMap"),
- 0x02621: (135, "ByteArrayMap"),
- 0x02679: (170, "FixedCOWArrayMap"),
- 0x026d1: (171, "HashTableMap"),
- 0x02729: (128, "SymbolMap"),
- 0x02781: (72, "OneByteStringMap"),
- 0x027d9: (170, "ScopeInfoMap"),
- 0x02831: (175, "SharedFunctionInfoMap"),
- 0x02889: (132, "CodeMap"),
- 0x028e1: (170, "FunctionContextMap"),
- 0x02939: (176, "CellMap"),
- 0x02991: (177, "WeakCellMap"),
- 0x029e9: (178, "GlobalPropertyCellMap"),
- 0x02a41: (134, "ForeignMap"),
- 0x02a99: (173, "TransitionArrayMap"),
- 0x02af1: (130, "ArgumentsMarkerMap"),
- 0x02b49: (130, "ExceptionMap"),
- 0x02ba1: (130, "TerminationExceptionMap"),
- 0x02bf9: (130, "OptimizedOutMap"),
- 0x02c51: (130, "StaleRegisterMap"),
- 0x02ca9: (170, "NativeContextMap"),
- 0x02d01: (170, "ModuleContextMap"),
- 0x02d59: (170, "EvalContextMap"),
- 0x02db1: (170, "ScriptContextMap"),
- 0x02e09: (170, "BlockContextMap"),
- 0x02e61: (170, "CatchContextMap"),
- 0x02eb9: (170, "WithContextMap"),
- 0x02f11: (147, "FixedDoubleArrayMap"),
- 0x02f69: (133, "MutableHeapNumberMap"),
- 0x02fc1: (171, "OrderedHashTableMap"),
- 0x03019: (170, "SloppyArgumentsElementsMap"),
- 0x03071: (179, "SmallOrderedHashMapMap"),
- 0x030c9: (180, "SmallOrderedHashSetMap"),
- 0x03121: (187, "JSMessageObjectMap"),
- 0x03179: (136, "BytecodeArrayMap"),
- 0x031d1: (170, "ModuleInfoMap"),
- 0x03229: (176, "NoClosuresCellMap"),
- 0x03281: (176, "OneClosureCellMap"),
- 0x032d9: (176, "ManyClosuresCellMap"),
- 0x03331: (174, "PropertyArrayMap"),
- 0x03389: (64, "StringMap"),
- 0x033e1: (73, "ConsOneByteStringMap"),
- 0x03439: (65, "ConsStringMap"),
- 0x03491: (77, "ThinOneByteStringMap"),
- 0x034e9: (69, "ThinStringMap"),
- 0x03541: (67, "SlicedStringMap"),
- 0x03599: (75, "SlicedOneByteStringMap"),
- 0x035f1: (66, "ExternalStringMap"),
- 0x03649: (82, "ExternalStringWithOneByteDataMap"),
- 0x036a1: (74, "ExternalOneByteStringMap"),
- 0x036f9: (98, "ShortExternalStringMap"),
- 0x03751: (114, "ShortExternalStringWithOneByteDataMap"),
- 0x037a9: (0, "InternalizedStringMap"),
- 0x03801: (2, "ExternalInternalizedStringMap"),
- 0x03859: (18, "ExternalInternalizedStringWithOneByteDataMap"),
- 0x038b1: (10, "ExternalOneByteInternalizedStringMap"),
- 0x03909: (34, "ShortExternalInternalizedStringMap"),
- 0x03961: (50, "ShortExternalInternalizedStringWithOneByteDataMap"),
- 0x039b9: (42, "ShortExternalOneByteInternalizedStringMap"),
- 0x03a11: (106, "ShortExternalOneByteStringMap"),
- 0x03a69: (139, "FixedUint8ArrayMap"),
- 0x03ac1: (138, "FixedInt8ArrayMap"),
- 0x03b19: (141, "FixedUint16ArrayMap"),
- 0x03b71: (140, "FixedInt16ArrayMap"),
- 0x03bc9: (143, "FixedUint32ArrayMap"),
- 0x03c21: (142, "FixedInt32ArrayMap"),
- 0x03c79: (144, "FixedFloat32ArrayMap"),
- 0x03cd1: (145, "FixedFloat64ArrayMap"),
- 0x03d29: (146, "FixedUint8ClampedArrayMap"),
- 0x03d81: (157, "ScriptMap"),
- 0x03dd9: (172, "FeedbackVectorMap"),
- 0x03e31: (170, "DebugEvaluateContextMap"),
- 0x03e89: (170, "ScriptContextTableMap"),
- 0x03ee1: (171, "UnseededNumberDictionaryMap"),
- 0x03f39: (190, "ExternalMap"),
- 0x03f91: (106, "NativeSourceStringMap"),
- 0x03fe9: (152, "InterceptorInfoMap"),
- 0x04041: (208, "JSPromiseCapabilityMap"),
- 0x04099: (149, "AccessorInfoMap"),
- 0x040f1: (150, "AccessorPairMap"),
- 0x04149: (151, "AccessCheckInfoMap"),
- 0x041a1: (153, "FunctionTemplateInfoMap"),
- 0x041f9: (154, "ObjectTemplateInfoMap"),
- 0x04251: (155, "AllocationSiteMap"),
- 0x042a9: (156, "AllocationMementoMap"),
- 0x04301: (158, "AliasedArgumentsEntryMap"),
- 0x04359: (159, "PromiseResolveThenableJobInfoMap"),
- 0x043b1: (160, "PromiseReactionJobInfoMap"),
- 0x04409: (161, "DebugInfoMap"),
- 0x04461: (162, "StackFrameInfoMap"),
- 0x044b9: (163, "PrototypeInfoMap"),
- 0x04511: (164, "Tuple2Map"),
- 0x04569: (165, "Tuple3Map"),
- 0x045c1: (166, "ContextExtensionMap"),
- 0x04619: (167, "ModuleMap"),
- 0x04671: (168, "ModuleInfoEntryMap"),
- 0x046c9: (169, "AsyncGeneratorRequestMap"),
+ 0x02201: (138, "FreeSpaceMap"),
+ 0x02251: (132, "MetaMap"),
+ 0x022a1: (131, "NullMap"),
+ 0x022f1: (172, "FixedArrayMap"),
+ 0x02341: (149, "OnePointerFillerMap"),
+ 0x02391: (149, "TwoPointerFillerMap"),
+ 0x023e1: (131, "UninitializedMap"),
+ 0x02431: (8, "OneByteInternalizedStringMap"),
+ 0x02481: (131, "UndefinedMap"),
+ 0x024d1: (129, "HeapNumberMap"),
+ 0x02521: (131, "TheHoleMap"),
+ 0x02571: (131, "BooleanMap"),
+ 0x025c1: (136, "ByteArrayMap"),
+ 0x02611: (172, "FixedCOWArrayMap"),
+ 0x02661: (173, "HashTableMap"),
+ 0x026b1: (128, "SymbolMap"),
+ 0x02701: (72, "OneByteStringMap"),
+ 0x02751: (172, "ScopeInfoMap"),
+ 0x027a1: (177, "SharedFunctionInfoMap"),
+ 0x027f1: (133, "CodeMap"),
+ 0x02841: (172, "FunctionContextMap"),
+ 0x02891: (178, "CellMap"),
+ 0x028e1: (179, "WeakCellMap"),
+ 0x02931: (180, "GlobalPropertyCellMap"),
+ 0x02981: (135, "ForeignMap"),
+ 0x029d1: (175, "TransitionArrayMap"),
+ 0x02a21: (131, "ArgumentsMarkerMap"),
+ 0x02a71: (131, "ExceptionMap"),
+ 0x02ac1: (131, "TerminationExceptionMap"),
+ 0x02b11: (131, "OptimizedOutMap"),
+ 0x02b61: (131, "StaleRegisterMap"),
+ 0x02bb1: (172, "NativeContextMap"),
+ 0x02c01: (172, "ModuleContextMap"),
+ 0x02c51: (172, "EvalContextMap"),
+ 0x02ca1: (172, "ScriptContextMap"),
+ 0x02cf1: (172, "BlockContextMap"),
+ 0x02d41: (172, "CatchContextMap"),
+ 0x02d91: (172, "WithContextMap"),
+ 0x02de1: (148, "FixedDoubleArrayMap"),
+ 0x02e31: (134, "MutableHeapNumberMap"),
+ 0x02e81: (173, "OrderedHashTableMap"),
+ 0x02ed1: (172, "SloppyArgumentsElementsMap"),
+ 0x02f21: (181, "SmallOrderedHashMapMap"),
+ 0x02f71: (182, "SmallOrderedHashSetMap"),
+ 0x02fc1: (189, "JSMessageObjectMap"),
+ 0x03011: (137, "BytecodeArrayMap"),
+ 0x03061: (172, "ModuleInfoMap"),
+ 0x030b1: (178, "NoClosuresCellMap"),
+ 0x03101: (178, "OneClosureCellMap"),
+ 0x03151: (178, "ManyClosuresCellMap"),
+ 0x031a1: (176, "PropertyArrayMap"),
+ 0x031f1: (130, "BigIntMap"),
+ 0x03241: (64, "StringMap"),
+ 0x03291: (73, "ConsOneByteStringMap"),
+ 0x032e1: (65, "ConsStringMap"),
+ 0x03331: (77, "ThinOneByteStringMap"),
+ 0x03381: (69, "ThinStringMap"),
+ 0x033d1: (67, "SlicedStringMap"),
+ 0x03421: (75, "SlicedOneByteStringMap"),
+ 0x03471: (66, "ExternalStringMap"),
+ 0x034c1: (82, "ExternalStringWithOneByteDataMap"),
+ 0x03511: (74, "ExternalOneByteStringMap"),
+ 0x03561: (98, "ShortExternalStringMap"),
+ 0x035b1: (114, "ShortExternalStringWithOneByteDataMap"),
+ 0x03601: (0, "InternalizedStringMap"),
+ 0x03651: (2, "ExternalInternalizedStringMap"),
+ 0x036a1: (18, "ExternalInternalizedStringWithOneByteDataMap"),
+ 0x036f1: (10, "ExternalOneByteInternalizedStringMap"),
+ 0x03741: (34, "ShortExternalInternalizedStringMap"),
+ 0x03791: (50, "ShortExternalInternalizedStringWithOneByteDataMap"),
+ 0x037e1: (42, "ShortExternalOneByteInternalizedStringMap"),
+ 0x03831: (106, "ShortExternalOneByteStringMap"),
+ 0x03881: (140, "FixedUint8ArrayMap"),
+ 0x038d1: (139, "FixedInt8ArrayMap"),
+ 0x03921: (142, "FixedUint16ArrayMap"),
+ 0x03971: (141, "FixedInt16ArrayMap"),
+ 0x039c1: (144, "FixedUint32ArrayMap"),
+ 0x03a11: (143, "FixedInt32ArrayMap"),
+ 0x03a61: (145, "FixedFloat32ArrayMap"),
+ 0x03ab1: (146, "FixedFloat64ArrayMap"),
+ 0x03b01: (147, "FixedUint8ClampedArrayMap"),
+ 0x03b51: (158, "ScriptMap"),
+ 0x03ba1: (174, "FeedbackVectorMap"),
+ 0x03bf1: (172, "DebugEvaluateContextMap"),
+ 0x03c41: (172, "ScriptContextTableMap"),
+ 0x03c91: (173, "UnseededNumberDictionaryMap"),
+ 0x03ce1: (192, "ExternalMap"),
+ 0x03d31: (106, "NativeSourceStringMap"),
+ 0x03d81: (166, "Tuple2Map"),
+ 0x03dd1: (153, "InterceptorInfoMap"),
+ 0x03e21: (150, "AccessorInfoMap"),
+ 0x03e71: (151, "AccessorPairMap"),
+ 0x03ec1: (152, "AccessCheckInfoMap"),
+ 0x03f11: (154, "FunctionTemplateInfoMap"),
+ 0x03f61: (155, "ObjectTemplateInfoMap"),
+ 0x03fb1: (156, "AllocationSiteMap"),
+ 0x04001: (157, "AllocationMementoMap"),
+ 0x04051: (159, "AliasedArgumentsEntryMap"),
+ 0x040a1: (160, "PromiseResolveThenableJobInfoMap"),
+ 0x040f1: (161, "PromiseReactionJobInfoMap"),
+ 0x04141: (162, "PromiseCapabilityMap"),
+ 0x04191: (163, "DebugInfoMap"),
+ 0x041e1: (164, "StackFrameInfoMap"),
+ 0x04231: (165, "PrototypeInfoMap"),
+ 0x04281: (167, "Tuple3Map"),
+ 0x042d1: (168, "ContextExtensionMap"),
+ 0x04321: (169, "ModuleMap"),
+ 0x04371: (170, "ModuleInfoEntryMap"),
+ 0x043c1: (171, "AsyncGeneratorRequestMap"),
}
# List of known V8 objects.
KNOWN_OBJECTS = {
("OLD_SPACE", 0x02201): "NullValue",
("OLD_SPACE", 0x02231): "EmptyDescriptorArray",
- ("OLD_SPACE", 0x02241): "EmptyFixedArray",
- ("OLD_SPACE", 0x02251): "UninitializedValue",
- ("OLD_SPACE", 0x022d1): "UndefinedValue",
- ("OLD_SPACE", 0x02301): "NanValue",
- ("OLD_SPACE", 0x02311): "TheHoleValue",
- ("OLD_SPACE", 0x02361): "HoleNanValue",
- ("OLD_SPACE", 0x02371): "TrueValue",
- ("OLD_SPACE", 0x023e1): "FalseValue",
- ("OLD_SPACE", 0x02431): "empty_string",
- ("OLD_SPACE", 0x02449): "EmptyScopeInfo",
- ("OLD_SPACE", 0x02459): "ArgumentsMarker",
- ("OLD_SPACE", 0x024b1): "Exception",
- ("OLD_SPACE", 0x02509): "TerminationException",
- ("OLD_SPACE", 0x02569): "OptimizedOut",
- ("OLD_SPACE", 0x025c1): "StaleRegister",
- ("OLD_SPACE", 0x02619): "EmptyByteArray",
- ("OLD_SPACE", 0x02629): "EmptyFixedUint8Array",
- ("OLD_SPACE", 0x02649): "EmptyFixedInt8Array",
- ("OLD_SPACE", 0x02669): "EmptyFixedUint16Array",
- ("OLD_SPACE", 0x02689): "EmptyFixedInt16Array",
- ("OLD_SPACE", 0x026a9): "EmptyFixedUint32Array",
- ("OLD_SPACE", 0x026c9): "EmptyFixedInt32Array",
- ("OLD_SPACE", 0x026e9): "EmptyFixedFloat32Array",
+ ("OLD_SPACE", 0x02251): "EmptyFixedArray",
+ ("OLD_SPACE", 0x02261): "UninitializedValue",
+ ("OLD_SPACE", 0x022e1): "UndefinedValue",
+ ("OLD_SPACE", 0x02311): "NanValue",
+ ("OLD_SPACE", 0x02321): "TheHoleValue",
+ ("OLD_SPACE", 0x02371): "HoleNanValue",
+ ("OLD_SPACE", 0x02381): "TrueValue",
+ ("OLD_SPACE", 0x023f1): "FalseValue",
+ ("OLD_SPACE", 0x02441): "empty_string",
+ ("OLD_SPACE", 0x02459): "EmptyScopeInfo",
+ ("OLD_SPACE", 0x02469): "ArgumentsMarker",
+ ("OLD_SPACE", 0x024c1): "Exception",
+ ("OLD_SPACE", 0x02519): "TerminationException",
+ ("OLD_SPACE", 0x02579): "OptimizedOut",
+ ("OLD_SPACE", 0x025d1): "StaleRegister",
+ ("OLD_SPACE", 0x02629): "EmptyByteArray",
+ ("OLD_SPACE", 0x02639): "EmptyFixedUint8Array",
+ ("OLD_SPACE", 0x02659): "EmptyFixedInt8Array",
+ ("OLD_SPACE", 0x02679): "EmptyFixedUint16Array",
+ ("OLD_SPACE", 0x02699): "EmptyFixedInt16Array",
+ ("OLD_SPACE", 0x026b9): "EmptyFixedUint32Array",
+ ("OLD_SPACE", 0x026d9): "EmptyFixedInt32Array",
+ ("OLD_SPACE", 0x026f9): "EmptyFixedFloat32Array",
("OLD_SPACE", 0x02719): "EmptyFixedFloat64Array",
("OLD_SPACE", 0x02739): "EmptyFixedUint8ClampedArray",
("OLD_SPACE", 0x02759): "EmptyScript",
@@ -322,7 +324,6 @@ FRAME_MARKERS = (
"ENTRY",
"CONSTRUCT_ENTRY",
"EXIT",
- "JAVA_SCRIPT",
"OPTIMIZED",
"WASM_COMPILED",
"WASM_TO_JS",
diff --git a/deps/v8/tools/wasm/update-wasm-spec-tests.sh b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
index 2a144b2d5e..ffdef0d820 100755
--- a/deps/v8/tools/wasm/update-wasm-spec-tests.sh
+++ b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
@@ -16,7 +16,7 @@ rm -rf ./test/wasm-spec-tests/tests/*
./tools/dev/gm.py x64.release d8
cd ${V8_DIR}/test/wasm-js/interpreter
-make
+make clean all
cd ${V8_DIR}/test/wasm-js/test/core
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index 5762ef8f64..37b1d82553 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -6,6 +6,6 @@ A Smi balks into a war and says:
"I'm so deoptimized today!"
The doubles heard this and started to unbox.
The Smi looked at them when a crazy v8-autoroll account showed up......
-The autoroller bought a round of Himbeerbrause. Suddenly .....
-The bartender starts to shake the bottles............
+The autoroller bought a round of Himbeerbrause. Suddenly.....
+The bartender starts to shake the bottles..........
.